// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/vmscan.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Swap reorganised 29.12.95, Stephen Tweedie.
 *  kswapd added: 7.1.96  sct
 *  Removed kswapd_ctl limits, and swap out as many pages as needed
 *  to bring the system back to freepages.high: 2.4.97, Rik van Riel.
 *  Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com).
 *  Multiqueue VM started 5.8.00, Rik van Riel.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/vmpressure.h>
#include <linux/vmstat.h>
#include <linux/file.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>	/* for try_to_release_page(),
					buffer_heads_over_limit */
#include <linux/mm_inline.h>
#include <linux/backing-dev.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/compaction.h>
#include <linux/notifier.h>
#include <linux/rwsem.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/memcontrol.h>
#include <linux/delayacct.h>
#include <linux/sysctl.h>
#include <linux/oom.h>
#include <linux/pagevec.h>
#include <linux/prefetch.h>
#include <linux/printk.h>
#include <linux/dax.h>
#include <linux/psi.h>

#include <asm/tlbflush.h>
#include <asm/div64.h>

#include <linux/swapops.h>
#include <linux/balloon_compaction.h>

#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/vmscan.h>

#undef CREATE_TRACE_POINTS
#include <trace/hooks/vmscan.h>

struct scan_control {
	/* How many pages shrink_list() should reclaim */
	unsigned long nr_to_reclaim;

	/*
	 * Nodemask of nodes allowed by the caller. If NULL, all nodes
	 * are scanned.
	 */
	nodemask_t *nodemask;

	/*
	 * The memory cgroup that hit its limit and as a result is the
	 * primary target of this reclaim invocation.
	 */
	struct mem_cgroup *target_mem_cgroup;

	/*
	 * Scan pressure balancing between anon and file LRUs
	 */
	unsigned long anon_cost;
	unsigned long file_cost;

	/* Can active pages be deactivated as part of reclaim? */
#define DEACTIVATE_ANON 1
#define DEACTIVATE_FILE 2
	unsigned int may_deactivate:2;
	unsigned int force_deactivate:1;
	unsigned int skipped_deactivate:1;

	/* Writepage batching in laptop mode; RECLAIM_WRITE */
	unsigned int may_writepage:1;

	/* Can mapped pages be reclaimed? */
	unsigned int may_unmap:1;

	/* Can pages be swapped as part of reclaim? */
	unsigned int may_swap:1;

	/*
	 * Cgroups are not reclaimed below their configured memory.low,
	 * unless we threaten to OOM. If any cgroups are skipped due to
	 * memory.low and nothing was reclaimed, go back for memory.low.
	 */
	unsigned int memcg_low_reclaim:1;
	unsigned int memcg_low_skipped:1;

	unsigned int hibernation_mode:1;

	/* One of the zones is ready for compaction */
	unsigned int compaction_ready:1;

	/* There is easily reclaimable cold cache in the current node */
	unsigned int cache_trim_mode:1;

	/* The file pages on the current node are dangerously low */
	unsigned int file_is_tiny:1;

	/* Allocation order */
	s8 order;

	/* Scan (total_size >> priority) pages at once */
	s8 priority;

	/* The highest zone to isolate pages for reclaim from */
	s8 reclaim_idx;

	/* This context's GFP mask */
	gfp_t gfp_mask;

	/* Incremented by the number of inactive pages that were scanned */
	unsigned long nr_scanned;

	/* Number of pages freed so far during a call to shrink_zones() */
	unsigned long nr_reclaimed;

	struct {
		unsigned int dirty;
		unsigned int unqueued_dirty;
		unsigned int congested;
		unsigned int writeback;
		unsigned int immediate;
		unsigned int file_taken;
		unsigned int taken;
	} nr;

	/* for recording the reclaimed slab by now */
	struct reclaim_state reclaim_state;
};
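
/*
 * A scan_control is typically built on the reclaiming task's stack and
 * passed down the whole reclaim call chain; it is not shared between
 * concurrent reclaimers.
 */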

#ifdef ARCH_HAS_PREFETCHW
#define prefetchw_prev_lru_page(_page, _base, _field)			\
	do {								\
		if ((_page)->lru.prev != _base) {			\
			struct page *prev;				\
									\
			prev = lru_to_page(&(_page->lru));		\
			prefetchw(&prev->_field);			\
		}							\
	} while (0)
#else
#define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
#endif

/*
 * From 0 .. 200. Higher means more swappy; tunable via
 * /proc/sys/vm/swappiness.
 */
int vm_swappiness = 60;

#define DEF_KSWAPD_THREADS_PER_NODE 1
static int kswapd_threads = DEF_KSWAPD_THREADS_PER_NODE;
static int __init kswapd_per_node_setup(char *str)
{
	int tmp;

	if (kstrtoint(str, 0, &tmp) < 0)
		return 0;

	if (tmp > MAX_KSWAPD_THREADS || tmp <= 0)
		return 0;

	kswapd_threads = tmp;
	return 1;
}
__setup("kswapd_per_node=", kswapd_per_node_setup);
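/*
 * Illustration: booting with "kswapd_per_node=4" requests four kswapd
 * threads per NUMA node; values outside 1..MAX_KSWAPD_THREADS are ignored.
 */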

static void set_task_reclaim_state(struct task_struct *task,
				   struct reclaim_state *rs)
{
	/* Check for an overwrite */
	WARN_ON_ONCE(rs && task->reclaim_state);

	/* Check for the nulling of an already-nulled member */
	WARN_ON_ONCE(!rs && !task->reclaim_state);

	task->reclaim_state = rs;
}

static LIST_HEAD(shrinker_list);
static DECLARE_RWSEM(shrinker_rwsem);

#ifdef CONFIG_MEMCG
/*
 * We allow subsystems to populate their shrinker-related
 * LRU lists before register_shrinker_prepared() is called
 * for the shrinker, since we don't want to impose
 * restrictions on their internal registration order.
 * In this case shrink_slab_memcg() may find the corresponding
 * bit set in the shrinker map.
 *
 * This value is used by shrink_slab_memcg() to detect shrinkers
 * that are still registering and to skip do_shrink_slab() calls
 * for them.
 */
#define SHRINKER_REGISTERING ((struct shrinker *)~0UL)

static DEFINE_IDR(shrinker_idr);
static int shrinker_nr_max;

static int prealloc_memcg_shrinker(struct shrinker *shrinker)
{
	int id, ret = -ENOMEM;

	down_write(&shrinker_rwsem);
	/* idr_alloc() may recurse into shrinkers, hence shrink_slab() must use down_read_trylock() */
	id = idr_alloc(&shrinker_idr, SHRINKER_REGISTERING, 0, 0, GFP_KERNEL);
	if (id < 0)
		goto unlock;

	if (id >= shrinker_nr_max) {
		if (memcg_expand_shrinker_maps(id)) {
			idr_remove(&shrinker_idr, id);
			goto unlock;
		}

		shrinker_nr_max = id + 1;
	}
	shrinker->id = id;
	ret = 0;
unlock:
	up_write(&shrinker_rwsem);
	return ret;
}

static void unregister_memcg_shrinker(struct shrinker *shrinker)
{
	int id = shrinker->id;

	BUG_ON(id < 0);

	down_write(&shrinker_rwsem);
	idr_remove(&shrinker_idr, id);
	up_write(&shrinker_rwsem);
}

static bool cgroup_reclaim(struct scan_control *sc)
{
	return sc->target_mem_cgroup;
}

/**
 * writeback_throttling_sane - is the usual dirty throttling mechanism available?
 * @sc: scan_control in question
 *
 * The normal page dirty throttling mechanism in balance_dirty_pages() is
 * completely broken with the legacy memcg and direct stalling in
 * shrink_page_list() is used for throttling instead, which lacks all the
 * niceties such as fairness, adaptive pausing, bandwidth proportional
 * allocation and configurability.
 *
 * This function tests whether the vmscan currently in progress can assume
 * that the normal dirty throttling mechanism is operational.
 */
static bool writeback_throttling_sane(struct scan_control *sc)
{
	if (!cgroup_reclaim(sc))
		return true;
#ifdef CONFIG_CGROUP_WRITEBACK
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return true;
#endif
	return false;
}
#else
static int prealloc_memcg_shrinker(struct shrinker *shrinker)
{
	return 0;
}

static void unregister_memcg_shrinker(struct shrinker *shrinker)
{
}

static bool cgroup_reclaim(struct scan_control *sc)
{
	return false;
}

static bool writeback_throttling_sane(struct scan_control *sc)
{
	return true;
}
#endif

/*
 * This misses isolated pages which are not accounted for to save counters.
 * As the data only determines if reclaim or compaction continues, it is
 * not expected that isolated pages will be a dominating factor.
 */
unsigned long zone_reclaimable_pages(struct zone *zone)
{
	unsigned long nr;

	nr = zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_FILE) +
		zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_FILE);
	if (get_nr_swap_pages() > 0)
		nr += zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_ANON) +
			zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_ANON);

	return nr;
}

/**
 * lruvec_lru_size - Returns the number of pages on the given LRU list.
 * @lruvec: lru vector
 * @lru: lru to use
 * @zone_idx: zones to consider (use MAX_NR_ZONES for the whole LRU list)
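 *
 * Return: the number of pages on @lru in zones 0 through @zone_idx inclusive.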
 */
unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx)
{
	unsigned long size = 0;
	int zid;

	for (zid = 0; zid <= zone_idx && zid < MAX_NR_ZONES; zid++) {
		struct zone *zone = &lruvec_pgdat(lruvec)->node_zones[zid];

		if (!managed_zone(zone))
			continue;

		if (!mem_cgroup_disabled())
			size += mem_cgroup_get_zone_lru_size(lruvec, lru, zid);
		else
			size += zone_page_state(zone, NR_ZONE_LRU_BASE + lru);
	}
	return size;
}

Glauber Costa1d3d4432013-08-28 10:18:04 +1000355 * Add a shrinker callback to be called from the vm.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700356 */
Tetsuo Handa8e049442018-04-04 19:53:07 +0900357int prealloc_shrinker(struct shrinker *shrinker)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700358{
Alexey Dobriyanb9726c22019-03-05 15:48:26 -0800359 unsigned int size = sizeof(*shrinker->nr_deferred);
Glauber Costa1d3d4432013-08-28 10:18:04 +1000360
Glauber Costa1d3d4432013-08-28 10:18:04 +1000361 if (shrinker->flags & SHRINKER_NUMA_AWARE)
362 size *= nr_node_ids;
363
364 shrinker->nr_deferred = kzalloc(size, GFP_KERNEL);
365 if (!shrinker->nr_deferred)
366 return -ENOMEM;
Kirill Tkhaib4c2b232018-08-17 15:47:29 -0700367
368 if (shrinker->flags & SHRINKER_MEMCG_AWARE) {
369 if (prealloc_memcg_shrinker(shrinker))
370 goto free_deferred;
371 }
372
Tetsuo Handa8e049442018-04-04 19:53:07 +0900373 return 0;
Kirill Tkhaib4c2b232018-08-17 15:47:29 -0700374
375free_deferred:
376 kfree(shrinker->nr_deferred);
377 shrinker->nr_deferred = NULL;
378 return -ENOMEM;
Tetsuo Handa8e049442018-04-04 19:53:07 +0900379}
Glauber Costa1d3d4432013-08-28 10:18:04 +1000380
Tetsuo Handa8e049442018-04-04 19:53:07 +0900381void free_prealloced_shrinker(struct shrinker *shrinker)
382{
Kirill Tkhaib4c2b232018-08-17 15:47:29 -0700383 if (!shrinker->nr_deferred)
384 return;
385
386 if (shrinker->flags & SHRINKER_MEMCG_AWARE)
387 unregister_memcg_shrinker(shrinker);
388
Tetsuo Handa8e049442018-04-04 19:53:07 +0900389 kfree(shrinker->nr_deferred);
390 shrinker->nr_deferred = NULL;
391}
392
393void register_shrinker_prepared(struct shrinker *shrinker)
394{
Rusty Russell8e1f9362007-07-17 04:03:17 -0700395 down_write(&shrinker_rwsem);
396 list_add_tail(&shrinker->list, &shrinker_list);
Yang Shi42a9a532019-12-17 20:51:52 -0800397#ifdef CONFIG_MEMCG
Kirill Tkhai8df4a442018-08-21 21:51:49 -0700398 if (shrinker->flags & SHRINKER_MEMCG_AWARE)
399 idr_replace(&shrinker_idr, shrinker, shrinker->id);
Kirill Tkhai7e010df2018-08-17 15:48:34 -0700400#endif
Rusty Russell8e1f9362007-07-17 04:03:17 -0700401 up_write(&shrinker_rwsem);
Tetsuo Handa8e049442018-04-04 19:53:07 +0900402}
403
404int register_shrinker(struct shrinker *shrinker)
405{
406 int err = prealloc_shrinker(shrinker);
407
408 if (err)
409 return err;
410 register_shrinker_prepared(shrinker);
Glauber Costa1d3d4432013-08-28 10:18:04 +1000411 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700412}
Rusty Russell8e1f9362007-07-17 04:03:17 -0700413EXPORT_SYMBOL(register_shrinker);
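
/*
 * Minimal registration sketch (illustrative only; the demo_* names are
 * hypothetical, not part of this file):
 *
 *	static unsigned long demo_count(struct shrinker *s,
 *					struct shrink_control *sc)
 *	{
 *		return demo_nr_cached();	// how many objects are freeable
 *	}
 *
 *	static unsigned long demo_scan(struct shrinker *s,
 *				       struct shrink_control *sc)
 *	{
 *		return demo_free(sc->nr_to_scan); // number actually freed
 *	}
 *
 *	static struct shrinker demo_shrinker = {
 *		.count_objects	= demo_count,
 *		.scan_objects	= demo_scan,
 *		.seeks		= DEFAULT_SEEKS,
 *	};
 *
 *	register_shrinker(&demo_shrinker);
 */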

/*
 * Remove one
 */
void unregister_shrinker(struct shrinker *shrinker)
{
	if (!shrinker->nr_deferred)
		return;
	if (shrinker->flags & SHRINKER_MEMCG_AWARE)
		unregister_memcg_shrinker(shrinker);
	down_write(&shrinker_rwsem);
	list_del(&shrinker->list);
	up_write(&shrinker_rwsem);
	kfree(shrinker->nr_deferred);
	shrinker->nr_deferred = NULL;
}
EXPORT_SYMBOL(unregister_shrinker);

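/* Default chunk passed to ->scan_objects() when a shrinker sets no ->batch */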
#define SHRINK_BATCH 128

static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
				    struct shrinker *shrinker, int priority)
{
	unsigned long freed = 0;
	unsigned long long delta;
	long total_scan;
	long freeable;
	long nr;
	long new_nr;
	int nid = shrinkctl->nid;
	long batch_size = shrinker->batch ? shrinker->batch
					  : SHRINK_BATCH;
	long scanned = 0, next_deferred;

	trace_android_vh_do_shrink_slab(shrinker, shrinkctl, priority);

	if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
		nid = 0;

	freeable = shrinker->count_objects(shrinker, shrinkctl);
	if (freeable == 0 || freeable == SHRINK_EMPTY)
		return freeable;

	/*
	 * copy the current shrinker scan count into a local variable
	 * and zero it so that other concurrent shrinker invocations
	 * don't also do this scanning work.
	 */
	nr = atomic_long_xchg(&shrinker->nr_deferred[nid], 0);

	total_scan = nr;
	if (shrinker->seeks) {
		delta = freeable >> priority;
		delta *= 4;
		do_div(delta, shrinker->seeks);
	} else {
		/*
		 * These objects don't require any IO to create. Trim
		 * them aggressively under memory pressure to keep
		 * them from causing refetches in the IO caches.
		 */
		delta = freeable / 2;
	}
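	/*
	 * I.e. delta = (freeable >> priority) * 4 / seeks; with the common
	 * DEFAULT_SEEKS (2) this works out to (2 * freeable) >> priority.
	 */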

	total_scan += delta;
	if (total_scan < 0) {
		pr_err("shrink_slab: %pS negative objects to delete nr=%ld\n",
		       shrinker->scan_objects, total_scan);
		total_scan = freeable;
		next_deferred = nr;
	} else
		next_deferred = total_scan;

	/*
	 * We need to avoid excessive windup on filesystem shrinkers
	 * due to large numbers of GFP_NOFS allocations causing the
	 * shrinkers to return -1 all the time. This results in a large
	 * nr being built up so when a shrink that can do some work
	 * comes along it empties the entire cache due to nr >>>
	 * freeable. This is bad for sustaining a working set in
	 * memory.
	 *
	 * Hence only allow the shrinker to scan the entire cache when
	 * a large delta change is calculated directly.
	 */
	if (delta < freeable / 4)
		total_scan = min(total_scan, freeable / 2);

	/*
	 * Avoid the risk of looping forever due to a too-large nr value:
	 * never try to free more than twice the estimated number of
	 * freeable entries.
	 */
	if (total_scan > freeable * 2)
		total_scan = freeable * 2;

	trace_mm_shrink_slab_start(shrinker, shrinkctl, nr,
				   freeable, delta, total_scan, priority);

	/*
	 * Normally, we should not scan less than batch_size objects in one
	 * pass to avoid too frequent shrinker calls, but if the slab has less
	 * than batch_size objects in total and we are really tight on memory,
	 * we will try to reclaim all available objects, otherwise we can end
	 * up failing allocations although there are plenty of reclaimable
	 * objects spread over several slabs with usage less than the
	 * batch_size.
	 *
	 * We detect the "tight on memory" situations by looking at the total
	 * number of objects we want to scan (total_scan). If it is greater
	 * than the total number of objects on slab (freeable), we must be
	 * scanning at high prio and therefore should try to reclaim as much as
	 * possible.
	 */
	while (total_scan >= batch_size ||
	       total_scan >= freeable) {
		unsigned long ret;
		unsigned long nr_to_scan = min(batch_size, total_scan);

		shrinkctl->nr_to_scan = nr_to_scan;
		shrinkctl->nr_scanned = nr_to_scan;
		ret = shrinker->scan_objects(shrinker, shrinkctl);
		if (ret == SHRINK_STOP)
			break;
		freed += ret;

		count_vm_events(SLABS_SCANNED, shrinkctl->nr_scanned);
		total_scan -= shrinkctl->nr_scanned;
		scanned += shrinkctl->nr_scanned;

		cond_resched();
	}

	if (next_deferred >= scanned)
		next_deferred -= scanned;
	else
		next_deferred = 0;
	/*
	 * move the unused scan count back into the shrinker in a
	 * manner that handles concurrent updates. If we exhausted the
	 * scan, there is no need to do an update.
	 */
	if (next_deferred > 0)
		new_nr = atomic_long_add_return(next_deferred,
						&shrinker->nr_deferred[nid]);
	else
		new_nr = atomic_long_read(&shrinker->nr_deferred[nid]);

	trace_mm_shrink_slab_end(shrinker, nid, freed, nr, new_nr, total_scan);
	return freed;
}

#ifdef CONFIG_MEMCG
static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
				       struct mem_cgroup *memcg, int priority)
{
	struct memcg_shrinker_map *map;
	unsigned long ret, freed = 0;
	int i;

	if (!mem_cgroup_online(memcg))
		return 0;

	if (!down_read_trylock(&shrinker_rwsem))
		return 0;

	map = rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_map,
					true);
	if (unlikely(!map))
		goto unlock;

	for_each_set_bit(i, map->map, shrinker_nr_max) {
		struct shrink_control sc = {
			.gfp_mask = gfp_mask,
			.nid = nid,
			.memcg = memcg,
		};
		struct shrinker *shrinker;

		shrinker = idr_find(&shrinker_idr, i);
		if (unlikely(!shrinker || shrinker == SHRINKER_REGISTERING)) {
			if (!shrinker)
				clear_bit(i, map->map);
			continue;
		}

		/* Call non-slab shrinkers even though kmem is disabled */
		if (!memcg_kmem_enabled() &&
		    !(shrinker->flags & SHRINKER_NONSLAB))
			continue;

		ret = do_shrink_slab(&sc, shrinker, priority);
		if (ret == SHRINK_EMPTY) {
			clear_bit(i, map->map);
			/*
			 * After the shrinker reported that it had no objects to
			 * free, but before we cleared the corresponding bit in
			 * the memcg shrinker map, a new object might have been
			 * added. To make sure we have the bit set in this
			 * case, we invoke the shrinker one more time and reset
			 * the bit if it reports that it is not empty anymore.
			 * The memory barrier here pairs with the barrier in
			 * memcg_set_shrinker_bit():
			 *
			 * list_lru_add()     shrink_slab_memcg()
			 *   list_add_tail()    clear_bit()
			 *   <MB>               <MB>
			 *   set_bit()          do_shrink_slab()
			 */
			smp_mb__after_atomic();
			ret = do_shrink_slab(&sc, shrinker, priority);
			if (ret == SHRINK_EMPTY)
				ret = 0;
			else
				memcg_set_shrinker_bit(memcg, nid, i);
		}
		freed += ret;

		if (rwsem_is_contended(&shrinker_rwsem)) {
			freed = freed ? : 1;
			break;
		}
	}
unlock:
	up_read(&shrinker_rwsem);
	return freed;
}
#else /* CONFIG_MEMCG */
static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
				       struct mem_cgroup *memcg, int priority)
{
	return 0;
}
#endif /* CONFIG_MEMCG */

/**
 * shrink_slab - shrink slab caches
 * @gfp_mask: allocation context
 * @nid: node whose slab caches to target
 * @memcg: memory cgroup whose slab caches to target
 * @priority: the reclaim priority
 *
 * Call the shrink functions to age shrinkable caches.
 *
 * @nid is passed along to shrinkers with SHRINKER_NUMA_AWARE set,
 * unaware shrinkers will receive a node id of 0 instead.
 *
 * @memcg specifies the memory cgroup to target. Unaware shrinkers
 * are called only if it is the root cgroup.
 *
 * @priority is sc->priority; the number of freeable objects is shifted
 * right by @priority to derive the scan target.
 *
 * Returns the number of reclaimed slab objects.
 */
static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
				 struct mem_cgroup *memcg,
				 int priority)
{
	unsigned long ret, freed = 0;
	struct shrinker *shrinker;
	bool bypass = false;

	trace_android_vh_shrink_slab_bypass(gfp_mask, nid, memcg, priority, &bypass);
	if (bypass)
		return 0;

	/*
	 * The root memcg might be allocated even though memcg is disabled
	 * via "cgroup_disable=memory" boot parameter. This could make
	 * mem_cgroup_is_root() return false, then just run memcg slab
	 * shrink, but skip global shrink. This may result in premature
	 * oom.
	 */
	if (!mem_cgroup_disabled() && !mem_cgroup_is_root(memcg))
		return shrink_slab_memcg(gfp_mask, nid, memcg, priority);

	if (!down_read_trylock(&shrinker_rwsem))
		goto out;

	list_for_each_entry(shrinker, &shrinker_list, list) {
		struct shrink_control sc = {
			.gfp_mask = gfp_mask,
			.nid = nid,
			.memcg = memcg,
		};

		ret = do_shrink_slab(&sc, shrinker, priority);
		if (ret == SHRINK_EMPTY)
			ret = 0;
		freed += ret;
		/*
		 * Bail out if someone wants to register a new shrinker, to
		 * prevent the registration from being stalled for long periods
		 * by parallel ongoing shrinking.
		 */
		if (rwsem_is_contended(&shrinker_rwsem)) {
			freed = freed ? : 1;
			break;
		}
	}

	up_read(&shrinker_rwsem);
out:
	cond_resched();
	return freed;
}

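/*
 * Shrink slab on @nid for every memcg, repeating until a full pass over all
 * memcgs frees no more than 10 objects or a fatal signal is pending.
 */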
void drop_slab_node(int nid)
{
	unsigned long freed;

	do {
		struct mem_cgroup *memcg = NULL;

		if (fatal_signal_pending(current))
			return;

		freed = 0;
		memcg = mem_cgroup_iter(NULL, NULL, NULL);
		do {
			freed += shrink_slab(GFP_KERNEL, nid, memcg, 0);
		} while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);
	} while (freed > 10);
}

void drop_slab(void)
{
	int nid;

	for_each_online_node(nid)
		drop_slab_node(nid);
}

static inline int is_page_cache_freeable(struct page *page)
{
	/*
	 * A freeable page cache page is referenced only by the caller
	 * that isolated the page, the page cache and optional buffer
	 * heads at page->private.
	 */
	int page_cache_pins = thp_nr_pages(page);
	return page_count(page) - page_has_private(page) == 1 + page_cache_pins;
}

static int may_write_to_inode(struct inode *inode)
{
	if (current->flags & PF_SWAPWRITE)
		return 1;
	if (!inode_write_congested(inode))
		return 1;
	if (inode_to_bdi(inode) == current->backing_dev_info)
		return 1;
	return 0;
}

/*
 * We detected a synchronous write error writing a page out. Probably
 * -ENOSPC. We need to propagate that into the address_space for a subsequent
 * fsync(), msync() or close().
 *
 * The tricky part is that after writepage we cannot touch the mapping: nothing
 * prevents it from being freed up. But we have a ref on the page and once
 * that page is locked, the mapping is pinned.
 *
 * We're allowed to run sleeping lock_page() here because we know the caller has
 * __GFP_FS.
 */
static void handle_write_error(struct address_space *mapping,
				struct page *page, int error)
{
	lock_page(page);
	if (page_mapping(page) == mapping)
		mapping_set_error(mapping, error);
	unlock_page(page);
}

/* possible outcome of pageout() */
typedef enum {
	/* failed to write page out, page is locked */
	PAGE_KEEP,
	/* move page to the active list, page is locked */
	PAGE_ACTIVATE,
	/* page has been sent to the disk successfully, page is unlocked */
	PAGE_SUCCESS,
	/* page is clean and locked */
	PAGE_CLEAN,
} pageout_t;

/*
 * pageout is called by shrink_page_list() for each dirty page.
 * Calls ->writepage().
 */
static pageout_t pageout(struct page *page, struct address_space *mapping)
{
809 /*
810 * If the page is dirty, only perform writeback if that write
811 * will be non-blocking. To prevent this allocation from being
812 * stalled by pagecache activity. But note that there may be
813 * stalls if we need to run get_block(). We could test
814 * PagePrivate for that.
815 *
Al Viro81742022014-04-03 03:17:43 -0400816 * If this process is currently in __generic_file_write_iter() against
Linus Torvalds1da177e2005-04-16 15:20:36 -0700817 * this page's queue, we can perform writeback even if that
818 * will block.
819 *
820 * If the page is swapcache, write it back even if that would
821 * block, for some throttling. This happens by accident, because
822 * swap_backing_dev_info is bust: it doesn't reflect the
823 * congestion state of the swapdevs. Easy to fix, if needed.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700824 */
	if (!is_page_cache_freeable(page))
		return PAGE_KEEP;
	if (!mapping) {
		/*
		 * Some data journaling orphaned pages can have
		 * page->mapping == NULL while being dirty with clean buffers.
		 */
		if (page_has_private(page)) {
			if (try_to_free_buffers(page)) {
				ClearPageDirty(page);
				pr_info("%s: orphaned page\n", __func__);
				return PAGE_CLEAN;
			}
		}
		return PAGE_KEEP;
	}
	if (mapping->a_ops->writepage == NULL)
		return PAGE_ACTIVATE;
	if (!may_write_to_inode(mapping->host))
		return PAGE_KEEP;

	if (clear_page_dirty_for_io(page)) {
		int res;
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_NONE,
			.nr_to_write = SWAP_CLUSTER_MAX,
			.range_start = 0,
			.range_end = LLONG_MAX,
			.for_reclaim = 1,
		};

		SetPageReclaim(page);
		res = mapping->a_ops->writepage(page, &wbc);
		if (res < 0)
			handle_write_error(mapping, page, res);
		if (res == AOP_WRITEPAGE_ACTIVATE) {
			ClearPageReclaim(page);
			return PAGE_ACTIVATE;
		}

		if (!PageWriteback(page)) {
			/* synchronous write or broken a_ops? */
			ClearPageReclaim(page);
		}
		trace_mm_vmscan_writepage(page);
		inc_node_page_state(page, NR_VMSCAN_WRITE);
		return PAGE_SUCCESS;
	}

	return PAGE_CLEAN;
}

/*
 * Same as remove_mapping, but if the page is removed from the mapping, it
 * gets returned with a refcount of 0.
 */
static int __remove_mapping(struct address_space *mapping, struct page *page,
			    bool reclaimed, struct mem_cgroup *target_memcg)
{
	unsigned long flags;
	int refcount;
	void *shadow = NULL;

	BUG_ON(!PageLocked(page));
	BUG_ON(mapping != page_mapping(page));

	xa_lock_irqsave(&mapping->i_pages, flags);
	/*
	 * The non racy check for a busy page.
	 *
	 * Must be careful with the order of the tests. When someone has
	 * a ref to the page, it may be possible that they dirty it then
	 * drop the reference. So if PageDirty is tested before page_count
	 * here, then the following race may occur:
	 *
	 * get_user_pages(&page);
	 * [user mapping goes away]
	 * write_to(page);
	 *				!PageDirty(page)    [good]
	 * SetPageDirty(page);
	 * put_page(page);
	 *				!page_count(page)   [good, discard it]
	 *
	 * [oops, our write_to data is lost]
	 *
	 * Reversing the order of the tests ensures such a situation cannot
	 * escape unnoticed. The smp_rmb is needed to ensure the page->flags
	 * load is not satisfied before that of page->_refcount.
	 *
	 * Note that if SetPageDirty is always performed via set_page_dirty,
	 * and thus under the i_pages lock, then this ordering is not required.
	 */
	refcount = 1 + compound_nr(page);
	if (!page_ref_freeze(page, refcount))
		goto cannot_free;
	/* note: atomic_cmpxchg in page_ref_freeze provides the smp_rmb */
	if (unlikely(PageDirty(page))) {
		page_ref_unfreeze(page, refcount);
		goto cannot_free;
	}

	if (PageSwapCache(page)) {
		swp_entry_t swap = { .val = page_private(page) };
		mem_cgroup_swapout(page, swap);
		if (reclaimed && !mapping_exiting(mapping))
			shadow = workingset_eviction(page, target_memcg);
		__delete_from_swap_cache(page, swap, shadow);
		xa_unlock_irqrestore(&mapping->i_pages, flags);
		put_swap_page(page, swap);
	} else {
		void (*freepage)(struct page *);

		freepage = mapping->a_ops->freepage;
		/*
		 * Remember a shadow entry for reclaimed file cache in
		 * order to detect refaults, thus thrashing, later on.
		 *
		 * But don't store shadows in an address space that is
		 * already exiting. This is not just an optimization,
		 * inode reclaim needs to empty out the radix tree or
		 * the nodes are lost. Don't plant shadows behind its
		 * back.
		 *
		 * We also don't store shadows for DAX mappings because the
		 * only page cache pages found in these are zero pages
		 * covering holes, and because we don't want to mix DAX
		 * exceptional entries and shadow exceptional entries in the
		 * same address_space.
		 */
		if (reclaimed && page_is_file_lru(page) &&
		    !mapping_exiting(mapping) && !dax_mapping(mapping))
			shadow = workingset_eviction(page, target_memcg);
		__delete_from_page_cache(page, shadow);
		xa_unlock_irqrestore(&mapping->i_pages, flags);

		if (freepage != NULL)
			freepage(page);
	}

	return 1;

cannot_free:
	xa_unlock_irqrestore(&mapping->i_pages, flags);
	return 0;
}

/*
 * Attempt to detach a locked page from its ->mapping. If it is dirty or if
 * someone else has a ref on the page, abort and return 0. If it was
 * successfully detached, return 1. Assumes the caller has a single ref on
 * this page.
 */
int remove_mapping(struct address_space *mapping, struct page *page)
{
	if (__remove_mapping(mapping, page, false, NULL)) {
		/*
		 * Unfreezing the refcount with 1 rather than 2 effectively
		 * drops the pagecache ref for us without requiring another
		 * atomic operation.
		 */
		page_ref_unfreeze(page, 1);
		return 1;
	}
	return 0;
}

/**
 * putback_lru_page - put previously isolated page onto appropriate LRU list
 * @page: page to be put back to appropriate lru list
 *
 * Add previously isolated @page to appropriate LRU list.
 * Page may still be unevictable for other reasons.
 *
 * lru_lock must not be held, interrupts must be enabled.
 */
void putback_lru_page(struct page *page)
{
	lru_cache_add(page);
	put_page(page);		/* drop ref from isolate */
}

enum page_references {
	PAGEREF_RECLAIM,
	PAGEREF_RECLAIM_CLEAN,
	PAGEREF_KEEP,
	PAGEREF_ACTIVATE,
};

static enum page_references page_check_references(struct page *page,
						  struct scan_control *sc)
{
	int referenced_ptes, referenced_page;
	unsigned long vm_flags;

	referenced_ptes = page_referenced(page, 1, sc->target_mem_cgroup,
					  &vm_flags);
	referenced_page = TestClearPageReferenced(page);

	/*
	 * Mlock lost the isolation race with us. Let try_to_unmap()
	 * move the page to the unevictable list.
	 */
	if (vm_flags & VM_LOCKED)
		return PAGEREF_RECLAIM;

	if (referenced_ptes) {
		/*
		 * All mapped pages start out with page table
		 * references from the instantiating fault, so we need
		 * to look twice if a mapped file page is used more
		 * than once.
		 *
		 * Mark it and spare it for another trip around the
		 * inactive list. Another page table reference will
		 * lead to its activation.
		 *
		 * Note: the mark is set for activated pages as well
		 * so that recently deactivated but used pages are
		 * quickly recovered.
		 */
		SetPageReferenced(page);

		if (referenced_page || referenced_ptes > 1)
			return PAGEREF_ACTIVATE;

		/*
		 * Activate file-backed executable pages after first usage.
		 */
		if ((vm_flags & VM_EXEC) && !PageSwapBacked(page))
			return PAGEREF_ACTIVATE;

		return PAGEREF_KEEP;
	}

	/* Reclaim if clean, defer dirty pages to writeback */
	if (referenced_page && !PageSwapBacked(page))
		return PAGEREF_RECLAIM_CLEAN;

	return PAGEREF_RECLAIM;
}

/* Check if a page is dirty or under writeback */
static void page_check_dirty_writeback(struct page *page,
				       bool *dirty, bool *writeback)
{
	struct address_space *mapping;

Mel Gormane2be15f2013-07-03 15:01:57 -07001072 /*
1073 * Anonymous pages are not handled by flushers and must be written
1074 * from reclaim context. Do not stall reclaim based on them
1075 */
	if (!page_is_file_lru(page) ||
	    (PageAnon(page) && !PageSwapBacked(page))) {
		*dirty = false;
		*writeback = false;
		return;
	}

	/* By default assume that the page flags are accurate */
	*dirty = PageDirty(page);
	*writeback = PageWriteback(page);

	/* Verify dirty/writeback state if the filesystem supports it */
	if (!page_has_private(page))
		return;

	mapping = page_mapping(page);
	if (mapping && mapping->a_ops->is_dirty_writeback)
		mapping->a_ops->is_dirty_writeback(page, dirty, writeback);
}

/*
 * shrink_page_list() returns the number of reclaimed pages
 */
static unsigned int shrink_page_list(struct list_head *page_list,
				     struct pglist_data *pgdat,
				     struct scan_control *sc,
				     struct reclaim_stat *stat,
				     bool ignore_references)
{
	LIST_HEAD(ret_pages);
	LIST_HEAD(free_pages);
	unsigned int nr_reclaimed = 0;
	unsigned int pgactivate = 0;

	memset(stat, 0, sizeof(*stat));
	cond_resched();

	while (!list_empty(page_list)) {
		struct address_space *mapping;
		struct page *page;
		enum page_references references = PAGEREF_RECLAIM;
		bool dirty, writeback, may_enter_fs;
		unsigned int nr_pages;

		cond_resched();

		page = lru_to_page(page_list);
		list_del(&page->lru);

		if (!trylock_page(page))
			goto keep;

		VM_BUG_ON_PAGE(PageActive(page), page);

		nr_pages = compound_nr(page);

		/* Account the number of base pages, even for THP */
		sc->nr_scanned += nr_pages;

		if (unlikely(!page_evictable(page)))
			goto activate_locked;

		if (!sc->may_unmap && page_mapped(page))
			goto keep_locked;

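		/* FS-backed writeback needs __GFP_FS; swap IO only needs __GFP_IO */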
		may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
			(PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));

		/*
		 * The number of dirty pages determines if a node is marked
		 * reclaim_congested, which affects wait_iff_congested. kswapd
		 * will stall and start writing pages if the tail of the LRU
		 * is all dirty unqueued pages.
		 */
		page_check_dirty_writeback(page, &dirty, &writeback);
		if (dirty || writeback)
			stat->nr_dirty++;

		if (dirty && !writeback)
			stat->nr_unqueued_dirty++;

		/*
		 * Treat this page as congested if the underlying BDI is
		 * congested, or if pages are cycling through the LRU so
		 * quickly that the pages marked for immediate reclaim are
		 * making it to the end of the LRU a second time.
		 */
		mapping = page_mapping(page);
		if (((dirty || writeback) && mapping &&
		     inode_write_congested(mapping->host)) ||
		    (writeback && PageReclaim(page)))
			stat->nr_congested++;

		/*
		 * If a page at the tail of the LRU is under writeback, there
		 * are three cases to consider.
		 *
		 * 1) If reclaim is encountering an excessive number of pages
		 *    under writeback and this page is both under writeback and
		 *    PageReclaim then it indicates that pages are being queued
		 *    for IO but are being recycled through the LRU before the
		 *    IO can complete. Waiting on the page itself risks an
		 *    indefinite stall if it is impossible to writeback the
		 *    page due to IO error or disconnected storage so instead
		 *    note that the LRU is being scanned too quickly and the
		 *    caller can stall after page list has been processed.
		 *
		 * 2) Global or new memcg reclaim encounters a page that is
		 *    not marked for immediate reclaim, or the caller does not
		 *    have __GFP_FS (or __GFP_IO if it's simply going to swap,
		 *    not to fs). In this case mark the page for immediate
		 *    reclaim and continue scanning.
		 *
		 *    Require may_enter_fs because we would wait on the fs,
		 *    which may not have submitted IO yet. The loop driver
		 *    might also enter reclaim, and deadlock if it waits on a
		 *    page that it is itself responsible for writing out (loop
		 *    masks off __GFP_IO|__GFP_FS for this reason); more
		 *    thought would probably show more reasons.
1195 *
Hugh Dickins7fadc822015-09-08 15:03:46 -07001196 * 3) Legacy memcg encounters a page that is already marked
Mel Gorman283aba92013-07-03 15:01:51 -07001197 * PageReclaim. memcg does not have any dirty pages
1198 * throttling so we could easily OOM just because too many
1199 * pages are in writeback and there is nothing else to
1200 * reclaim. Wait for the writeback to complete.
Johannes Weinerc55e8d02017-02-24 14:56:23 -08001201 *
1202 * In cases 1) and 2) we activate the pages to get them out of
1203 * the way while we continue scanning for clean pages on the
1204 * inactive list and refilling from the active list. The
1205 * observation here is that waiting for disk writes is more
1206 * expensive than potentially causing reloads down the line.
1207 * Since they're marked for immediate reclaim, they won't put
1208 * memory pressure on the cache working set any longer than it
1209 * takes to write them to disk.
Mel Gorman283aba92013-07-03 15:01:51 -07001210 */
Andy Whitcroftc661b072007-08-22 14:01:26 -07001211 if (PageWriteback(page)) {
Mel Gorman283aba92013-07-03 15:01:51 -07001212 /* Case 1 above */
1213 if (current_is_kswapd() &&
1214 PageReclaim(page) &&
Mel Gorman599d0c92016-07-28 15:45:31 -07001215 test_bit(PGDAT_WRITEBACK, &pgdat->flags)) {
Kirill Tkhai060f0052019-03-05 15:48:15 -08001216 stat->nr_immediate++;
Johannes Weinerc55e8d02017-02-24 14:56:23 -08001217 goto activate_locked;
Mel Gorman283aba92013-07-03 15:01:51 -07001218
1219 /* Case 2 above */
Johannes Weinerb5ead352019-11-30 17:55:40 -08001220 } else if (writeback_throttling_sane(sc) ||
Michal Hockoecf5fc62015-08-04 14:36:58 -07001221 !PageReclaim(page) || !may_enter_fs) {
Hugh Dickinsc3b94f42012-07-31 16:45:59 -07001222 /*
1223 * This is slightly racy - end_page_writeback()
1224 * might have just cleared PageReclaim, then
1225	 * setting PageReclaim here ends up interpreted
1226 * as PageReadahead - but that does not matter
1227 * enough to care. What we do want is for this
1228 * page to have PageReclaim set next time memcg
1229 * reclaim reaches the tests above, so it will
1230 * then wait_on_page_writeback() to avoid OOM;
1231 * and it's also appropriate in global reclaim.
1232 */
1233 SetPageReclaim(page);
Kirill Tkhai060f0052019-03-05 15:48:15 -08001234 stat->nr_writeback++;
Johannes Weinerc55e8d02017-02-24 14:56:23 -08001235 goto activate_locked;
Mel Gorman283aba92013-07-03 15:01:51 -07001236
1237 /* Case 3 above */
1238 } else {
Hugh Dickins7fadc822015-09-08 15:03:46 -07001239 unlock_page(page);
Mel Gorman283aba92013-07-03 15:01:51 -07001240 wait_on_page_writeback(page);
Hugh Dickins7fadc822015-09-08 15:03:46 -07001241 /* then go back and try same page again */
1242 list_add_tail(&page->lru, page_list);
1243 continue;
Michal Hockoe62e3842012-07-31 16:45:55 -07001244 }
Andy Whitcroftc661b072007-08-22 14:01:26 -07001245 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001246
Minchan Kim8940b342019-09-25 16:49:11 -07001247 if (!ignore_references)
Minchan Kim02c6de82012-10-08 16:31:55 -07001248 references = page_check_references(page, sc);
1249
Johannes Weinerdfc8d632010-03-05 13:42:19 -08001250 switch (references) {
1251 case PAGEREF_ACTIVATE:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001252 goto activate_locked;
Johannes Weiner645747462010-03-05 13:42:22 -08001253 case PAGEREF_KEEP:
Yang Shi98879b32019-07-11 20:59:30 -07001254 stat->nr_ref_keep += nr_pages;
Johannes Weiner645747462010-03-05 13:42:22 -08001255 goto keep_locked;
Johannes Weinerdfc8d632010-03-05 13:42:19 -08001256 case PAGEREF_RECLAIM:
1257 case PAGEREF_RECLAIM_CLEAN:
1258 ; /* try to reclaim the page below */
1259 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001260
Linus Torvalds1da177e2005-04-16 15:20:36 -07001261 /*
1262 * Anonymous process memory has backing store?
1263 * Try to allocate it some swap space here.
Shaohua Li802a3a92017-05-03 14:52:32 -07001264	 * Lazyfree pages can be freed directly.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001265 */
Huang Yingbd4c82c22017-09-06 16:22:49 -07001266 if (PageAnon(page) && PageSwapBacked(page)) {
1267 if (!PageSwapCache(page)) {
1268 if (!(sc->gfp_mask & __GFP_IO))
1269 goto keep_locked;
Linus Torvalds72c5ce82021-01-16 15:34:57 -08001270 if (page_maybe_dma_pinned(page))
1271 goto keep_locked;
Huang Yingbd4c82c22017-09-06 16:22:49 -07001272 if (PageTransHuge(page)) {
1273 /* cannot split THP, skip it */
1274 if (!can_split_huge_page(page, NULL))
1275 goto activate_locked;
1276 /*
1277 * Split pages without a PMD map right
1278 * away. Chances are some or all of the
1279 * tail pages can be freed without IO.
1280 */
1281 if (!compound_mapcount(page) &&
1282 split_huge_page_to_list(page,
1283 page_list))
1284 goto activate_locked;
1285 }
1286 if (!add_to_swap(page)) {
1287 if (!PageTransHuge(page))
Yang Shi98879b32019-07-11 20:59:30 -07001288 goto activate_locked_split;
Huang Yingbd4c82c22017-09-06 16:22:49 -07001289 /* Fallback to swap normal pages */
1290 if (split_huge_page_to_list(page,
1291 page_list))
1292 goto activate_locked;
Huang Yingfe490cc2017-09-06 16:22:52 -07001293#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1294 count_vm_event(THP_SWPOUT_FALLBACK);
1295#endif
Huang Yingbd4c82c22017-09-06 16:22:49 -07001296 if (!add_to_swap(page))
Yang Shi98879b32019-07-11 20:59:30 -07001297 goto activate_locked_split;
Huang Yingbd4c82c22017-09-06 16:22:49 -07001298 }
Minchan Kim0f074652017-07-06 15:37:24 -07001299
Kirill Tkhai4b793062020-04-01 21:10:18 -07001300 may_enter_fs = true;
Huang Yingbd4c82c22017-09-06 16:22:49 -07001301
1302 /* Adding to swap updated mapping */
1303 mapping = page_mapping(page);
Minchan Kim0f074652017-07-06 15:37:24 -07001304 }
Kirill A. Shutemov7751b2d2016-07-26 15:25:56 -07001305 } else if (unlikely(PageTransHuge(page))) {
1306 /* Split file THP */
1307 if (split_huge_page_to_list(page, page_list))
1308 goto keep_locked;
Mel Gormane2be15f2013-07-03 15:01:57 -07001309 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001310
1311 /*
Yang Shi98879b32019-07-11 20:59:30 -07001312	 * The THP may have been split above, so subtract the tail
1313	 * pages and update nr_pages to avoid accounting them twice.
1314	 *
1315	 * Only the tail pages that were successfully added to the
1316	 * swap cache reach here.
1317 */
1318 if ((nr_pages > 1) && !PageTransHuge(page)) {
1319 sc->nr_scanned -= (nr_pages - 1);
1320 nr_pages = 1;
1321 }
1322
1323 /*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001324 * The page is mapped into the page tables of one or more
1325 * processes. Try to unmap it here.
1326 */
Shaohua Li802a3a92017-05-03 14:52:32 -07001327 if (page_mapped(page)) {
Shakeel Buttdd156e32020-12-14 19:06:39 -08001328 enum ttu_flags flags = TTU_BATCH_FLUSH;
Jaewon Kim1f318a92020-06-03 16:01:15 -07001329 bool was_swapbacked = PageSwapBacked(page);
Huang Yingbd4c82c22017-09-06 16:22:49 -07001330
1331 if (unlikely(PageTransHuge(page)))
1332 flags |= TTU_SPLIT_HUGE_PMD;
Jaewon Kim1f318a92020-06-03 16:01:15 -07001333
Huang Yingbd4c82c22017-09-06 16:22:49 -07001334 if (!try_to_unmap(page, flags)) {
Yang Shi98879b32019-07-11 20:59:30 -07001335 stat->nr_unmap_fail += nr_pages;
Jaewon Kim1f318a92020-06-03 16:01:15 -07001336 if (!was_swapbacked && PageSwapBacked(page))
1337 stat->nr_lazyfree_fail += nr_pages;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001338 goto activate_locked;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001339 }
1340 }
1341
1342 if (PageDirty(page)) {
Mel Gormanee728862011-10-31 17:07:38 -07001343 /*
Johannes Weiner4eda4822017-02-24 14:56:20 -08001344 * Only kswapd can writeback filesystem pages
1345 * to avoid risk of stack overflow. But avoid
1346 * injecting inefficient single-page IO into
1347 * flusher writeback as much as possible: only
1348 * write pages when we've encountered many
1349 * dirty pages, and when we've already scanned
1350 * the rest of the LRU for clean pages and see
1351 * the same dirty pages again (PageReclaim).
Mel Gormanee728862011-10-31 17:07:38 -07001352 */
Huang Ying9de4f222020-04-06 20:04:41 -07001353 if (page_is_file_lru(page) &&
Johannes Weiner4eda4822017-02-24 14:56:20 -08001354 (!current_is_kswapd() || !PageReclaim(page) ||
1355 !test_bit(PGDAT_DIRTY, &pgdat->flags))) {
Mel Gorman49ea7eb2011-10-31 17:07:59 -07001356 /*
1357 * Immediately reclaim when written back.
1358	 * Similar in principle to deactivate_page()
1359 * except we already have the page isolated
1360 * and know it's dirty
1361 */
Mel Gormanc4a25632016-07-28 15:46:23 -07001362 inc_node_page_state(page, NR_VMSCAN_IMMEDIATE);
Mel Gorman49ea7eb2011-10-31 17:07:59 -07001363 SetPageReclaim(page);
1364
Johannes Weinerc55e8d02017-02-24 14:56:23 -08001365 goto activate_locked;
Mel Gormanee728862011-10-31 17:07:38 -07001366 }
1367
Johannes Weinerdfc8d632010-03-05 13:42:19 -08001368 if (references == PAGEREF_RECLAIM_CLEAN)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001369 goto keep_locked;
Andrew Morton4dd4b922008-03-24 12:29:52 -07001370 if (!may_enter_fs)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001371 goto keep_locked;
Christoph Lameter52a83632006-02-01 03:05:28 -08001372 if (!sc->may_writepage)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001373 goto keep_locked;
1374
Mel Gormand950c942015-09-04 15:47:35 -07001375 /*
1376 * Page is dirty. Flush the TLB if a writable entry
1377 * potentially exists to avoid CPU writes after IO
1378 * starts and then write it out here.
1379 */
1380 try_to_unmap_flush_dirty();
Yang Shicb165562019-11-30 17:55:28 -08001381 switch (pageout(page, mapping)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001382 case PAGE_KEEP:
1383 goto keep_locked;
1384 case PAGE_ACTIVATE:
1385 goto activate_locked;
1386 case PAGE_SUCCESS:
Matthew Wilcox (Oracle)6c357842020-08-14 17:30:37 -07001387 stat->nr_pageout += thp_nr_pages(page);
Johannes Weiner96f8bf42020-06-03 16:03:09 -07001388
KOSAKI Motohiro7d3579e2010-10-26 14:21:42 -07001389 if (PageWriteback(page))
Mel Gorman41ac1992012-05-29 15:06:19 -07001390 goto keep;
KOSAKI Motohiro7d3579e2010-10-26 14:21:42 -07001391 if (PageDirty(page))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001392 goto keep;
KOSAKI Motohiro7d3579e2010-10-26 14:21:42 -07001393
Linus Torvalds1da177e2005-04-16 15:20:36 -07001394 /*
1395 * A synchronous write - probably a ramdisk. Go
1396 * ahead and try to reclaim the page.
1397 */
Nick Piggin529ae9a2008-08-02 12:01:03 +02001398 if (!trylock_page(page))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001399 goto keep;
1400 if (PageDirty(page) || PageWriteback(page))
1401 goto keep_locked;
1402 mapping = page_mapping(page);
1403 case PAGE_CLEAN:
1404 ; /* try to free the page below */
1405 }
1406 }
1407
1408 /*
1409 * If the page has buffers, try to free the buffer mappings
1410 * associated with this page. If we succeed we try to free
1411 * the page as well.
1412 *
1413 * We do this even if the page is PageDirty().
1414 * try_to_release_page() does not perform I/O, but it is
1415 * possible for a page to have PageDirty set, but it is actually
1416 * clean (all its buffers are clean). This happens if the
1417 * buffers were written out directly, with submit_bh(). ext3
Lee Schermerhorn894bc312008-10-18 20:26:39 -07001418 * will do this, as well as the blockdev mapping.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001419 * try_to_release_page() will discover that cleanness and will
1420 * drop the buffers and mark the page clean - it can be freed.
1421 *
1422 * Rarely, pages can have buffers and no ->mapping. These are
1423 * the pages which were not successfully invalidated in
1424 * truncate_complete_page(). We try to drop those buffers here
1425 * and if that worked, and the page is no longer mapped into
1426 * process address space (page_count == 1) it can be freed.
1427 * Otherwise, leave the page on the LRU so it is swappable.
1428 */
David Howells266cf652009-04-03 16:42:36 +01001429 if (page_has_private(page)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001430 if (!try_to_release_page(page, sc->gfp_mask))
1431 goto activate_locked;
Nick Piggine2867812008-07-25 19:45:30 -07001432 if (!mapping && page_count(page) == 1) {
1433 unlock_page(page);
1434 if (put_page_testzero(page))
1435 goto free_it;
1436 else {
1437 /*
1438 * rare race with speculative reference.
1439 * the speculative reference will free
1440 * this page shortly, so we may
1441 * increment nr_reclaimed here (and
1442 * leave it off the LRU).
1443 */
1444 nr_reclaimed++;
1445 continue;
1446 }
1447 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001448 }
1449
Shaohua Li802a3a92017-05-03 14:52:32 -07001450 if (PageAnon(page) && !PageSwapBacked(page)) {
1451 /* follow __remove_mapping for reference */
1452 if (!page_ref_freeze(page, 1))
1453 goto keep_locked;
1454 if (PageDirty(page)) {
1455 page_ref_unfreeze(page, 1);
1456 goto keep_locked;
1457 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001458
Shaohua Li802a3a92017-05-03 14:52:32 -07001459 count_vm_event(PGLAZYFREED);
Roman Gushchin22621852017-07-06 15:40:25 -07001460 count_memcg_page_event(page, PGLAZYFREED);
Johannes Weinerb9107182019-11-30 17:55:59 -08001461 } else if (!mapping || !__remove_mapping(mapping, page, true,
1462 sc->target_mem_cgroup))
Shaohua Li802a3a92017-05-03 14:52:32 -07001463 goto keep_locked;
Hugh Dickins9a1ea432018-12-28 00:36:14 -08001464
1465 unlock_page(page);
Nick Piggine2867812008-07-25 19:45:30 -07001466free_it:
Yang Shi98879b32019-07-11 20:59:30 -07001467 /*
1468	 * A THP may get swapped out as a whole, so we need to
1469	 * account for all of its base pages.
1470 */
1471 nr_reclaimed += nr_pages;
Mel Gormanabe4c3b2010-08-09 17:19:31 -07001472
1473 /*
1474	 * Is there a need to periodically drain free_pages? It
1475	 * would appear not, as the counts should be low.
1476 */
Yang Shi7ae88532019-09-23 15:38:09 -07001477 if (unlikely(PageTransHuge(page)))
Matthew Wilcox (Oracle)ff45fc32020-06-03 16:01:09 -07001478 destroy_compound_page(page);
Yang Shi7ae88532019-09-23 15:38:09 -07001479 else
Huang Yingbd4c82c22017-09-06 16:22:49 -07001480 list_add(&page->lru, &free_pages);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001481 continue;
1482
Yang Shi98879b32019-07-11 20:59:30 -07001483activate_locked_split:
1484 /*
1485	 * The tail pages that failed to be added to the swap cache
1486	 * reach here. Fix up nr_scanned and nr_pages.
1487 */
1488 if (nr_pages > 1) {
1489 sc->nr_scanned -= (nr_pages - 1);
1490 nr_pages = 1;
1491 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001492activate_locked:
Rik van Riel68a223942008-10-18 20:26:23 -07001493 /* Not a candidate for swapping, so reclaim swap space. */
Minchan Kimad6b6702017-05-03 14:54:13 -07001494 if (PageSwapCache(page) && (mem_cgroup_swap_full(page) ||
1495 PageMlocked(page)))
Hugh Dickinsa2c43ee2009-01-06 14:39:36 -08001496 try_to_free_swap(page);
Sasha Levin309381fea2014-01-23 15:52:54 -08001497 VM_BUG_ON_PAGE(PageActive(page), page);
Minchan Kimad6b6702017-05-03 14:54:13 -07001498 if (!PageMlocked(page)) {
Huang Ying9de4f222020-04-06 20:04:41 -07001499 int type = page_is_file_lru(page);
Minchan Kimad6b6702017-05-03 14:54:13 -07001500 SetPageActive(page);
Yang Shi98879b32019-07-11 20:59:30 -07001501 stat->nr_activate[type] += nr_pages;
Roman Gushchin22621852017-07-06 15:40:25 -07001502 count_memcg_page_event(page, PGACTIVATE);
Minchan Kimad6b6702017-05-03 14:54:13 -07001503 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001504keep_locked:
1505 unlock_page(page);
1506keep:
1507 list_add(&page->lru, &ret_pages);
Sasha Levin309381fea2014-01-23 15:52:54 -08001508 VM_BUG_ON_PAGE(PageLRU(page) || PageUnevictable(page), page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001509 }
Mel Gormanabe4c3b2010-08-09 17:19:31 -07001510
Yang Shi98879b32019-07-11 20:59:30 -07001511 pgactivate = stat->nr_activate[0] + stat->nr_activate[1];
1512
Johannes Weiner747db952014-08-08 14:19:24 -07001513 mem_cgroup_uncharge_list(&free_pages);
Mel Gorman72b252a2015-09-04 15:47:32 -07001514 try_to_unmap_flush();
Mel Gorman2d4894b2017-11-15 17:37:59 -08001515 free_unref_page_list(&free_pages);
Mel Gormanabe4c3b2010-08-09 17:19:31 -07001516
Linus Torvalds1da177e2005-04-16 15:20:36 -07001517 list_splice(&ret_pages, page_list);
Kirill Tkhai886cf192019-05-13 17:16:51 -07001518 count_vm_events(PGACTIVATE, pgactivate);
Johannes Weiner0a31bc92014-08-08 14:19:22 -07001519
Andrew Morton05ff5132006-03-22 00:08:20 -08001520 return nr_reclaimed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001521}
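
/*
 * Contract note (a summary, not in the original source): on return,
 * shrink_page_list() has spliced every page it could not free back onto
 * @page_list, so callers are expected to drain that list back onto the
 * LRU, e.g. via move_pages_to_lru() or putback_lru_page().
 */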
1522
Maninder Singh730ec8c2020-06-03 16:01:18 -07001523unsigned int reclaim_clean_pages_from_list(struct zone *zone,
Minchan Kim02c6de82012-10-08 16:31:55 -07001524 struct list_head *page_list)
1525{
1526 struct scan_control sc = {
1527 .gfp_mask = GFP_KERNEL,
1528 .priority = DEF_PRIORITY,
1529 .may_unmap = 1,
1530 };
Jaewon Kim1f318a92020-06-03 16:01:15 -07001531 struct reclaim_stat stat;
Maninder Singh730ec8c2020-06-03 16:01:18 -07001532 unsigned int nr_reclaimed;
Minchan Kim02c6de82012-10-08 16:31:55 -07001533 struct page *page, *next;
1534 LIST_HEAD(clean_pages);
1535
1536 list_for_each_entry_safe(page, next, page_list, lru) {
Huang Ying9de4f222020-04-06 20:04:41 -07001537 if (page_is_file_lru(page) && !PageDirty(page) &&
Minchan Kima58f2ce2019-06-13 15:56:15 -07001538 !__PageMovable(page) && !PageUnevictable(page)) {
Minchan Kim02c6de82012-10-08 16:31:55 -07001539 ClearPageActive(page);
1540 list_move(&page->lru, &clean_pages);
1541 }
1542 }
1543
Jaewon Kim1f318a92020-06-03 16:01:15 -07001544 nr_reclaimed = shrink_page_list(&clean_pages, zone->zone_pgdat, &sc,
Shakeel Buttdd156e32020-12-14 19:06:39 -08001545 &stat, true);
Minchan Kim02c6de82012-10-08 16:31:55 -07001546 list_splice(&clean_pages, page_list);
Nicholas Piggin2da9f632020-11-13 22:51:46 -08001547 mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE,
1548 -(long)nr_reclaimed);
Jaewon Kim1f318a92020-06-03 16:01:15 -07001549 /*
1550 * Since lazyfree pages are isolated from file LRU from the beginning,
1551 * they will rotate back to anonymous LRU in the end if it failed to
1552 * discard so isolated count will be mismatched.
1553 * Compensate the isolated count for both LRU lists.
1554 */
1555 mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_ANON,
1556 stat.nr_lazyfree_fail);
1557 mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE,
Nicholas Piggin2da9f632020-11-13 22:51:46 -08001558 -(long)stat.nr_lazyfree_fail);
Jaewon Kim1f318a92020-06-03 16:01:15 -07001559 return nr_reclaimed;
Minchan Kim02c6de82012-10-08 16:31:55 -07001560}
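
/*
 * Minimal caller sketch (hypothetical, in the spirit of the contiguous
 * allocation path): a caller that has already isolated candidate pages
 * onto a private list can pre-reclaim the clean file pages instead of
 * migrating them. "cc" stands in for a compact_control-like context and
 * is an assumption of this sketch:
 *
 *	unsigned int nr_reclaimed;
 *
 *	nr_reclaimed = reclaim_clean_pages_from_list(cc->zone,
 *						     &cc->migratepages);
 *	cc->nr_migratepages -= nr_reclaimed;
 */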
1561
Andy Whitcroft5ad333e2007-07-17 04:03:16 -07001562/*
1563 * Attempt to remove the specified page from its LRU. Only take this page
1564 * if it is of the appropriate PageActive status. Pages which are being
1565 * freed elsewhere are also ignored.
1566 *
1567 * page: page to consider
1568 * mode: one of the LRU isolation modes defined above
1569 *
1570 * returns 0 on success, -ve errno on failure.
1571 */
Konstantin Khlebnikovf3fd4a62012-05-29 15:06:54 -07001572int __isolate_lru_page(struct page *page, isolate_mode_t mode)
Andy Whitcroft5ad333e2007-07-17 04:03:16 -07001573{
1574 int ret = -EINVAL;
1575
1576 /* Only take pages on the LRU. */
1577 if (!PageLRU(page))
1578 return ret;
1579
Minchan Kime46a2872012-10-08 16:33:48 -07001580 /* Compaction should not handle unevictable pages but CMA can do so */
1581 if (PageUnevictable(page) && !(mode & ISOLATE_UNEVICTABLE))
Lee Schermerhorn894bc312008-10-18 20:26:39 -07001582 return ret;
1583
Andy Whitcroft5ad333e2007-07-17 04:03:16 -07001584 ret = -EBUSY;
KAMEZAWA Hiroyuki08e552c2009-01-07 18:08:01 -08001585
Mel Gormanc8244932012-01-12 17:19:38 -08001586 /*
1587 * To minimise LRU disruption, the caller can indicate that it only
1588 * wants to isolate pages it will be able to operate on without
1589 * blocking - clean pages for the most part.
1590 *
Mel Gormanc8244932012-01-12 17:19:38 -08001591	 * ISOLATE_ASYNC_MIGRATE is used to indicate that it only wants pages
1592	 * that can be migrated without blocking.
1593 */
Johannes Weiner1276ad62017-02-24 14:56:11 -08001594 if (mode & ISOLATE_ASYNC_MIGRATE) {
Mel Gormanc8244932012-01-12 17:19:38 -08001595 /* All the caller can do on PageWriteback is block */
1596 if (PageWriteback(page))
1597 return ret;
1598
1599 if (PageDirty(page)) {
1600 struct address_space *mapping;
Mel Gorman69d763f2018-01-31 16:19:52 -08001601 bool migrate_dirty;
Mel Gormanc8244932012-01-12 17:19:38 -08001602
Mel Gormanc8244932012-01-12 17:19:38 -08001603 /*
1604 * Only pages without mappings or that have a
1605 * ->migratepage callback are possible to migrate
Mel Gorman69d763f2018-01-31 16:19:52 -08001606 * without blocking. However, we can be racing with
1607 * truncation so it's necessary to lock the page
1608 * to stabilise the mapping as truncation holds
1609 * the page lock until after the page is removed
1610 * from the page cache.
Mel Gormanc8244932012-01-12 17:19:38 -08001611 */
Mel Gorman69d763f2018-01-31 16:19:52 -08001612 if (!trylock_page(page))
1613 return ret;
1614
Mel Gormanc8244932012-01-12 17:19:38 -08001615 mapping = page_mapping(page);
Hugh Dickins145e1a72018-06-01 16:50:50 -07001616 migrate_dirty = !mapping || mapping->a_ops->migratepage;
Mel Gorman69d763f2018-01-31 16:19:52 -08001617 unlock_page(page);
1618 if (!migrate_dirty)
Mel Gormanc8244932012-01-12 17:19:38 -08001619 return ret;
1620 }
1621 }
Minchan Kim39deaf82011-10-31 17:06:51 -07001622
Minchan Kimf80c0672011-10-31 17:06:55 -07001623 if ((mode & ISOLATE_UNMAPPED) && page_mapped(page))
1624 return ret;
1625
Andy Whitcroft5ad333e2007-07-17 04:03:16 -07001626 if (likely(get_page_unless_zero(page))) {
1627 /*
1628 * Be careful not to clear PageLRU until after we're
1629 * sure the page is not being freed elsewhere -- the
1630 * page release code relies on it.
1631 */
1632 ClearPageLRU(page);
1633 ret = 0;
1634 }
1635
1636 return ret;
1637}
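
/*
 * Illustrative sketch of how a caller might combine the isolation modes
 * above (hypothetical; "async", "contig_alloc" and "isolated" are
 * assumptions of this sketch, not names from this file):
 *
 *	isolate_mode_t mode = 0;
 *
 *	if (async)		// cannot afford to block
 *		mode |= ISOLATE_ASYNC_MIGRATE;
 *	if (contig_alloc)	// CMA may take unevictable pages
 *		mode |= ISOLATE_UNEVICTABLE;
 *	if (__isolate_lru_page(page, mode) == 0)
 *		list_add(&page->lru, &isolated);
 */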
1638
Mel Gorman7ee36a12016-07-28 15:47:17 -07001639
1640/*
1641 * Update LRU sizes after isolating pages. The LRU size updates must
Ethon Paul55b65a52020-06-04 16:49:10 -07001642 * be complete before mem_cgroup_update_lru_size due to a sanity check.
Mel Gorman7ee36a12016-07-28 15:47:17 -07001643 */
1644static __always_inline void update_lru_sizes(struct lruvec *lruvec,
Michal Hockob4536f0c82017-01-10 16:58:04 -08001645 enum lru_list lru, unsigned long *nr_zone_taken)
Mel Gorman7ee36a12016-07-28 15:47:17 -07001646{
Mel Gorman7ee36a12016-07-28 15:47:17 -07001647 int zid;
1648
Mel Gorman7ee36a12016-07-28 15:47:17 -07001649 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1650 if (!nr_zone_taken[zid])
1651 continue;
1652
Wei Yanga892cb62020-06-03 16:01:12 -07001653 update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]);
Mel Gorman7ee36a12016-07-28 15:47:17 -07001654 }
Mel Gorman7ee36a12016-07-28 15:47:17 -07001655
Mel Gorman7ee36a12016-07-28 15:47:17 -07001656}
1657
Andrey Ryabininf4b7e272019-03-05 15:49:39 -08001658/**
1659 * pgdat->lru_lock is heavily contended. Some of the functions that
Linus Torvalds1da177e2005-04-16 15:20:36 -07001660 * shrink the lists perform better by taking out a batch of pages
1661 * and working on them outside the LRU lock.
1662 *
1663 * For pagecache intensive workloads, this function is the hottest
1664 * spot in the kernel (apart from copy_*_user functions).
1665 *
1666 * Appropriate locks must be held before calling this function.
1667 *
Minchan Kim791b48b2017-05-12 15:47:06 -07001668 * @nr_to_scan: The number of eligible pages to look through on the list.
Konstantin Khlebnikov5dc35972012-05-29 15:06:58 -07001669 * @lruvec: The LRU vector to pull pages from.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001670 * @dst: The temp list to put pages on to.
Hugh Dickinsf6260122012-01-12 17:20:06 -08001671 * @nr_scanned: The number of pages that were scanned.
Rik van Rielfe2c2a12012-03-21 16:33:51 -07001672 * @sc: The scan_control struct for this reclaim session
Konstantin Khlebnikov3cb99452012-05-29 15:06:53 -07001673 * @lru: LRU list id for isolating
Linus Torvalds1da177e2005-04-16 15:20:36 -07001674 *
1675 * returns how many pages were moved onto *@dst.
1676 */
Andrew Morton69e05942006-03-22 00:08:19 -08001677static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
Konstantin Khlebnikov5dc35972012-05-29 15:06:58 -07001678 struct lruvec *lruvec, struct list_head *dst,
Rik van Rielfe2c2a12012-03-21 16:33:51 -07001679 unsigned long *nr_scanned, struct scan_control *sc,
Kirill Tkhaia9e7c392019-03-05 15:46:55 -08001680 enum lru_list lru)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001681{
Hugh Dickins75b00af2012-05-29 15:07:09 -07001682 struct list_head *src = &lruvec->lists[lru];
Andrew Morton69e05942006-03-22 00:08:19 -08001683 unsigned long nr_taken = 0;
Mel Gorman599d0c92016-07-28 15:45:31 -07001684 unsigned long nr_zone_taken[MAX_NR_ZONES] = { 0 };
Mel Gorman7cc30fc2016-07-28 15:46:59 -07001685 unsigned long nr_skipped[MAX_NR_ZONES] = { 0, };
Johannes Weiner3db65812017-05-03 14:52:13 -07001686 unsigned long skipped = 0;
Minchan Kim791b48b2017-05-12 15:47:06 -07001687 unsigned long scan, total_scan, nr_pages;
Mel Gormanb2e18752016-07-28 15:45:37 -07001688 LIST_HEAD(pages_skipped);
Kirill Tkhaia9e7c392019-03-05 15:46:55 -08001689 isolate_mode_t mode = (sc->may_unmap ? 0 : ISOLATE_UNMAPPED);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001690
Yang Shi98879b32019-07-11 20:59:30 -07001691 total_scan = 0;
Minchan Kim791b48b2017-05-12 15:47:06 -07001692 scan = 0;
Yang Shi98879b32019-07-11 20:59:30 -07001693 while (scan < nr_to_scan && !list_empty(src)) {
Andy Whitcroft5ad333e2007-07-17 04:03:16 -07001694 struct page *page;
Andy Whitcroft5ad333e2007-07-17 04:03:16 -07001695
Linus Torvalds1da177e2005-04-16 15:20:36 -07001696 page = lru_to_page(src);
1697 prefetchw_prev_lru_page(page, src, flags);
1698
Sasha Levin309381fea2014-01-23 15:52:54 -08001699 VM_BUG_ON_PAGE(!PageLRU(page), page);
Nick Piggin8d438f92006-03-22 00:07:59 -08001700
Matthew Wilcox (Oracle)d8c65462019-09-23 15:34:30 -07001701 nr_pages = compound_nr(page);
Yang Shi98879b32019-07-11 20:59:30 -07001702 total_scan += nr_pages;
1703
Mel Gormanb2e18752016-07-28 15:45:37 -07001704 if (page_zonenum(page) > sc->reclaim_idx) {
1705 list_move(&page->lru, &pages_skipped);
Yang Shi98879b32019-07-11 20:59:30 -07001706 nr_skipped[page_zonenum(page)] += nr_pages;
Mel Gormanb2e18752016-07-28 15:45:37 -07001707 continue;
1708 }
1709
Minchan Kim791b48b2017-05-12 15:47:06 -07001710 /*
1711 * Do not count skipped pages because that makes the function
1712 * return with no isolated pages if the LRU mostly contains
1713 * ineligible pages. This causes the VM to not reclaim any
1714 * pages, triggering a premature OOM.
Yang Shi98879b32019-07-11 20:59:30 -07001715 *
1716 * Account all tail pages of THP. This would not cause
1717 * premature OOM since __isolate_lru_page() returns -EBUSY
1718 * only when the page is being freed somewhere else.
Minchan Kim791b48b2017-05-12 15:47:06 -07001719 */
Yang Shi98879b32019-07-11 20:59:30 -07001720 scan += nr_pages;
Konstantin Khlebnikovf3fd4a62012-05-29 15:06:54 -07001721 switch (__isolate_lru_page(page, mode)) {
Andy Whitcroft5ad333e2007-07-17 04:03:16 -07001722 case 0:
Mel Gorman599d0c92016-07-28 15:45:31 -07001723 nr_taken += nr_pages;
1724 nr_zone_taken[page_zonenum(page)] += nr_pages;
Andy Whitcroft5ad333e2007-07-17 04:03:16 -07001725 list_move(&page->lru, dst);
Andy Whitcroft5ad333e2007-07-17 04:03:16 -07001726 break;
Nick Piggin46453a62006-03-22 00:07:58 -08001727
Andy Whitcroft5ad333e2007-07-17 04:03:16 -07001728 case -EBUSY:
1729 /* else it is being freed elsewhere */
1730 list_move(&page->lru, src);
1731 continue;
1732
1733 default:
1734 BUG();
1735 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001736 }
1737
Mel Gormanb2e18752016-07-28 15:45:37 -07001738 /*
1739 * Splice any skipped pages to the start of the LRU list. Note that
1740 * this disrupts the LRU order when reclaiming for lower zones but
1741 * we cannot splice to the tail. If we did then the SWAP_CLUSTER_MAX
1742 * scanning would soon rescan the same pages to skip and put the
1743 * system at risk of premature OOM.
1744 */
Mel Gorman7cc30fc2016-07-28 15:46:59 -07001745 if (!list_empty(&pages_skipped)) {
1746 int zid;
1747
Johannes Weiner3db65812017-05-03 14:52:13 -07001748 list_splice(&pages_skipped, src);
Mel Gorman7cc30fc2016-07-28 15:46:59 -07001749 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1750 if (!nr_skipped[zid])
1751 continue;
1752
1753 __count_zid_vm_events(PGSCAN_SKIP, zid, nr_skipped[zid]);
Michal Hocko1265e3a2017-02-22 15:44:21 -08001754 skipped += nr_skipped[zid];
Mel Gorman7cc30fc2016-07-28 15:46:59 -07001755 }
1756 }
Minchan Kim791b48b2017-05-12 15:47:06 -07001757 *nr_scanned = total_scan;
Michal Hocko1265e3a2017-02-22 15:44:21 -08001758 trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, nr_to_scan,
Minchan Kim791b48b2017-05-12 15:47:06 -07001759 total_scan, skipped, nr_taken, mode, lru);
Michal Hockob4536f0c82017-01-10 16:58:04 -08001760 update_lru_sizes(lruvec, lru, nr_zone_taken);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001761 return nr_taken;
1762}
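
/*
 * Worked example of the skip accounting above: when reclaiming for
 * sc->reclaim_idx == ZONE_NORMAL, a ZONE_MOVABLE page at the tail is
 * moved to pages_skipped and counted in nr_skipped[] and total_scan,
 * but not in "scan" - so ineligible pages do not consume the
 * nr_to_scan budget, yet still show up in *nr_scanned. The skipped
 * pages are then spliced back to the head of the source list.
 */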
1763
Nick Piggin62695a82008-10-18 20:26:09 -07001764/**
1765 * isolate_lru_page - tries to isolate a page from its LRU list
1766 * @page: page to isolate from its LRU list
1767 *
1768 * Isolates a @page from an LRU list, clears PageLRU and adjusts the
1769 * vmstat statistic corresponding to whatever LRU list the page was on.
1770 *
1771 * Returns 0 if the page was removed from an LRU list.
1772 * Returns -EBUSY if the page was not on an LRU list.
1773 *
1774 * The returned page will have PageLRU() cleared. If it was found on
Lee Schermerhorn894bc312008-10-18 20:26:39 -07001775 * the active list, it will have PageActive set. If it was found on
1776 * the unevictable list, it will have the PageUnevictable bit set. That flag
1777 * may need to be cleared by the caller before letting the page go.
Nick Piggin62695a82008-10-18 20:26:09 -07001778 *
1779 * The vmstat statistic corresponding to the list on which the page was
1780 * found will be decremented.
1781 *
1782 * Restrictions:
Mike Rapoporta5d09be2018-02-06 15:42:19 -08001783 *
Nick Piggin62695a82008-10-18 20:26:09 -07001784 * (1) Must be called with an elevated refcount on the page. This is a
Hui Su01c47762020-10-13 16:56:49 -07001785 * fundamental difference from isolate_lru_pages (which is called
Nick Piggin62695a82008-10-18 20:26:09 -07001786 * without a stable reference).
1787 * (2) the lru_lock must not be held.
1788 * (3) interrupts must be enabled.
1789 */
1790int isolate_lru_page(struct page *page)
1791{
1792 int ret = -EBUSY;
1793
Sasha Levin309381fea2014-01-23 15:52:54 -08001794 VM_BUG_ON_PAGE(!page_count(page), page);
Kirill A. Shutemovcf2a82e2016-02-05 15:36:36 -08001795 WARN_RATELIMIT(PageTail(page), "trying to isolate tail page");
Konstantin Khlebnikov0c917312011-05-24 17:12:21 -07001796
Nick Piggin62695a82008-10-18 20:26:09 -07001797 if (PageLRU(page)) {
Andrey Ryabininf4b7e272019-03-05 15:49:39 -08001798 pg_data_t *pgdat = page_pgdat(page);
Hugh Dickinsfa9add62012-05-29 15:07:09 -07001799 struct lruvec *lruvec;
Nick Piggin62695a82008-10-18 20:26:09 -07001800
Andrey Ryabininf4b7e272019-03-05 15:49:39 -08001801 spin_lock_irq(&pgdat->lru_lock);
1802 lruvec = mem_cgroup_page_lruvec(page, pgdat);
Konstantin Khlebnikov0c917312011-05-24 17:12:21 -07001803 if (PageLRU(page)) {
Lee Schermerhorn894bc312008-10-18 20:26:39 -07001804 int lru = page_lru(page);
Konstantin Khlebnikov0c917312011-05-24 17:12:21 -07001805 get_page(page);
Nick Piggin62695a82008-10-18 20:26:09 -07001806 ClearPageLRU(page);
Hugh Dickinsfa9add62012-05-29 15:07:09 -07001807 del_page_from_lru_list(page, lruvec, lru);
1808 ret = 0;
Nick Piggin62695a82008-10-18 20:26:09 -07001809 }
Andrey Ryabininf4b7e272019-03-05 15:49:39 -08001810 spin_unlock_irq(&pgdat->lru_lock);
Nick Piggin62695a82008-10-18 20:26:09 -07001811 }
1812 return ret;
1813}
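
/*
 * Typical usage pattern (a sketch, assuming the caller already holds a
 * page reference, e.g. obtained during a page-table walk):
 *
 *	if (!isolate_lru_page(page)) {
 *		// off the LRU; PageActive/PageUnevictable may be set
 *		list_add_tail(&page->lru, &pagelist);
 *		// ... migrate or reclaim the page, else:
 *		putback_lru_page(page);
 *	}
 */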
1814
Andy Whitcroft5ad333e2007-07-17 04:03:16 -07001815/*
Fengguang Wud37dd5d2012-12-18 14:23:28 -08001816 * A direct reclaimer may isolate SWAP_CLUSTER_MAX pages from the LRU list and
Xianting Tian178821b2019-11-30 17:56:05 -08001817 * then get rescheduled. When there are a massive number of tasks doing page
Fengguang Wud37dd5d2012-12-18 14:23:28 -08001818 * allocation, such sleeping direct reclaimers may keep piling up on each CPU,
1819 * the LRU list will go small and be scanned faster than necessary, leading to
1820 * unnecessary swapping, thrashing and OOM.
Rik van Riel35cd7812009-09-21 17:01:38 -07001821 */
Mel Gorman599d0c92016-07-28 15:45:31 -07001822static int too_many_isolated(struct pglist_data *pgdat, int file,
Rik van Riel35cd7812009-09-21 17:01:38 -07001823 struct scan_control *sc)
1824{
1825 unsigned long inactive, isolated;
1826
1827 if (current_is_kswapd())
1828 return 0;
1829
Johannes Weinerb5ead352019-11-30 17:55:40 -08001830 if (!writeback_throttling_sane(sc))
Rik van Riel35cd7812009-09-21 17:01:38 -07001831 return 0;
1832
1833 if (file) {
Mel Gorman599d0c92016-07-28 15:45:31 -07001834 inactive = node_page_state(pgdat, NR_INACTIVE_FILE);
1835 isolated = node_page_state(pgdat, NR_ISOLATED_FILE);
Rik van Riel35cd7812009-09-21 17:01:38 -07001836 } else {
Mel Gorman599d0c92016-07-28 15:45:31 -07001837 inactive = node_page_state(pgdat, NR_INACTIVE_ANON);
1838 isolated = node_page_state(pgdat, NR_ISOLATED_ANON);
Rik van Riel35cd7812009-09-21 17:01:38 -07001839 }
1840
Fengguang Wu3cf23842012-12-18 14:23:31 -08001841 /*
1842 * GFP_NOIO/GFP_NOFS callers are allowed to isolate more pages, so they
1843 * won't get blocked by normal direct-reclaimers, forming a circular
1844 * deadlock.
1845 */
Mel Gormand0164ad2015-11-06 16:28:21 -08001846 if ((sc->gfp_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS))
Fengguang Wu3cf23842012-12-18 14:23:31 -08001847 inactive >>= 3;
1848
Rik van Riel35cd7812009-09-21 17:01:38 -07001849 return isolated > inactive;
1850}
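
/*
 * Worked example of the threshold above: with 64000 inactive file
 * pages, a GFP_KERNEL direct reclaimer (__GFP_IO and __GFP_FS both
 * set) is throttled once more than 64000 >> 3 = 8000 file pages are
 * isolated, while a GFP_NOFS reclaimer is throttled only past 64000,
 * so it cannot deadlock behind throttled GFP_KERNEL reclaimers.
 */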
1851
Kirill Tkhaia222f342019-05-13 17:17:00 -07001852/*
1853 * This moves pages from @list to corresponding LRU list.
1854 *
1855 * We move them the other way if the page is referenced by one or more
1856 * processes, from rmap.
1857 *
1858 * If the pages are mostly unmapped, the processing is fast and it is
1859	 * appropriate to hold pgdat->lru_lock across the whole operation. But if
1860	 * the pages are mapped, the processing is slow (page_referenced()) so we
1861	 * should drop pgdat->lru_lock around each page. It's impossible to balance
1862 * this, so instead we remove the pages from the LRU while processing them.
1863 * It is safe to rely on PG_active against the non-LRU pages in here because
1864 * nobody will play with that bit on a non-LRU page.
1865 *
1866 * The downside is that we have to touch page->_refcount against each page.
1867 * But we had to alter page->flags anyway.
1868 *
1869 * Returns the number of pages moved to the given lruvec.
1870 */
1871
1872static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *lruvec,
1873 struct list_head *list)
Mel Gorman66635622010-08-09 17:19:30 -07001874{
Mel Gorman599d0c92016-07-28 15:45:31 -07001875 struct pglist_data *pgdat = lruvec_pgdat(lruvec);
Kirill Tkhaia222f342019-05-13 17:17:00 -07001876 int nr_pages, nr_moved = 0;
Hugh Dickins3f797682012-01-12 17:20:07 -08001877 LIST_HEAD(pages_to_free);
Kirill Tkhaia222f342019-05-13 17:17:00 -07001878 struct page *page;
1879 enum lru_list lru;
Mel Gorman66635622010-08-09 17:19:30 -07001880
Kirill Tkhaia222f342019-05-13 17:17:00 -07001881 while (!list_empty(list)) {
1882 page = lru_to_page(list);
Sasha Levin309381fea2014-01-23 15:52:54 -08001883 VM_BUG_ON_PAGE(PageLRU(page), page);
Hugh Dickins39b5f292012-10-08 16:33:18 -07001884 if (unlikely(!page_evictable(page))) {
Kirill Tkhaia222f342019-05-13 17:17:00 -07001885 list_del(&page->lru);
Mel Gorman599d0c92016-07-28 15:45:31 -07001886 spin_unlock_irq(&pgdat->lru_lock);
Mel Gorman66635622010-08-09 17:19:30 -07001887 putback_lru_page(page);
Mel Gorman599d0c92016-07-28 15:45:31 -07001888 spin_lock_irq(&pgdat->lru_lock);
Mel Gorman66635622010-08-09 17:19:30 -07001889 continue;
1890 }
Mel Gorman599d0c92016-07-28 15:45:31 -07001891 lruvec = mem_cgroup_page_lruvec(page, pgdat);
Hugh Dickinsfa9add62012-05-29 15:07:09 -07001892
Linus Torvalds7a608572011-01-17 14:42:19 -08001893 SetPageLRU(page);
Mel Gorman66635622010-08-09 17:19:30 -07001894 lru = page_lru(page);
Kirill Tkhaia222f342019-05-13 17:17:00 -07001895
Matthew Wilcox (Oracle)6c357842020-08-14 17:30:37 -07001896 nr_pages = thp_nr_pages(page);
Kirill Tkhaia222f342019-05-13 17:17:00 -07001897 update_lru_size(lruvec, lru, page_zonenum(page), nr_pages);
1898 list_move(&page->lru, &lruvec->lists[lru]);
Hugh Dickinsfa9add62012-05-29 15:07:09 -07001899
Hugh Dickins2bcf8872012-01-12 17:19:56 -08001900 if (put_page_testzero(page)) {
1901 __ClearPageLRU(page);
1902 __ClearPageActive(page);
Hugh Dickinsfa9add62012-05-29 15:07:09 -07001903 del_page_from_lru_list(page, lruvec, lru);
Hugh Dickins2bcf8872012-01-12 17:19:56 -08001904
1905 if (unlikely(PageCompound(page))) {
Mel Gorman599d0c92016-07-28 15:45:31 -07001906 spin_unlock_irq(&pgdat->lru_lock);
Matthew Wilcox (Oracle)ff45fc32020-06-03 16:01:09 -07001907 destroy_compound_page(page);
Mel Gorman599d0c92016-07-28 15:45:31 -07001908 spin_lock_irq(&pgdat->lru_lock);
Hugh Dickins2bcf8872012-01-12 17:19:56 -08001909 } else
1910 list_add(&page->lru, &pages_to_free);
Kirill Tkhaia222f342019-05-13 17:17:00 -07001911 } else {
1912 nr_moved += nr_pages;
Johannes Weiner31d8fca2020-06-25 20:30:31 -07001913 if (PageActive(page))
1914 workingset_age_nonresident(lruvec, nr_pages);
Mel Gorman66635622010-08-09 17:19:30 -07001915 }
1916 }
Mel Gorman66635622010-08-09 17:19:30 -07001917
Hugh Dickins3f797682012-01-12 17:20:07 -08001918 /*
1919 * To save our caller's stack, now use input list for pages to free.
1920 */
Kirill Tkhaia222f342019-05-13 17:17:00 -07001921 list_splice(&pages_to_free, list);
1922
1923 return nr_moved;
Mel Gorman66635622010-08-09 17:19:30 -07001924}
1925
1926/*
NeilBrown399ba0b2014-06-04 16:07:42 -07001927 * If a kernel thread (such as nfsd for loop-back mounts) services
NeilBrowna37b0712020-06-01 21:48:18 -07001928 * a backing device by writing to the page cache it sets PF_LOCAL_THROTTLE.
NeilBrown399ba0b2014-06-04 16:07:42 -07001929 * In that case we should only throttle if the backing device it is
1930 * writing to is congested. In other cases it is safe to throttle.
1931 */
1932static int current_may_throttle(void)
1933{
NeilBrowna37b0712020-06-01 21:48:18 -07001934 return !(current->flags & PF_LOCAL_THROTTLE) ||
NeilBrown399ba0b2014-06-04 16:07:42 -07001935 current->backing_dev_info == NULL ||
1936 bdi_write_congested(current->backing_dev_info);
1937}
1938
1939/*
Mel Gormanb2e18752016-07-28 15:45:37 -07001940 * shrink_inactive_list() is a helper for shrink_node(). It returns the number
Andrew Morton1742f192006-03-22 00:08:21 -08001941 * of reclaimed pages
Linus Torvalds1da177e2005-04-16 15:20:36 -07001942 */
Mel Gorman66635622010-08-09 17:19:30 -07001943static noinline_for_stack unsigned long
Konstantin Khlebnikov1a93be02012-05-29 15:07:01 -07001944shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
Konstantin Khlebnikov9e3b2f82012-05-29 15:06:57 -07001945 struct scan_control *sc, enum lru_list lru)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001946{
1947 LIST_HEAD(page_list);
KOSAKI Motohiroe247dbc2010-08-09 17:19:28 -07001948 unsigned long nr_scanned;
Maninder Singh730ec8c2020-06-03 16:01:18 -07001949 unsigned int nr_reclaimed = 0;
KOSAKI Motohiroe247dbc2010-08-09 17:19:28 -07001950 unsigned long nr_taken;
Kirill Tkhai060f0052019-03-05 15:48:15 -08001951 struct reclaim_stat stat;
Johannes Weiner497a6c12020-06-03 16:02:34 -07001952 bool file = is_file_lru(lru);
Kirill Tkhaif46b7912019-05-13 17:22:33 -07001953 enum vm_event_item item;
Mel Gorman599d0c92016-07-28 15:45:31 -07001954 struct pglist_data *pgdat = lruvec_pgdat(lruvec);
Michal Hockodb73ee02017-09-06 16:21:11 -07001955 bool stalled = false;
KOSAKI Motohiro78dc5832009-06-16 15:31:40 -07001956
Mel Gorman599d0c92016-07-28 15:45:31 -07001957 while (unlikely(too_many_isolated(pgdat, file, sc))) {
Michal Hockodb73ee02017-09-06 16:21:11 -07001958 if (stalled)
1959 return 0;
1960
1961 /* wait a bit for the reclaimer. */
1962 msleep(100);
1963 stalled = true;
Rik van Riel35cd7812009-09-21 17:01:38 -07001964
1965 /* We are about to die and free our memory. Return now. */
1966 if (fatal_signal_pending(current))
1967 return SWAP_CLUSTER_MAX;
1968 }
1969
Linus Torvalds1da177e2005-04-16 15:20:36 -07001970 lru_add_drain();
Minchan Kimf80c0672011-10-31 17:06:55 -07001971
Mel Gorman599d0c92016-07-28 15:45:31 -07001972 spin_lock_irq(&pgdat->lru_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001973
Konstantin Khlebnikov5dc35972012-05-29 15:06:58 -07001974 nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &page_list,
Kirill Tkhaia9e7c392019-03-05 15:46:55 -08001975 &nr_scanned, sc, lru);
Konstantin Khlebnikov95d918f2012-05-29 15:06:59 -07001976
Mel Gorman599d0c92016-07-28 15:45:31 -07001977 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken);
Kirill Tkhaif46b7912019-05-13 17:22:33 -07001978 item = current_is_kswapd() ? PGSCAN_KSWAPD : PGSCAN_DIRECT;
Johannes Weinerb5ead352019-11-30 17:55:40 -08001979 if (!cgroup_reclaim(sc))
Kirill Tkhaif46b7912019-05-13 17:22:33 -07001980 __count_vm_events(item, nr_scanned);
1981 __count_memcg_events(lruvec_memcg(lruvec), item, nr_scanned);
Johannes Weiner497a6c12020-06-03 16:02:34 -07001982 __count_vm_events(PGSCAN_ANON + file, nr_scanned);
1983
Mel Gorman599d0c92016-07-28 15:45:31 -07001984 spin_unlock_irq(&pgdat->lru_lock);
KOSAKI Motohiroa7312862009-09-21 17:01:37 -07001985
Hillf Dantond563c052012-03-21 16:34:02 -07001986 if (nr_taken == 0)
Mel Gorman66635622010-08-09 17:19:30 -07001987 return 0;
KOSAKI Motohiroe247dbc2010-08-09 17:19:28 -07001988
Shakeel Buttdd156e32020-12-14 19:06:39 -08001989 nr_reclaimed = shrink_page_list(&page_list, pgdat, sc, &stat, false);
KOSAKI Motohiroe247dbc2010-08-09 17:19:28 -07001990
Mel Gorman599d0c92016-07-28 15:45:31 -07001991 spin_lock_irq(&pgdat->lru_lock);
Hugh Dickins3f797682012-01-12 17:20:07 -08001992
Johannes Weiner497a6c12020-06-03 16:02:34 -07001993 move_pages_to_lru(lruvec, &page_list);
1994
1995 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
Johannes Weiner96f8bf42020-06-03 16:03:09 -07001996 lru_note_cost(lruvec, file, stat.nr_pageout);
Kirill Tkhaif46b7912019-05-13 17:22:33 -07001997 item = current_is_kswapd() ? PGSTEAL_KSWAPD : PGSTEAL_DIRECT;
Johannes Weinerb5ead352019-11-30 17:55:40 -08001998 if (!cgroup_reclaim(sc))
Kirill Tkhaif46b7912019-05-13 17:22:33 -07001999 __count_vm_events(item, nr_reclaimed);
2000 __count_memcg_events(lruvec_memcg(lruvec), item, nr_reclaimed);
Johannes Weiner497a6c12020-06-03 16:02:34 -07002001 __count_vm_events(PGSTEAL_ANON + file, nr_reclaimed);
Hugh Dickins3f797682012-01-12 17:20:07 -08002002
Mel Gorman599d0c92016-07-28 15:45:31 -07002003 spin_unlock_irq(&pgdat->lru_lock);
Hugh Dickins3f797682012-01-12 17:20:07 -08002004
Johannes Weiner747db952014-08-08 14:19:24 -07002005 mem_cgroup_uncharge_list(&page_list);
Mel Gorman2d4894b2017-11-15 17:37:59 -08002006 free_unref_page_list(&page_list);
Mel Gormane11da5b2010-10-26 14:21:40 -07002007
Mel Gorman92df3a72011-10-31 17:07:56 -07002008 /*
Andrey Ryabinin1c610d52018-03-22 16:17:42 -07002009 * If dirty pages are scanned that are not queued for IO, it
2010 * implies that flushers are not doing their job. This can
2011 * happen when memory pressure pushes dirty pages to the end of
2012 * the LRU before the dirty limits are breached and the dirty
2013 * data has expired. It can also happen when the proportion of
2014 * dirty pages grows not through writes but through memory
2015 * pressure reclaiming all the clean cache. And in some cases,
2016 * the flushers simply cannot keep up with the allocation
2017 * rate. Nudge the flusher threads in case they are asleep.
2018 */
2019 if (stat.nr_unqueued_dirty == nr_taken)
2020 wakeup_flusher_threads(WB_REASON_VMSCAN);
2021
Andrey Ryabinind108c772018-04-10 16:27:59 -07002022 sc->nr.dirty += stat.nr_dirty;
2023 sc->nr.congested += stat.nr_congested;
2024 sc->nr.unqueued_dirty += stat.nr_unqueued_dirty;
2025 sc->nr.writeback += stat.nr_writeback;
2026 sc->nr.immediate += stat.nr_immediate;
2027 sc->nr.taken += nr_taken;
2028 if (file)
2029 sc->nr.file_taken += nr_taken;
Mel Gorman8e950282013-07-03 15:02:02 -07002030
Mel Gorman599d0c92016-07-28 15:45:31 -07002031 trace_mm_vmscan_lru_shrink_inactive(pgdat->node_id,
Steven Rostedtd51d1e62018-04-10 16:28:07 -07002032 nr_scanned, nr_reclaimed, &stat, sc->priority, file);
Andrew Morton05ff5132006-03-22 00:08:20 -08002033 return nr_reclaimed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002034}
2035
Hugh Dickinsf6260122012-01-12 17:20:06 -08002036static void shrink_active_list(unsigned long nr_to_scan,
Konstantin Khlebnikov1a93be02012-05-29 15:07:01 -07002037 struct lruvec *lruvec,
Johannes Weinerf16015f2012-01-12 17:17:52 -08002038 struct scan_control *sc,
Konstantin Khlebnikov9e3b2f82012-05-29 15:06:57 -07002039 enum lru_list lru)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002040{
KOSAKI Motohiro44c241f2009-09-21 17:01:35 -07002041 unsigned long nr_taken;
Hugh Dickinsf6260122012-01-12 17:20:06 -08002042 unsigned long nr_scanned;
Wu Fengguang6fe6b7e2009-06-16 15:33:05 -07002043 unsigned long vm_flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002044 LIST_HEAD(l_hold); /* The pages which were snipped off */
Wu Fengguang8cab4752009-06-16 15:33:12 -07002045 LIST_HEAD(l_active);
Christoph Lameterb69408e2008-10-18 20:26:14 -07002046 LIST_HEAD(l_inactive);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002047 struct page *page;
Michal Hocko9d998b42017-02-22 15:44:18 -08002048 unsigned nr_deactivate, nr_activate;
2049 unsigned nr_rotated = 0;
Konstantin Khlebnikov3cb99452012-05-29 15:06:53 -07002050 int file = is_file_lru(lru);
Mel Gorman599d0c92016-07-28 15:45:31 -07002051 struct pglist_data *pgdat = lruvec_pgdat(lruvec);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002052
2053 lru_add_drain();
Minchan Kimf80c0672011-10-31 17:06:55 -07002054
Mel Gorman599d0c92016-07-28 15:45:31 -07002055 spin_lock_irq(&pgdat->lru_lock);
Johannes Weiner925b7672012-01-12 17:18:15 -08002056
Konstantin Khlebnikov5dc35972012-05-29 15:06:58 -07002057 nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &l_hold,
Kirill Tkhaia9e7c392019-03-05 15:46:55 -08002058 &nr_scanned, sc, lru);
Johannes Weiner89b5fae2012-01-12 17:17:50 -08002059
Mel Gorman599d0c92016-07-28 15:45:31 -07002060 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken);
KAMEZAWA Hiroyuki1cfb4192008-02-07 00:14:37 -08002061
Shakeel Butt912c0572020-08-06 23:26:32 -07002062 if (!cgroup_reclaim(sc))
2063 __count_vm_events(PGREFILL, nr_scanned);
Yafang Shao2fa26902019-05-13 17:23:02 -07002064 __count_memcg_events(lruvec_memcg(lruvec), PGREFILL, nr_scanned);
Hugh Dickins9d5e6a92016-05-19 17:12:38 -07002065
Mel Gorman599d0c92016-07-28 15:45:31 -07002066 spin_unlock_irq(&pgdat->lru_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002067
Linus Torvalds1da177e2005-04-16 15:20:36 -07002068 while (!list_empty(&l_hold)) {
2069 cond_resched();
2070 page = lru_to_page(&l_hold);
2071 list_del(&page->lru);
Rik van Riel7e9cd482008-10-18 20:26:35 -07002072
Hugh Dickins39b5f292012-10-08 16:33:18 -07002073 if (unlikely(!page_evictable(page))) {
Lee Schermerhorn894bc312008-10-18 20:26:39 -07002074 putback_lru_page(page);
2075 continue;
2076 }
2077
Mel Gormancc715d92012-03-21 16:34:00 -07002078 if (unlikely(buffer_heads_over_limit)) {
2079 if (page_has_private(page) && trylock_page(page)) {
2080 if (page_has_private(page))
2081 try_to_release_page(page, 0);
2082 unlock_page(page);
2083 }
2084 }
2085
Johannes Weinerc3ac9a82012-05-29 15:06:25 -07002086 if (page_referenced(page, 0, sc->target_mem_cgroup,
2087 &vm_flags)) {
Wu Fengguang8cab4752009-06-16 15:33:12 -07002088 /*
2089 * Identify referenced, file-backed active pages and
2090	 * give them one more trip around the active list, so
2091	 * that executable code gets a better chance to stay in
2092 * memory under moderate memory pressure. Anon pages
2093 * are not likely to be evicted by use-once streaming
2094 * IO, plus JVM can create lots of anon VM_EXEC pages,
2095 * so we ignore them here.
2096 */
Huang Ying9de4f222020-04-06 20:04:41 -07002097 if ((vm_flags & VM_EXEC) && page_is_file_lru(page)) {
Matthew Wilcox (Oracle)6c357842020-08-14 17:30:37 -07002098 nr_rotated += thp_nr_pages(page);
Wu Fengguang8cab4752009-06-16 15:33:12 -07002099 list_add(&page->lru, &l_active);
2100 continue;
2101 }
2102 }
Rik van Riel7e9cd482008-10-18 20:26:35 -07002103
KOSAKI Motohiro5205e562009-09-21 17:01:44 -07002104 ClearPageActive(page); /* we are de-activating */
Johannes Weiner1899ad12018-10-26 15:06:04 -07002105 SetPageWorkingset(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002106 list_add(&page->lru, &l_inactive);
2107 }
2108
Andrew Mortonb5557492009-01-06 14:40:13 -08002109 /*
Wu Fengguang8cab4752009-06-16 15:33:12 -07002110 * Move pages back to the lru list.
Andrew Mortonb5557492009-01-06 14:40:13 -08002111 */
Mel Gorman599d0c92016-07-28 15:45:31 -07002112 spin_lock_irq(&pgdat->lru_lock);
Rik van Riel556adec2008-10-18 20:26:34 -07002113
Kirill Tkhaia222f342019-05-13 17:17:00 -07002114 nr_activate = move_pages_to_lru(lruvec, &l_active);
2115 nr_deactivate = move_pages_to_lru(lruvec, &l_inactive);
Kirill Tkhaif372d892019-05-13 17:16:57 -07002116 /* Keep all free pages in l_active list */
2117 list_splice(&l_inactive, &l_active);
Kirill Tkhai9851ac12019-05-13 17:16:54 -07002118
2119 __count_vm_events(PGDEACTIVATE, nr_deactivate);
2120 __count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, nr_deactivate);
2121
Mel Gorman599d0c92016-07-28 15:45:31 -07002122 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
2123 spin_unlock_irq(&pgdat->lru_lock);
Hugh Dickins2bcf8872012-01-12 17:19:56 -08002124
Kirill Tkhaif372d892019-05-13 17:16:57 -07002125 mem_cgroup_uncharge_list(&l_active);
2126 free_unref_page_list(&l_active);
Michal Hocko9d998b42017-02-22 15:44:18 -08002127 trace_mm_vmscan_lru_shrink_active(pgdat->node_id, nr_taken, nr_activate,
2128 nr_deactivate, nr_rotated, sc->priority, file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002129}
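
/*
 * Note (a summary, not in the original source): shrink_active_list()
 * never frees pages itself; it only demotes them to the inactive list,
 * granting referenced executable file pages another round on the
 * active list. Reclaim of the demoted pages happens later via
 * shrink_inactive_list() -> shrink_page_list().
 */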
2130
Minchan Kim1a4e58c2019-09-25 16:49:15 -07002131unsigned long reclaim_pages(struct list_head *page_list)
2132{
Yang Shif661d002020-04-01 21:10:05 -07002133 int nid = NUMA_NO_NODE;
Maninder Singh730ec8c2020-06-03 16:01:18 -07002134 unsigned int nr_reclaimed = 0;
Minchan Kim1a4e58c2019-09-25 16:49:15 -07002135 LIST_HEAD(node_page_list);
2136 struct reclaim_stat dummy_stat;
2137 struct page *page;
2138 struct scan_control sc = {
2139 .gfp_mask = GFP_KERNEL,
2140 .priority = DEF_PRIORITY,
2141 .may_writepage = 1,
2142 .may_unmap = 1,
2143 .may_swap = 1,
2144 };
2145
2146 while (!list_empty(page_list)) {
2147 page = lru_to_page(page_list);
Yang Shif661d002020-04-01 21:10:05 -07002148 if (nid == NUMA_NO_NODE) {
Minchan Kim1a4e58c2019-09-25 16:49:15 -07002149 nid = page_to_nid(page);
2150 INIT_LIST_HEAD(&node_page_list);
2151 }
2152
2153 if (nid == page_to_nid(page)) {
2154 ClearPageActive(page);
2155 list_move(&page->lru, &node_page_list);
2156 continue;
2157 }
2158
2159 nr_reclaimed += shrink_page_list(&node_page_list,
2160 NODE_DATA(nid),
Shakeel Buttdd156e32020-12-14 19:06:39 -08002161 &sc, &dummy_stat, false);
Minchan Kim1a4e58c2019-09-25 16:49:15 -07002162 while (!list_empty(&node_page_list)) {
2163 page = lru_to_page(&node_page_list);
2164 list_del(&page->lru);
2165 putback_lru_page(page);
2166 }
2167
Yang Shif661d002020-04-01 21:10:05 -07002168 nid = NUMA_NO_NODE;
Minchan Kim1a4e58c2019-09-25 16:49:15 -07002169 }
2170
2171 if (!list_empty(&node_page_list)) {
2172 nr_reclaimed += shrink_page_list(&node_page_list,
2173 NODE_DATA(nid),
Shakeel Buttdd156e32020-12-14 19:06:39 -08002174 &sc, &dummy_stat, false);
Minchan Kim1a4e58c2019-09-25 16:49:15 -07002175 while (!list_empty(&node_page_list)) {
2176 page = lru_to_page(&node_page_list);
2177 list_del(&page->lru);
2178 putback_lru_page(page);
2179 }
2180 }
2181
2182 return nr_reclaimed;
2183}
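
/*
 * Minimal caller sketch (hypothetical, in the spirit of MADV_PAGEOUT):
 * gather isolated pages onto a private list and hand the whole list to
 * reclaim_pages(); pages that cannot be reclaimed are put back on the
 * LRU internally. "nr_freed" is an assumption of this sketch.
 *
 *	LIST_HEAD(pageout_list);
 *
 *	// for each target page, with a reference held:
 *	if (!isolate_lru_page(page)) {
 *		ClearPageReferenced(page);
 *		list_add(&page->lru, &pageout_list);
 *	}
 *	nr_freed = reclaim_pages(&pageout_list);
 */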
2184
Johannes Weinerb91ac372019-11-30 17:56:02 -08002185static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
2186 struct lruvec *lruvec, struct scan_control *sc)
2187{
2188 if (is_active_lru(lru)) {
2189 if (sc->may_deactivate & (1 << is_file_lru(lru)))
2190 shrink_active_list(nr_to_scan, lruvec, sc, lru);
2191 else
2192 sc->skipped_deactivate = 1;
2193 return 0;
2194 }
2195
2196 return shrink_inactive_list(nr_to_scan, lruvec, sc, lru);
2197}
2198
Rik van Riel59dc76b2016-05-20 16:56:31 -07002199/*
2200 * The inactive anon list should be small enough that the VM never has
2201 * to do too much work.
KOSAKI Motohiro14797e22009-01-07 18:08:18 -08002202 *
Rik van Riel59dc76b2016-05-20 16:56:31 -07002203 * The inactive file list should be small enough to leave most memory
2204 * to the established workingset on the scan-resistant active list,
2205 * but large enough to avoid thrashing the aggregate readahead window.
2206 *
2207 * Both inactive lists should also be large enough that each inactive
2208 * page has a chance to be referenced again before it is reclaimed.
2209 *
Johannes Weiner2a2e4882017-05-03 14:55:03 -07002210 * If that fails and refaulting is observed, the inactive list grows.
2211 *
Rik van Riel59dc76b2016-05-20 16:56:31 -07002212 * The inactive_ratio is the target ratio of ACTIVE to INACTIVE pages
Andrey Ryabinin3a50d142017-11-15 17:34:15 -08002213 * on this LRU, maintained by the pageout code. An inactive_ratio
Rik van Riel59dc76b2016-05-20 16:56:31 -07002214 * of 3 means 3:1 or 25% of the pages are kept on the inactive list.
2215 *
2216 * total target max
2217 * memory ratio inactive
2218 * -------------------------------------
2219 * 10MB 1 5MB
2220 * 100MB 1 50MB
2221 * 1GB 3 250MB
2222 * 10GB 10 0.9GB
2223 * 100GB 31 3GB
2224 * 1TB 101 10GB
2225 * 10TB 320 32GB
KOSAKI Motohiro14797e22009-01-07 18:08:18 -08002226 */
Johannes Weinerb91ac372019-11-30 17:56:02 -08002227static bool inactive_is_low(struct lruvec *lruvec, enum lru_list inactive_lru)
KOSAKI Motohiro14797e22009-01-07 18:08:18 -08002228{
Johannes Weinerb91ac372019-11-30 17:56:02 -08002229 enum lru_list active_lru = inactive_lru + LRU_ACTIVE;
Johannes Weiner2a2e4882017-05-03 14:55:03 -07002230 unsigned long inactive, active;
2231 unsigned long inactive_ratio;
Rik van Riel59dc76b2016-05-20 16:56:31 -07002232 unsigned long gb;
2233
Johannes Weinerb91ac372019-11-30 17:56:02 -08002234 inactive = lruvec_page_state(lruvec, NR_LRU_BASE + inactive_lru);
2235 active = lruvec_page_state(lruvec, NR_LRU_BASE + active_lru);
Mel Gormanf8d1a312016-07-28 15:47:34 -07002236
Johannes Weinerb91ac372019-11-30 17:56:02 -08002237 gb = (inactive + active) >> (30 - PAGE_SHIFT);
Joonsoo Kim40025702020-08-11 18:30:54 -07002238 if (gb)
Johannes Weinerb91ac372019-11-30 17:56:02 -08002239 inactive_ratio = int_sqrt(10 * gb);
2240 else
2241 inactive_ratio = 1;
Michal Hockofd538802017-02-22 15:45:58 -08002242
zhouhuacai577f7342021-04-29 17:23:05 +08002243 trace_android_vh_tune_inactive_ratio(&inactive_ratio, is_file_lru(inactive_lru));
2244
Rik van Riel59dc76b2016-05-20 16:56:31 -07002245 return inactive * inactive_ratio < active;
Rik van Rielb39415b2009-12-14 17:59:48 -08002246}
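
/*
 * Worked example for the ratio above: an LRU pair totalling 64GB gives
 * gb = 64 and inactive_ratio = int_sqrt(640) = 25, so the inactive
 * list is "low" once inactive * 25 < active, i.e. below roughly 4% of
 * the pair. A 1GB pair gets int_sqrt(10) = 3, matching the table in
 * the comment above.
 */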
2247
Johannes Weiner9a265112013-02-22 16:32:17 -08002248enum scan_balance {
2249 SCAN_EQUAL,
2250 SCAN_FRACT,
2251 SCAN_ANON,
2252 SCAN_FILE,
2253};
2254
Linus Torvalds1da177e2005-04-16 15:20:36 -07002255/*
Rik van Riel4f98a2f2008-10-18 20:26:32 -07002256 * Determine how aggressively the anon and file LRU lists should be
2257 * scanned. The relative value of each set of LRU lists is determined
2258 * by looking at the fraction of the pages scanned we did rotate back
2259	 * onto the active list instead of evicting.
2260 *
Wanpeng Libe7bd592012-06-14 20:41:02 +08002261 * nr[0] = anon inactive pages to scan; nr[1] = anon active pages to scan
2262 * nr[2] = file inactive pages to scan; nr[3] = file active pages to scan
Rik van Riel4f98a2f2008-10-18 20:26:32 -07002263 */
Johannes Weinerafaf07a2019-11-30 17:55:46 -08002264static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
2265 unsigned long *nr)
Rik van Riel4f98a2f2008-10-18 20:26:32 -07002266{
Johannes Weinerafaf07a2019-11-30 17:55:46 -08002267 struct mem_cgroup *memcg = lruvec_memcg(lruvec);
Johannes Weinerd483a5d2020-06-03 16:03:13 -07002268 unsigned long anon_cost, file_cost, total_cost;
Vladimir Davydov33377672016-01-20 15:02:59 -08002269 int swappiness = mem_cgroup_swappiness(memcg);
Yu Zhaoed017372020-10-15 20:09:55 -07002270 u64 fraction[ANON_AND_FILE];
Johannes Weiner9a265112013-02-22 16:32:17 -08002271 u64 denominator = 0; /* gcc */
Johannes Weiner9a265112013-02-22 16:32:17 -08002272 enum scan_balance scan_balance;
Johannes Weiner9a265112013-02-22 16:32:17 -08002273 unsigned long ap, fp;
2274 enum lru_list lru;
Shaohua Li76a33fc2010-05-24 14:32:36 -07002275
2276 /* If we have no swap space, do not bother scanning anon pages. */
Vladimir Davydovd8b38432016-01-20 15:03:07 -08002277 if (!sc->may_swap || mem_cgroup_get_nr_swap_pages(memcg) <= 0) {
Johannes Weiner9a265112013-02-22 16:32:17 -08002278 scan_balance = SCAN_FILE;
Shaohua Li76a33fc2010-05-24 14:32:36 -07002279 goto out;
2280 }
Rik van Riel4f98a2f2008-10-18 20:26:32 -07002281
xiaofeng35dafe72021-04-15 15:02:58 +08002282 trace_android_vh_tune_swappiness(&swappiness);
Johannes Weiner10316b32013-02-22 16:32:14 -08002283 /*
2284 * Global reclaim will swap to prevent OOM even with no
2285 * swappiness, but memcg users want to use this knob to
2286 * disable swapping for individual groups completely when
2287 * using the memory controller's swap limit feature would be
2288 * too expensive.
2289 */
Johannes Weinerb5ead352019-11-30 17:55:40 -08002290 if (cgroup_reclaim(sc) && !swappiness) {
Johannes Weiner9a265112013-02-22 16:32:17 -08002291 scan_balance = SCAN_FILE;
Johannes Weiner10316b32013-02-22 16:32:14 -08002292 goto out;
2293 }
2294
2295 /*
2296 * Do not apply any pressure balancing cleverness when the
2297 * system is close to OOM, scan both anon and file equally
2298 * (unless the swappiness setting disagrees with swapping).
2299 */
Johannes Weiner02695172014-08-06 16:06:17 -07002300 if (!sc->priority && swappiness) {
Johannes Weiner9a265112013-02-22 16:32:17 -08002301 scan_balance = SCAN_EQUAL;
Johannes Weiner10316b32013-02-22 16:32:14 -08002302 goto out;
2303 }
2304
Johannes Weiner11d16c22013-02-22 16:32:15 -08002305 /*
Johannes Weiner53138ce2019-11-30 17:55:56 -08002306 * If the system is almost out of file pages, force-scan anon.
Johannes Weiner62376252014-05-06 12:50:07 -07002307 */
Johannes Weinerb91ac372019-11-30 17:56:02 -08002308 if (sc->file_is_tiny) {
Johannes Weiner53138ce2019-11-30 17:55:56 -08002309 scan_balance = SCAN_ANON;
2310 goto out;
Johannes Weiner62376252014-05-06 12:50:07 -07002311 }
2312
2313 /*
Johannes Weinerb91ac372019-11-30 17:56:02 -08002314 * If there is enough inactive page cache, we do not reclaim
2315	 * anything from the anonymous workingset right now.
Johannes Weiner7c5bd702013-02-22 16:32:10 -08002316 */
Johannes Weinerb91ac372019-11-30 17:56:02 -08002317 if (sc->cache_trim_mode) {
Johannes Weiner9a265112013-02-22 16:32:17 -08002318 scan_balance = SCAN_FILE;
Johannes Weiner7c5bd702013-02-22 16:32:10 -08002319 goto out;
2320 }
2321
Johannes Weiner9a265112013-02-22 16:32:17 -08002322 scan_balance = SCAN_FRACT;
Johannes Weiner7c5bd702013-02-22 16:32:10 -08002323 /*
Johannes Weiner314b57f2020-06-03 16:03:03 -07002324 * Calculate the pressure balance between anon and file pages.
2325 *
2326 * The amount of pressure we put on each LRU is inversely
2327 * proportional to the cost of reclaiming each list, as
2328 * determined by the share of pages that are refaulting, times
2329 * the relative IO cost of bringing back a swapped out
2330 * anonymous page vs reloading a filesystem page (swappiness).
2331 *
Johannes Weinerd483a5d2020-06-03 16:03:13 -07002332	 * We limit that influence, though, to ensure no list gets
2333	 * left behind completely: at least a third of the pressure is
2334	 * applied before swappiness.
2335 *
Johannes Weiner314b57f2020-06-03 16:03:03 -07002336 * With swappiness at 100, anon and file have equal IO cost.
KOSAKI Motohiro58c37f62010-08-09 17:19:51 -07002337 */
Johannes Weinerd483a5d2020-06-03 16:03:13 -07002338 total_cost = sc->anon_cost + sc->file_cost;
2339 anon_cost = total_cost + sc->anon_cost;
2340 file_cost = total_cost + sc->file_cost;
2341 total_cost = anon_cost + file_cost;
KOSAKI Motohiro58c37f62010-08-09 17:19:51 -07002342
Johannes Weinerd483a5d2020-06-03 16:03:13 -07002343 ap = swappiness * (total_cost + 1);
2344 ap /= anon_cost + 1;
Rik van Riel4f98a2f2008-10-18 20:26:32 -07002345
Johannes Weinerd483a5d2020-06-03 16:03:13 -07002346 fp = (200 - swappiness) * (total_cost + 1);
2347 fp /= file_cost + 1;
Rik van Riel4f98a2f2008-10-18 20:26:32 -07002348
Shaohua Li76a33fc2010-05-24 14:32:36 -07002349 fraction[0] = ap;
2350 fraction[1] = fp;
Johannes Weinera4fe1632020-06-03 16:02:50 -07002351 denominator = ap + fp;
Shaohua Li76a33fc2010-05-24 14:32:36 -07002352out:
xiaofeng35dafe72021-04-15 15:02:58 +08002353 trace_android_vh_tune_scan_type((char *)(&scan_balance));
Johannes Weiner688035f2017-05-03 14:52:07 -07002354 for_each_evictable_lru(lru) {
2355 int file = is_file_lru(lru);
Chris Down9783aa92019-10-06 17:58:32 -07002356 unsigned long lruvec_size;
Johannes Weiner688035f2017-05-03 14:52:07 -07002357 unsigned long scan;
Chris Down1bc63fb2019-10-06 17:58:38 -07002358 unsigned long protection;
Rik van Riel4f98a2f2008-10-18 20:26:32 -07002359
Chris Down9783aa92019-10-06 17:58:32 -07002360 lruvec_size = lruvec_lru_size(lruvec, lru, sc->reclaim_idx);
Yafang Shao22f74962020-08-06 23:22:01 -07002361 protection = mem_cgroup_protection(sc->target_mem_cgroup,
2362 memcg,
Chris Down1bc63fb2019-10-06 17:58:38 -07002363 sc->memcg_low_reclaim);
Chris Down9783aa92019-10-06 17:58:32 -07002364
Chris Down1bc63fb2019-10-06 17:58:38 -07002365 if (protection) {
Chris Down9783aa92019-10-06 17:58:32 -07002366 /*
2367 * Scale a cgroup's reclaim pressure by proportioning
2368 * its current usage to its memory.low or memory.min
2369 * setting.
2370 *
2371 * This is important, as otherwise scanning aggression
2372 * becomes extremely binary -- from nothing as we
2373 * approach the memory protection threshold, to totally
2374 * nominal as we exceed it. This results in requiring
2375 * setting extremely liberal protection thresholds. It
2376 * also means we simply get no protection at all if we
2377 * set it too low, which is not ideal.
Chris Down1bc63fb2019-10-06 17:58:38 -07002378 *
2379 * If there is any protection in place, we reduce scan
2380 * pressure by how much of the total memory used is
2381 * within protection thresholds.
Chris Down9783aa92019-10-06 17:58:32 -07002382 *
Chris Down9de7ca42019-10-06 17:58:35 -07002383 * There is one special case: in the first reclaim pass,
2384 * we skip over all groups that are within their low
2385 * protection. If that fails to reclaim enough pages to
2386 * satisfy the reclaim goal, we come back and override
2387 * the best-effort low protection. However, we still
2388 * ideally want to honor how well-behaved groups are in
2389 * that case instead of simply punishing them all
2390 * equally. As such, we reclaim them based on how much
Chris Down1bc63fb2019-10-06 17:58:38 -07002391 * memory they are using, reducing the scan pressure
2392 * again by how much of the total memory used is under
2393 * hard protection.
Chris Down9783aa92019-10-06 17:58:32 -07002394 */
Chris Down1bc63fb2019-10-06 17:58:38 -07002395 unsigned long cgroup_size = mem_cgroup_size(memcg);
2396
2397 /* Avoid TOCTOU with earlier protection check */
2398 cgroup_size = max(cgroup_size, protection);
2399
2400 scan = lruvec_size - lruvec_size * protection /
2401 cgroup_size;
Chris Down9783aa92019-10-06 17:58:32 -07002402
2403 /*
Chris Down1bc63fb2019-10-06 17:58:38 -07002404 * Minimally target SWAP_CLUSTER_MAX pages to keep
Ethon Paul55b65a52020-06-04 16:49:10 -07002405 * reclaim moving forwards, avoiding decrementing
Chris Down9de7ca42019-10-06 17:58:35 -07002406 * sc->priority further than desirable.
Chris Down9783aa92019-10-06 17:58:32 -07002407 */
Chris Down1bc63fb2019-10-06 17:58:38 -07002408 scan = max(scan, SWAP_CLUSTER_MAX);
Chris Down9783aa92019-10-06 17:58:32 -07002409 } else {
2410 scan = lruvec_size;
2411 }
2412
2413 scan >>= sc->priority;
2414
Johannes Weiner688035f2017-05-03 14:52:07 -07002415 /*
2416 * If the cgroup's already been deleted, make sure to
2417 * scrape out the remaining cache.
2418 */
2419 if (!scan && !mem_cgroup_online(memcg))
Chris Down9783aa92019-10-06 17:58:32 -07002420 scan = min(lruvec_size, SWAP_CLUSTER_MAX);
Johannes Weiner9a265112013-02-22 16:32:17 -08002421
Johannes Weiner688035f2017-05-03 14:52:07 -07002422 switch (scan_balance) {
2423 case SCAN_EQUAL:
2424 /* Scan lists relative to size */
2425 break;
2426 case SCAN_FRACT:
Johannes Weiner9a265112013-02-22 16:32:17 -08002427 /*
Johannes Weiner688035f2017-05-03 14:52:07 -07002428 * Scan types proportional to swappiness and
2429 * their relative recent reclaim efficiency.
Gavin Shan76073c62020-02-20 20:04:24 -08002430 * Make sure we don't miss the last page on
2431 * the offlined memory cgroups because of a
2432 * round-off error.
Johannes Weiner9a265112013-02-22 16:32:17 -08002433 */
Gavin Shan76073c62020-02-20 20:04:24 -08002434 scan = mem_cgroup_online(memcg) ?
2435 div64_u64(scan * fraction[file], denominator) :
2436 DIV64_U64_ROUND_UP(scan * fraction[file],
Roman Gushchin68600f62018-10-26 15:03:27 -07002437 denominator);
Johannes Weiner688035f2017-05-03 14:52:07 -07002438 break;
2439 case SCAN_FILE:
2440 case SCAN_ANON:
2441 /* Scan one type exclusively */
Mateusz Noseke072bff2020-04-01 21:10:15 -07002442 if ((scan_balance == SCAN_FILE) != file)
Johannes Weiner688035f2017-05-03 14:52:07 -07002443 scan = 0;
Johannes Weiner688035f2017-05-03 14:52:07 -07002444 break;
2445 default:
2446 /* Look ma, no brain */
2447 BUG();
Johannes Weiner9a265112013-02-22 16:32:17 -08002448 }
Johannes Weiner688035f2017-05-03 14:52:07 -07002449
Johannes Weiner688035f2017-05-03 14:52:07 -07002450 nr[lru] = scan;
Shaohua Li76a33fc2010-05-24 14:32:36 -07002451 }
Wu Fengguang6e08a362009-06-16 15:32:29 -07002452}
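A minimal userspace sketch of the SCAN_FRACT arithmetic above (not kernel code; the cost and size figures are invented). It shows how a higher file refault cost shifts pressure toward anon, bounded by the one-third baseline folded into anon_cost/file_cost:

#include <stdio.h>

int main(void)
{
	/* Invented lruvec costs: file pages have refaulted more. */
	unsigned long long sc_anon_cost = 1000, sc_file_cost = 3000;
	unsigned long long swappiness = 60;

	/* Fold in the one-third baseline, as the code above does. */
	unsigned long long total = sc_anon_cost + sc_file_cost;
	unsigned long long anon_cost = total + sc_anon_cost;
	unsigned long long file_cost = total + sc_file_cost;
	total = anon_cost + file_cost;

	unsigned long long ap = swappiness * (total + 1) / (anon_cost + 1);
	unsigned long long fp = (200 - swappiness) * (total + 1) / (file_cost + 1);
	unsigned long long denominator = ap + fp;

	/* Both inactive lists hold 1M pages, scanned at priority 12. */
	unsigned long long scan = (1ULL << 20) >> 12;

	printf("pressure: anon %llu%%, file %llu%%\n",
	       100 * ap / denominator, 100 * fp / denominator);
	printf("nr to scan: anon %llu, file %llu (of %llu each)\n",
	       scan * ap / denominator, scan * fp / denominator, scan);
	/*
	 * With equal costs, swappiness 60 would give file ~70% of the
	 * pressure; the 3x higher file cost pulls it down to ~62%.
	 */
	return 0;
}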
Rik van Riel4f98a2f2008-10-18 20:26:32 -07002453
Johannes Weinerafaf07a2019-11-30 17:55:46 -08002454static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
Johannes Weiner9b4f98c2013-02-22 16:32:19 -08002455{
2456 unsigned long nr[NR_LRU_LISTS];
Mel Gormane82e0562013-07-03 15:01:44 -07002457 unsigned long targets[NR_LRU_LISTS];
Johannes Weiner9b4f98c2013-02-22 16:32:19 -08002458 unsigned long nr_to_scan;
2459 enum lru_list lru;
2460 unsigned long nr_reclaimed = 0;
2461 unsigned long nr_to_reclaim = sc->nr_to_reclaim;
2462 struct blk_plug plug;
Mel Gorman1a501902014-06-04 16:10:49 -07002463 bool scan_adjusted;
Johannes Weiner9b4f98c2013-02-22 16:32:19 -08002464
Johannes Weinerafaf07a2019-11-30 17:55:46 -08002465 get_scan_count(lruvec, sc, nr);
Johannes Weiner9b4f98c2013-02-22 16:32:19 -08002466
Mel Gormane82e0562013-07-03 15:01:44 -07002467 /* Record the original scan target for proportional adjustments later */
2468 memcpy(targets, nr, sizeof(nr));
2469
Mel Gorman1a501902014-06-04 16:10:49 -07002470 /*
2471 * Global reclaiming within direct reclaim at DEF_PRIORITY is a normal
2472 * event that can occur when there is little memory pressure e.g.
2473 * multiple streaming readers/writers. Hence, we do not abort scanning
2474 * when the requested number of pages are reclaimed when scanning at
2475 * DEF_PRIORITY on the assumption that the fact we are direct
2476 * reclaiming implies that kswapd is not keeping up and it is best to
2477 * do a batch of work at once. For memcg reclaim one check is made to
2478 * abort proportional reclaim if either the file or anon lru has already
2479 * dropped to zero at the first pass.
2480 */
Johannes Weinerb5ead352019-11-30 17:55:40 -08002481 scan_adjusted = (!cgroup_reclaim(sc) && !current_is_kswapd() &&
Mel Gorman1a501902014-06-04 16:10:49 -07002482 sc->priority == DEF_PRIORITY);
2483
Johannes Weiner9b4f98c2013-02-22 16:32:19 -08002484 blk_start_plug(&plug);
2485 while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
2486 nr[LRU_INACTIVE_FILE]) {
Mel Gormane82e0562013-07-03 15:01:44 -07002487 unsigned long nr_anon, nr_file, percentage;
2488 unsigned long nr_scanned;
2489
Johannes Weiner9b4f98c2013-02-22 16:32:19 -08002490 for_each_evictable_lru(lru) {
2491 if (nr[lru]) {
2492 nr_to_scan = min(nr[lru], SWAP_CLUSTER_MAX);
2493 nr[lru] -= nr_to_scan;
2494
2495 nr_reclaimed += shrink_list(lru, nr_to_scan,
Johannes Weiner3b991202019-04-18 17:50:34 -07002496 lruvec, sc);
Johannes Weiner9b4f98c2013-02-22 16:32:19 -08002497 }
2498 }
Mel Gormane82e0562013-07-03 15:01:44 -07002499
Michal Hockobd041732016-12-02 17:26:48 -08002500 cond_resched();
2501
Mel Gormane82e0562013-07-03 15:01:44 -07002502 if (nr_reclaimed < nr_to_reclaim || scan_adjusted)
2503 continue;
2504
Johannes Weiner9b4f98c2013-02-22 16:32:19 -08002505 /*
Mel Gormane82e0562013-07-03 15:01:44 -07002506 * For kswapd and memcg, reclaim at least the number of pages
Mel Gorman1a501902014-06-04 16:10:49 -07002507 * requested. Ensure that the anon and file LRUs are scanned
Mel Gormane82e0562013-07-03 15:01:44 -07002508		 * proportionally to what was requested by get_scan_count(). We
2509 * stop reclaiming one LRU and reduce the amount scanning
2510 * proportional to the original scan target.
2511 */
2512 nr_file = nr[LRU_INACTIVE_FILE] + nr[LRU_ACTIVE_FILE];
2513 nr_anon = nr[LRU_INACTIVE_ANON] + nr[LRU_ACTIVE_ANON];
2514
Mel Gorman1a501902014-06-04 16:10:49 -07002515 /*
2516 * It's just vindictive to attack the larger once the smaller
2517 * has gone to zero. And given the way we stop scanning the
2518 * smaller below, this makes sure that we only make one nudge
2519 * towards proportionality once we've got nr_to_reclaim.
2520 */
2521 if (!nr_file || !nr_anon)
2522 break;
2523
Mel Gormane82e0562013-07-03 15:01:44 -07002524 if (nr_file > nr_anon) {
2525 unsigned long scan_target = targets[LRU_INACTIVE_ANON] +
2526 targets[LRU_ACTIVE_ANON] + 1;
2527 lru = LRU_BASE;
2528 percentage = nr_anon * 100 / scan_target;
2529 } else {
2530 unsigned long scan_target = targets[LRU_INACTIVE_FILE] +
2531 targets[LRU_ACTIVE_FILE] + 1;
2532 lru = LRU_FILE;
2533 percentage = nr_file * 100 / scan_target;
2534 }
2535
2536 /* Stop scanning the smaller of the LRU */
2537 nr[lru] = 0;
2538 nr[lru + LRU_ACTIVE] = 0;
2539
2540 /*
2541 * Recalculate the other LRU scan count based on its original
2542 * scan target and the percentage scanning already complete
2543 */
2544 lru = (lru == LRU_FILE) ? LRU_BASE : LRU_FILE;
2545 nr_scanned = targets[lru] - nr[lru];
2546 nr[lru] = targets[lru] * (100 - percentage) / 100;
2547 nr[lru] -= min(nr[lru], nr_scanned);
2548
2549 lru += LRU_ACTIVE;
2550 nr_scanned = targets[lru] - nr[lru];
2551 nr[lru] = targets[lru] * (100 - percentage) / 100;
2552 nr[lru] -= min(nr[lru], nr_scanned);
2553
2554 scan_adjusted = true;
Johannes Weiner9b4f98c2013-02-22 16:32:19 -08002555 }
2556 blk_finish_plug(&plug);
2557 sc->nr_reclaimed += nr_reclaimed;
2558
2559 /*
2560 * Even if we did not try to evict anon pages at all, we want to
2561 * rebalance the anon lru active/inactive ratio.
2562 */
Johannes Weinerb91ac372019-11-30 17:56:02 -08002563 if (total_swap_pages && inactive_is_low(lruvec, LRU_INACTIVE_ANON))
Johannes Weiner9b4f98c2013-02-22 16:32:19 -08002564 shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
2565 sc, LRU_ACTIVE_ANON);
Johannes Weiner9b4f98c2013-02-22 16:32:19 -08002566}
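The proportional wind-down above fits in a few lines of userspace C (not kernel code; the targets are invented): once nr_to_reclaim is met, the smaller LRU stops and the larger one is rescaled so both finish equally far through their original targets.

#include <stdio.h>

int main(void)
{
	/* Original get_scan_count() targets (inactive + active). */
	unsigned long target_anon = 100, target_file = 1000;
	/* Work left outstanding when nr_to_reclaim was satisfied. */
	unsigned long nr_anon = 60, nr_file = 850;

	/* How far through its target is the smaller (anon) side? */
	unsigned long percentage = (target_anon - nr_anon) * 100 /
				   (target_anon + 1);

	/* Stop anon and rescale the remaining file work to match. */
	unsigned long nr_scanned = target_file - nr_file;
	unsigned long nr_left = target_file * (100 - percentage) / 100;
	nr_left -= nr_left < nr_scanned ? nr_left : nr_scanned;

	printf("anon %lu%% done: file scans %lu more pages, then stops\n",
	       percentage, nr_left);
	return 0;
}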
2567
Mel Gorman23b9da52012-05-29 15:06:20 -07002568/* Use reclaim/compaction for costly allocs or under memory pressure */
Konstantin Khlebnikov9e3b2f82012-05-29 15:06:57 -07002569static bool in_reclaim_compaction(struct scan_control *sc)
Mel Gorman23b9da52012-05-29 15:06:20 -07002570{
Kirill A. Shutemovd84da3f2012-12-11 16:00:31 -08002571 if (IS_ENABLED(CONFIG_COMPACTION) && sc->order &&
Mel Gorman23b9da52012-05-29 15:06:20 -07002572 (sc->order > PAGE_ALLOC_COSTLY_ORDER ||
Konstantin Khlebnikov9e3b2f82012-05-29 15:06:57 -07002573 sc->priority < DEF_PRIORITY - 2))
Mel Gorman23b9da52012-05-29 15:06:20 -07002574 return true;
2575
2576 return false;
2577}
2578
Rik van Riel4f98a2f2008-10-18 20:26:32 -07002579/*
Mel Gorman23b9da52012-05-29 15:06:20 -07002580 * Reclaim/compaction is used for high-order allocation requests. It reclaims
2581 * order-0 pages before compacting the zone. should_continue_reclaim() returns
2582 * true if more pages should be reclaimed such that when the page allocator
Qiwu Chendf3a45f2020-06-03 16:01:21 -07002583 * calls try_to_compact_pages(), it will have enough free pages to succeed.
Mel Gorman23b9da52012-05-29 15:06:20 -07002584 * It will give up earlier than that if there is difficulty reclaiming pages.
Mel Gorman3e7d3442011-01-13 15:45:56 -08002585 */
Mel Gormana9dd0a82016-07-28 15:46:02 -07002586static inline bool should_continue_reclaim(struct pglist_data *pgdat,
Mel Gorman3e7d3442011-01-13 15:45:56 -08002587 unsigned long nr_reclaimed,
Mel Gorman3e7d3442011-01-13 15:45:56 -08002588 struct scan_control *sc)
2589{
2590 unsigned long pages_for_compaction;
2591 unsigned long inactive_lru_pages;
Mel Gormana9dd0a82016-07-28 15:46:02 -07002592 int z;
Mel Gorman3e7d3442011-01-13 15:45:56 -08002593
2594 /* If not in reclaim/compaction mode, stop */
Konstantin Khlebnikov9e3b2f82012-05-29 15:06:57 -07002595 if (!in_reclaim_compaction(sc))
Mel Gorman3e7d3442011-01-13 15:45:56 -08002596 return false;
2597
Vlastimil Babka5ee04712019-09-23 15:37:29 -07002598 /*
2599 * Stop if we failed to reclaim any pages from the last SWAP_CLUSTER_MAX
2600 * number of pages that were scanned. This will return to the caller
2601	 * with the risk that reclaim/compaction and the resulting allocation
2602	 * attempt fail. In the past we tried harder for __GFP_RETRY_MAYFAIL
2603	 * allocations by requiring that the full LRU list had been scanned
2604	 * first, assuming that a zero delta of sc->nr_scanned meant a full LRU
2605	 * scan, but that approximation was wrong: there were corner cases where
2606	 * a non-zero number of pages was always scanned.
2607 */
2608 if (!nr_reclaimed)
2609 return false;
Mel Gorman3e7d3442011-01-13 15:45:56 -08002610
Mel Gorman3e7d3442011-01-13 15:45:56 -08002611 /* If compaction would go ahead or the allocation would succeed, stop */
Mel Gormana9dd0a82016-07-28 15:46:02 -07002612 for (z = 0; z <= sc->reclaim_idx; z++) {
2613 struct zone *zone = &pgdat->node_zones[z];
Mel Gorman6aa303d2016-09-01 16:14:55 -07002614 if (!managed_zone(zone))
Mel Gormana9dd0a82016-07-28 15:46:02 -07002615 continue;
2616
2617 switch (compaction_suitable(zone, sc->order, 0, sc->reclaim_idx)) {
Vlastimil Babkacf378312016-10-07 16:57:41 -07002618 case COMPACT_SUCCESS:
Mel Gormana9dd0a82016-07-28 15:46:02 -07002619 case COMPACT_CONTINUE:
2620 return false;
2621 default:
2622 /* check next zone */
2623 ;
2624 }
Mel Gorman3e7d3442011-01-13 15:45:56 -08002625 }
Hillf Danton1c6c1592019-09-23 15:37:26 -07002626
2627 /*
2628 * If we have not reclaimed enough pages for compaction and the
2629 * inactive lists are large enough, continue reclaiming
2630 */
2631 pages_for_compaction = compact_gap(sc->order);
2632 inactive_lru_pages = node_page_state(pgdat, NR_INACTIVE_FILE);
2633 if (get_nr_swap_pages() > 0)
2634 inactive_lru_pages += node_page_state(pgdat, NR_INACTIVE_ANON);
2635
Vlastimil Babka5ee04712019-09-23 15:37:29 -07002636 return inactive_lru_pages > pages_for_compaction;
Mel Gorman3e7d3442011-01-13 15:45:56 -08002637}
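As a sketch of the cut-off above (not kernel code, invented page counts): compact_gap() is 2UL << order in this era's mm/internal.h, i.e. twice the allocation size, since migration needs free destination pages as well as the pages being moved.

#include <stdbool.h>
#include <stdio.h>

/* Assumed to mirror compact_gap() in mm/internal.h: 2UL << order. */
static unsigned long compact_gap(unsigned int order)
{
	return 2UL << order;
}

static bool keep_reclaiming(unsigned int order, unsigned long inactive_file,
			    unsigned long inactive_anon, bool have_swap)
{
	unsigned long inactive = inactive_file +
				 (have_swap ? inactive_anon : 0);

	return inactive > compact_gap(order);
}

int main(void)
{
	/* A THP-sized request: order 9, i.e. 2MB with 4K pages. */
	printf("no swap, 800 inactive file pages: %s\n",
	       keep_reclaiming(9, 800, 5000, false) ? "continue" : "stop");
	printf("swap on, same file pages       : %s\n",
	       keep_reclaiming(9, 800, 5000, true) ? "continue" : "stop");
	return 0;
}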
2638
Johannes Weiner0f6a5cf2019-11-30 17:55:49 -08002639static void shrink_node_memcgs(pg_data_t *pgdat, struct scan_control *sc)
Johannes Weinerf16015f2012-01-12 17:17:52 -08002640{
Johannes Weiner0f6a5cf2019-11-30 17:55:49 -08002641 struct mem_cgroup *target_memcg = sc->target_mem_cgroup;
Johannes Weinerd2af3392019-11-30 17:55:43 -08002642 struct mem_cgroup *memcg;
Johannes Weinerf16015f2012-01-12 17:17:52 -08002643
Johannes Weiner0f6a5cf2019-11-30 17:55:49 -08002644 memcg = mem_cgroup_iter(target_memcg, NULL, NULL);
Johannes Weiner56600482012-01-12 17:17:59 -08002645 do {
Johannes Weinerafaf07a2019-11-30 17:55:46 -08002646 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
Johannes Weinerd2af3392019-11-30 17:55:43 -08002647 unsigned long reclaimed;
2648 unsigned long scanned;
Johannes Weiner56600482012-01-12 17:17:59 -08002649
Xunlei Pange3336ca2020-09-04 16:35:27 -07002650 /*
2651 * This loop can become CPU-bound when target memcgs
2652 * aren't eligible for reclaim - either because they
2653 * don't have any reclaimable pages, or because their
2654 * memory is explicitly protected. Avoid soft lockups.
2655 */
2656 cond_resched();
2657
Chris Down45c7f7e2020-08-06 23:22:05 -07002658 mem_cgroup_calculate_protection(target_memcg, memcg);
2659
2660 if (mem_cgroup_below_min(memcg)) {
Johannes Weinerd2af3392019-11-30 17:55:43 -08002661 /*
2662 * Hard protection.
2663 * If there is no reclaimable memory, OOM.
2664 */
2665 continue;
Chris Down45c7f7e2020-08-06 23:22:05 -07002666 } else if (mem_cgroup_below_low(memcg)) {
Johannes Weinerd2af3392019-11-30 17:55:43 -08002667 /*
2668 * Soft protection.
2669 * Respect the protection only as long as
2670 * there is an unprotected supply
2671 * of reclaimable memory from other cgroups.
2672 */
2673 if (!sc->memcg_low_reclaim) {
2674 sc->memcg_low_skipped = 1;
Roman Gushchinbf8d5d52018-06-07 17:07:46 -07002675 continue;
Johannes Weiner241994ed2015-02-11 15:26:06 -08002676 }
Johannes Weinerd2af3392019-11-30 17:55:43 -08002677 memcg_memory_event(memcg, MEMCG_LOW);
Johannes Weiner6b4f7792014-12-12 16:56:13 -08002678 }
2679
Johannes Weinerd2af3392019-11-30 17:55:43 -08002680 reclaimed = sc->nr_reclaimed;
2681 scanned = sc->nr_scanned;
Johannes Weinerafaf07a2019-11-30 17:55:46 -08002682
2683 shrink_lruvec(lruvec, sc);
Anton Vorontsov70ddf632013-04-29 15:08:31 -07002684
Johannes Weinerd2af3392019-11-30 17:55:43 -08002685 shrink_slab(sc->gfp_mask, pgdat->node_id, memcg,
2686 sc->priority);
Johannes Weiner2344d7e2014-08-06 16:06:15 -07002687
Johannes Weinerd2af3392019-11-30 17:55:43 -08002688 /* Record the group's reclaim efficiency */
2689 vmpressure(sc->gfp_mask, memcg, false,
2690 sc->nr_scanned - scanned,
2691 sc->nr_reclaimed - reclaimed);
Andrey Ryabinind108c772018-04-10 16:27:59 -07002692
Johannes Weiner0f6a5cf2019-11-30 17:55:49 -08002693 } while ((memcg = mem_cgroup_iter(target_memcg, memcg, NULL)));
2694}
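A hypothetical userspace model of the min/low branching above (the cgroup numbers are invented; in the kernel the checks are mem_cgroup_below_min()/_low() against the effective protection from mem_cgroup_calculate_protection()):

#include <stdbool.h>
#include <stdio.h>

struct model_memcg {
	const char *name;
	unsigned long usage, emin, elow;	/* pages, invented */
};

int main(void)
{
	struct model_memcg cgs[] = {
		{ "A", 1000, 1500, 2500 },	/* under min */
		{ "B", 1800, 1000, 2500 },	/* under low */
		{ "C", 3000, 1000, 2500 },	/* unprotected */
	};
	bool memcg_low_reclaim = false;	/* first pass honors low */

	for (int i = 0; i < 3; i++) {
		struct model_memcg *m = &cgs[i];

		if (m->usage <= m->emin)
			printf("%s: below min, skip (OOM rather than reclaim)\n",
			       m->name);
		else if (m->usage <= m->elow && !memcg_low_reclaim)
			printf("%s: below low, skip and set memcg_low_skipped\n",
			       m->name);
		else
			printf("%s: shrink_lruvec() and shrink_slab()\n",
			       m->name);
	}
	return 0;
}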
2695
Liu Song6c9e09072020-01-30 22:14:08 -08002696static void shrink_node(pg_data_t *pgdat, struct scan_control *sc)
Johannes Weiner0f6a5cf2019-11-30 17:55:49 -08002697{
2698 struct reclaim_state *reclaim_state = current->reclaim_state;
Johannes Weiner0f6a5cf2019-11-30 17:55:49 -08002699 unsigned long nr_reclaimed, nr_scanned;
Johannes Weiner1b051172019-11-30 17:55:52 -08002700 struct lruvec *target_lruvec;
Johannes Weiner0f6a5cf2019-11-30 17:55:49 -08002701 bool reclaimable = false;
Johannes Weinerb91ac372019-11-30 17:56:02 -08002702 unsigned long file;
Johannes Weiner0f6a5cf2019-11-30 17:55:49 -08002703
Johannes Weiner1b051172019-11-30 17:55:52 -08002704 target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat);
2705
Johannes Weiner0f6a5cf2019-11-30 17:55:49 -08002706again:
2707 memset(&sc->nr, 0, sizeof(sc->nr));
2708
2709 nr_reclaimed = sc->nr_reclaimed;
2710 nr_scanned = sc->nr_scanned;
2711
Johannes Weiner53138ce2019-11-30 17:55:56 -08002712 /*
Johannes Weiner7cf111b2020-06-03 16:03:06 -07002713 * Determine the scan balance between anon and file LRUs.
2714 */
2715 spin_lock_irq(&pgdat->lru_lock);
2716 sc->anon_cost = target_lruvec->anon_cost;
2717 sc->file_cost = target_lruvec->file_cost;
2718 spin_unlock_irq(&pgdat->lru_lock);
2719
2720 /*
Johannes Weinerb91ac372019-11-30 17:56:02 -08002721 * Target desirable inactive:active list ratios for the anon
2722 * and file LRU lists.
2723 */
2724 if (!sc->force_deactivate) {
2725 unsigned long refaults;
2726
Joonsoo Kim170b04b72020-08-11 18:30:43 -07002727 refaults = lruvec_page_state(target_lruvec,
2728 WORKINGSET_ACTIVATE_ANON);
2729 if (refaults != target_lruvec->refaults[0] ||
2730 inactive_is_low(target_lruvec, LRU_INACTIVE_ANON))
Johannes Weinerb91ac372019-11-30 17:56:02 -08002731 sc->may_deactivate |= DEACTIVATE_ANON;
2732 else
2733 sc->may_deactivate &= ~DEACTIVATE_ANON;
2734
2735 /*
2736 * When refaults are being observed, it means a new
2737 * workingset is being established. Deactivate to get
2738 * rid of any stale active pages quickly.
2739 */
2740 refaults = lruvec_page_state(target_lruvec,
Joonsoo Kim170b04b72020-08-11 18:30:43 -07002741 WORKINGSET_ACTIVATE_FILE);
2742 if (refaults != target_lruvec->refaults[1] ||
Johannes Weinerb91ac372019-11-30 17:56:02 -08002743 inactive_is_low(target_lruvec, LRU_INACTIVE_FILE))
2744 sc->may_deactivate |= DEACTIVATE_FILE;
2745 else
2746 sc->may_deactivate &= ~DEACTIVATE_FILE;
2747 } else
2748 sc->may_deactivate = DEACTIVATE_ANON | DEACTIVATE_FILE;
2749
2750 /*
2751 * If we have plenty of inactive file pages that aren't
2752 * thrashing, try to reclaim those first before touching
2753 * anonymous pages.
2754 */
2755 file = lruvec_page_state(target_lruvec, NR_INACTIVE_FILE);
2756 if (file >> sc->priority && !(sc->may_deactivate & DEACTIVATE_FILE))
2757 sc->cache_trim_mode = 1;
2758 else
2759 sc->cache_trim_mode = 0;
2760
2761 /*
Johannes Weiner53138ce2019-11-30 17:55:56 -08002762 * Prevent the reclaimer from falling into the cache trap: as
2763 * cache pages start out inactive, every cache fault will tip
2764 * the scan balance towards the file LRU. And as the file LRU
2765 * shrinks, so does the window for rotation from references.
2766 * This means we have a runaway feedback loop where a tiny
2767 * thrashing file LRU becomes infinitely more attractive than
2768 * anon pages. Try to detect this based on file LRU size.
2769 */
2770 if (!cgroup_reclaim(sc)) {
Johannes Weiner53138ce2019-11-30 17:55:56 -08002771 unsigned long total_high_wmark = 0;
Johannes Weinerb91ac372019-11-30 17:56:02 -08002772 unsigned long free, anon;
2773 int z;
Johannes Weiner53138ce2019-11-30 17:55:56 -08002774
2775 free = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES);
2776 file = node_page_state(pgdat, NR_ACTIVE_FILE) +
2777 node_page_state(pgdat, NR_INACTIVE_FILE);
2778
2779 for (z = 0; z < MAX_NR_ZONES; z++) {
2780 struct zone *zone = &pgdat->node_zones[z];
2781 if (!managed_zone(zone))
2782 continue;
2783
2784 total_high_wmark += high_wmark_pages(zone);
2785 }
2786
Johannes Weinerb91ac372019-11-30 17:56:02 -08002787 /*
2788 * Consider anon: if that's low too, this isn't a
2789 * runaway file reclaim problem, but rather just
2790 * extreme pressure. Reclaim as per usual then.
2791 */
2792 anon = node_page_state(pgdat, NR_INACTIVE_ANON);
2793
2794 sc->file_is_tiny =
2795 file + free <= total_high_wmark &&
2796 !(sc->may_deactivate & DEACTIVATE_ANON) &&
2797 anon >> sc->priority;
Johannes Weiner53138ce2019-11-30 17:55:56 -08002798 }
2799
Johannes Weiner0f6a5cf2019-11-30 17:55:49 -08002800 shrink_node_memcgs(pgdat, sc);
Andrey Ryabinind108c772018-04-10 16:27:59 -07002801
Johannes Weinerd2af3392019-11-30 17:55:43 -08002802 if (reclaim_state) {
2803 sc->nr_reclaimed += reclaim_state->reclaimed_slab;
2804 reclaim_state->reclaimed_slab = 0;
2805 }
Andrey Ryabinind108c772018-04-10 16:27:59 -07002806
Johannes Weinerd2af3392019-11-30 17:55:43 -08002807 /* Record the subtree's reclaim efficiency */
Johannes Weiner1b051172019-11-30 17:55:52 -08002808 vmpressure(sc->gfp_mask, sc->target_mem_cgroup, true,
Johannes Weinerd2af3392019-11-30 17:55:43 -08002809 sc->nr_scanned - nr_scanned,
2810 sc->nr_reclaimed - nr_reclaimed);
2811
2812 if (sc->nr_reclaimed - nr_reclaimed)
2813 reclaimable = true;
2814
2815 if (current_is_kswapd()) {
2816 /*
2817 * If reclaim is isolating dirty pages under writeback,
2818 * it implies that the long-lived page allocation rate
2819 * is exceeding the page laundering rate. Either the
2820 * global limits are not being effective at throttling
2821 * processes due to the page distribution throughout
2822 * zones or there is heavy usage of a slow backing
2823 * device. The only option is to throttle from reclaim
2824 * context which is not ideal as there is no guarantee
2825 * the dirtying process is throttled in the same way
2826			 * that balance_dirty_pages() does.
2827 *
2828 * Once a node is flagged PGDAT_WRITEBACK, kswapd will
2829			 * count the number of pages under writeback flagged for
2830 * immediate reclaim and stall if any are encountered
2831 * in the nr_immediate check below.
2832 */
2833 if (sc->nr.writeback && sc->nr.writeback == sc->nr.taken)
2834 set_bit(PGDAT_WRITEBACK, &pgdat->flags);
Andrey Ryabinind108c772018-04-10 16:27:59 -07002835
Johannes Weinerd2af3392019-11-30 17:55:43 -08002836 /* Allow kswapd to start writing pages during reclaim.*/
2837 if (sc->nr.unqueued_dirty == sc->nr.file_taken)
2838 set_bit(PGDAT_DIRTY, &pgdat->flags);
Andrey Ryabinine3c1ac52018-04-10 16:28:03 -07002839
2840 /*
Randy Dunlap1eba09c2020-08-11 18:33:26 -07002841 * If kswapd scans pages marked for immediate
Johannes Weinerd2af3392019-11-30 17:55:43 -08002842 * reclaim and under writeback (nr_immediate), it
2843 * implies that pages are cycling through the LRU
2844 * faster than they are written so also forcibly stall.
Andrey Ryabinind108c772018-04-10 16:27:59 -07002845 */
Johannes Weinerd2af3392019-11-30 17:55:43 -08002846 if (sc->nr.immediate)
2847 congestion_wait(BLK_RW_ASYNC, HZ/10);
2848 }
Andrey Ryabinind108c772018-04-10 16:27:59 -07002849
Johannes Weinerd2af3392019-11-30 17:55:43 -08002850 /*
Johannes Weiner1b051172019-11-30 17:55:52 -08002851 * Tag a node/memcg as congested if all the dirty pages
2852 * scanned were backed by a congested BDI and
2853 * wait_iff_congested will stall.
2854 *
Johannes Weinerd2af3392019-11-30 17:55:43 -08002855 * Legacy memcg will stall in page writeback so avoid forcibly
2856 * stalling in wait_iff_congested().
2857 */
Johannes Weiner1b051172019-11-30 17:55:52 -08002858 if ((current_is_kswapd() ||
2859 (cgroup_reclaim(sc) && writeback_throttling_sane(sc))) &&
Johannes Weinerd2af3392019-11-30 17:55:43 -08002860 sc->nr.dirty && sc->nr.dirty == sc->nr.congested)
Johannes Weiner1b051172019-11-30 17:55:52 -08002861 set_bit(LRUVEC_CONGESTED, &target_lruvec->flags);
Johannes Weinerd2af3392019-11-30 17:55:43 -08002862
2863 /*
2864 * Stall direct reclaim for IO completions if underlying BDIs
2865	 * and the node are congested. Allow kswapd to continue until it
2866 * starts encountering unqueued dirty pages or cycling through
2867 * the LRU too quickly.
2868 */
Johannes Weiner1b051172019-11-30 17:55:52 -08002869 if (!current_is_kswapd() && current_may_throttle() &&
2870 !sc->hibernation_mode &&
2871 test_bit(LRUVEC_CONGESTED, &target_lruvec->flags))
Johannes Weinerd2af3392019-11-30 17:55:43 -08002872 wait_iff_congested(BLK_RW_ASYNC, HZ/10);
2873
2874 if (should_continue_reclaim(pgdat, sc->nr_reclaimed - nr_reclaimed,
2875 sc))
2876 goto again;
Johannes Weiner2344d7e2014-08-06 16:06:15 -07002877
Johannes Weinerc73322d2017-05-03 14:51:51 -07002878 /*
2879 * Kswapd gives up on balancing particular nodes after too
2880 * many failures to reclaim anything from them and goes to
2881 * sleep. On reclaim progress, reset the failure counter. A
2882 * successful direct reclaim run will revive a dormant kswapd.
2883 */
2884 if (reclaimable)
2885 pgdat->kswapd_failures = 0;
Johannes Weinerf16015f2012-01-12 17:17:52 -08002886}
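A small sketch of the sc->file_is_tiny test computed above (not kernel code, invented counts): the file LRU plus free memory must fit under the summed high watermarks while anon remains plentiful and healthy.

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	unsigned long file = 2000;		/* active + inactive file  */
	unsigned long free = 1500;		/* NR_FREE_PAGES           */
	unsigned long total_high_wmark = 4000;	/* summed zone high marks  */
	unsigned long inactive_anon = 80000;
	bool deactivate_anon = false;		/* anon workingset healthy */
	int priority = 12;			/* DEF_PRIORITY            */

	bool file_is_tiny = file + free <= total_high_wmark &&
			    !deactivate_anon &&
			    inactive_anon >> priority;

	printf("file_is_tiny=%d -> %s\n", file_is_tiny,
	       file_is_tiny ? "force SCAN_ANON" : "normal balancing");
	return 0;
}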
2887
Vlastimil Babka53853e22014-10-09 15:27:02 -07002888/*
Vlastimil Babkafdd4c6142016-10-07 16:58:03 -07002889 * Returns true if compaction should go ahead for a costly-order request, or
2890 * the allocation would already succeed without compaction. Return false if we
2891 * should reclaim first.
Vlastimil Babka53853e22014-10-09 15:27:02 -07002892 */
Mel Gorman4f588332016-07-28 15:46:38 -07002893static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
Mel Gormanfe4b1b22012-01-12 17:19:45 -08002894{
Mel Gorman31483b62016-07-28 15:45:46 -07002895 unsigned long watermark;
Vlastimil Babkafdd4c6142016-10-07 16:58:03 -07002896 enum compact_result suitable;
Mel Gormanfe4b1b22012-01-12 17:19:45 -08002897
Vlastimil Babkafdd4c6142016-10-07 16:58:03 -07002898 suitable = compaction_suitable(zone, sc->order, 0, sc->reclaim_idx);
2899 if (suitable == COMPACT_SUCCESS)
2900 /* Allocation should succeed already. Don't reclaim. */
2901 return true;
2902 if (suitable == COMPACT_SKIPPED)
2903 /* Compaction cannot yet proceed. Do reclaim. */
Mel Gormanfe4b1b22012-01-12 17:19:45 -08002904 return false;
2905
Vlastimil Babkafdd4c6142016-10-07 16:58:03 -07002906 /*
2907 * Compaction is already possible, but it takes time to run and there
2908 * are potentially other callers using the pages just freed. So proceed
2909 * with reclaim to make a buffer of free pages available to give
2910 * compaction a reasonable chance of completing and allocating the page.
2911 * Note that we won't actually reclaim the whole buffer in one attempt
2912 * as the target watermark in should_continue_reclaim() is lower. But if
2913 * we are already above the high+gap watermark, don't reclaim at all.
2914 */
2915 watermark = high_wmark_pages(zone) + compact_gap(sc->order);
2916
2917 return zone_watermark_ok_safe(zone, 0, watermark, sc->reclaim_idx);
Mel Gormanfe4b1b22012-01-12 17:19:45 -08002918}
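A minimal model of the buffer check above (not kernel code; the real comparison goes through zone_watermark_ok_safe() with the zone's classzone index, and the watermark numbers here are invented):

#include <stdbool.h>
#include <stdio.h>

static bool model_compaction_ready(unsigned long free_pages,
				   unsigned long high_wmark,
				   unsigned int order)
{
	/* high watermark plus compact_gap(order), i.e. 2UL << order */
	unsigned long watermark = high_wmark + (2UL << order);

	return free_pages >= watermark;	/* over the buffer: compact */
}

int main(void)
{
	/* An order-9 request needs high wmark + 1024 pages of slack. */
	printf("free 9000 vs high 8192: %s\n",
	       model_compaction_ready(9000, 8192, 9) ?
	       "skip reclaim, compact" : "reclaim first");
	printf("free 9300 vs high 8192: %s\n",
	       model_compaction_ready(9300, 8192, 9) ?
	       "skip reclaim, compact" : "reclaim first");
	return 0;
}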
2919
Linus Torvalds1da177e2005-04-16 15:20:36 -07002920/*
2921 * This is the direct reclaim path, for page-allocating processes. We only
2922 * try to reclaim pages from zones which will satisfy the caller's allocation
2923 * request.
2924 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07002925 * If a zone is deemed to be full of pinned pages then just give it a light
2926 * scan then give up on it.
2927 */
Michal Hocko0a0337e2016-05-20 16:57:00 -07002928static void shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002929{
Mel Gormandd1a2392008-04-28 02:12:17 -07002930 struct zoneref *z;
Mel Gorman54a6eb52008-04-28 02:12:16 -07002931 struct zone *zone;
Andrew Morton0608f432013-09-24 15:27:41 -07002932 unsigned long nr_soft_reclaimed;
2933 unsigned long nr_soft_scanned;
Weijie Yang619d0d762014-04-07 15:36:59 -07002934 gfp_t orig_mask;
Mel Gorman79dafcd2016-07-28 15:45:53 -07002935 pg_data_t *last_pgdat = NULL;
KAMEZAWA Hiroyuki1cfb4192008-02-07 00:14:37 -08002936
Mel Gormancc715d92012-03-21 16:34:00 -07002937 /*
2938 * If the number of buffer_heads in the machine exceeds the maximum
2939 * allowed level, force direct reclaim to scan the highmem zone as
2940 * highmem pages could be pinning lowmem pages storing buffer_heads
2941 */
Weijie Yang619d0d762014-04-07 15:36:59 -07002942 orig_mask = sc->gfp_mask;
Mel Gormanb2e18752016-07-28 15:45:37 -07002943 if (buffer_heads_over_limit) {
Mel Gormancc715d92012-03-21 16:34:00 -07002944 sc->gfp_mask |= __GFP_HIGHMEM;
Mel Gorman4f588332016-07-28 15:46:38 -07002945 sc->reclaim_idx = gfp_zone(sc->gfp_mask);
Mel Gormanb2e18752016-07-28 15:45:37 -07002946 }
Mel Gormancc715d92012-03-21 16:34:00 -07002947
Mel Gormand4debc62010-08-09 17:19:29 -07002948 for_each_zone_zonelist_nodemask(zone, z, zonelist,
Mel Gormanb2e18752016-07-28 15:45:37 -07002949 sc->reclaim_idx, sc->nodemask) {
Mel Gormanb2e18752016-07-28 15:45:37 -07002950 /*
KAMEZAWA Hiroyuki1cfb4192008-02-07 00:14:37 -08002951		 * Take care that memory controller reclaim has only a small
2952		 * influence on the global LRU.
2953 */
Johannes Weinerb5ead352019-11-30 17:55:40 -08002954 if (!cgroup_reclaim(sc)) {
Vladimir Davydov344736f2014-10-20 15:50:30 +04002955 if (!cpuset_zone_allowed(zone,
2956 GFP_KERNEL | __GFP_HARDWALL))
KAMEZAWA Hiroyuki1cfb4192008-02-07 00:14:37 -08002957 continue;
Vladimir Davydov65ec02c2014-04-03 14:47:20 -07002958
Johannes Weiner0b064962014-08-06 16:06:12 -07002959 /*
2960 * If we already have plenty of memory free for
2961 * compaction in this zone, don't free any more.
2962 * Even though compaction is invoked for any
2963 * non-zero order, only frequent costly order
2964 * reclamation is disruptive enough to become a
2965 * noticeable problem, like transparent huge
2966 * page allocations.
2967 */
2968 if (IS_ENABLED(CONFIG_COMPACTION) &&
2969 sc->order > PAGE_ALLOC_COSTLY_ORDER &&
Mel Gorman4f588332016-07-28 15:46:38 -07002970 compaction_ready(zone, sc)) {
Johannes Weiner0b064962014-08-06 16:06:12 -07002971 sc->compaction_ready = true;
2972 continue;
Rik van Riele0887c12011-10-31 17:09:31 -07002973 }
Johannes Weiner0b064962014-08-06 16:06:12 -07002974
Andrew Morton0608f432013-09-24 15:27:41 -07002975 /*
Mel Gorman79dafcd2016-07-28 15:45:53 -07002976 * Shrink each node in the zonelist once. If the
2977 * zonelist is ordered by zone (not the default) then a
2978 * node may be shrunk multiple times but in that case
2979 * the user prefers lower zones being preserved.
2980 */
2981 if (zone->zone_pgdat == last_pgdat)
2982 continue;
2983
2984 /*
Andrew Morton0608f432013-09-24 15:27:41 -07002985 * This steals pages from memory cgroups over softlimit
2986 * and returns the number of reclaimed pages and
2987 * scanned pages. This works for global memory pressure
2988 * and balancing, not for a memcg's limit.
2989 */
2990 nr_soft_scanned = 0;
Mel Gormanef8f2322016-07-28 15:46:05 -07002991 nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone->zone_pgdat,
Andrew Morton0608f432013-09-24 15:27:41 -07002992 sc->order, sc->gfp_mask,
2993 &nr_soft_scanned);
2994 sc->nr_reclaimed += nr_soft_reclaimed;
2995 sc->nr_scanned += nr_soft_scanned;
KAMEZAWA Hiroyukiac34a1a2011-06-27 16:18:12 -07002996			/* need some check to avoid more shrink_node() calls */
KAMEZAWA Hiroyuki1cfb4192008-02-07 00:14:37 -08002997 }
Nick Piggin408d8542006-09-25 23:31:27 -07002998
Mel Gorman79dafcd2016-07-28 15:45:53 -07002999 /* See comment about same check for global reclaim above */
3000 if (zone->zone_pgdat == last_pgdat)
3001 continue;
3002 last_pgdat = zone->zone_pgdat;
Mel Gorman970a39a2016-07-28 15:46:35 -07003003 shrink_node(zone->zone_pgdat, sc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003004 }
Mel Gormane0c23272011-10-31 17:09:33 -07003005
Vladimir Davydov65ec02c2014-04-03 14:47:20 -07003006 /*
Weijie Yang619d0d762014-04-07 15:36:59 -07003007 * Restore to original mask to avoid the impact on the caller if we
3008 * promoted it to __GFP_HIGHMEM.
3009 */
3010 sc->gfp_mask = orig_mask;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003011}
Rik van Riel4f98a2f2008-10-18 20:26:32 -07003012
Johannes Weinerb9107182019-11-30 17:55:59 -08003013static void snapshot_refaults(struct mem_cgroup *target_memcg, pg_data_t *pgdat)
Johannes Weiner2a2e4882017-05-03 14:55:03 -07003014{
Johannes Weinerb9107182019-11-30 17:55:59 -08003015 struct lruvec *target_lruvec;
3016 unsigned long refaults;
Johannes Weiner2a2e4882017-05-03 14:55:03 -07003017
Johannes Weinerb9107182019-11-30 17:55:59 -08003018 target_lruvec = mem_cgroup_lruvec(target_memcg, pgdat);
Joonsoo Kim170b04b72020-08-11 18:30:43 -07003019 refaults = lruvec_page_state(target_lruvec, WORKINGSET_ACTIVATE_ANON);
3020 target_lruvec->refaults[0] = refaults;
3021 refaults = lruvec_page_state(target_lruvec, WORKINGSET_ACTIVATE_FILE);
3022 target_lruvec->refaults[1] = refaults;
Johannes Weiner2a2e4882017-05-03 14:55:03 -07003023}
3024
Linus Torvalds1da177e2005-04-16 15:20:36 -07003025/*
3026 * This is the main entry point to direct page reclaim.
3027 *
3028 * If a full scan of the inactive list fails to free enough memory then we
3029 * are "out of memory" and something needs to be killed.
3030 *
3031 * If the caller is !__GFP_FS then the probability of a failure is reasonably
3032 * high - the zone may be full of dirty or under-writeback pages, which this
Jens Axboe5b0830c2009-09-23 19:37:09 +02003033 * caller can't do much about. We kick the writeback threads and take explicit
3034 * naps in the hope that some of these pages can be written. But if the
3035 * allocating task holds filesystem locks which prevent writeout this might not
3036 * work, and the allocation attempt will fail.
Nishanth Aravamudana41f24e2008-04-29 00:58:25 -07003037 *
3038 * returns: 0, if no pages reclaimed
3039 * else, the number of pages reclaimed
Linus Torvalds1da177e2005-04-16 15:20:36 -07003040 */
Mel Gormandac1d272008-04-28 02:12:12 -07003041static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
Vladimir Davydov3115cd92014-04-03 14:47:22 -07003042 struct scan_control *sc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003043{
Johannes Weiner241994ed2015-02-11 15:26:06 -08003044 int initial_priority = sc->priority;
Johannes Weiner2a2e4882017-05-03 14:55:03 -07003045 pg_data_t *last_pgdat;
3046 struct zoneref *z;
3047 struct zone *zone;
Johannes Weiner241994ed2015-02-11 15:26:06 -08003048retry:
Keika Kobayashi873b4772008-07-25 01:48:52 -07003049 delayacct_freepages_start();
3050
Johannes Weinerb5ead352019-11-30 17:55:40 -08003051 if (!cgroup_reclaim(sc))
Mel Gorman7cc30fc2016-07-28 15:46:59 -07003052 __count_zid_vm_events(ALLOCSTALL, sc->reclaim_idx, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003053
Konstantin Khlebnikov9e3b2f82012-05-29 15:06:57 -07003054 do {
Anton Vorontsov70ddf632013-04-29 15:08:31 -07003055 vmpressure_prio(sc->gfp_mask, sc->target_mem_cgroup,
3056 sc->priority);
Balbir Singh66e17072008-02-07 00:13:56 -08003057 sc->nr_scanned = 0;
Michal Hocko0a0337e2016-05-20 16:57:00 -07003058 shrink_zones(zonelist, sc);
Mel Gormane0c23272011-10-31 17:09:33 -07003059
KOSAKI Motohirobb21c7c2010-06-04 14:15:05 -07003060 if (sc->nr_reclaimed >= sc->nr_to_reclaim)
Johannes Weiner0b064962014-08-06 16:06:12 -07003061 break;
3062
3063 if (sc->compaction_ready)
3064 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003065
3066 /*
Minchan Kim0e50ce32013-02-22 16:35:37 -08003067 * If we're getting trouble reclaiming, start doing
3068 * writepage even in laptop mode.
3069 */
3070 if (sc->priority < DEF_PRIORITY - 2)
3071 sc->may_writepage = 1;
Johannes Weiner0b064962014-08-06 16:06:12 -07003072 } while (--sc->priority >= 0);
KOSAKI Motohirobb21c7c2010-06-04 14:15:05 -07003073
Johannes Weiner2a2e4882017-05-03 14:55:03 -07003074 last_pgdat = NULL;
3075 for_each_zone_zonelist_nodemask(zone, z, zonelist, sc->reclaim_idx,
3076 sc->nodemask) {
3077 if (zone->zone_pgdat == last_pgdat)
3078 continue;
3079 last_pgdat = zone->zone_pgdat;
Johannes Weiner1b051172019-11-30 17:55:52 -08003080
Johannes Weiner2a2e4882017-05-03 14:55:03 -07003081 snapshot_refaults(sc->target_mem_cgroup, zone->zone_pgdat);
Johannes Weiner1b051172019-11-30 17:55:52 -08003082
3083 if (cgroup_reclaim(sc)) {
3084 struct lruvec *lruvec;
3085
3086 lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup,
3087 zone->zone_pgdat);
3088 clear_bit(LRUVEC_CONGESTED, &lruvec->flags);
3089 }
Johannes Weiner2a2e4882017-05-03 14:55:03 -07003090 }
3091
Keika Kobayashi873b4772008-07-25 01:48:52 -07003092 delayacct_freepages_end();
3093
KOSAKI Motohirobb21c7c2010-06-04 14:15:05 -07003094 if (sc->nr_reclaimed)
3095 return sc->nr_reclaimed;
3096
Mel Gorman0cee34f2012-01-12 17:19:49 -08003097 /* Aborted reclaim to try compaction? don't OOM, then */
Johannes Weiner0b064962014-08-06 16:06:12 -07003098 if (sc->compaction_ready)
Mel Gorman73350842012-01-12 17:19:33 -08003099 return 1;
3100
Johannes Weinerb91ac372019-11-30 17:56:02 -08003101 /*
3102 * We make inactive:active ratio decisions based on the node's
3103 * composition of memory, but a restrictive reclaim_idx or a
3104 * memory.low cgroup setting can exempt large amounts of
3105	 * memory from reclaim. Neither case is very common, so
3106 * instead of doing costly eligibility calculations of the
3107 * entire cgroup subtree up front, we assume the estimates are
3108 * good, and retry with forcible deactivation if that fails.
3109 */
3110 if (sc->skipped_deactivate) {
3111 sc->priority = initial_priority;
3112 sc->force_deactivate = 1;
3113 sc->skipped_deactivate = 0;
3114 goto retry;
3115 }
3116
Johannes Weiner241994ed2015-02-11 15:26:06 -08003117 /* Untapped cgroup reserves? Don't OOM, retry. */
Yisheng Xied6622f62017-05-03 14:53:57 -07003118 if (sc->memcg_low_skipped) {
Johannes Weiner241994ed2015-02-11 15:26:06 -08003119 sc->priority = initial_priority;
Johannes Weinerb91ac372019-11-30 17:56:02 -08003120 sc->force_deactivate = 0;
Yisheng Xied6622f62017-05-03 14:53:57 -07003121 sc->memcg_low_reclaim = 1;
3122 sc->memcg_low_skipped = 0;
Johannes Weiner241994ed2015-02-11 15:26:06 -08003123 goto retry;
3124 }
3125
KOSAKI Motohirobb21c7c2010-06-04 14:15:05 -07003126 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003127}
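The priority loop above can be sketched in userspace (not kernel code; the per-pass reclaim yield is invented): each pass scans lru_size >> priority, so scan pressure roughly doubles per iteration until nr_to_reclaim is met or priority reaches zero.

#include <stdio.h>

#define DEF_PRIORITY 12

int main(void)
{
	unsigned long lru_size = 100000;	/* invented */
	unsigned long nr_to_reclaim = 32;	/* SWAP_CLUSTER_MAX */
	unsigned long nr_reclaimed = 0;
	int priority = DEF_PRIORITY;

	do {
		/* Scan lru_size >> priority; pretend 1 page in 64
		 * scanned turns out to be reclaimable. */
		unsigned long scanned = lru_size >> priority;

		nr_reclaimed += scanned / 64;
		printf("prio %2d: scanned %6lu, reclaimed so far %lu\n",
		       priority, scanned, nr_reclaimed);

		if (nr_reclaimed >= nr_to_reclaim)
			break;
	} while (--priority >= 0);

	return 0;
}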
3128
Johannes Weinerc73322d2017-05-03 14:51:51 -07003129static bool allow_direct_reclaim(pg_data_t *pgdat)
Mel Gorman55150612012-07-31 16:44:35 -07003130{
3131 struct zone *zone;
3132 unsigned long pfmemalloc_reserve = 0;
3133 unsigned long free_pages = 0;
3134 int i;
3135 bool wmark_ok;
3136
Johannes Weinerc73322d2017-05-03 14:51:51 -07003137 if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES)
3138 return true;
3139
Mel Gorman55150612012-07-31 16:44:35 -07003140 for (i = 0; i <= ZONE_NORMAL; i++) {
3141 zone = &pgdat->node_zones[i];
Johannes Weinerd450abd2017-05-03 14:51:54 -07003142 if (!managed_zone(zone))
3143 continue;
3144
3145 if (!zone_reclaimable_pages(zone))
Mel Gorman675becc2014-06-04 16:07:35 -07003146 continue;
3147
Mel Gorman55150612012-07-31 16:44:35 -07003148 pfmemalloc_reserve += min_wmark_pages(zone);
3149 free_pages += zone_page_state(zone, NR_FREE_PAGES);
3150 }
3151
Mel Gorman675becc2014-06-04 16:07:35 -07003152 /* If there are no reserves (unexpected config) then do not throttle */
3153 if (!pfmemalloc_reserve)
3154 return true;
3155
Mel Gorman55150612012-07-31 16:44:35 -07003156 wmark_ok = free_pages > pfmemalloc_reserve / 2;
3157
3158 /* kswapd must be awake if processes are being throttled */
3159 if (!wmark_ok && waitqueue_active(&pgdat->kswapd_wait)) {
Joonsoo Kim97a225e2020-06-03 15:59:01 -07003160 if (READ_ONCE(pgdat->kswapd_highest_zoneidx) > ZONE_NORMAL)
3161 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, ZONE_NORMAL);
Qian Cai5644e1fb2020-04-01 21:10:12 -07003162
Mel Gorman55150612012-07-31 16:44:35 -07003163 wake_up_interruptible(&pgdat->kswapd_wait);
3164 }
3165
3166 return wmark_ok;
3167}
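A compact model of the reserve check above (not kernel code; the zone numbers are invented): direct reclaimers are throttled once free pages across ZONE_NORMAL and below fall under half the summed min watermarks.

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	/* min watermark / free pages for ZONE_DMA..ZONE_NORMAL. */
	unsigned long min_wmark[] = { 50, 400, 2000 };
	unsigned long nr_free[]   = { 60, 300,  700 };
	unsigned long pfmemalloc_reserve = 0, free_pages = 0;

	for (int i = 0; i < 3; i++) {
		pfmemalloc_reserve += min_wmark[i];
		free_pages += nr_free[i];
	}

	bool wmark_ok = free_pages > pfmemalloc_reserve / 2;

	printf("free %lu vs reserve/2 %lu: %s\n",
	       free_pages, pfmemalloc_reserve / 2,
	       wmark_ok ? "reclaim proceeds" : "throttle on pfmemalloc_wait");
	return 0;
}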
3168
3169/*
3170 * Throttle direct reclaimers if backing storage is backed by the network
3171 * and the PFMEMALLOC reserve for the preferred node is getting dangerously
3172 * depleted. kswapd will continue to make progress and wake the processes
Mel Gorman50694c22012-11-26 16:29:48 -08003173 * when the low watermark is reached.
3174 *
3175 * Returns true if a fatal signal was delivered during throttling. If this
3176 * happens, the page allocator should not consider triggering the OOM killer.
Mel Gorman55150612012-07-31 16:44:35 -07003177 */
Mel Gorman50694c22012-11-26 16:29:48 -08003178static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
Mel Gorman55150612012-07-31 16:44:35 -07003179 nodemask_t *nodemask)
3180{
Mel Gorman675becc2014-06-04 16:07:35 -07003181 struct zoneref *z;
Mel Gorman55150612012-07-31 16:44:35 -07003182 struct zone *zone;
Mel Gorman675becc2014-06-04 16:07:35 -07003183 pg_data_t *pgdat = NULL;
Mel Gorman55150612012-07-31 16:44:35 -07003184
3185 /*
3186 * Kernel threads should not be throttled as they may be indirectly
3187 * responsible for cleaning pages necessary for reclaim to make forward
3188 * progress. kjournald for example may enter direct reclaim while
3189	 * committing a transaction, where throttling it could force other
3190 * processes to block on log_wait_commit().
3191 */
3192 if (current->flags & PF_KTHREAD)
Mel Gorman50694c22012-11-26 16:29:48 -08003193 goto out;
3194
3195 /*
3196 * If a fatal signal is pending, this process should not throttle.
3197	 * It should return quickly so it can exit and free its memory.
3198 */
3199 if (fatal_signal_pending(current))
3200 goto out;
Mel Gorman55150612012-07-31 16:44:35 -07003201
Mel Gorman675becc2014-06-04 16:07:35 -07003202 /*
3203 * Check if the pfmemalloc reserves are ok by finding the first node
3204 * with a usable ZONE_NORMAL or lower zone. The expectation is that
3205 * GFP_KERNEL will be required for allocating network buffers when
3206 * swapping over the network so ZONE_HIGHMEM is unusable.
3207 *
3208 * Throttling is based on the first usable node and throttled processes
3209 * wait on a queue until kswapd makes progress and wakes them. There
3210 * is an affinity then between processes waking up and where reclaim
3211 * progress has been made assuming the process wakes on the same node.
3212 * More importantly, processes running on remote nodes will not compete
3213 * for remote pfmemalloc reserves and processes on different nodes
3214 * should make reasonable progress.
3215 */
3216 for_each_zone_zonelist_nodemask(zone, z, zonelist,
Michael S. Tsirkin17636fa2015-01-26 12:58:41 -08003217 gfp_zone(gfp_mask), nodemask) {
Mel Gorman675becc2014-06-04 16:07:35 -07003218 if (zone_idx(zone) > ZONE_NORMAL)
3219 continue;
3220
3221 /* Throttle based on the first usable node */
3222 pgdat = zone->zone_pgdat;
Johannes Weinerc73322d2017-05-03 14:51:51 -07003223 if (allow_direct_reclaim(pgdat))
Mel Gorman675becc2014-06-04 16:07:35 -07003224 goto out;
3225 break;
3226 }
3227
3228 /* If no zone was usable by the allocation flags then do not throttle */
3229 if (!pgdat)
Mel Gorman50694c22012-11-26 16:29:48 -08003230 goto out;
Mel Gorman55150612012-07-31 16:44:35 -07003231
Mel Gorman68243e72012-07-31 16:44:39 -07003232 /* Account for the throttling */
3233 count_vm_event(PGSCAN_DIRECT_THROTTLE);
3234
Mel Gorman55150612012-07-31 16:44:35 -07003235 /*
3236 * If the caller cannot enter the filesystem, it's possible that it
3237 * is due to the caller holding an FS lock or performing a journal
3238 * transaction in the case of a filesystem like ext[3|4]. In this case,
3239 * it is not safe to block on pfmemalloc_wait as kswapd could be
3240 * blocked waiting on the same lock. Instead, throttle for up to a
3241 * second before continuing.
3242 */
3243 if (!(gfp_mask & __GFP_FS)) {
3244 wait_event_interruptible_timeout(pgdat->pfmemalloc_wait,
Johannes Weinerc73322d2017-05-03 14:51:51 -07003245 allow_direct_reclaim(pgdat), HZ);
Mel Gorman50694c22012-11-26 16:29:48 -08003246
3247 goto check_pending;
Mel Gorman55150612012-07-31 16:44:35 -07003248 }
3249
3250 /* Throttle until kswapd wakes the process */
3251 wait_event_killable(zone->zone_pgdat->pfmemalloc_wait,
Johannes Weinerc73322d2017-05-03 14:51:51 -07003252 allow_direct_reclaim(pgdat));
Mel Gorman50694c22012-11-26 16:29:48 -08003253
3254check_pending:
3255 if (fatal_signal_pending(current))
3256 return true;
3257
3258out:
3259 return false;
Mel Gorman55150612012-07-31 16:44:35 -07003260}
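Leaving aside the early exit when no usable node is found, the decision order above reduces to a small state machine; a hypothetical model (not kernel code):

#include <stdbool.h>
#include <stdio.h>

enum action { NO_THROTTLE, WAIT_UP_TO_HZ, WAIT_FOR_KSWAPD };

static enum action model_throttle(bool kthread, bool fatal_signal,
				  bool reserves_ok, bool gfp_fs)
{
	if (kthread || fatal_signal)
		return NO_THROTTLE;	/* must keep making progress */
	if (reserves_ok)
		return NO_THROTTLE;	/* allow_direct_reclaim() true */
	/* Without __GFP_FS, FS locks may be held: bounded nap only. */
	return gfp_fs ? WAIT_FOR_KSWAPD : WAIT_UP_TO_HZ;
}

int main(void)
{
	static const char * const what[] = {
		"no throttle", "sleep up to 1s", "sleep until kswapd wakes us",
	};

	printf("GFP_KERNEL alloc, reserves low : %s\n",
	       what[model_throttle(false, false, false, true)]);
	printf("!__GFP_FS journal path, same   : %s\n",
	       what[model_throttle(false, false, false, false)]);
	return 0;
}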
3261
Mel Gormandac1d272008-04-28 02:12:12 -07003262unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
KAMEZAWA Hiroyuki327c0e92009-03-31 15:23:31 -07003263 gfp_t gfp_mask, nodemask_t *nodemask)
Balbir Singh66e17072008-02-07 00:13:56 -08003264{
Mel Gorman33906bc2010-08-09 17:19:16 -07003265 unsigned long nr_reclaimed;
Balbir Singh66e17072008-02-07 00:13:56 -08003266 struct scan_control sc = {
KOSAKI Motohiro22fba332009-12-14 17:59:10 -08003267 .nr_to_reclaim = SWAP_CLUSTER_MAX,
Nick Desaulniersf2f43e52017-07-06 15:36:50 -07003268 .gfp_mask = current_gfp_context(gfp_mask),
Mel Gormanb2e18752016-07-28 15:45:37 -07003269 .reclaim_idx = gfp_zone(gfp_mask),
Johannes Weineree814fe2014-08-06 16:06:19 -07003270 .order = order,
3271 .nodemask = nodemask,
3272 .priority = DEF_PRIORITY,
3273 .may_writepage = !laptop_mode,
Johannes Weinera6dc60f82009-03-31 15:19:30 -07003274 .may_unmap = 1,
KOSAKI Motohiro2e2e4252009-04-21 12:24:57 -07003275 .may_swap = 1,
Balbir Singh66e17072008-02-07 00:13:56 -08003276 };
3277
Mel Gorman55150612012-07-31 16:44:35 -07003278 /*
Greg Thelenbb451fd2018-08-17 15:45:19 -07003279 * scan_control uses s8 fields for order, priority, and reclaim_idx.
3280 * Confirm they are large enough for max values.
3281 */
3282 BUILD_BUG_ON(MAX_ORDER > S8_MAX);
3283 BUILD_BUG_ON(DEF_PRIORITY > S8_MAX);
3284 BUILD_BUG_ON(MAX_NR_ZONES > S8_MAX);
3285
3286 /*
Mel Gorman50694c22012-11-26 16:29:48 -08003287 * Do not enter reclaim if fatal signal was delivered while throttled.
3288 * 1 is returned so that the page allocator does not OOM kill at this
3289 * point.
Mel Gorman55150612012-07-31 16:44:35 -07003290 */
Nick Desaulniersf2f43e52017-07-06 15:36:50 -07003291 if (throttle_direct_reclaim(sc.gfp_mask, zonelist, nodemask))
Mel Gorman55150612012-07-31 16:44:35 -07003292 return 1;
3293
Andrew Morton1732d2b012019-07-16 16:26:15 -07003294 set_task_reclaim_state(current, &sc.reclaim_state);
Yafang Shao3481c372019-05-13 17:19:14 -07003295 trace_mm_vmscan_direct_reclaim_begin(order, sc.gfp_mask);
Mel Gorman33906bc2010-08-09 17:19:16 -07003296
Vladimir Davydov3115cd92014-04-03 14:47:22 -07003297 nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
Mel Gorman33906bc2010-08-09 17:19:16 -07003298
3299 trace_mm_vmscan_direct_reclaim_end(nr_reclaimed);
Andrew Morton1732d2b012019-07-16 16:26:15 -07003300 set_task_reclaim_state(current, NULL);
Mel Gorman33906bc2010-08-09 17:19:16 -07003301
3302 return nr_reclaimed;
Balbir Singh66e17072008-02-07 00:13:56 -08003303}
3304
Andrew Mortonc255a452012-07-31 16:43:02 -07003305#ifdef CONFIG_MEMCG
Balbir Singh66e17072008-02-07 00:13:56 -08003306
Michal Hockod2e5fb92019-08-30 16:04:50 -07003307/* Only used by soft limit reclaim. Do not reuse for anything else. */
Mel Gormana9dd0a82016-07-28 15:46:02 -07003308unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
Balbir Singh4e416952009-09-23 15:56:39 -07003309 gfp_t gfp_mask, bool noswap,
Mel Gormanef8f2322016-07-28 15:46:05 -07003310 pg_data_t *pgdat,
Ying Han0ae5e892011-05-26 16:25:25 -07003311 unsigned long *nr_scanned)
Balbir Singh4e416952009-09-23 15:56:39 -07003312{
Johannes Weinerafaf07a2019-11-30 17:55:46 -08003313 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
Balbir Singh4e416952009-09-23 15:56:39 -07003314 struct scan_control sc = {
KOSAKI Motohirob8f5c562010-08-10 18:03:02 -07003315 .nr_to_reclaim = SWAP_CLUSTER_MAX,
Johannes Weineree814fe2014-08-06 16:06:19 -07003316 .target_mem_cgroup = memcg,
Balbir Singh4e416952009-09-23 15:56:39 -07003317 .may_writepage = !laptop_mode,
3318 .may_unmap = 1,
Mel Gormanb2e18752016-07-28 15:45:37 -07003319 .reclaim_idx = MAX_NR_ZONES - 1,
Balbir Singh4e416952009-09-23 15:56:39 -07003320 .may_swap = !noswap,
Balbir Singh4e416952009-09-23 15:56:39 -07003321 };
Ying Han0ae5e892011-05-26 16:25:25 -07003322
Michal Hockod2e5fb92019-08-30 16:04:50 -07003323 WARN_ON_ONCE(!current->reclaim_state);
3324
Balbir Singh4e416952009-09-23 15:56:39 -07003325 sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
3326 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
KOSAKI Motohirobdce6d92010-08-09 17:19:56 -07003327
Konstantin Khlebnikov9e3b2f82012-05-29 15:06:57 -07003328 trace_mm_vmscan_memcg_softlimit_reclaim_begin(sc.order,
Yafang Shao3481c372019-05-13 17:19:14 -07003329 sc.gfp_mask);
KOSAKI Motohirobdce6d92010-08-09 17:19:56 -07003330
Balbir Singh4e416952009-09-23 15:56:39 -07003331 /*
3332 * NOTE: Although we can get the priority field, using it
3333 * here is not a good idea, since it limits the pages we can scan.
Mel Gormana9dd0a82016-07-28 15:46:02 -07003334 * if we don't reclaim here, the shrink_node from balance_pgdat
Balbir Singh4e416952009-09-23 15:56:39 -07003335	 * will pick up pages from other mem cgroups as well. We hack
3336 * the priority and make it zero.
3337 */
Johannes Weinerafaf07a2019-11-30 17:55:46 -08003338 shrink_lruvec(lruvec, &sc);
KOSAKI Motohirobdce6d92010-08-09 17:19:56 -07003339
3340 trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
3341
Ying Han0ae5e892011-05-26 16:25:25 -07003342 *nr_scanned = sc.nr_scanned;
Yafang Shao0308f7c2019-07-16 16:26:12 -07003343
Balbir Singh4e416952009-09-23 15:56:39 -07003344 return sc.nr_reclaimed;
3345}
3346
Johannes Weiner72835c82012-01-12 17:18:32 -08003347unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
Johannes Weinerb70a2a22014-10-09 15:28:56 -07003348 unsigned long nr_pages,
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08003349 gfp_t gfp_mask,
Johannes Weinerb70a2a22014-10-09 15:28:56 -07003350 bool may_swap)
Balbir Singh66e17072008-02-07 00:13:56 -08003351{
KOSAKI Motohirobdce6d92010-08-09 17:19:56 -07003352 unsigned long nr_reclaimed;
Vlastimil Babka499118e2017-05-08 15:59:50 -07003353 unsigned int noreclaim_flag;
Balbir Singh66e17072008-02-07 00:13:56 -08003354 struct scan_control sc = {
Johannes Weinerb70a2a22014-10-09 15:28:56 -07003355 .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
Michal Hocko7dea19f2017-05-03 14:53:15 -07003356 .gfp_mask = (current_gfp_context(gfp_mask) & GFP_RECLAIM_MASK) |
Johannes Weineree814fe2014-08-06 16:06:19 -07003357 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK),
Mel Gormanb2e18752016-07-28 15:45:37 -07003358 .reclaim_idx = MAX_NR_ZONES - 1,
Johannes Weineree814fe2014-08-06 16:06:19 -07003359 .target_mem_cgroup = memcg,
3360 .priority = DEF_PRIORITY,
Balbir Singh66e17072008-02-07 00:13:56 -08003361 .may_writepage = !laptop_mode,
Johannes Weinera6dc60f82009-03-31 15:19:30 -07003362 .may_unmap = 1,
Johannes Weinerb70a2a22014-10-09 15:28:56 -07003363 .may_swap = may_swap,
Ying Hana09ed5e2011-05-24 17:12:26 -07003364 };
Shakeel Buttfa40d1e2019-11-30 17:50:16 -08003365 /*
3366 * Traverse the ZONELIST_FALLBACK zonelist of the current node to put
3367 * equal pressure on all the nodes. This is based on the assumption that
3368 * the reclaim does not bail out early.
3369 */
3370 struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
Balbir Singh66e17072008-02-07 00:13:56 -08003371
Andrew Morton1732d2b012019-07-16 16:26:15 -07003372 set_task_reclaim_state(current, &sc.reclaim_state);
Yafang Shao3481c372019-05-13 17:19:14 -07003373 trace_mm_vmscan_memcg_reclaim_begin(0, sc.gfp_mask);
Vlastimil Babka499118e2017-05-08 15:59:50 -07003374 noreclaim_flag = memalloc_noreclaim_save();
Johannes Weinereb414682018-10-26 15:06:27 -07003375
Vladimir Davydov3115cd92014-04-03 14:47:22 -07003376 nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
Johannes Weinereb414682018-10-26 15:06:27 -07003377
Vlastimil Babka499118e2017-05-08 15:59:50 -07003378 memalloc_noreclaim_restore(noreclaim_flag);
KOSAKI Motohirobdce6d92010-08-09 17:19:56 -07003379 trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);
Andrew Morton1732d2b012019-07-16 16:26:15 -07003380 set_task_reclaim_state(current, NULL);
KOSAKI Motohirobdce6d92010-08-09 17:19:56 -07003381
3382 return nr_reclaimed;
Balbir Singh66e17072008-02-07 00:13:56 -08003383}
3384#endif
3385
Mel Gorman1d82de62016-07-28 15:45:43 -07003386static void age_active_anon(struct pglist_data *pgdat,
Mel Gormanef8f2322016-07-28 15:46:05 -07003387 struct scan_control *sc)
Johannes Weinerf16015f2012-01-12 17:17:52 -08003388{
Johannes Weinerb95a2f22012-01-12 17:18:06 -08003389 struct mem_cgroup *memcg;
Johannes Weinerb91ac372019-11-30 17:56:02 -08003390 struct lruvec *lruvec;
Johannes Weinerf16015f2012-01-12 17:17:52 -08003391
Johannes Weinerb95a2f22012-01-12 17:18:06 -08003392 if (!total_swap_pages)
3393 return;
3394
Johannes Weinerb91ac372019-11-30 17:56:02 -08003395 lruvec = mem_cgroup_lruvec(NULL, pgdat);
3396 if (!inactive_is_low(lruvec, LRU_INACTIVE_ANON))
3397 return;
3398
Johannes Weinerb95a2f22012-01-12 17:18:06 -08003399 memcg = mem_cgroup_iter(NULL, NULL, NULL);
3400 do {
Johannes Weinerb91ac372019-11-30 17:56:02 -08003401 lruvec = mem_cgroup_lruvec(memcg, pgdat);
3402 shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
3403 sc, LRU_ACTIVE_ANON);
Johannes Weinerb95a2f22012-01-12 17:18:06 -08003404 memcg = mem_cgroup_iter(NULL, memcg, NULL);
3405 } while (memcg);
Johannes Weinerf16015f2012-01-12 17:17:52 -08003406}
3407
Joonsoo Kim97a225e2020-06-03 15:59:01 -07003408static bool pgdat_watermark_boosted(pg_data_t *pgdat, int highest_zoneidx)
Mel Gorman1c308442018-12-28 00:35:52 -08003409{
3410 int i;
3411 struct zone *zone;
3412
3413 /*
3414 * Check for watermark boosts top-down as the higher zones
3415 * are more likely to be boosted. Both watermarks and boosts
Randy Dunlap1eba09c2020-08-11 18:33:26 -07003416 * should not be checked at the same time as reclaim would
Mel Gorman1c308442018-12-28 00:35:52 -08003417 * start prematurely when there is no boosting and a lower
3418 * zone is balanced.
3419 */
Joonsoo Kim97a225e2020-06-03 15:59:01 -07003420 for (i = highest_zoneidx; i >= 0; i--) {
Mel Gorman1c308442018-12-28 00:35:52 -08003421 zone = pgdat->node_zones + i;
3422 if (!managed_zone(zone))
3423 continue;
3424
3425 if (zone->watermark_boost)
3426 return true;
3427 }
3428
3429 return false;
3430}
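/*
 * Example of the top-down check above (illustrative): with
 * highest_zoneidx == ZONE_NORMAL, a watermark boost left on ZONE_DMA32
 * alone makes this return true, which is how wakeup_kswapd() below can
 * keep kswapd running even when pgdat_balanced() is already satisfied.
 */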
3431
Mel Gormane716f2e2017-05-03 14:53:45 -07003432/*
3433 * Returns true if there is an eligible zone balanced for the request order
Joonsoo Kim97a225e2020-06-03 15:59:01 -07003434 * and highest_zoneidx
Mel Gormane716f2e2017-05-03 14:53:45 -07003435 */
Joonsoo Kim97a225e2020-06-03 15:59:01 -07003436static bool pgdat_balanced(pg_data_t *pgdat, int order, int highest_zoneidx)
Johannes Weiner60cefed2012-11-29 13:54:23 -08003437{
Mel Gormane716f2e2017-05-03 14:53:45 -07003438 int i;
3439 unsigned long mark = -1;
3440 struct zone *zone;
Johannes Weiner60cefed2012-11-29 13:54:23 -08003441
Mel Gorman1c308442018-12-28 00:35:52 -08003442 /*
3443 * Check watermarks bottom-up as lower zones are more likely to
3444 * meet watermarks.
3445 */
Joonsoo Kim97a225e2020-06-03 15:59:01 -07003446 for (i = 0; i <= highest_zoneidx; i++) {
Mel Gormane716f2e2017-05-03 14:53:45 -07003447 zone = pgdat->node_zones + i;
Mel Gorman6256c6b2016-07-28 15:45:56 -07003448
Mel Gormane716f2e2017-05-03 14:53:45 -07003449 if (!managed_zone(zone))
3450 continue;
3451
3452 mark = high_wmark_pages(zone);
Joonsoo Kim97a225e2020-06-03 15:59:01 -07003453 if (zone_watermark_ok_safe(zone, order, mark, highest_zoneidx))
Mel Gormane716f2e2017-05-03 14:53:45 -07003454 return true;
3455 }
3456
3457 /*
Joonsoo Kim97a225e2020-06-03 15:59:01 -07003458 * If a node has no populated zone within highest_zoneidx, it does not
Mel Gormane716f2e2017-05-03 14:53:45 -07003459 * need balancing by definition. This can happen if a zone-restricted
3460 * allocation tries to wake a remote kswapd.
3461 */
3462 if (mark == -1)
3463 return true;
3464
3465 return false;
Johannes Weiner60cefed2012-11-29 13:54:23 -08003466}
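/*
 * Worked example (illustrative values): for order == 2 and
 * highest_zoneidx == ZONE_NORMAL, the node counts as balanced as soon
 * as a single eligible zone passes
 *
 *	zone_watermark_ok_safe(zone, 2, high_wmark_pages(zone),
 *			       ZONE_NORMAL)
 *
 * i.e. its free pages clear the high watermark with an order-2 page
 * available; the remaining zones do not all have to be balanced.
 */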
3467
Mel Gorman631b6e02017-05-03 14:53:41 -07003468/* Clear pgdat state for congested, dirty or under writeback. */
3469static void clear_pgdat_congested(pg_data_t *pgdat)
3470{
Johannes Weiner1b051172019-11-30 17:55:52 -08003471 struct lruvec *lruvec = mem_cgroup_lruvec(NULL, pgdat);
3472
3473 clear_bit(LRUVEC_CONGESTED, &lruvec->flags);
Mel Gorman631b6e02017-05-03 14:53:41 -07003474 clear_bit(PGDAT_DIRTY, &pgdat->flags);
3475 clear_bit(PGDAT_WRITEBACK, &pgdat->flags);
3476}
3477
Mel Gorman1741c872011-01-13 15:46:21 -08003478/*
Mel Gorman55150612012-07-31 16:44:35 -07003479 * Prepare kswapd for sleeping. This verifies that there are no processes
3480 * waiting in throttle_direct_reclaim() and that watermarks have been met.
3481 *
3482 * Returns true if kswapd is ready to sleep
3483 */
Joonsoo Kim97a225e2020-06-03 15:59:01 -07003484static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order,
3485 int highest_zoneidx)
Mel Gormanf50de2d2009-12-14 17:58:53 -08003486{
Mel Gorman55150612012-07-31 16:44:35 -07003487 /*
Vlastimil Babka9e5e3662015-01-08 14:32:40 -08003488 * The throttled processes are normally woken up in balance_pgdat() as
Johannes Weinerc73322d2017-05-03 14:51:51 -07003489 * soon as allow_direct_reclaim() is true. But there is a potential
Vlastimil Babka9e5e3662015-01-08 14:32:40 -08003490 * race between when kswapd checks the watermarks and a process gets
3491 * throttled. There is also a potential race if processes get
3492 * throttled, kswapd wakes, and a large process exits, thereby balancing the
3493 * zones, which causes kswapd to exit balance_pgdat() before reaching
3494 * the wake up checks. If kswapd is going to sleep, no process should
3495 * be sleeping on pfmemalloc_wait, so wake them now if necessary. If
3496 * the wake up is premature, processes will wake kswapd and get
3497 * throttled again. The difference from wake ups in balance_pgdat() is
3498 * that here we are under prepare_to_wait().
Mel Gorman55150612012-07-31 16:44:35 -07003499 */
Vlastimil Babka9e5e3662015-01-08 14:32:40 -08003500 if (waitqueue_active(&pgdat->pfmemalloc_wait))
3501 wake_up_all(&pgdat->pfmemalloc_wait);
Mel Gormanf50de2d2009-12-14 17:58:53 -08003502
Johannes Weinerc73322d2017-05-03 14:51:51 -07003503 /* Hopeless node, leave it to direct reclaim */
3504 if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES)
3505 return true;
3506
Joonsoo Kim97a225e2020-06-03 15:59:01 -07003507 if (pgdat_balanced(pgdat, order, highest_zoneidx)) {
Mel Gormane716f2e2017-05-03 14:53:45 -07003508 clear_pgdat_congested(pgdat);
3509 return true;
Mel Gorman1d82de62016-07-28 15:45:43 -07003510 }
3511
Shantanu Goel333b0a42017-05-03 14:53:38 -07003512 return false;
Mel Gormanf50de2d2009-12-14 17:58:53 -08003513}
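/*
 * Note on the function above: kswapd_try_to_sleep() calls it twice,
 * once before the short HZ/10 nap and once before sleeping
 * indefinitely, so a premature wakeup costs at most one short interval
 * before the watermarks are checked again.
 */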
3514
Linus Torvalds1da177e2005-04-16 15:20:36 -07003515/*
Mel Gorman1d82de62016-07-28 15:45:43 -07003516 * kswapd shrinks a node of pages that are at or below the highest usable
3517 * zone that is currently unbalanced.
Mel Gormanb8e83b92013-07-03 15:01:45 -07003518 *
3519 * Returns true if kswapd scanned at least the requested number of pages to
Mel Gorman283aba92013-07-03 15:01:51 -07003520 * reclaim or if the lack of progress was due to pages under writeback.
3521 * This is used to determine if the scanning priority needs to be raised.
Mel Gorman75485362013-07-03 15:01:42 -07003522 */
Mel Gorman1d82de62016-07-28 15:45:43 -07003523static bool kswapd_shrink_node(pg_data_t *pgdat,
Vlastimil Babkaaccf6242016-03-17 14:18:15 -07003524 struct scan_control *sc)
Mel Gorman75485362013-07-03 15:01:42 -07003525{
Mel Gorman1d82de62016-07-28 15:45:43 -07003526 struct zone *zone;
3527 int z;
Mel Gorman75485362013-07-03 15:01:42 -07003528
Mel Gorman1d82de62016-07-28 15:45:43 -07003529 /* Reclaim a number of pages proportional to the number of zones */
3530 sc->nr_to_reclaim = 0;
Mel Gorman970a39a2016-07-28 15:46:35 -07003531 for (z = 0; z <= sc->reclaim_idx; z++) {
Mel Gorman1d82de62016-07-28 15:45:43 -07003532 zone = pgdat->node_zones + z;
Mel Gorman6aa303d2016-09-01 16:14:55 -07003533 if (!managed_zone(zone))
Mel Gorman1d82de62016-07-28 15:45:43 -07003534 continue;
Mel Gorman7c954f62013-07-03 15:01:54 -07003535
Mel Gorman1d82de62016-07-28 15:45:43 -07003536 sc->nr_to_reclaim += max(high_wmark_pages(zone), SWAP_CLUSTER_MAX);
Mel Gorman7c954f62013-07-03 15:01:54 -07003537 }
3538
Mel Gorman1d82de62016-07-28 15:45:43 -07003539 /*
3540 * Historically care was taken to put equal pressure on all zones but
3541 * now pressure is applied based on node LRU order.
3542 */
Mel Gorman970a39a2016-07-28 15:46:35 -07003543 shrink_node(pgdat, sc);
Mel Gorman1d82de62016-07-28 15:45:43 -07003544
3545 /*
3546 * Fragmentation may mean that the system cannot be rebalanced for
3547 * high-order allocations. If twice the allocation size has been
3548 * reclaimed then recheck watermarks only at order-0 to prevent
3549 * excessive reclaim. Assume that a process requested a high-order
3550 * can direct reclaim/compact.
3551 */
Vlastimil Babka9861a622016-10-07 16:57:53 -07003552 if (sc->order && sc->nr_reclaimed >= compact_gap(sc->order))
Mel Gorman1d82de62016-07-28 15:45:43 -07003553 sc->order = 0;
3554
Mel Gormanb8e83b92013-07-03 15:01:45 -07003555 return sc->nr_scanned >= sc->nr_to_reclaim;
Mel Gorman75485362013-07-03 15:01:42 -07003556}
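/*
 * Example of the reclaim target computed above (illustrative numbers):
 * with reclaim_idx == ZONE_NORMAL, a 1024-page high watermark on
 * ZONE_DMA32 and a 4096-page one on ZONE_NORMAL give
 *
 *	sc->nr_to_reclaim = max(1024, SWAP_CLUSTER_MAX)
 *			  + max(4096, SWAP_CLUSTER_MAX) = 5120
 *
 * pages as the goal of a single shrink_node() pass.
 */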
3557
3558/*
Mel Gorman1d82de62016-07-28 15:45:43 -07003559 * For kswapd, balance_pgdat() will reclaim pages across a node from zones
3560 * that are eligible for use by the caller until at least one zone is
3561 * balanced.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003562 *
Mel Gorman1d82de62016-07-28 15:45:43 -07003563 * Returns the order kswapd finished reclaiming at.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003564 *
3565 * kswapd scans the zones in the highmem->normal->dma direction. It skips
Mel Gorman41858962009-06-16 15:32:12 -07003566 * zones which have free_pages > high_wmark_pages(zone), but once a zone is
Wei Yang8bb4e7a2019-03-05 15:46:22 -08003567 * found to have free_pages <= high_wmark_pages(zone), any page in that zone
Mel Gorman1d82de62016-07-28 15:45:43 -07003568 * or lower is eligible for reclaim until at least one usable zone is
3569 * balanced.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003570 */
Joonsoo Kim97a225e2020-06-03 15:59:01 -07003571static int balance_pgdat(pg_data_t *pgdat, int order, int highest_zoneidx)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003572{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003573 int i;
Andrew Morton0608f432013-09-24 15:27:41 -07003574 unsigned long nr_soft_reclaimed;
3575 unsigned long nr_soft_scanned;
Johannes Weinereb414682018-10-26 15:06:27 -07003576 unsigned long pflags;
Mel Gorman1c308442018-12-28 00:35:52 -08003577 unsigned long nr_boost_reclaim;
3578 unsigned long zone_boosts[MAX_NR_ZONES] = { 0, };
3579 bool boosted;
Mel Gorman1d82de62016-07-28 15:45:43 -07003580 struct zone *zone;
Andrew Morton179e9632006-03-22 00:08:18 -08003581 struct scan_control sc = {
3582 .gfp_mask = GFP_KERNEL,
Johannes Weineree814fe2014-08-06 16:06:19 -07003583 .order = order,
Johannes Weinera6dc60f82009-03-31 15:19:30 -07003584 .may_unmap = 1,
Andrew Morton179e9632006-03-22 00:08:18 -08003585 };
Omar Sandoval93781322018-06-07 17:07:02 -07003586
Andrew Morton1732d2b012019-07-16 16:26:15 -07003587 set_task_reclaim_state(current, &sc.reclaim_state);
Johannes Weinereb414682018-10-26 15:06:27 -07003588 psi_memstall_enter(&pflags);
Omar Sandoval93781322018-06-07 17:07:02 -07003589 __fs_reclaim_acquire();
3590
Christoph Lameterf8891e52006-06-30 01:55:45 -07003591 count_vm_event(PAGEOUTRUN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003592
Mel Gorman1c308442018-12-28 00:35:52 -08003593 /*
3594 * Account for the reclaim boost. Note that the zone boost is left in
3595 * place so that parallel allocations that are near the watermark will
3596 * stall or enter direct reclaim until kswapd is finished.
3597 */
3598 nr_boost_reclaim = 0;
Joonsoo Kim97a225e2020-06-03 15:59:01 -07003599 for (i = 0; i <= highest_zoneidx; i++) {
Mel Gorman1c308442018-12-28 00:35:52 -08003600 zone = pgdat->node_zones + i;
3601 if (!managed_zone(zone))
3602 continue;
3603
3604 nr_boost_reclaim += zone->watermark_boost;
3605 zone_boosts[i] = zone->watermark_boost;
3606 }
3607 boosted = nr_boost_reclaim;
3608
3609restart:
3610 sc.priority = DEF_PRIORITY;
Konstantin Khlebnikov9e3b2f82012-05-29 15:06:57 -07003611 do {
Johannes Weinerc73322d2017-05-03 14:51:51 -07003612 unsigned long nr_reclaimed = sc.nr_reclaimed;
Mel Gormanb8e83b92013-07-03 15:01:45 -07003613 bool raise_priority = true;
Mel Gorman1c308442018-12-28 00:35:52 -08003614 bool balanced;
Omar Sandoval93781322018-06-07 17:07:02 -07003615 bool ret;
Mel Gormanb8e83b92013-07-03 15:01:45 -07003616
Joonsoo Kim97a225e2020-06-03 15:59:01 -07003617 sc.reclaim_idx = highest_zoneidx;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003618
Mel Gorman86c79f62016-07-28 15:45:59 -07003619 /*
Mel Gorman84c7a772016-07-28 15:46:44 -07003620 * If the number of buffer_heads exceeds the maximum allowed
3621 * then consider reclaiming from all zones. This has a dual
3622 * purpose -- on 64-bit systems it is expected that
3623 * buffer_heads are stripped during active rotation. On 32-bit
3624 * systems, highmem pages can pin lowmem memory and shrinking
3625 * buffers can relieve lowmem pressure. Reclaim may still not
3626 * go ahead if all eligible zones for the original allocation
3627 * request are balanced to avoid excessive reclaim from kswapd.
Mel Gorman86c79f62016-07-28 15:45:59 -07003628 */
3629 if (buffer_heads_over_limit) {
3630 for (i = MAX_NR_ZONES - 1; i >= 0; i--) {
3631 zone = pgdat->node_zones + i;
Mel Gorman6aa303d2016-09-01 16:14:55 -07003632 if (!managed_zone(zone))
Mel Gorman86c79f62016-07-28 15:45:59 -07003633 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003634
Mel Gorman970a39a2016-07-28 15:46:35 -07003635 sc.reclaim_idx = i;
Andrew Mortone1dbeda2006-12-06 20:32:01 -08003636 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003637 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003638 }
Zlatko Calusicdafcb732013-02-22 16:32:34 -08003639
Mel Gorman86c79f62016-07-28 15:45:59 -07003640 /*
Mel Gorman1c308442018-12-28 00:35:52 -08003641 * If the pgdat is imbalanced then ignore boosting and preserve
3642 * the watermarks for a later time and restart. Note that the
3643 * zone watermarks will still be reset at the end of balancing
3644 * on the grounds that the normal reclaim should be enough to
3645 * re-evaluate if boosting is required when kswapd next wakes.
Mel Gorman86c79f62016-07-28 15:45:59 -07003646 */
Joonsoo Kim97a225e2020-06-03 15:59:01 -07003647 balanced = pgdat_balanced(pgdat, sc.order, highest_zoneidx);
Mel Gorman1c308442018-12-28 00:35:52 -08003648 if (!balanced && nr_boost_reclaim) {
3649 nr_boost_reclaim = 0;
3650 goto restart;
3651 }
3652
3653 /*
3654 * If boosting is not active then only reclaim if there are no
3655 * eligible zones. Note that sc.reclaim_idx is not used as
3656 * buffer_heads_over_limit may have adjusted it.
3657 */
3658 if (!nr_boost_reclaim && balanced)
Mel Gormane716f2e2017-05-03 14:53:45 -07003659 goto out;
Andrew Mortone1dbeda2006-12-06 20:32:01 -08003660
Mel Gorman1c308442018-12-28 00:35:52 -08003661 /* Limit the priority of boosting to avoid reclaim writeback */
3662 if (nr_boost_reclaim && sc.priority == DEF_PRIORITY - 2)
3663 raise_priority = false;
3664
3665 /*
3666 * Do not writeback or swap pages for boosted reclaim. The
3667 * intent is to relieve pressure, not to issue sub-optimal IO
3668 * from reclaim context. If no pages are reclaimed, the
3669 * reclaim will be aborted.
3670 */
3671 sc.may_writepage = !laptop_mode && !nr_boost_reclaim;
3672 sc.may_swap = !nr_boost_reclaim;
Mel Gorman1c308442018-12-28 00:35:52 -08003673
Linus Torvalds1da177e2005-04-16 15:20:36 -07003674 /*
Mel Gorman1d82de62016-07-28 15:45:43 -07003675 * Do some background aging of the anon list, to give
3676 * pages a chance to be referenced before reclaiming. All
3677 * pages are rotated regardless of classzone as this is
3678 * about consistent aging.
3679 */
Mel Gormanef8f2322016-07-28 15:46:05 -07003680 age_active_anon(pgdat, &sc);
Mel Gorman1d82de62016-07-28 15:45:43 -07003681
3682 /*
Mel Gormanb7ea3c42013-07-03 15:01:53 -07003683 * If we're having trouble reclaiming, start doing writepage
3684 * even in laptop mode.
3685 */
Johannes Weiner047d72c2017-05-03 14:51:57 -07003686 if (sc.priority < DEF_PRIORITY - 2)
Mel Gormanb7ea3c42013-07-03 15:01:53 -07003687 sc.may_writepage = 1;
3688
Mel Gorman1d82de62016-07-28 15:45:43 -07003689 /* Call soft limit reclaim before calling shrink_node. */
3690 sc.nr_scanned = 0;
3691 nr_soft_scanned = 0;
Mel Gormanef8f2322016-07-28 15:46:05 -07003692 nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(pgdat, sc.order,
Mel Gorman1d82de62016-07-28 15:45:43 -07003693 sc.gfp_mask, &nr_soft_scanned);
3694 sc.nr_reclaimed += nr_soft_reclaimed;
3695
Mel Gormanb7ea3c42013-07-03 15:01:53 -07003696 /*
Mel Gorman1d82de62016-07-28 15:45:43 -07003697 * There should be no need to raise the scanning priority if
3698 * enough pages are already being scanned that the high
3699 * watermark would be met at 100% efficiency.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003700 */
Mel Gorman970a39a2016-07-28 15:46:35 -07003701 if (kswapd_shrink_node(pgdat, &sc))
Mel Gorman1d82de62016-07-28 15:45:43 -07003702 raise_priority = false;
Mel Gorman55150612012-07-31 16:44:35 -07003703
3704 /*
3705 * If the low watermark is met there is no need for processes
3706 * to be throttled on pfmemalloc_wait as they should now be
3707 * able to safely make forward progress. Wake them.
3708 */
3709 if (waitqueue_active(&pgdat->pfmemalloc_wait) &&
Johannes Weinerc73322d2017-05-03 14:51:51 -07003710 allow_direct_reclaim(pgdat))
Vlastimil Babkacfc51152015-02-11 15:25:12 -08003711 wake_up_all(&pgdat->pfmemalloc_wait);
Mel Gorman55150612012-07-31 16:44:35 -07003712
Mel Gormanb8e83b92013-07-03 15:01:45 -07003713 /* Check if kswapd should be suspending */
Omar Sandoval93781322018-06-07 17:07:02 -07003714 __fs_reclaim_release();
3715 ret = try_to_freeze();
3716 __fs_reclaim_acquire();
3717 if (ret || kthread_should_stop())
Mel Gormanb8e83b92013-07-03 15:01:45 -07003718 break;
3719
3720 /*
3721 * Raise priority if scanning rate is too low or there was no
3722 * progress in reclaiming pages
3723 */
Johannes Weinerc73322d2017-05-03 14:51:51 -07003724 nr_reclaimed = sc.nr_reclaimed - nr_reclaimed;
Mel Gorman1c308442018-12-28 00:35:52 -08003725 nr_boost_reclaim -= min(nr_boost_reclaim, nr_reclaimed);
3726
3727 /*
3728 * If reclaim made no progress for a boost, stop reclaim as
3729 * IO cannot be queued and it could be an infinite loop in
3730 * extreme circumstances.
3731 */
3732 if (nr_boost_reclaim && !nr_reclaimed)
3733 break;
3734
Johannes Weinerc73322d2017-05-03 14:51:51 -07003735 if (raise_priority || !nr_reclaimed)
Mel Gormanb8e83b92013-07-03 15:01:45 -07003736 sc.priority--;
Mel Gorman1d82de62016-07-28 15:45:43 -07003737 } while (sc.priority >= 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003738
Johannes Weinerc73322d2017-05-03 14:51:51 -07003739 if (!sc.nr_reclaimed)
3740 pgdat->kswapd_failures++;
3741
Mel Gormanb8e83b92013-07-03 15:01:45 -07003742out:
Mel Gorman1c308442018-12-28 00:35:52 -08003743 /* If reclaim was boosted, account for the reclaim done in this pass */
3744 if (boosted) {
3745 unsigned long flags;
3746
Joonsoo Kim97a225e2020-06-03 15:59:01 -07003747 for (i = 0; i <= highest_zoneidx; i++) {
Mel Gorman1c308442018-12-28 00:35:52 -08003748 if (!zone_boosts[i])
3749 continue;
3750
3751 /* Increments are under the zone lock */
3752 zone = pgdat->node_zones + i;
3753 spin_lock_irqsave(&zone->lock, flags);
3754 zone->watermark_boost -= min(zone->watermark_boost, zone_boosts[i]);
3755 spin_unlock_irqrestore(&zone->lock, flags);
3756 }
3757
3758 /*
3759 * As there is now likely space, wake up kcompactd to defragment
3760 * pageblocks.
3761 */
Joonsoo Kim97a225e2020-06-03 15:59:01 -07003762 wakeup_kcompactd(pgdat, pageblock_order, highest_zoneidx);
Mel Gorman1c308442018-12-28 00:35:52 -08003763 }
3764
Johannes Weiner2a2e4882017-05-03 14:55:03 -07003765 snapshot_refaults(NULL, pgdat);
Omar Sandoval93781322018-06-07 17:07:02 -07003766 __fs_reclaim_release();
Johannes Weinereb414682018-10-26 15:06:27 -07003767 psi_memstall_leave(&pflags);
Andrew Morton1732d2b012019-07-16 16:26:15 -07003768 set_task_reclaim_state(current, NULL);
Yafang Shaoe5ca8072019-07-16 16:26:09 -07003769
Mel Gorman0abdee22011-01-13 15:46:22 -08003770 /*
Mel Gorman1d82de62016-07-28 15:45:43 -07003771 * Return the order kswapd stopped reclaiming at as
3772 * prepare_kswapd_sleep() takes it into account. If another caller
3773 * entered the allocator slow path while kswapd was awake, order will
3774 * remain at the higher level.
Mel Gorman0abdee22011-01-13 15:46:22 -08003775 */
Mel Gorman1d82de62016-07-28 15:45:43 -07003776 return sc.order;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003777}
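/*
 * Descriptive summary of balance_pgdat() above: each pass at a given
 * sc.priority ages the anon lists, runs memcg soft limit reclaim and
 * then kswapd_shrink_node(); the priority is raised only when scanning
 * was insufficient or nothing was reclaimed, and boost-driven reclaim
 * bails out early rather than queueing writeback or swap IO.
 */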
3778
Mel Gormane716f2e2017-05-03 14:53:45 -07003779/*
Joonsoo Kim97a225e2020-06-03 15:59:01 -07003780 * The pgdat->kswapd_highest_zoneidx is used to pass the highest zone index to
3781 * be reclaimed by kswapd from the waker. If the value is MAX_NR_ZONES, which
3782 * is not a valid index, then kswapd is either running for the first time or
3783 * couldn't sleep after the previous reclaim attempt (the node is still
3784 * unbalanced). In that case, return the zone index of the previous cycle.
Mel Gormane716f2e2017-05-03 14:53:45 -07003785 */
Joonsoo Kim97a225e2020-06-03 15:59:01 -07003786static enum zone_type kswapd_highest_zoneidx(pg_data_t *pgdat,
3787 enum zone_type prev_highest_zoneidx)
Mel Gormane716f2e2017-05-03 14:53:45 -07003788{
Joonsoo Kim97a225e2020-06-03 15:59:01 -07003789 enum zone_type curr_idx = READ_ONCE(pgdat->kswapd_highest_zoneidx);
Qian Cai5644e1fb2020-04-01 21:10:12 -07003790
Joonsoo Kim97a225e2020-06-03 15:59:01 -07003791 return curr_idx == MAX_NR_ZONES ? prev_highest_zoneidx : curr_idx;
Mel Gormane716f2e2017-05-03 14:53:45 -07003792}
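/*
 * Example: wakeup_kswapd() below stores, say, ZONE_NORMAL for a
 * GFP_KERNEL wakeup; once kswapd() has consumed that value it writes
 * MAX_NR_ZONES back, so this helper then keeps returning the zone
 * index of the reclaim cycle that is still in progress.
 */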
3793
Mel Gorman38087d92016-07-28 15:45:49 -07003794static void kswapd_try_to_sleep(pg_data_t *pgdat, int alloc_order, int reclaim_order,
Joonsoo Kim97a225e2020-06-03 15:59:01 -07003795 unsigned int highest_zoneidx)
KOSAKI Motohirof0bc0a62011-01-13 15:45:50 -08003796{
3797 long remaining = 0;
3798 DEFINE_WAIT(wait);
3799
3800 if (freezing(current) || kthread_should_stop())
3801 return;
3802
3803 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
3804
Shantanu Goel333b0a42017-05-03 14:53:38 -07003805 /*
3806 * Try to sleep for a short interval. Note that kcompactd will only be
3807 * woken if it is possible to sleep for a short interval. This is
3808 * deliberate on the assumption that if reclaim cannot keep an
3809 * eligible zone balanced, it is also unlikely that compaction will
3810 * succeed.
3811 */
Joonsoo Kim97a225e2020-06-03 15:59:01 -07003812 if (prepare_kswapd_sleep(pgdat, reclaim_order, highest_zoneidx)) {
Vlastimil Babkafd901c92016-04-28 16:18:49 -07003813 /*
3814 * Compaction records what page blocks it recently failed to
3815 * isolate pages from and skips them in future scans.
3816 * When kswapd is going to sleep, it is reasonable to assume
3817 * that page isolation and compaction may succeed, so reset the cache.
3818 */
3819 reset_isolation_suitable(pgdat);
3820
3821 /*
3822 * We have freed the memory, now we should compact it to make
3823 * allocation of the requested order possible.
3824 */
Joonsoo Kim97a225e2020-06-03 15:59:01 -07003825 wakeup_kcompactd(pgdat, alloc_order, highest_zoneidx);
Vlastimil Babkafd901c92016-04-28 16:18:49 -07003826
KOSAKI Motohirof0bc0a62011-01-13 15:45:50 -08003827 remaining = schedule_timeout(HZ/10);
Mel Gorman38087d92016-07-28 15:45:49 -07003828
3829 /*
Joonsoo Kim97a225e2020-06-03 15:59:01 -07003830 * If woken prematurely then reset kswapd_highest_zoneidx and
Mel Gorman38087d92016-07-28 15:45:49 -07003831 * order. The values will either be from a wakeup request or
3832 * the previous request that slept prematurely.
3833 */
3834 if (remaining) {
Joonsoo Kim97a225e2020-06-03 15:59:01 -07003835 WRITE_ONCE(pgdat->kswapd_highest_zoneidx,
3836 kswapd_highest_zoneidx(pgdat,
3837 highest_zoneidx));
Qian Cai5644e1fb2020-04-01 21:10:12 -07003838
3839 if (READ_ONCE(pgdat->kswapd_order) < reclaim_order)
3840 WRITE_ONCE(pgdat->kswapd_order, reclaim_order);
Mel Gorman38087d92016-07-28 15:45:49 -07003841 }
3842
KOSAKI Motohirof0bc0a62011-01-13 15:45:50 -08003843 finish_wait(&pgdat->kswapd_wait, &wait);
3844 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
3845 }
3846
3847 /*
3848 * After a short sleep, check if it was a premature sleep. If not, then
3849 * go fully to sleep until explicitly woken up.
3850 */
Mel Gormand9f21d42016-07-28 15:46:41 -07003851 if (!remaining &&
Joonsoo Kim97a225e2020-06-03 15:59:01 -07003852 prepare_kswapd_sleep(pgdat, reclaim_order, highest_zoneidx)) {
KOSAKI Motohirof0bc0a62011-01-13 15:45:50 -08003853 trace_mm_vmscan_kswapd_sleep(pgdat->node_id);
3854
3855 /*
3856 * vmstat counters are not perfectly accurate and the estimated
3857 * value for counters such as NR_FREE_PAGES can deviate from the
3858 * true value by nr_online_cpus * threshold. To avoid the zone
3859 * watermarks being breached while under pressure, we reduce the
3860 * per-cpu vmstat threshold while kswapd is awake and restore
3861 * them before going back to sleep.
3862 */
3863 set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
Aaditya Kumar1c7e7f62012-07-17 15:48:07 -07003864
3865 if (!kthread_should_stop())
3866 schedule();
3867
KOSAKI Motohirof0bc0a62011-01-13 15:45:50 -08003868 set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);
3869 } else {
3870 if (remaining)
3871 count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY);
3872 else
3873 count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY);
3874 }
3875 finish_wait(&pgdat->kswapd_wait, &wait);
3876}
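/*
 * On the threshold juggling above: calculate_pressure_threshold() is
 * the reduced per-cpu vmstat drift used while kswapd is awake, and
 * calculate_normal_threshold() restores the cheaper, larger drift for
 * the duration of the sleep.
 */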
3877
Linus Torvalds1da177e2005-04-16 15:20:36 -07003878/*
3879 * The background pageout daemon, started as a kernel thread
Rik van Riel4f98a2f2008-10-18 20:26:32 -07003880 * from the init process.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003881 *
3882 * This basically trickles out pages so that we have _some_
3883 * free memory available even if there is no other activity
3884 * that frees anything up. This is needed for things like routing
3885 * etc., where we otherwise might have all activity going on in
3886 * asynchronous contexts that cannot page things out.
3887 *
3888 * If there are applications that are active memory-allocators
3889 * (most normal use), this basically shouldn't matter.
3890 */
3891static int kswapd(void *p)
3892{
Mel Gormane716f2e2017-05-03 14:53:45 -07003893 unsigned int alloc_order, reclaim_order;
Joonsoo Kim97a225e2020-06-03 15:59:01 -07003894 unsigned int highest_zoneidx = MAX_NR_ZONES - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003895 pg_data_t *pgdat = (pg_data_t*)p;
3896 struct task_struct *tsk = current;
Rusty Russella70f7302009-03-13 14:49:46 +10303897 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003898
Rusty Russell174596a2009-01-01 10:12:29 +10303899 if (!cpumask_empty(cpumask))
Mike Travisc5f59f02008-04-04 18:11:10 -07003900 set_cpus_allowed_ptr(tsk, cpumask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003901
3902 /*
3903 * Tell the memory management that we're a "memory allocator",
3904 * and that if we need more memory we should get access to it
3905 * regardless (see "__alloc_pages()"). "kswapd" should
3906 * never get caught in the normal page freeing logic.
3907 *
3908 * (Kswapd normally doesn't need memory anyway, but sometimes
3909 * you need a small amount of memory in order to be able to
3910 * page out something else, and this flag essentially protects
3911 * us from recursively trying to free more memory as we're
3912 * trying to free the first piece of memory in the first place).
3913 */
Christoph Lameter930d9152006-01-08 01:00:47 -08003914 tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
Rafael J. Wysocki83144182007-07-17 04:03:35 -07003915 set_freezable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003916
Qian Cai5644e1fb2020-04-01 21:10:12 -07003917 WRITE_ONCE(pgdat->kswapd_order, 0);
Joonsoo Kim97a225e2020-06-03 15:59:01 -07003918 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, MAX_NR_ZONES);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003919 for ( ; ; ) {
Jeff Liu6f6313d2012-12-11 16:02:48 -08003920 bool ret;
Christoph Lameter3e1d1d22005-06-24 23:13:50 -07003921
Qian Cai5644e1fb2020-04-01 21:10:12 -07003922 alloc_order = reclaim_order = READ_ONCE(pgdat->kswapd_order);
Joonsoo Kim97a225e2020-06-03 15:59:01 -07003923 highest_zoneidx = kswapd_highest_zoneidx(pgdat,
3924 highest_zoneidx);
Mel Gormane716f2e2017-05-03 14:53:45 -07003925
Mel Gorman38087d92016-07-28 15:45:49 -07003926kswapd_try_sleep:
3927 kswapd_try_to_sleep(pgdat, alloc_order, reclaim_order,
Joonsoo Kim97a225e2020-06-03 15:59:01 -07003928 highest_zoneidx);
Mel Gorman215ddd62011-07-08 15:39:40 -07003929
Joonsoo Kim97a225e2020-06-03 15:59:01 -07003930 /* Read the new order and highest_zoneidx */
Qian Cai5644e1fb2020-04-01 21:10:12 -07003931 alloc_order = reclaim_order = READ_ONCE(pgdat->kswapd_order);
Joonsoo Kim97a225e2020-06-03 15:59:01 -07003932 highest_zoneidx = kswapd_highest_zoneidx(pgdat,
3933 highest_zoneidx);
Qian Cai5644e1fb2020-04-01 21:10:12 -07003934 WRITE_ONCE(pgdat->kswapd_order, 0);
Joonsoo Kim97a225e2020-06-03 15:59:01 -07003935 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, MAX_NR_ZONES);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003936
David Rientjes8fe23e02009-12-14 17:58:33 -08003937 ret = try_to_freeze();
3938 if (kthread_should_stop())
3939 break;
3940
3941 /*
3942 * We can speed up thawing tasks if we don't call balance_pgdat
3943 * after returning from the refrigerator
3944 */
Mel Gorman38087d92016-07-28 15:45:49 -07003945 if (ret)
3946 continue;
Mel Gorman1d82de62016-07-28 15:45:43 -07003947
Mel Gorman38087d92016-07-28 15:45:49 -07003948 /*
3949 * Reclaim begins at the requested order but if a high-order
3950 * reclaim fails then kswapd falls back to reclaiming for
3951 * order-0. If that happens, kswapd will consider sleeping
3952 * for the order it finished reclaiming at (reclaim_order)
3953 * but kcompactd is woken to compact for the original
3954 * request (alloc_order).
3955 */
Joonsoo Kim97a225e2020-06-03 15:59:01 -07003956 trace_mm_vmscan_kswapd_wake(pgdat->node_id, highest_zoneidx,
Mel Gormane5146b12016-07-28 15:46:47 -07003957 alloc_order);
Joonsoo Kim97a225e2020-06-03 15:59:01 -07003958 reclaim_order = balance_pgdat(pgdat, alloc_order,
3959 highest_zoneidx);
Mel Gorman38087d92016-07-28 15:45:49 -07003960 if (reclaim_order < alloc_order)
3961 goto kswapd_try_sleep;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003962 }
Takamori Yamaguchib0a8cc52012-11-08 15:53:39 -08003963
Johannes Weiner71abdc12014-06-06 14:35:35 -07003964 tsk->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD);
Johannes Weiner71abdc12014-06-06 14:35:35 -07003965
Linus Torvalds1da177e2005-04-16 15:20:36 -07003966 return 0;
3967}
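/*
 * Each instance of the thread above appears as "kswapd<nid>" (e.g.
 * "kswapd0" on a single-node system), or as "kswapd<nid>:<hid>" when
 * kswapd_threads > 1, matching the kthread_run() name formats used
 * below.
 */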
3968
Charan Teja Reddy0d61a652021-02-05 17:47:57 +05303969static int kswapd_per_node_run(int nid)
3970{
3971 pg_data_t *pgdat = NODE_DATA(nid);
3972 int hid;
3973 int ret = 0;
3974
3975 for (hid = 0; hid < kswapd_threads; ++hid) {
3976 pgdat->mkswapd[hid] = kthread_run(kswapd, pgdat, "kswapd%d:%d",
3977 nid, hid);
3978 if (IS_ERR(pgdat->mkswapd[hid])) {
3979 /* failure at boot is fatal */
3980 WARN_ON(system_state < SYSTEM_RUNNING);
3981 pr_err("Failed to start kswapd%d on node %d\n",
3982 hid, nid);
3983 ret = PTR_ERR(pgdat->mkswapd[hid]);
3984 pgdat->mkswapd[hid] = NULL;
3985 continue;
3986 }
3987 if (!pgdat->kswapd)
3988 pgdat->kswapd = pgdat->mkswapd[hid];
3989 }
3990
3991 return ret;
3992}
3993
3994static void kswapd_per_node_stop(int nid)
3995{
3996 int hid = 0;
3997 struct task_struct *kswapd;
3998
3999 for (hid = 0; hid < kswapd_threads; hid++) {
4000 kswapd = NODE_DATA(nid)->mkswapd[hid];
4001 if (kswapd) {
4002 kthread_stop(kswapd);
4003 NODE_DATA(nid)->mkswapd[hid] = NULL;
4004 }
4005 }
4006 NODE_DATA(nid)->kswapd = NULL;
4007}
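/*
 * Note: kswapd_threads is a tunable carried in this tree (it does not
 * appear to be a mainline vmscan.c feature). The two helpers above
 * start and stop one kswapd instance per hid and alias pgdat->kswapd
 * to the first successfully started instance so that existing
 * single-thread checks keep working.
 */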
4008
Linus Torvalds1da177e2005-04-16 15:20:36 -07004009/*
David Rientjes5ecd9d42018-04-05 16:25:16 -07004010 * A zone is low on free memory or too fragmented for high-order memory. If
4011 * kswapd should reclaim (direct reclaim is deferred), wake it up for the zone's
4012 * pgdat. It will wake up kcompactd after reclaiming memory. If kswapd reclaim
4013 * has failed or is not needed, still wake up kcompactd if only compaction is
4014 * needed.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004015 */
David Rientjes5ecd9d42018-04-05 16:25:16 -07004016void wakeup_kswapd(struct zone *zone, gfp_t gfp_flags, int order,
Joonsoo Kim97a225e2020-06-03 15:59:01 -07004017 enum zone_type highest_zoneidx)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004018{
4019 pg_data_t *pgdat;
Qian Cai5644e1fb2020-04-01 21:10:12 -07004020 enum zone_type curr_idx;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004021
Mel Gorman6aa303d2016-09-01 16:14:55 -07004022 if (!managed_zone(zone))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004023 return;
4024
David Rientjes5ecd9d42018-04-05 16:25:16 -07004025 if (!cpuset_zone_allowed(zone, gfp_flags))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004026 return;
Shakeel Buttdffcac2c2019-07-04 15:14:42 -07004027
Qian Cai5644e1fb2020-04-01 21:10:12 -07004028 pgdat = zone->zone_pgdat;
Joonsoo Kim97a225e2020-06-03 15:59:01 -07004029 curr_idx = READ_ONCE(pgdat->kswapd_highest_zoneidx);
Qian Cai5644e1fb2020-04-01 21:10:12 -07004030
Joonsoo Kim97a225e2020-06-03 15:59:01 -07004031 if (curr_idx == MAX_NR_ZONES || curr_idx < highest_zoneidx)
4032 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, highest_zoneidx);
Qian Cai5644e1fb2020-04-01 21:10:12 -07004033
4034 if (READ_ONCE(pgdat->kswapd_order) < order)
4035 WRITE_ONCE(pgdat->kswapd_order, order);
4036
Con Kolivas8d0986e2005-09-13 01:25:07 -07004037 if (!waitqueue_active(&pgdat->kswapd_wait))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004038 return;
Mel Gormane1a55632016-07-28 15:46:26 -07004039
David Rientjes5ecd9d42018-04-05 16:25:16 -07004040 /* Hopeless node, leave it to direct reclaim if possible */
4041 if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ||
Joonsoo Kim97a225e2020-06-03 15:59:01 -07004042 (pgdat_balanced(pgdat, order, highest_zoneidx) &&
4043 !pgdat_watermark_boosted(pgdat, highest_zoneidx))) {
David Rientjes5ecd9d42018-04-05 16:25:16 -07004044 /*
4045 * There may be plenty of free memory available, but it's too
4046 * fragmented for high-order allocations. Wake up kcompactd
4047 * and rely on compaction_suitable() to determine if it's
4048 * needed. If it fails, it will defer subsequent attempts to
4049 * ratelimit its work.
4050 */
4051 if (!(gfp_flags & __GFP_DIRECT_RECLAIM))
Joonsoo Kim97a225e2020-06-03 15:59:01 -07004052 wakeup_kcompactd(pgdat, order, highest_zoneidx);
Johannes Weinerc73322d2017-05-03 14:51:51 -07004053 return;
David Rientjes5ecd9d42018-04-05 16:25:16 -07004054 }
Johannes Weinerc73322d2017-05-03 14:51:51 -07004055
Joonsoo Kim97a225e2020-06-03 15:59:01 -07004056 trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, highest_zoneidx, order,
David Rientjes5ecd9d42018-04-05 16:25:16 -07004057 gfp_flags);
Con Kolivas8d0986e2005-09-13 01:25:07 -07004058 wake_up_interruptible(&pgdat->kswapd_wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004059}
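/*
 * Illustrative caller (a simplified sketch of the allocator slow path
 * in mm/page_alloc.c; the surrounding names are assumptions here):
 *
 *	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
 *					ac->highest_zoneidx, ac->nodemask)
 *		wakeup_kswapd(zone, gfp_mask, order, ac->highest_zoneidx);
 *
 * so every eligible zone's node gets a chance to start background
 * reclaim before direct reclaim is attempted.
 */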
4060
Rafael J. Wysockic6f37f12009-05-24 22:16:31 +02004061#ifdef CONFIG_HIBERNATION
Linus Torvalds1da177e2005-04-16 15:20:36 -07004062/*
KOSAKI Motohiro7b517552009-12-14 17:59:12 -08004063 * Try to free `nr_to_reclaim' pages of memory, system-wide, and return the number of
Rafael J. Wysockid6277db2006-06-23 02:03:18 -07004064 * freed pages.
4065 *
4066 * Rather than trying to age LRUs the aim is to preserve the overall
4067 * LRU order by reclaiming preferentially
4068 * inactive > active > active referenced > active mapped
Linus Torvalds1da177e2005-04-16 15:20:36 -07004069 */
KOSAKI Motohiro7b517552009-12-14 17:59:12 -08004070unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004071{
Rafael J. Wysockid6277db2006-06-23 02:03:18 -07004072 struct scan_control sc = {
KOSAKI Motohiro7b517552009-12-14 17:59:12 -08004073 .nr_to_reclaim = nr_to_reclaim,
Johannes Weineree814fe2014-08-06 16:06:19 -07004074 .gfp_mask = GFP_HIGHUSER_MOVABLE,
Mel Gormanb2e18752016-07-28 15:45:37 -07004075 .reclaim_idx = MAX_NR_ZONES - 1,
Konstantin Khlebnikov9e3b2f82012-05-29 15:06:57 -07004076 .priority = DEF_PRIORITY,
Johannes Weineree814fe2014-08-06 16:06:19 -07004077 .may_writepage = 1,
4078 .may_unmap = 1,
4079 .may_swap = 1,
4080 .hibernation_mode = 1,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004081 };
Ying Hana09ed5e2011-05-24 17:12:26 -07004082 struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
KOSAKI Motohiro7b517552009-12-14 17:59:12 -08004083 unsigned long nr_reclaimed;
Vlastimil Babka499118e2017-05-08 15:59:50 -07004084 unsigned int noreclaim_flag;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004085
Peter Zijlstrad92a8cf2017-03-03 10:13:38 +01004086 fs_reclaim_acquire(sc.gfp_mask);
Omar Sandoval93781322018-06-07 17:07:02 -07004087 noreclaim_flag = memalloc_noreclaim_save();
Andrew Morton1732d2b012019-07-16 16:26:15 -07004088 set_task_reclaim_state(current, &sc.reclaim_state);
Andrew Morton69e05942006-03-22 00:08:19 -08004089
Vladimir Davydov3115cd92014-04-03 14:47:22 -07004090 nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
Rafael J. Wysockid6277db2006-06-23 02:03:18 -07004091
Andrew Morton1732d2b012019-07-16 16:26:15 -07004092 set_task_reclaim_state(current, NULL);
Vlastimil Babka499118e2017-05-08 15:59:50 -07004093 memalloc_noreclaim_restore(noreclaim_flag);
Omar Sandoval93781322018-06-07 17:07:02 -07004094 fs_reclaim_release(sc.gfp_mask);
Rafael J. Wysockid6277db2006-06-23 02:03:18 -07004095
KOSAKI Motohiro7b517552009-12-14 17:59:12 -08004096 return nr_reclaimed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004097}
Rafael J. Wysockic6f37f12009-05-24 22:16:31 +02004098#endif /* CONFIG_HIBERNATION */
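/*
 * For context (hedged: the call site lives in kernel/power/snapshot.c,
 * not here): hibernation preallocation frees room for the suspend
 * image roughly via
 *
 *	shrink_all_memory(saveable - size);
 *
 * where "saveable" and "size" stand for the pages that must be copied
 * and the target image size; the exact expression is the power code's.
 */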
Linus Torvalds1da177e2005-04-16 15:20:36 -07004099
Yasunori Goto3218ae12006-06-27 02:53:33 -07004100/*
4101 * This kswapd start function will be called by init and node-hot-add.
4102 * On node-hot-add, kswapd will be moved to the proper cpus if cpus are hot-added.
4103 */
4104int kswapd_run(int nid)
4105{
4106 pg_data_t *pgdat = NODE_DATA(nid);
4107 int ret = 0;
4108
4109 if (pgdat->kswapd)
4110 return 0;
4111
Charan Teja Reddy0d61a652021-02-05 17:47:57 +05304112 if (kswapd_threads > 1)
4113 return kswapd_per_node_run(nid);
4114
Yasunori Goto3218ae12006-06-27 02:53:33 -07004115 pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid);
4116 if (IS_ERR(pgdat->kswapd)) {
4117 /* failure at boot is fatal */
Thomas Gleixnerc6202ad2017-05-16 20:42:46 +02004118 BUG_ON(system_state < SYSTEM_RUNNING);
Gavin Shand5dc0ad2012-10-08 16:29:27 -07004119 pr_err("Failed to start kswapd on node %d\n", nid);
4120 ret = PTR_ERR(pgdat->kswapd);
Xishi Qiud72515b2013-04-17 15:58:34 -07004121 pgdat->kswapd = NULL;
Yasunori Goto3218ae12006-06-27 02:53:33 -07004122 }
4123 return ret;
4124}
4125
David Rientjes8fe23e02009-12-14 17:58:33 -08004126/*
Jiang Liud8adde12012-07-11 14:01:52 -07004127 * Called by memory hotplug when all memory in a node is offlined. Caller must
Vladimir Davydovbfc8c902014-06-04 16:07:18 -07004128 * hold mem_hotplug_begin/end().
David Rientjes8fe23e02009-12-14 17:58:33 -08004129 */
4130void kswapd_stop(int nid)
4131{
4132 struct task_struct *kswapd = NODE_DATA(nid)->kswapd;
4133
Charan Teja Reddy0d61a652021-02-05 17:47:57 +05304134 if (kswapd_threads > 1) {
4135 kswapd_per_node_stop(nid);
4136 return;
4137 }
4138
Jiang Liud8adde12012-07-11 14:01:52 -07004139 if (kswapd) {
David Rientjes8fe23e02009-12-14 17:58:33 -08004140 kthread_stop(kswapd);
Jiang Liud8adde12012-07-11 14:01:52 -07004141 NODE_DATA(nid)->kswapd = NULL;
4142 }
David Rientjes8fe23e02009-12-14 17:58:33 -08004143}
4144
Linus Torvalds1da177e2005-04-16 15:20:36 -07004145static int __init kswapd_init(void)
4146{
Wei Yang6b700b52020-04-01 21:10:09 -07004147 int nid;
Andrew Morton69e05942006-03-22 00:08:19 -08004148
Linus Torvalds1da177e2005-04-16 15:20:36 -07004149 swap_setup();
Lai Jiangshan48fb2e22012-12-12 13:51:43 -08004150 for_each_node_state(nid, N_MEMORY)
Yasunori Goto3218ae12006-06-27 02:53:33 -07004151 kswapd_run(nid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004152 return 0;
4153}
4154
4155module_init(kswapd_init)
Christoph Lameter9eeff232006-01-18 17:42:31 -08004156
4157#ifdef CONFIG_NUMA
4158/*
Mel Gormana5f5f912016-07-28 15:46:32 -07004159 * Node reclaim mode
Christoph Lameter9eeff232006-01-18 17:42:31 -08004160 *
Mel Gormana5f5f912016-07-28 15:46:32 -07004161 * If non-zero, call node_reclaim() when the number of free pages falls below
Christoph Lameter9eeff232006-01-18 17:42:31 -08004162 * the watermarks.
Christoph Lameter9eeff232006-01-18 17:42:31 -08004163 */
Mel Gormana5f5f912016-07-28 15:46:32 -07004164int node_reclaim_mode __read_mostly;
Christoph Lameter9eeff232006-01-18 17:42:31 -08004165
Dave Hansen54683f82021-02-24 12:09:15 -08004166/*
4167 * These bit locations are exposed in the vm.zone_reclaim_mode sysctl
4168 * ABI. New bits are OK, but existing bits can never change.
4169 */
4170#define RECLAIM_ZONE (1<<0) /* Run shrink_inactive_list on the zone */
4171#define RECLAIM_WRITE (1<<1) /* Writeout pages during reclaim */
4172#define RECLAIM_UNMAP (1<<2) /* Unmap pages during reclaim */
Christoph Lameter1b2ffb72006-02-01 03:05:34 -08004173
Christoph Lameter9eeff232006-01-18 17:42:31 -08004174/*
Mel Gormana5f5f912016-07-28 15:46:32 -07004175 * Priority for NODE_RECLAIM. This determines the fraction of pages
Christoph Lametera92f7122006-02-01 03:05:32 -08004176 * of a node considered for each zone_reclaim. A priority of 4 scans
4177 * 1/16th of a zone.
4178 */
Mel Gormana5f5f912016-07-28 15:46:32 -07004179#define NODE_RECLAIM_PRIORITY 4
Christoph Lametera92f7122006-02-01 03:05:32 -08004180
Christoph Lameter9eeff232006-01-18 17:42:31 -08004181/*
Mel Gormana5f5f912016-07-28 15:46:32 -07004182 * Percentage of pages in a zone that must be unmapped for node_reclaim to
Christoph Lameter96146342006-07-03 00:24:13 -07004183 * occur.
4184 */
4185int sysctl_min_unmapped_ratio = 1;
4186
4187/*
Christoph Lameter0ff38492006-09-25 23:31:52 -07004188 * If the number of slab pages in a zone grows beyond this percentage then
4189 * slab reclaim needs to occur.
4190 */
4191int sysctl_min_slab_ratio = 5;
4192
Mel Gorman11fb9982016-07-28 15:46:20 -07004193static inline unsigned long node_unmapped_file_pages(struct pglist_data *pgdat)
Mel Gorman90afa5d2009-06-16 15:33:20 -07004194{
Mel Gorman11fb9982016-07-28 15:46:20 -07004195 unsigned long file_mapped = node_page_state(pgdat, NR_FILE_MAPPED);
4196 unsigned long file_lru = node_page_state(pgdat, NR_INACTIVE_FILE) +
4197 node_page_state(pgdat, NR_ACTIVE_FILE);
Mel Gorman90afa5d2009-06-16 15:33:20 -07004198
4199 /*
4200 * It's possible for there to be more file mapped pages than
4201 * accounted for by the pages on the file LRU lists because
4202 * tmpfs pages accounted for as ANON can also be FILE_MAPPED
4203 */
4204 return (file_lru > file_mapped) ? (file_lru - file_mapped) : 0;
4205}
4206
4207/* Work out how many page cache pages we can reclaim in this reclaim_mode */
Mel Gormana5f5f912016-07-28 15:46:32 -07004208static unsigned long node_pagecache_reclaimable(struct pglist_data *pgdat)
Mel Gorman90afa5d2009-06-16 15:33:20 -07004209{
Alexandru Moised031a152015-11-05 18:48:08 -08004210 unsigned long nr_pagecache_reclaimable;
4211 unsigned long delta = 0;
Mel Gorman90afa5d2009-06-16 15:33:20 -07004212
4213 /*
Zhihui Zhang95bbc0c2015-06-24 16:56:42 -07004214 * If RECLAIM_UNMAP is set, then all file pages are considered
Mel Gorman90afa5d2009-06-16 15:33:20 -07004215 * potentially reclaimable. Otherwise, we have to worry about
Mel Gorman11fb9982016-07-28 15:46:20 -07004216 * pages like swapcache and node_unmapped_file_pages() provides
Mel Gorman90afa5d2009-06-16 15:33:20 -07004217 * a better estimate
4218 */
Mel Gormana5f5f912016-07-28 15:46:32 -07004219 if (node_reclaim_mode & RECLAIM_UNMAP)
4220 nr_pagecache_reclaimable = node_page_state(pgdat, NR_FILE_PAGES);
Mel Gorman90afa5d2009-06-16 15:33:20 -07004221 else
Mel Gormana5f5f912016-07-28 15:46:32 -07004222 nr_pagecache_reclaimable = node_unmapped_file_pages(pgdat);
Mel Gorman90afa5d2009-06-16 15:33:20 -07004223
4224 /* If we can't clean pages, remove dirty pages from consideration */
Mel Gormana5f5f912016-07-28 15:46:32 -07004225 if (!(node_reclaim_mode & RECLAIM_WRITE))
4226 delta += node_page_state(pgdat, NR_FILE_DIRTY);
Mel Gorman90afa5d2009-06-16 15:33:20 -07004227
4228 /* Watch for any possible underflows due to delta */
4229 if (unlikely(delta > nr_pagecache_reclaimable))
4230 delta = nr_pagecache_reclaimable;
4231
4232 return nr_pagecache_reclaimable - delta;
4233}
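/*
 * Worked example (illustrative numbers), with RECLAIM_UNMAP and
 * RECLAIM_WRITE both clear: a 1000-page file LRU, NR_FILE_MAPPED == 300
 * and NR_FILE_DIRTY == 100 give
 *
 *	node_unmapped_file_pages()   = 1000 - 300 = 700
 *	node_pagecache_reclaimable() =  700 - 100 = 600
 *
 * i.e. only clean, unmapped page cache counts toward the thresholds
 * checked in node_reclaim().
 */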
4234
Christoph Lameter0ff38492006-09-25 23:31:52 -07004235/*
Mel Gormana5f5f912016-07-28 15:46:32 -07004236 * Try to free up some pages from this node through reclaim.
Christoph Lameter9eeff232006-01-18 17:42:31 -08004237 */
Mel Gormana5f5f912016-07-28 15:46:32 -07004238static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
Christoph Lameter9eeff232006-01-18 17:42:31 -08004239{
Christoph Lameter7fb2d462006-03-22 00:08:22 -08004240 /* Minimum pages needed in order to stay on node */
Andrew Morton69e05942006-03-22 00:08:19 -08004241 const unsigned long nr_pages = 1 << order;
Christoph Lameter9eeff232006-01-18 17:42:31 -08004242 struct task_struct *p = current;
Vlastimil Babka499118e2017-05-08 15:59:50 -07004243 unsigned int noreclaim_flag;
Andrew Morton179e9632006-03-22 00:08:18 -08004244 struct scan_control sc = {
Andrew Morton62b726c2013-02-22 16:32:24 -08004245 .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
Nick Desaulniersf2f43e52017-07-06 15:36:50 -07004246 .gfp_mask = current_gfp_context(gfp_mask),
Johannes Weinerbd2f6192009-03-31 15:19:38 -07004247 .order = order,
Mel Gormana5f5f912016-07-28 15:46:32 -07004248 .priority = NODE_RECLAIM_PRIORITY,
4249 .may_writepage = !!(node_reclaim_mode & RECLAIM_WRITE),
4250 .may_unmap = !!(node_reclaim_mode & RECLAIM_UNMAP),
Johannes Weineree814fe2014-08-06 16:06:19 -07004251 .may_swap = 1,
Nick Desaulniersf2f43e52017-07-06 15:36:50 -07004252 .reclaim_idx = gfp_zone(gfp_mask),
Andrew Morton179e9632006-03-22 00:08:18 -08004253 };
Christoph Lameter9eeff232006-01-18 17:42:31 -08004254
Yafang Shao132bb8c2019-05-13 17:17:53 -07004255 trace_mm_vmscan_node_reclaim_begin(pgdat->node_id, order,
4256 sc.gfp_mask);
4257
Christoph Lameter9eeff232006-01-18 17:42:31 -08004258 cond_resched();
Omar Sandoval93781322018-06-07 17:07:02 -07004259 fs_reclaim_acquire(sc.gfp_mask);
Christoph Lameterd4f77962006-02-24 13:04:22 -08004260 /*
Zhihui Zhang95bbc0c2015-06-24 16:56:42 -07004261 * We need to be able to allocate from the reserves for RECLAIM_UNMAP
Christoph Lameterd4f77962006-02-24 13:04:22 -08004262 * and we also need to be able to write out pages for RECLAIM_WRITE
Zhihui Zhang95bbc0c2015-06-24 16:56:42 -07004263 * and RECLAIM_UNMAP.
Christoph Lameterd4f77962006-02-24 13:04:22 -08004264 */
Vlastimil Babka499118e2017-05-08 15:59:50 -07004265 noreclaim_flag = memalloc_noreclaim_save();
4266 p->flags |= PF_SWAPWRITE;
Andrew Morton1732d2b012019-07-16 16:26:15 -07004267 set_task_reclaim_state(p, &sc.reclaim_state);
Christoph Lameterc84db23c2006-02-01 03:05:29 -08004268
Mel Gormana5f5f912016-07-28 15:46:32 -07004269 if (node_pagecache_reclaimable(pgdat) > pgdat->min_unmapped_pages) {
Christoph Lameter0ff38492006-09-25 23:31:52 -07004270 /*
Andrey Ryabinin894befe2018-04-10 16:27:51 -07004271 * Free memory by calling shrink_node() with increasing
Christoph Lameter0ff38492006-09-25 23:31:52 -07004272 * priorities until we have enough memory freed.
4273 */
Christoph Lameter0ff38492006-09-25 23:31:52 -07004274 do {
Mel Gorman970a39a2016-07-28 15:46:35 -07004275 shrink_node(pgdat, &sc);
Konstantin Khlebnikov9e3b2f82012-05-29 15:06:57 -07004276 } while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0);
Christoph Lameter0ff38492006-09-25 23:31:52 -07004277 }
Christoph Lameterc84db23c2006-02-01 03:05:29 -08004278
Andrew Morton1732d2b012019-07-16 16:26:15 -07004279 set_task_reclaim_state(p, NULL);
Vlastimil Babka499118e2017-05-08 15:59:50 -07004280 current->flags &= ~PF_SWAPWRITE;
4281 memalloc_noreclaim_restore(noreclaim_flag);
Omar Sandoval93781322018-06-07 17:07:02 -07004282 fs_reclaim_release(sc.gfp_mask);
Yafang Shao132bb8c2019-05-13 17:17:53 -07004283
4284 trace_mm_vmscan_node_reclaim_end(sc.nr_reclaimed);
4285
Rik van Riela79311c2009-01-06 14:40:01 -08004286 return sc.nr_reclaimed >= nr_pages;
Christoph Lameter9eeff232006-01-18 17:42:31 -08004287}
Andrew Morton179e9632006-03-22 00:08:18 -08004288
Mel Gormana5f5f912016-07-28 15:46:32 -07004289int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
Andrew Morton179e9632006-03-22 00:08:18 -08004290{
David Rientjesd773ed62007-10-16 23:26:01 -07004291 int ret;
Andrew Morton179e9632006-03-22 00:08:18 -08004292
4293 /*
Mel Gormana5f5f912016-07-28 15:46:32 -07004294 * Node reclaim reclaims unmapped file backed pages and
Christoph Lameter0ff38492006-09-25 23:31:52 -07004295 * slab pages if we are over the defined limits.
Christoph Lameter34aa1332006-06-30 01:55:37 -07004296 *
Christoph Lameter96146342006-07-03 00:24:13 -07004297 * A small portion of unmapped file backed pages is needed for
4298 * file I/O; otherwise pages read by file I/O will be immediately
Mel Gormana5f5f912016-07-28 15:46:32 -07004299 * thrown out if the node is overallocated. So we do not reclaim
4300 * if less than a specified percentage of the node is used by
Christoph Lameter96146342006-07-03 00:24:13 -07004301 * unmapped file backed pages.
Andrew Morton179e9632006-03-22 00:08:18 -08004302 */
Mel Gormana5f5f912016-07-28 15:46:32 -07004303 if (node_pagecache_reclaimable(pgdat) <= pgdat->min_unmapped_pages &&
Roman Gushchind42f3242020-08-06 23:20:39 -07004304 node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B) <=
4305 pgdat->min_slab_pages)
Mel Gormana5f5f912016-07-28 15:46:32 -07004306 return NODE_RECLAIM_FULL;
Andrew Morton179e9632006-03-22 00:08:18 -08004307
4308 /*
David Rientjesd773ed62007-10-16 23:26:01 -07004309 * Do not scan if the allocation should not be delayed.
Andrew Morton179e9632006-03-22 00:08:18 -08004310 */
Mel Gormand0164ad2015-11-06 16:28:21 -08004311 if (!gfpflags_allow_blocking(gfp_mask) || (current->flags & PF_MEMALLOC))
Mel Gormana5f5f912016-07-28 15:46:32 -07004312 return NODE_RECLAIM_NOSCAN;
Andrew Morton179e9632006-03-22 00:08:18 -08004313
4314 /*
Mel Gormana5f5f912016-07-28 15:46:32 -07004315 * Only run node reclaim on the local node or on nodes that do not
Andrew Morton179e9632006-03-22 00:08:18 -08004316 * have associated processors. This will favor the local processor
4317 * over remote processors and spread off node memory allocations
4318 * as wide as possible.
4319 */
Mel Gormana5f5f912016-07-28 15:46:32 -07004320 if (node_state(pgdat->node_id, N_CPU) && pgdat->node_id != numa_node_id())
4321 return NODE_RECLAIM_NOSCAN;
David Rientjesd773ed62007-10-16 23:26:01 -07004322
Mel Gormana5f5f912016-07-28 15:46:32 -07004323 if (test_and_set_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags))
4324 return NODE_RECLAIM_NOSCAN;
Mel Gormanfa5e0842009-06-16 15:33:22 -07004325
Mel Gormana5f5f912016-07-28 15:46:32 -07004326 ret = __node_reclaim(pgdat, gfp_mask, order);
4327 clear_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags);
David Rientjesd773ed62007-10-16 23:26:01 -07004328
Mel Gorman24cf725182009-06-16 15:33:23 -07004329 if (!ret)
4330 count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED);
4331
David Rientjesd773ed62007-10-16 23:26:01 -07004332 return ret;
Andrew Morton179e9632006-03-22 00:08:18 -08004333}
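/*
 * For reference (the values live in mm/internal.h in this tree):
 * NODE_RECLAIM_NOSCAN and NODE_RECLAIM_FULL mean no scan was attempted,
 * while the __node_reclaim() result distinguishes NODE_RECLAIM_SUCCESS
 * (at least nr_pages freed) from NODE_RECLAIM_SOME.
 */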
Christoph Lameter9eeff232006-01-18 17:42:31 -08004334#endif
Lee Schermerhorn894bc312008-10-18 20:26:39 -07004335
Lee Schermerhorn89e004ea2008-10-18 20:26:43 -07004336/**
Kuo-Hsin Yang64e3d122018-11-06 13:23:24 +00004337 * check_move_unevictable_pages - check pages for evictability and move to
4338 * appropriate zone lru list
4339 * @pvec: pagevec with lru pages to check
Lee Schermerhorn89e004ea2008-10-18 20:26:43 -07004340 *
Kuo-Hsin Yang64e3d122018-11-06 13:23:24 +00004341 * Checks pages for evictability; if an evictable page is in the unevictable
4342 * lru list, it is moved to the appropriate evictable lru list. This function
4343 * should only be used for lru pages.
Lee Schermerhorn89e004ea2008-10-18 20:26:43 -07004344 */
Kuo-Hsin Yang64e3d122018-11-06 13:23:24 +00004345void check_move_unevictable_pages(struct pagevec *pvec)
Lee Schermerhorn89e004ea2008-10-18 20:26:43 -07004346{
Johannes Weiner925b7672012-01-12 17:18:15 -08004347 struct lruvec *lruvec;
Mel Gorman785b99f2016-07-28 15:47:23 -07004348 struct pglist_data *pgdat = NULL;
Hugh Dickins24513262012-01-20 14:34:21 -08004349 int pgscanned = 0;
4350 int pgrescued = 0;
4351 int i;
Lee Schermerhorn89e004ea2008-10-18 20:26:43 -07004352
Kuo-Hsin Yang64e3d122018-11-06 13:23:24 +00004353 for (i = 0; i < pvec->nr; i++) {
4354 struct page *page = pvec->pages[i];
Mel Gorman785b99f2016-07-28 15:47:23 -07004355 struct pglist_data *pagepgdat = page_pgdat(page);
Hugh Dickins8d8869c2020-09-18 21:20:12 -07004356 int nr_pages;
Lee Schermerhornaf936a12008-10-18 20:26:53 -07004357
Hugh Dickins8d8869c2020-09-18 21:20:12 -07004358 if (PageTransTail(page))
4359 continue;
4360
4361 nr_pages = thp_nr_pages(page);
4362 pgscanned += nr_pages;
4363
Mel Gorman785b99f2016-07-28 15:47:23 -07004364 if (pagepgdat != pgdat) {
4365 if (pgdat)
4366 spin_unlock_irq(&pgdat->lru_lock);
4367 pgdat = pagepgdat;
4368 spin_lock_irq(&pgdat->lru_lock);
Lee Schermerhorn89e004ea2008-10-18 20:26:43 -07004369 }
Mel Gorman785b99f2016-07-28 15:47:23 -07004370 lruvec = mem_cgroup_page_lruvec(page, pgdat);
Lee Schermerhorn89e004ea2008-10-18 20:26:43 -07004371
Hugh Dickins24513262012-01-20 14:34:21 -08004372 if (!PageLRU(page) || !PageUnevictable(page))
4373 continue;
4374
Hugh Dickins39b5f292012-10-08 16:33:18 -07004375 if (page_evictable(page)) {
Hugh Dickins24513262012-01-20 14:34:21 -08004376 enum lru_list lru = page_lru_base_type(page);
4377
Sasha Levin309381fea2014-01-23 15:52:54 -08004378 VM_BUG_ON_PAGE(PageActive(page), page);
Hugh Dickins24513262012-01-20 14:34:21 -08004379 ClearPageUnevictable(page);
Hugh Dickinsfa9add62012-05-29 15:07:09 -07004380 del_page_from_lru_list(page, lruvec, LRU_UNEVICTABLE);
4381 add_page_to_lru_list(page, lruvec, lru);
Hugh Dickins8d8869c2020-09-18 21:20:12 -07004382 pgrescued += nr_pages;
Hugh Dickins24513262012-01-20 14:34:21 -08004383 }
Lee Schermerhorn89e004ea2008-10-18 20:26:43 -07004384 }
Hugh Dickins24513262012-01-20 14:34:21 -08004385
Mel Gorman785b99f2016-07-28 15:47:23 -07004386 if (pgdat) {
Hugh Dickins24513262012-01-20 14:34:21 -08004387 __count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
4388 __count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
Mel Gorman785b99f2016-07-28 15:47:23 -07004389 spin_unlock_irq(&pgdat->lru_lock);
Hugh Dickins24513262012-01-20 14:34:21 -08004390 }
Hugh Dickins850465792012-01-20 14:34:19 -08004391}
Kuo-Hsin Yang64e3d122018-11-06 13:23:24 +00004392EXPORT_SYMBOL_GPL(check_move_unevictable_pages);