blob: 58abb6d96f43601a419c63d0f0946d01733699a1 [file] [log] [blame]
Thomas Gleixner46aeb7e2019-05-28 10:10:27 -07001// SPDX-License-Identifier: GPL-2.0-only
Linus Torvalds1da177e2005-04-16 15:20:36 -07002/*
3 * Simple NUMA memory policy for the Linux kernel.
4 *
5 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
Christoph Lameter8bccd852005-10-29 18:16:59 -07006 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
Linus Torvalds1da177e2005-04-16 15:20:36 -07007 *
8 * NUMA policy allows the user to give hints in which node(s) memory should
9 * be allocated.
10 *
11 * Support four policies per VMA and per process:
12 *
13 * The VMA policy has priority over the process policy for a page fault.
14 *
15 * interleave Allocate memory interleaved over a set of nodes,
16 * with normal fallback if it fails.
17 * For VMA based allocations this interleaves based on the
18 * offset into the backing object or offset into the mapping
19 * for anonymous memory. For process policy an process counter
20 * is used.
Christoph Lameter8bccd852005-10-29 18:16:59 -070021 *
Linus Torvalds1da177e2005-04-16 15:20:36 -070022 * bind Only allocate memory on a specific set of nodes,
23 * no fallback.
Christoph Lameter8bccd852005-10-29 18:16:59 -070024 * FIXME: memory is allocated starting with the first node
25 * to the last. It would be better if bind would truly restrict
26 * the allocation to memory nodes instead
27 *
Linus Torvalds1da177e2005-04-16 15:20:36 -070028 * preferred Try a specific node first before normal fallback.
David Rientjes00ef2d22013-02-22 16:35:36 -080029 * As a special case NUMA_NO_NODE here means do the allocation
Linus Torvalds1da177e2005-04-16 15:20:36 -070030 * on the local CPU. This is normally identical to default,
31 * but useful to set in a VMA when you have a non default
32 * process policy.
Christoph Lameter8bccd852005-10-29 18:16:59 -070033 *
Linus Torvalds1da177e2005-04-16 15:20:36 -070034 * default Allocate on the local node first, or when on a VMA
35 * use the process policy. This is what Linux always did
36 * in a NUMA aware kernel and still does by, ahem, default.
37 *
38 * The process policy is applied for most non interrupt memory allocations
39 * in that process' context. Interrupts ignore the policies and always
40 * try to allocate on the local CPU. The VMA policy is only applied for memory
41 * allocations for a VMA in the VM.
42 *
43 * Currently there are a few corner cases in swapping where the policy
44 * is not applied, but the majority should be handled. When process policy
45 * is used it is not remembered over swap outs/swap ins.
46 *
47 * Only the highest zone in the zone hierarchy gets policied. Allocations
48 * requesting a lower zone just use default policy. This implies that
49 * on systems with highmem kernel lowmem allocation don't get policied.
50 * Same with GFP_DMA allocations.
51 *
52 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
53 * all users and remembered even when nobody has memory mapped.
54 */
55
56/* Notebook:
57 fix mmap readahead to honour policy and enable policy for any page cache
58 object
59 statistics for bigpages
60 global policy for page cache? currently it uses process policy. Requires
61 first item above.
62 handle mremap for shared memory (currently ignored for the policy)
63 grows down?
64 make bind policy root only? It can trigger oom much faster and the
65 kernel is not always grateful with that.
Linus Torvalds1da177e2005-04-16 15:20:36 -070066*/
67
Mitchel Humpherysb1de0d12014-06-06 14:38:30 -070068#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
69
Linus Torvalds1da177e2005-04-16 15:20:36 -070070#include <linux/mempolicy.h>
Christoph Hellwiga5201102019-08-28 16:19:53 +020071#include <linux/pagewalk.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070072#include <linux/highmem.h>
73#include <linux/hugetlb.h>
74#include <linux/kernel.h>
75#include <linux/sched.h>
Ingo Molnar6e84f312017-02-08 18:51:29 +010076#include <linux/sched/mm.h>
Ingo Molnar6a3827d2017-02-08 18:51:31 +010077#include <linux/sched/numa_balancing.h>
Ingo Molnarf719ff9b2017-02-06 10:57:33 +010078#include <linux/sched/task.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070079#include <linux/nodemask.h>
80#include <linux/cpuset.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070081#include <linux/slab.h>
82#include <linux/string.h>
Paul Gortmakerb95f1b312011-10-16 02:01:52 -040083#include <linux/export.h>
Pavel Emelyanovb4888932007-10-18 23:40:14 -070084#include <linux/nsproxy.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070085#include <linux/interrupt.h>
86#include <linux/init.h>
87#include <linux/compat.h>
Otto Ebeling31367462017-11-15 17:38:14 -080088#include <linux/ptrace.h>
Christoph Lameterdc9aa5b2006-01-08 01:00:50 -080089#include <linux/swap.h>
Christoph Lameter1a75a6c2006-01-08 01:01:02 -080090#include <linux/seq_file.h>
91#include <linux/proc_fs.h>
Christoph Lameterb20a3502006-03-22 00:09:12 -080092#include <linux/migrate.h>
Hugh Dickins62b61f62009-12-14 17:59:33 -080093#include <linux/ksm.h>
Christoph Lameter95a402c2006-06-23 02:03:53 -070094#include <linux/rmap.h>
David Quigley86c3a762006-06-23 02:04:02 -070095#include <linux/security.h>
Adrian Bunkdbcb0f12007-10-16 01:26:26 -070096#include <linux/syscalls.h>
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -070097#include <linux/ctype.h>
KOSAKI Motohiro6d9c2852009-12-14 17:58:11 -080098#include <linux/mm_inline.h>
Lee Schermerhornb24f53a2012-10-25 14:16:32 +020099#include <linux/mmu_notifier.h>
Mitchel Humpherysb1de0d12014-06-06 14:38:30 -0700100#include <linux/printk.h>
Naoya Horiguchic8633792017-09-08 16:11:08 -0700101#include <linux/swapops.h>
Christoph Lameterdc9aa5b2006-01-08 01:00:50 -0800102
Linus Torvalds1da177e2005-04-16 15:20:36 -0700103#include <asm/tlbflush.h>
Linus Torvalds7c0f6ba2016-12-24 11:46:01 -0800104#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -0700105
Nick Piggin62695a82008-10-18 20:26:09 -0700106#include "internal.h"
107
Christoph Lameter38e35862006-01-08 01:01:01 -0800108/* Internal flags */
Christoph Lameterdc9aa5b2006-01-08 01:00:50 -0800109#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0) /* Skip checks for continuous vmas */
Christoph Lameter38e35862006-01-08 01:01:01 -0800110#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1) /* Invert check for nodemask */
Christoph Lameterdc9aa5b2006-01-08 01:00:50 -0800111
Pekka Enbergfcc234f2006-03-22 00:08:13 -0800112static struct kmem_cache *policy_cache;
113static struct kmem_cache *sn_cache;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700114
Linus Torvalds1da177e2005-04-16 15:20:36 -0700115/* Highest zone. An specific allocation for a zone below that is not
116 policied. */
Christoph Lameter62672762007-02-10 01:43:07 -0800117enum zone_type policy_zone = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700118
Lee Schermerhornbea904d2008-04-28 02:13:18 -0700119/*
120 * run-time system-wide default policy => local allocation
121 */
H Hartley Sweetene754d792011-10-31 17:09:23 -0700122static struct mempolicy default_policy = {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700123 .refcnt = ATOMIC_INIT(1), /* never free it */
Lee Schermerhornbea904d2008-04-28 02:13:18 -0700124 .mode = MPOL_PREFERRED,
Lee Schermerhornfc36b8d2008-04-28 02:13:21 -0700125 .flags = MPOL_F_LOCAL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700126};
127
Mel Gorman5606e382012-11-02 18:19:13 +0000128static struct mempolicy preferred_node_policy[MAX_NUMNODES];
129
Dan Williamsb2ca9162020-02-16 12:00:48 -0800130/**
131 * numa_map_to_online_node - Find closest online node
Krzysztof Kozlowskif6e92f42020-08-11 18:31:13 -0700132 * @node: Node id to start the search
Dan Williamsb2ca9162020-02-16 12:00:48 -0800133 *
134 * Lookup the next closest node by distance if @nid is not online.
135 */
136int numa_map_to_online_node(int node)
137{
Dan Williams4fcbe962020-02-16 12:00:53 -0800138 int min_dist = INT_MAX, dist, n, min_node;
Dan Williamsb2ca9162020-02-16 12:00:48 -0800139
Dan Williams4fcbe962020-02-16 12:00:53 -0800140 if (node == NUMA_NO_NODE || node_online(node))
141 return node;
Dan Williamsb2ca9162020-02-16 12:00:48 -0800142
143 min_node = node;
Dan Williams4fcbe962020-02-16 12:00:53 -0800144 for_each_online_node(n) {
145 dist = node_distance(node, n);
146 if (dist < min_dist) {
147 min_dist = dist;
148 min_node = n;
Dan Williamsb2ca9162020-02-16 12:00:48 -0800149 }
150 }
151
152 return min_node;
153}
154EXPORT_SYMBOL_GPL(numa_map_to_online_node);
155
Oleg Nesterov74d2c3a2014-10-09 15:27:50 -0700156struct mempolicy *get_task_policy(struct task_struct *p)
Mel Gorman5606e382012-11-02 18:19:13 +0000157{
158 struct mempolicy *pol = p->mempolicy;
Oleg Nesterovf15ca782014-10-09 15:27:43 -0700159 int node;
Mel Gorman5606e382012-11-02 18:19:13 +0000160
Oleg Nesterovf15ca782014-10-09 15:27:43 -0700161 if (pol)
162 return pol;
Mel Gorman5606e382012-11-02 18:19:13 +0000163
Oleg Nesterovf15ca782014-10-09 15:27:43 -0700164 node = numa_node_id();
165 if (node != NUMA_NO_NODE) {
166 pol = &preferred_node_policy[node];
167 /* preferred_node_policy is not initialised early in boot */
168 if (pol->mode)
169 return pol;
Mel Gorman5606e382012-11-02 18:19:13 +0000170 }
171
Oleg Nesterovf15ca782014-10-09 15:27:43 -0700172 return &default_policy;
Mel Gorman5606e382012-11-02 18:19:13 +0000173}
174
David Rientjes37012942008-04-28 02:12:33 -0700175static const struct mempolicy_operations {
176 int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
Vlastimil Babka213980c2017-07-06 15:40:06 -0700177 void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
David Rientjes37012942008-04-28 02:12:33 -0700178} mpol_ops[MPOL_MAX];
179
David Rientjesf5b087b2008-04-28 02:12:27 -0700180static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
181{
Bob Liu6d556292010-05-24 14:31:59 -0700182 return pol->flags & MPOL_MODE_FLAGS;
David Rientjes4c50bc02008-04-28 02:12:30 -0700183}
184
185static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
186 const nodemask_t *rel)
187{
188 nodemask_t tmp;
189 nodes_fold(tmp, *orig, nodes_weight(*rel));
190 nodes_onto(*ret, tmp, *rel);
David Rientjesf5b087b2008-04-28 02:12:27 -0700191}
192
David Rientjes37012942008-04-28 02:12:33 -0700193static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
194{
195 if (nodes_empty(*nodes))
196 return -EINVAL;
197 pol->v.nodes = *nodes;
198 return 0;
199}
200
201static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
202{
203 if (!nodes)
Lee Schermerhornfc36b8d2008-04-28 02:13:21 -0700204 pol->flags |= MPOL_F_LOCAL; /* local allocation */
David Rientjes37012942008-04-28 02:12:33 -0700205 else if (nodes_empty(*nodes))
206 return -EINVAL; /* no allowed nodes */
207 else
208 pol->v.preferred_node = first_node(*nodes);
209 return 0;
210}
211
212static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
213{
Zhihui Zhang859f7ef2014-12-18 16:17:09 -0800214 if (nodes_empty(*nodes))
David Rientjes37012942008-04-28 02:12:33 -0700215 return -EINVAL;
216 pol->v.nodes = *nodes;
217 return 0;
218}
219
Miao Xie58568d22009-06-16 15:31:49 -0700220/*
221 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
222 * any, for the new policy. mpol_new() has already validated the nodes
223 * parameter with respect to the policy mode and flags. But, we need to
224 * handle an empty nodemask with MPOL_PREFERRED here.
225 *
226 * Must be called holding task's alloc_lock to protect task's mems_allowed
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -0700227 * and mempolicy. May also be called holding the mmap_lock for write.
Miao Xie58568d22009-06-16 15:31:49 -0700228 */
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -0700229static int mpol_set_nodemask(struct mempolicy *pol,
230 const nodemask_t *nodes, struct nodemask_scratch *nsc)
Miao Xie58568d22009-06-16 15:31:49 -0700231{
Miao Xie58568d22009-06-16 15:31:49 -0700232 int ret;
233
234 /* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
235 if (pol == NULL)
236 return 0;
Lai Jiangshan01f13bd2012-12-12 13:51:33 -0800237 /* Check N_MEMORY */
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -0700238 nodes_and(nsc->mask1,
Lai Jiangshan01f13bd2012-12-12 13:51:33 -0800239 cpuset_current_mems_allowed, node_states[N_MEMORY]);
Miao Xie58568d22009-06-16 15:31:49 -0700240
241 VM_BUG_ON(!nodes);
242 if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
243 nodes = NULL; /* explicit local allocation */
244 else {
245 if (pol->flags & MPOL_F_RELATIVE_NODES)
Zhihui Zhang859f7ef2014-12-18 16:17:09 -0800246 mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
Miao Xie58568d22009-06-16 15:31:49 -0700247 else
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -0700248 nodes_and(nsc->mask2, *nodes, nsc->mask1);
249
Miao Xie58568d22009-06-16 15:31:49 -0700250 if (mpol_store_user_nodemask(pol))
251 pol->w.user_nodemask = *nodes;
252 else
253 pol->w.cpuset_mems_allowed =
254 cpuset_current_mems_allowed;
255 }
256
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -0700257 if (nodes)
258 ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
259 else
260 ret = mpol_ops[pol->mode].create(pol, NULL);
Miao Xie58568d22009-06-16 15:31:49 -0700261 return ret;
262}
263
264/*
265 * This function just creates a new policy, does some check and simple
266 * initialization. You must invoke mpol_set_nodemask() to set nodes.
267 */
David Rientjes028fec42008-04-28 02:12:25 -0700268static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
269 nodemask_t *nodes)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700270{
271 struct mempolicy *policy;
272
David Rientjes028fec42008-04-28 02:12:25 -0700273 pr_debug("setting mode %d flags %d nodes[0] %lx\n",
David Rientjes00ef2d22013-02-22 16:35:36 -0800274 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);
Paul Mundt140d5a42007-07-15 23:38:16 -0700275
David Rientjes3e1f06452008-04-28 02:12:34 -0700276 if (mode == MPOL_DEFAULT) {
277 if (nodes && !nodes_empty(*nodes))
David Rientjes37012942008-04-28 02:12:33 -0700278 return ERR_PTR(-EINVAL);
Lee Schermerhornd3a71032012-10-25 14:16:29 +0200279 return NULL;
David Rientjes37012942008-04-28 02:12:33 -0700280 }
David Rientjes3e1f06452008-04-28 02:12:34 -0700281 VM_BUG_ON(!nodes);
282
283 /*
284 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
285 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
286 * All other modes require a valid pointer to a non-empty nodemask.
287 */
288 if (mode == MPOL_PREFERRED) {
289 if (nodes_empty(*nodes)) {
290 if (((flags & MPOL_F_STATIC_NODES) ||
291 (flags & MPOL_F_RELATIVE_NODES)))
292 return ERR_PTR(-EINVAL);
David Rientjes3e1f06452008-04-28 02:12:34 -0700293 }
Peter Zijlstra479e2802012-10-25 14:16:28 +0200294 } else if (mode == MPOL_LOCAL) {
Piotr Kwapulinski8d303e42016-12-12 16:42:49 -0800295 if (!nodes_empty(*nodes) ||
296 (flags & MPOL_F_STATIC_NODES) ||
297 (flags & MPOL_F_RELATIVE_NODES))
Peter Zijlstra479e2802012-10-25 14:16:28 +0200298 return ERR_PTR(-EINVAL);
299 mode = MPOL_PREFERRED;
David Rientjes3e1f06452008-04-28 02:12:34 -0700300 } else if (nodes_empty(*nodes))
301 return ERR_PTR(-EINVAL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700302 policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
303 if (!policy)
304 return ERR_PTR(-ENOMEM);
305 atomic_set(&policy->refcnt, 1);
Lee Schermerhorn45c47452008-04-28 02:13:12 -0700306 policy->mode = mode;
David Rientjes3e1f06452008-04-28 02:12:34 -0700307 policy->flags = flags;
David Rientjesf5b087b2008-04-28 02:12:27 -0700308
David Rientjes37012942008-04-28 02:12:33 -0700309 return policy;
310}
311
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -0700312/* Slow path of a mpol destructor. */
313void __mpol_put(struct mempolicy *p)
314{
315 if (!atomic_dec_and_test(&p->refcnt))
316 return;
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -0700317 kmem_cache_free(policy_cache, p);
318}
319
Vlastimil Babka213980c2017-07-06 15:40:06 -0700320static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
David Rientjes37012942008-04-28 02:12:33 -0700321{
322}
323
Vlastimil Babka213980c2017-07-06 15:40:06 -0700324static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
David Rientjes37012942008-04-28 02:12:33 -0700325{
326 nodemask_t tmp;
327
328 if (pol->flags & MPOL_F_STATIC_NODES)
329 nodes_and(tmp, pol->w.user_nodemask, *nodes);
330 else if (pol->flags & MPOL_F_RELATIVE_NODES)
331 mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
332 else {
Vlastimil Babka213980c2017-07-06 15:40:06 -0700333 nodes_remap(tmp, pol->v.nodes,pol->w.cpuset_mems_allowed,
334 *nodes);
zhong jiang29b190f2019-06-28 12:06:43 -0700335 pol->w.cpuset_mems_allowed = *nodes;
David Rientjes37012942008-04-28 02:12:33 -0700336 }
337
Miao Xie708c1bb2010-05-24 14:32:07 -0700338 if (nodes_empty(tmp))
339 tmp = *nodes;
340
Vlastimil Babka213980c2017-07-06 15:40:06 -0700341 pol->v.nodes = tmp;
David Rientjes37012942008-04-28 02:12:33 -0700342}
343
344static void mpol_rebind_preferred(struct mempolicy *pol,
Vlastimil Babka213980c2017-07-06 15:40:06 -0700345 const nodemask_t *nodes)
David Rientjes37012942008-04-28 02:12:33 -0700346{
347 nodemask_t tmp;
348
David Rientjes37012942008-04-28 02:12:33 -0700349 if (pol->flags & MPOL_F_STATIC_NODES) {
350 int node = first_node(pol->w.user_nodemask);
351
Lee Schermerhornfc36b8d2008-04-28 02:13:21 -0700352 if (node_isset(node, *nodes)) {
David Rientjes37012942008-04-28 02:12:33 -0700353 pol->v.preferred_node = node;
Lee Schermerhornfc36b8d2008-04-28 02:13:21 -0700354 pol->flags &= ~MPOL_F_LOCAL;
355 } else
356 pol->flags |= MPOL_F_LOCAL;
David Rientjes37012942008-04-28 02:12:33 -0700357 } else if (pol->flags & MPOL_F_RELATIVE_NODES) {
358 mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
359 pol->v.preferred_node = first_node(tmp);
Lee Schermerhornfc36b8d2008-04-28 02:13:21 -0700360 } else if (!(pol->flags & MPOL_F_LOCAL)) {
David Rientjes37012942008-04-28 02:12:33 -0700361 pol->v.preferred_node = node_remap(pol->v.preferred_node,
362 pol->w.cpuset_mems_allowed,
363 *nodes);
364 pol->w.cpuset_mems_allowed = *nodes;
365 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700366}
367
Miao Xie708c1bb2010-05-24 14:32:07 -0700368/*
369 * mpol_rebind_policy - Migrate a policy to a different set of nodes
370 *
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -0700371 * Per-vma policies are protected by mmap_lock. Allocations using per-task
Vlastimil Babka213980c2017-07-06 15:40:06 -0700372 * policies are protected by task->mems_allowed_seq to prevent a premature
373 * OOM/allocation failure due to parallel nodemask modification.
Miao Xie708c1bb2010-05-24 14:32:07 -0700374 */
Vlastimil Babka213980c2017-07-06 15:40:06 -0700375static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
David Rientjes1d0d2682008-04-28 02:12:32 -0700376{
David Rientjes1d0d2682008-04-28 02:12:32 -0700377 if (!pol)
378 return;
Vlastimil Babka2e256442019-03-05 15:46:50 -0800379 if (!mpol_store_user_nodemask(pol) && !(pol->flags & MPOL_F_LOCAL) &&
David Rientjes1d0d2682008-04-28 02:12:32 -0700380 nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
381 return;
Miao Xie708c1bb2010-05-24 14:32:07 -0700382
Vlastimil Babka213980c2017-07-06 15:40:06 -0700383 mpol_ops[pol->mode].rebind(pol, newmask);
David Rientjes1d0d2682008-04-28 02:12:32 -0700384}
385
386/*
387 * Wrapper for mpol_rebind_policy() that just requires task
388 * pointer, and updates task mempolicy.
Miao Xie58568d22009-06-16 15:31:49 -0700389 *
390 * Called with task's alloc_lock held.
David Rientjes1d0d2682008-04-28 02:12:32 -0700391 */
392
Vlastimil Babka213980c2017-07-06 15:40:06 -0700393void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
David Rientjes1d0d2682008-04-28 02:12:32 -0700394{
Vlastimil Babka213980c2017-07-06 15:40:06 -0700395 mpol_rebind_policy(tsk->mempolicy, new);
David Rientjes1d0d2682008-04-28 02:12:32 -0700396}
397
398/*
399 * Rebind each vma in mm to new nodemask.
400 *
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -0700401 * Call holding a reference to mm. Takes mm->mmap_lock during call.
David Rientjes1d0d2682008-04-28 02:12:32 -0700402 */
403
404void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
405{
406 struct vm_area_struct *vma;
407
Michel Lespinassed8ed45c2020-06-08 21:33:25 -0700408 mmap_write_lock(mm);
Laurent Dufour9cfe1682018-04-17 16:33:15 +0200409 for (vma = mm->mmap; vma; vma = vma->vm_next) {
410 vm_write_begin(vma);
Vlastimil Babka213980c2017-07-06 15:40:06 -0700411 mpol_rebind_policy(vma->vm_policy, new);
Laurent Dufour9cfe1682018-04-17 16:33:15 +0200412 vm_write_end(vma);
413 }
Michel Lespinassed8ed45c2020-06-08 21:33:25 -0700414 mmap_write_unlock(mm);
David Rientjes1d0d2682008-04-28 02:12:32 -0700415}
416
David Rientjes37012942008-04-28 02:12:33 -0700417static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
418 [MPOL_DEFAULT] = {
419 .rebind = mpol_rebind_default,
420 },
421 [MPOL_INTERLEAVE] = {
422 .create = mpol_new_interleave,
423 .rebind = mpol_rebind_nodemask,
424 },
425 [MPOL_PREFERRED] = {
426 .create = mpol_new_preferred,
427 .rebind = mpol_rebind_preferred,
428 },
429 [MPOL_BIND] = {
430 .create = mpol_new_bind,
431 .rebind = mpol_rebind_nodemask,
432 },
433};
434
Yang Shia53190a2019-08-13 15:37:18 -0700435static int migrate_page_add(struct page *page, struct list_head *pagelist,
Christoph Lameterfc301282006-01-18 17:42:29 -0800436 unsigned long flags);
Christoph Lameter1a75a6c2006-01-08 01:01:02 -0800437
Naoya Horiguchi6f4576e2015-02-11 15:28:03 -0800438struct queue_pages {
439 struct list_head *pagelist;
440 unsigned long flags;
441 nodemask_t *nmask;
Li Xinhaif18da662019-11-30 17:56:18 -0800442 unsigned long start;
443 unsigned long end;
444 struct vm_area_struct *first;
Naoya Horiguchi6f4576e2015-02-11 15:28:03 -0800445};
446
Naoya Horiguchi98094942013-09-11 14:22:14 -0700447/*
Naoya Horiguchi88aaa2a2017-09-08 16:10:42 -0700448 * Check if the page's nid is in qp->nmask.
449 *
450 * If MPOL_MF_INVERT is set in qp->flags, check if the nid is
451 * in the invert of qp->nmask.
452 */
453static inline bool queue_pages_required(struct page *page,
454 struct queue_pages *qp)
455{
456 int nid = page_to_nid(page);
457 unsigned long flags = qp->flags;
458
459 return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
460}
461
Yang Shia7f40cf2019-03-28 20:43:55 -0700462/*
Yang Shid8835442019-08-13 15:37:15 -0700463 * queue_pages_pmd() has four possible return values:
464 * 0 - pages are placed on the right node or queued successfully.
465 * 1 - there is unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
466 * specified.
467 * 2 - THP was split.
468 * -EIO - is migration entry or only MPOL_MF_STRICT was specified and an
469 * existing page was already on a node that does not follow the
470 * policy.
Yang Shia7f40cf2019-03-28 20:43:55 -0700471 */
Naoya Horiguchic8633792017-09-08 16:11:08 -0700472static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
473 unsigned long end, struct mm_walk *walk)
Jules Irenge959a7e12020-04-06 20:08:12 -0700474 __releases(ptl)
Naoya Horiguchic8633792017-09-08 16:11:08 -0700475{
476 int ret = 0;
477 struct page *page;
478 struct queue_pages *qp = walk->private;
479 unsigned long flags;
480
481 if (unlikely(is_pmd_migration_entry(*pmd))) {
Yang Shia7f40cf2019-03-28 20:43:55 -0700482 ret = -EIO;
Naoya Horiguchic8633792017-09-08 16:11:08 -0700483 goto unlock;
484 }
485 page = pmd_page(*pmd);
486 if (is_huge_zero_page(page)) {
487 spin_unlock(ptl);
488 __split_huge_pmd(walk->vma, pmd, addr, false, NULL);
Yang Shid8835442019-08-13 15:37:15 -0700489 ret = 2;
Naoya Horiguchic8633792017-09-08 16:11:08 -0700490 goto out;
491 }
Yang Shid8835442019-08-13 15:37:15 -0700492 if (!queue_pages_required(page, qp))
Naoya Horiguchic8633792017-09-08 16:11:08 -0700493 goto unlock;
Naoya Horiguchic8633792017-09-08 16:11:08 -0700494
Naoya Horiguchic8633792017-09-08 16:11:08 -0700495 flags = qp->flags;
496 /* go to thp migration */
Yang Shia7f40cf2019-03-28 20:43:55 -0700497 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
Yang Shia53190a2019-08-13 15:37:18 -0700498 if (!vma_migratable(walk->vma) ||
499 migrate_page_add(page, qp->pagelist, flags)) {
Yang Shid8835442019-08-13 15:37:15 -0700500 ret = 1;
Yang Shia7f40cf2019-03-28 20:43:55 -0700501 goto unlock;
502 }
Yang Shia7f40cf2019-03-28 20:43:55 -0700503 } else
504 ret = -EIO;
Naoya Horiguchic8633792017-09-08 16:11:08 -0700505unlock:
506 spin_unlock(ptl);
507out:
508 return ret;
509}
510
Naoya Horiguchi88aaa2a2017-09-08 16:10:42 -0700511/*
Naoya Horiguchi98094942013-09-11 14:22:14 -0700512 * Scan through pages checking if pages follow certain conditions,
513 * and move them to the pagelist if they do.
Yang Shid8835442019-08-13 15:37:15 -0700514 *
515 * queue_pages_pte_range() has three possible return values:
516 * 0 - pages are placed on the right node or queued successfully.
517 * 1 - there is unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
518 * specified.
519 * -EIO - only MPOL_MF_STRICT was specified and an existing page was already
520 * on a node that does not follow the policy.
Naoya Horiguchi98094942013-09-11 14:22:14 -0700521 */
Naoya Horiguchi6f4576e2015-02-11 15:28:03 -0800522static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
523 unsigned long end, struct mm_walk *walk)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700524{
Naoya Horiguchi6f4576e2015-02-11 15:28:03 -0800525 struct vm_area_struct *vma = walk->vma;
526 struct page *page;
527 struct queue_pages *qp = walk->private;
528 unsigned long flags = qp->flags;
Naoya Horiguchic8633792017-09-08 16:11:08 -0700529 int ret;
Yang Shid8835442019-08-13 15:37:15 -0700530 bool has_unmovable = false;
Shijie Luo3f088422020-11-01 17:07:40 -0800531 pte_t *pte, *mapped_pte;
Hugh Dickins705e87c2005-10-29 18:16:27 -0700532 spinlock_t *ptl;
Hugh Dickins941150a2005-06-21 17:15:06 -0700533
Naoya Horiguchic8633792017-09-08 16:11:08 -0700534 ptl = pmd_trans_huge_lock(pmd, vma);
535 if (ptl) {
536 ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
Yang Shid8835442019-08-13 15:37:15 -0700537 if (ret != 2)
Yang Shia7f40cf2019-03-28 20:43:55 -0700538 return ret;
Kirill A. Shutemov248db922016-01-15 16:54:14 -0800539 }
Yang Shid8835442019-08-13 15:37:15 -0700540 /* THP was split, fall through to pte walk */
Hugh Dickins91612e02005-06-21 17:15:07 -0700541
Naoya Horiguchi337d9ab2016-07-26 15:24:03 -0700542 if (pmd_trans_unstable(pmd))
543 return 0;
Michal Hocko94723aa2018-04-10 16:30:07 -0700544
Shijie Luo3f088422020-11-01 17:07:40 -0800545 mapped_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
Naoya Horiguchi6f4576e2015-02-11 15:28:03 -0800546 for (; addr != end; pte++, addr += PAGE_SIZE) {
Hugh Dickins91612e02005-06-21 17:15:07 -0700547 if (!pte_present(*pte))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700548 continue;
Linus Torvalds6aab3412005-11-28 14:34:23 -0800549 page = vm_normal_page(vma, addr, *pte);
550 if (!page)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700551 continue;
Nick Piggin053837f2006-01-18 17:42:27 -0800552 /*
Hugh Dickins62b61f62009-12-14 17:59:33 -0800553 * vm_normal_page() filters out zero pages, but there might
554 * still be PageReserved pages to skip, perhaps in a VDSO.
Nick Piggin053837f2006-01-18 17:42:27 -0800555 */
Hugh Dickinsb79bc0a2013-02-22 16:35:13 -0800556 if (PageReserved(page))
Christoph Lameterf4598c82006-01-12 01:05:20 -0800557 continue;
Naoya Horiguchi88aaa2a2017-09-08 16:10:42 -0700558 if (!queue_pages_required(page, qp))
Christoph Lameter38e35862006-01-08 01:01:01 -0800559 continue;
Yang Shia7f40cf2019-03-28 20:43:55 -0700560 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
Yang Shid8835442019-08-13 15:37:15 -0700561 /* MPOL_MF_STRICT must be specified if we get here */
562 if (!vma_migratable(vma)) {
563 has_unmovable = true;
Yang Shia7f40cf2019-03-28 20:43:55 -0700564 break;
Yang Shid8835442019-08-13 15:37:15 -0700565 }
Yang Shia53190a2019-08-13 15:37:18 -0700566
567 /*
568 * Do not abort immediately since there may be
569 * temporary off LRU pages in the range. Still
570 * need migrate other LRU pages.
571 */
572 if (migrate_page_add(page, qp->pagelist, flags))
573 has_unmovable = true;
Yang Shia7f40cf2019-03-28 20:43:55 -0700574 } else
575 break;
Naoya Horiguchi6f4576e2015-02-11 15:28:03 -0800576 }
Shijie Luo3f088422020-11-01 17:07:40 -0800577 pte_unmap_unlock(mapped_pte, ptl);
Naoya Horiguchi6f4576e2015-02-11 15:28:03 -0800578 cond_resched();
Yang Shid8835442019-08-13 15:37:15 -0700579
580 if (has_unmovable)
581 return 1;
582
Yang Shia7f40cf2019-03-28 20:43:55 -0700583 return addr != end ? -EIO : 0;
Hugh Dickins91612e02005-06-21 17:15:07 -0700584}
585
Naoya Horiguchi6f4576e2015-02-11 15:28:03 -0800586static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
587 unsigned long addr, unsigned long end,
588 struct mm_walk *walk)
Naoya Horiguchie2d8cf42013-09-11 14:22:03 -0700589{
Li Xinhaidcf17632020-04-01 21:10:48 -0700590 int ret = 0;
Naoya Horiguchie2d8cf42013-09-11 14:22:03 -0700591#ifdef CONFIG_HUGETLB_PAGE
Naoya Horiguchi6f4576e2015-02-11 15:28:03 -0800592 struct queue_pages *qp = walk->private;
Li Xinhaidcf17632020-04-01 21:10:48 -0700593 unsigned long flags = (qp->flags & MPOL_MF_VALID);
Naoya Horiguchie2d8cf42013-09-11 14:22:03 -0700594 struct page *page;
Kirill A. Shutemovcb900f42013-11-14 14:31:02 -0800595 spinlock_t *ptl;
Naoya Horiguchid4c54912014-06-06 10:00:01 -0400596 pte_t entry;
Naoya Horiguchie2d8cf42013-09-11 14:22:03 -0700597
Naoya Horiguchi6f4576e2015-02-11 15:28:03 -0800598 ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
599 entry = huge_ptep_get(pte);
Naoya Horiguchid4c54912014-06-06 10:00:01 -0400600 if (!pte_present(entry))
601 goto unlock;
602 page = pte_page(entry);
Naoya Horiguchi88aaa2a2017-09-08 16:10:42 -0700603 if (!queue_pages_required(page, qp))
Naoya Horiguchie2d8cf42013-09-11 14:22:03 -0700604 goto unlock;
Li Xinhaidcf17632020-04-01 21:10:48 -0700605
606 if (flags == MPOL_MF_STRICT) {
607 /*
608 * STRICT alone means only detecting misplaced page and no
609 * need to further check other vma.
610 */
611 ret = -EIO;
612 goto unlock;
613 }
614
615 if (!vma_migratable(walk->vma)) {
616 /*
617 * Must be STRICT with MOVE*, otherwise .test_walk() have
618 * stopped walking current vma.
619 * Detecting misplaced page but allow migrating pages which
620 * have been queued.
621 */
622 ret = 1;
623 goto unlock;
624 }
625
Naoya Horiguchie2d8cf42013-09-11 14:22:03 -0700626 /* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
627 if (flags & (MPOL_MF_MOVE_ALL) ||
Li Xinhaidcf17632020-04-01 21:10:48 -0700628 (flags & MPOL_MF_MOVE && page_mapcount(page) == 1)) {
629 if (!isolate_huge_page(page, qp->pagelist) &&
630 (flags & MPOL_MF_STRICT))
631 /*
632 * Failed to isolate page but allow migrating pages
633 * which have been queued.
634 */
635 ret = 1;
636 }
Naoya Horiguchie2d8cf42013-09-11 14:22:03 -0700637unlock:
Kirill A. Shutemovcb900f42013-11-14 14:31:02 -0800638 spin_unlock(ptl);
Naoya Horiguchie2d8cf42013-09-11 14:22:03 -0700639#else
640 BUG();
641#endif
Li Xinhaidcf17632020-04-01 21:10:48 -0700642 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700643}
644
Aneesh Kumar K.V58772312013-12-06 00:08:22 +0530645#ifdef CONFIG_NUMA_BALANCING
Lee Schermerhornb24f53a2012-10-25 14:16:32 +0200646/*
Mel Gorman4b10e7d2012-10-25 14:16:32 +0200647 * This is used to mark a range of virtual addresses to be inaccessible.
648 * These are later cleared by a NUMA hinting fault. Depending on these
649 * faults, pages may be migrated for better NUMA placement.
650 *
651 * This is assuming that NUMA faults are handled using PROT_NONE. If
652 * an architecture makes a different choice, it will need further
653 * changes to the core.
Lee Schermerhornb24f53a2012-10-25 14:16:32 +0200654 */
Mel Gorman4b10e7d2012-10-25 14:16:32 +0200655unsigned long change_prot_numa(struct vm_area_struct *vma,
656 unsigned long addr, unsigned long end)
Lee Schermerhornb24f53a2012-10-25 14:16:32 +0200657{
Mel Gorman4b10e7d2012-10-25 14:16:32 +0200658 int nr_updated;
Lee Schermerhornb24f53a2012-10-25 14:16:32 +0200659
Laurent Dufour9cfe1682018-04-17 16:33:15 +0200660 vm_write_begin(vma);
Peter Xu58705442020-04-06 20:05:45 -0700661 nr_updated = change_protection(vma, addr, end, PAGE_NONE, MM_CP_PROT_NUMA);
Mel Gorman03c5a6e2012-11-02 14:52:48 +0000662 if (nr_updated)
663 count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);
Laurent Dufour9cfe1682018-04-17 16:33:15 +0200664 vm_write_end(vma);
Lee Schermerhornb24f53a2012-10-25 14:16:32 +0200665
Mel Gorman4b10e7d2012-10-25 14:16:32 +0200666 return nr_updated;
Lee Schermerhornb24f53a2012-10-25 14:16:32 +0200667}
668#else
669static unsigned long change_prot_numa(struct vm_area_struct *vma,
670 unsigned long addr, unsigned long end)
671{
672 return 0;
673}
Aneesh Kumar K.V58772312013-12-06 00:08:22 +0530674#endif /* CONFIG_NUMA_BALANCING */
Lee Schermerhornb24f53a2012-10-25 14:16:32 +0200675
Naoya Horiguchi6f4576e2015-02-11 15:28:03 -0800676static int queue_pages_test_walk(unsigned long start, unsigned long end,
677 struct mm_walk *walk)
678{
679 struct vm_area_struct *vma = walk->vma;
680 struct queue_pages *qp = walk->private;
681 unsigned long endvma = vma->vm_end;
682 unsigned long flags = qp->flags;
683
Li Xinhaia18b3ac22019-11-30 17:56:15 -0800684 /* range check first */
Yang Shid888fb22020-04-01 21:10:55 -0700685 VM_BUG_ON_VMA((vma->vm_start > start) || (vma->vm_end < end), vma);
Li Xinhaif18da662019-11-30 17:56:18 -0800686
687 if (!qp->first) {
688 qp->first = vma;
689 if (!(flags & MPOL_MF_DISCONTIG_OK) &&
690 (qp->start < vma->vm_start))
691 /* hole at head side of range */
Li Xinhaia18b3ac22019-11-30 17:56:15 -0800692 return -EFAULT;
693 }
Li Xinhaif18da662019-11-30 17:56:18 -0800694 if (!(flags & MPOL_MF_DISCONTIG_OK) &&
695 ((vma->vm_end < qp->end) &&
696 (!vma->vm_next || vma->vm_end < vma->vm_next->vm_start)))
697 /* hole at middle or tail of range */
698 return -EFAULT;
Li Xinhaia18b3ac22019-11-30 17:56:15 -0800699
Yang Shia7f40cf2019-03-28 20:43:55 -0700700 /*
701 * Need check MPOL_MF_STRICT to return -EIO if possible
702 * regardless of vma_migratable
703 */
704 if (!vma_migratable(vma) &&
705 !(flags & MPOL_MF_STRICT))
Naoya Horiguchi48684a62015-02-11 15:28:06 -0800706 return 1;
707
Naoya Horiguchi6f4576e2015-02-11 15:28:03 -0800708 if (endvma > end)
709 endvma = end;
Naoya Horiguchi6f4576e2015-02-11 15:28:03 -0800710
Naoya Horiguchi6f4576e2015-02-11 15:28:03 -0800711 if (flags & MPOL_MF_LAZY) {
712 /* Similar to task_numa_work, skip inaccessible VMAs */
Anshuman Khandual3122e802020-04-06 20:03:47 -0700713 if (!is_vm_hugetlb_page(vma) && vma_is_accessible(vma) &&
Liang Chen4355c012016-03-15 14:56:42 -0700714 !(vma->vm_flags & VM_MIXEDMAP))
Naoya Horiguchi6f4576e2015-02-11 15:28:03 -0800715 change_prot_numa(vma, start, endvma);
716 return 1;
717 }
718
Kirill A. Shutemov77bf45e2016-02-05 15:36:33 -0800719 /* queue pages from current vma */
Yang Shia7f40cf2019-03-28 20:43:55 -0700720 if (flags & MPOL_MF_VALID)
Naoya Horiguchi6f4576e2015-02-11 15:28:03 -0800721 return 0;
722 return 1;
723}
724
Christoph Hellwig7b86ac32019-08-28 16:19:54 +0200725static const struct mm_walk_ops queue_pages_walk_ops = {
726 .hugetlb_entry = queue_pages_hugetlb,
727 .pmd_entry = queue_pages_pte_range,
728 .test_walk = queue_pages_test_walk,
729};
730
Christoph Lameterdc9aa5b2006-01-08 01:00:50 -0800731/*
Naoya Horiguchi98094942013-09-11 14:22:14 -0700732 * Walk through page tables and collect pages to be migrated.
733 *
734 * If pages found in a given range are on a set of nodes (determined by
735 * @nodes and @flags,) it's isolated and queued to the pagelist which is
Yang Shid8835442019-08-13 15:37:15 -0700736 * passed via @private.
737 *
738 * queue_pages_range() has three possible return values:
739 * 1 - there is unmovable page, but MPOL_MF_MOVE* & MPOL_MF_STRICT were
740 * specified.
741 * 0 - queue pages successfully or no misplaced page.
Yang Shia85dfc32019-11-15 17:34:33 -0800742 * errno - i.e. misplaced pages with MPOL_MF_STRICT specified (-EIO) or
743 * memory range specified by nodemask and maxnode points outside
744 * your accessible address space (-EFAULT)
Christoph Lameterdc9aa5b2006-01-08 01:00:50 -0800745 */
Hugh Dickinsd05f0cdcb2014-06-23 13:22:07 -0700746static int
Naoya Horiguchi98094942013-09-11 14:22:14 -0700747queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
Naoya Horiguchi6f4576e2015-02-11 15:28:03 -0800748 nodemask_t *nodes, unsigned long flags,
749 struct list_head *pagelist)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700750{
Li Xinhaif18da662019-11-30 17:56:18 -0800751 int err;
Naoya Horiguchi6f4576e2015-02-11 15:28:03 -0800752 struct queue_pages qp = {
753 .pagelist = pagelist,
754 .flags = flags,
755 .nmask = nodes,
Li Xinhaif18da662019-11-30 17:56:18 -0800756 .start = start,
757 .end = end,
758 .first = NULL,
Naoya Horiguchi6f4576e2015-02-11 15:28:03 -0800759 };
Linus Torvalds1da177e2005-04-16 15:20:36 -0700760
Li Xinhaif18da662019-11-30 17:56:18 -0800761 err = walk_page_range(mm, start, end, &queue_pages_walk_ops, &qp);
762
763 if (!qp.first)
764 /* whole range in hole */
765 err = -EFAULT;
766
767 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700768}
769
KOSAKI Motohiro869833f2012-10-08 16:29:16 -0700770/*
771 * Apply policy to a single VMA
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -0700772 * This must be called with the mmap_lock held for writing.
KOSAKI Motohiro869833f2012-10-08 16:29:16 -0700773 */
774static int vma_replace_policy(struct vm_area_struct *vma,
775 struct mempolicy *pol)
KOSAKI Motohiro8d34694c12012-10-08 16:29:14 -0700776{
KOSAKI Motohiro869833f2012-10-08 16:29:16 -0700777 int err;
778 struct mempolicy *old;
779 struct mempolicy *new;
KOSAKI Motohiro8d34694c12012-10-08 16:29:14 -0700780
781 pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
782 vma->vm_start, vma->vm_end, vma->vm_pgoff,
783 vma->vm_ops, vma->vm_file,
784 vma->vm_ops ? vma->vm_ops->set_policy : NULL);
785
KOSAKI Motohiro869833f2012-10-08 16:29:16 -0700786 new = mpol_dup(pol);
787 if (IS_ERR(new))
788 return PTR_ERR(new);
789
Laurent Dufour9cfe1682018-04-17 16:33:15 +0200790 vm_write_begin(vma);
KOSAKI Motohiro869833f2012-10-08 16:29:16 -0700791 if (vma->vm_ops && vma->vm_ops->set_policy) {
KOSAKI Motohiro8d34694c12012-10-08 16:29:14 -0700792 err = vma->vm_ops->set_policy(vma, new);
KOSAKI Motohiro869833f2012-10-08 16:29:16 -0700793 if (err)
794 goto err_out;
KOSAKI Motohiro8d34694c12012-10-08 16:29:14 -0700795 }
KOSAKI Motohiro869833f2012-10-08 16:29:16 -0700796
797 old = vma->vm_policy;
Laurent Dufour9cfe1682018-04-17 16:33:15 +0200798 /*
799 * The speculative page fault handler accesses this field without
800 * hodling the mmap_sem.
801 */
802 WRITE_ONCE(vma->vm_policy, new);
803 vm_write_end(vma);
KOSAKI Motohiro869833f2012-10-08 16:29:16 -0700804 mpol_put(old);
805
806 return 0;
807 err_out:
Laurent Dufour9cfe1682018-04-17 16:33:15 +0200808 vm_write_end(vma);
KOSAKI Motohiro869833f2012-10-08 16:29:16 -0700809 mpol_put(new);
KOSAKI Motohiro8d34694c12012-10-08 16:29:14 -0700810 return err;
811}
812
Linus Torvalds1da177e2005-04-16 15:20:36 -0700813/* Step 2: apply policy to a range and do splits. */
KOSAKI Motohiro9d8cebd2010-03-05 13:41:57 -0800814static int mbind_range(struct mm_struct *mm, unsigned long start,
815 unsigned long end, struct mempolicy *new_pol)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700816{
817 struct vm_area_struct *next;
KOSAKI Motohiro9d8cebd2010-03-05 13:41:57 -0800818 struct vm_area_struct *prev;
819 struct vm_area_struct *vma;
820 int err = 0;
KOSAKI Motohiroe26a5112011-12-28 15:57:11 -0800821 pgoff_t pgoff;
KOSAKI Motohiro9d8cebd2010-03-05 13:41:57 -0800822 unsigned long vmstart;
823 unsigned long vmend;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700824
Linus Torvalds097d5912012-03-06 18:23:36 -0800825 vma = find_vma(mm, start);
Li Xinhaif18da662019-11-30 17:56:18 -0800826 VM_BUG_ON(!vma);
KOSAKI Motohiro9d8cebd2010-03-05 13:41:57 -0800827
Linus Torvalds097d5912012-03-06 18:23:36 -0800828 prev = vma->vm_prev;
KOSAKI Motohiroe26a5112011-12-28 15:57:11 -0800829 if (start > vma->vm_start)
830 prev = vma;
831
KOSAKI Motohiro9d8cebd2010-03-05 13:41:57 -0800832 for (; vma && vma->vm_start < end; prev = vma, vma = next) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700833 next = vma->vm_next;
KOSAKI Motohiro9d8cebd2010-03-05 13:41:57 -0800834 vmstart = max(start, vma->vm_start);
835 vmend = min(end, vma->vm_end);
836
KOSAKI Motohiroe26a5112011-12-28 15:57:11 -0800837 if (mpol_equal(vma_policy(vma), new_pol))
838 continue;
839
840 pgoff = vma->vm_pgoff +
841 ((vmstart - vma->vm_start) >> PAGE_SHIFT);
KOSAKI Motohiro9d8cebd2010-03-05 13:41:57 -0800842 prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
Andrea Arcangeli19a809a2015-09-04 15:46:24 -0700843 vma->anon_vma, vma->vm_file, pgoff,
Colin Cross60500a42015-10-27 16:42:08 -0700844 new_pol, vma->vm_userfaultfd_ctx,
845 vma_get_anon_name(vma));
KOSAKI Motohiro9d8cebd2010-03-05 13:41:57 -0800846 if (prev) {
847 vma = prev;
848 next = vma->vm_next;
Oleg Nesterov3964acd2013-07-31 13:53:28 -0700849 if (mpol_equal(vma_policy(vma), new_pol))
850 continue;
851 /* vma_merge() joined vma && vma->next, case 8 */
852 goto replace;
KOSAKI Motohiro9d8cebd2010-03-05 13:41:57 -0800853 }
854 if (vma->vm_start != vmstart) {
855 err = split_vma(vma->vm_mm, vma, vmstart, 1);
856 if (err)
857 goto out;
858 }
859 if (vma->vm_end != vmend) {
860 err = split_vma(vma->vm_mm, vma, vmend, 0);
861 if (err)
862 goto out;
863 }
Oleg Nesterov3964acd2013-07-31 13:53:28 -0700864 replace:
KOSAKI Motohiro869833f2012-10-08 16:29:16 -0700865 err = vma_replace_policy(vma, new_pol);
KOSAKI Motohiro8d34694c12012-10-08 16:29:14 -0700866 if (err)
867 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700868 }
KOSAKI Motohiro9d8cebd2010-03-05 13:41:57 -0800869
870 out:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700871 return err;
872}
873
Linus Torvalds1da177e2005-04-16 15:20:36 -0700874/* Set the process memory policy */
David Rientjes028fec42008-04-28 02:12:25 -0700875static long do_set_mempolicy(unsigned short mode, unsigned short flags,
876 nodemask_t *nodes)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700877{
Miao Xie58568d22009-06-16 15:31:49 -0700878 struct mempolicy *new, *old;
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -0700879 NODEMASK_SCRATCH(scratch);
Miao Xie58568d22009-06-16 15:31:49 -0700880 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700881
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -0700882 if (!scratch)
883 return -ENOMEM;
Lee Schermerhornf4e53d92008-04-28 02:13:10 -0700884
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -0700885 new = mpol_new(mode, flags, nodes);
886 if (IS_ERR(new)) {
887 ret = PTR_ERR(new);
888 goto out;
889 }
Oleg Nesterov2c7c3a72014-10-09 15:27:55 -0700890
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -0700891 ret = mpol_set_nodemask(new, nodes, scratch);
Miao Xie58568d22009-06-16 15:31:49 -0700892 if (ret) {
Miao Xie58568d22009-06-16 15:31:49 -0700893 mpol_put(new);
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -0700894 goto out;
Miao Xie58568d22009-06-16 15:31:49 -0700895 }
Wei Yang78b132e2020-10-13 16:57:08 -0700896 task_lock(current);
Miao Xie58568d22009-06-16 15:31:49 -0700897 old = current->mempolicy;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700898 current->mempolicy = new;
Vlastimil Babka45816682017-07-06 15:39:59 -0700899 if (new && new->mode == MPOL_INTERLEAVE)
900 current->il_prev = MAX_NUMNODES-1;
Miao Xie58568d22009-06-16 15:31:49 -0700901 task_unlock(current);
Miao Xie58568d22009-06-16 15:31:49 -0700902 mpol_put(old);
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -0700903 ret = 0;
904out:
905 NODEMASK_SCRATCH_FREE(scratch);
906 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700907}
908
Lee Schermerhornbea904d2008-04-28 02:13:18 -0700909/*
910 * Return nodemask for policy for get_mempolicy() query
Miao Xie58568d22009-06-16 15:31:49 -0700911 *
912 * Called with task's alloc_lock held
Lee Schermerhornbea904d2008-04-28 02:13:18 -0700913 */
914static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700915{
Andi Kleendfcd3c02005-10-29 18:15:48 -0700916 nodes_clear(*nodes);
Lee Schermerhornbea904d2008-04-28 02:13:18 -0700917 if (p == &default_policy)
918 return;
919
Lee Schermerhorn45c47452008-04-28 02:13:12 -0700920 switch (p->mode) {
Mel Gorman19770b32008-04-28 02:12:18 -0700921 case MPOL_BIND:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700922 case MPOL_INTERLEAVE:
Andi Kleendfcd3c02005-10-29 18:15:48 -0700923 *nodes = p->v.nodes;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700924 break;
925 case MPOL_PREFERRED:
Lee Schermerhornfc36b8d2008-04-28 02:13:21 -0700926 if (!(p->flags & MPOL_F_LOCAL))
Andi Kleendfcd3c02005-10-29 18:15:48 -0700927 node_set(p->v.preferred_node, *nodes);
Lee Schermerhorn53f25562008-04-28 02:13:20 -0700928 /* else return empty node mask for local allocation */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700929 break;
930 default:
931 BUG();
932 }
933}
934
Andrea Arcangeli3b9aadf2018-10-26 15:05:16 -0700935static int lookup_node(struct mm_struct *mm, unsigned long addr)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700936{
Peter Xuba841072020-04-07 21:40:09 -0400937 struct page *p = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700938 int err;
939
Andrea Arcangeli3b9aadf2018-10-26 15:05:16 -0700940 int locked = 1;
941 err = get_user_pages_locked(addr & PAGE_MASK, 1, 0, &p, &locked);
Michal Hocko2d3a36a2020-06-03 16:03:25 -0700942 if (err > 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700943 err = page_to_nid(p);
944 put_page(p);
945 }
Andrea Arcangeli3b9aadf2018-10-26 15:05:16 -0700946 if (locked)
Michel Lespinassed8ed45c2020-06-08 21:33:25 -0700947 mmap_read_unlock(mm);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700948 return err;
949}
950
Linus Torvalds1da177e2005-04-16 15:20:36 -0700951/* Retrieve NUMA policy */
Adrian Bunkdbcb0f12007-10-16 01:26:26 -0700952static long do_get_mempolicy(int *policy, nodemask_t *nmask,
953 unsigned long addr, unsigned long flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700954{
Christoph Lameter8bccd852005-10-29 18:16:59 -0700955 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700956 struct mm_struct *mm = current->mm;
957 struct vm_area_struct *vma = NULL;
Andrea Arcangeli3b9aadf2018-10-26 15:05:16 -0700958 struct mempolicy *pol = current->mempolicy, *pol_refcount = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700959
Lee Schermerhorn754af6f2007-10-16 01:24:51 -0700960 if (flags &
961 ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700962 return -EINVAL;
Lee Schermerhorn754af6f2007-10-16 01:24:51 -0700963
964 if (flags & MPOL_F_MEMS_ALLOWED) {
965 if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
966 return -EINVAL;
967 *policy = 0; /* just so it's initialized */
Miao Xie58568d22009-06-16 15:31:49 -0700968 task_lock(current);
Lee Schermerhorn754af6f2007-10-16 01:24:51 -0700969 *nmask = cpuset_current_mems_allowed;
Miao Xie58568d22009-06-16 15:31:49 -0700970 task_unlock(current);
Lee Schermerhorn754af6f2007-10-16 01:24:51 -0700971 return 0;
972 }
973
Linus Torvalds1da177e2005-04-16 15:20:36 -0700974 if (flags & MPOL_F_ADDR) {
Lee Schermerhornbea904d2008-04-28 02:13:18 -0700975 /*
976 * Do NOT fall back to task policy if the
977 * vma/shared policy at addr is NULL. We
978 * want to return MPOL_DEFAULT in this case.
979 */
Michel Lespinassed8ed45c2020-06-08 21:33:25 -0700980 mmap_read_lock(mm);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700981 vma = find_vma_intersection(mm, addr, addr+1);
982 if (!vma) {
Michel Lespinassed8ed45c2020-06-08 21:33:25 -0700983 mmap_read_unlock(mm);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700984 return -EFAULT;
985 }
986 if (vma->vm_ops && vma->vm_ops->get_policy)
987 pol = vma->vm_ops->get_policy(vma, addr);
988 else
989 pol = vma->vm_policy;
990 } else if (addr)
991 return -EINVAL;
992
993 if (!pol)
Lee Schermerhornbea904d2008-04-28 02:13:18 -0700994 pol = &default_policy; /* indicates default behavior */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700995
996 if (flags & MPOL_F_NODE) {
997 if (flags & MPOL_F_ADDR) {
Andrea Arcangeli3b9aadf2018-10-26 15:05:16 -0700998 /*
999 * Take a refcount on the mpol, lookup_node()
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07001000 * wil drop the mmap_lock, so after calling
Andrea Arcangeli3b9aadf2018-10-26 15:05:16 -07001001 * lookup_node() only "pol" remains valid, "vma"
1002 * is stale.
1003 */
1004 pol_refcount = pol;
1005 vma = NULL;
1006 mpol_get(pol);
1007 err = lookup_node(mm, addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001008 if (err < 0)
1009 goto out;
Christoph Lameter8bccd852005-10-29 18:16:59 -07001010 *policy = err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001011 } else if (pol == current->mempolicy &&
Lee Schermerhorn45c47452008-04-28 02:13:12 -07001012 pol->mode == MPOL_INTERLEAVE) {
Vlastimil Babka45816682017-07-06 15:39:59 -07001013 *policy = next_node_in(current->il_prev, pol->v.nodes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001014 } else {
1015 err = -EINVAL;
1016 goto out;
1017 }
Lee Schermerhornbea904d2008-04-28 02:13:18 -07001018 } else {
1019 *policy = pol == &default_policy ? MPOL_DEFAULT :
1020 pol->mode;
David Rientjesd79df632008-07-04 12:24:13 -07001021 /*
1022 * Internal mempolicy flags must be masked off before exposing
1023 * the policy to userspace.
1024 */
1025 *policy |= (pol->flags & MPOL_MODE_FLAGS);
Lee Schermerhornbea904d2008-04-28 02:13:18 -07001026 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001027
Linus Torvalds1da177e2005-04-16 15:20:36 -07001028 err = 0;
Miao Xie58568d22009-06-16 15:31:49 -07001029 if (nmask) {
Lee Schermerhornc6b6ef82010-03-23 13:35:41 -07001030 if (mpol_store_user_nodemask(pol)) {
1031 *nmask = pol->w.user_nodemask;
1032 } else {
1033 task_lock(current);
1034 get_policy_nodemask(pol, nmask);
1035 task_unlock(current);
1036 }
Miao Xie58568d22009-06-16 15:31:49 -07001037 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001038
1039 out:
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001040 mpol_cond_put(pol);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001041 if (vma)
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07001042 mmap_read_unlock(mm);
Andrea Arcangeli3b9aadf2018-10-26 15:05:16 -07001043 if (pol_refcount)
1044 mpol_put(pol_refcount);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001045 return err;
1046}
1047
Christoph Lameterb20a3502006-03-22 00:09:12 -08001048#ifdef CONFIG_MIGRATION
Christoph Lameter8bccd852005-10-29 18:16:59 -07001049/*
Naoya Horiguchic8633792017-09-08 16:11:08 -07001050 * page migration, thp tail pages can be passed.
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001051 */
Yang Shia53190a2019-08-13 15:37:18 -07001052static int migrate_page_add(struct page *page, struct list_head *pagelist,
Christoph Lameterfc301282006-01-18 17:42:29 -08001053 unsigned long flags)
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001054{
Naoya Horiguchic8633792017-09-08 16:11:08 -07001055 struct page *head = compound_head(page);
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001056 /*
Christoph Lameterfc301282006-01-18 17:42:29 -08001057 * Avoid migrating a page that is shared with others.
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001058 */
Naoya Horiguchic8633792017-09-08 16:11:08 -07001059 if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(head) == 1) {
1060 if (!isolate_lru_page(head)) {
1061 list_add_tail(&head->lru, pagelist);
1062 mod_node_page_state(page_pgdat(head),
Huang Ying9de4f222020-04-06 20:04:41 -07001063 NR_ISOLATED_ANON + page_is_file_lru(head),
Matthew Wilcox (Oracle)6c357842020-08-14 17:30:37 -07001064 thp_nr_pages(head));
Yang Shia53190a2019-08-13 15:37:18 -07001065 } else if (flags & MPOL_MF_STRICT) {
1066 /*
1067 * Non-movable page may reach here. And, there may be
1068 * temporary off LRU pages or non-LRU movable pages.
1069 * Treat them as unmovable pages since they can't be
1070 * isolated, so they can't be moved at the moment. It
1071 * should return -EIO for this case too.
1072 */
1073 return -EIO;
Nick Piggin62695a82008-10-18 20:26:09 -07001074 }
1075 }
Yang Shia53190a2019-08-13 15:37:18 -07001076
1077 return 0;
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001078}
1079
1080/*
Christoph Lameter7e2ab152006-02-01 03:05:40 -08001081 * Migrate pages from one node to a target node.
1082 * Returns error or the number of pages not migrated.
1083 */
Adrian Bunkdbcb0f12007-10-16 01:26:26 -07001084static int migrate_to_node(struct mm_struct *mm, int source, int dest,
1085 int flags)
Christoph Lameter7e2ab152006-02-01 03:05:40 -08001086{
1087 nodemask_t nmask;
1088 LIST_HEAD(pagelist);
1089 int err = 0;
Joonsoo Kima0976312020-08-11 18:37:28 -07001090 struct migration_target_control mtc = {
1091 .nid = dest,
1092 .gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
1093 };
Christoph Lameter7e2ab152006-02-01 03:05:40 -08001094
1095 nodes_clear(nmask);
1096 node_set(source, nmask);
1097
Minchan Kim08270802012-10-08 16:33:38 -07001098 /*
1099 * This does not "check" the range but isolates all pages that
1100 * need migration. Between passing in the full user address
1101 * space range and MPOL_MF_DISCONTIG_OK, this call can not fail.
1102 */
1103 VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
Naoya Horiguchi98094942013-09-11 14:22:14 -07001104 queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
Christoph Lameter7e2ab152006-02-01 03:05:40 -08001105 flags | MPOL_MF_DISCONTIG_OK, &pagelist);
1106
Minchan Kimcf608ac2010-10-26 14:21:29 -07001107 if (!list_empty(&pagelist)) {
Joonsoo Kima0976312020-08-11 18:37:28 -07001108 err = migrate_pages(&pagelist, alloc_migration_target, NULL,
1109 (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL);
Minchan Kimcf608ac2010-10-26 14:21:29 -07001110 if (err)
Naoya Horiguchie2d8cf42013-09-11 14:22:03 -07001111 putback_movable_pages(&pagelist);
Minchan Kimcf608ac2010-10-26 14:21:29 -07001112 }
Christoph Lameter95a402c2006-06-23 02:03:53 -07001113
Christoph Lameter7e2ab152006-02-01 03:05:40 -08001114 return err;
1115}
1116
1117/*
1118 * Move pages between the two nodesets so as to preserve the physical
1119 * layout as much as possible.
Christoph Lameter39743882006-01-08 01:00:51 -08001120 *
1121 * Returns the number of page that could not be moved.
1122 */
Andrew Morton0ce72d42012-05-29 15:06:24 -07001123int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1124 const nodemask_t *to, int flags)
Christoph Lameter39743882006-01-08 01:00:51 -08001125{
Christoph Lameter7e2ab152006-02-01 03:05:40 -08001126 int busy = 0;
Jan Stanceka71a3e12021-01-12 15:49:21 -08001127 int err = 0;
Christoph Lameter7e2ab152006-02-01 03:05:40 -08001128 nodemask_t tmp;
Christoph Lameter39743882006-01-08 01:00:51 -08001129
Minchan Kimc6bc13962021-03-19 12:47:33 -07001130 lru_cache_disable();
Christoph Lameter0aedadf2008-11-06 12:53:30 -08001131
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07001132 mmap_read_lock(mm);
Christoph Lameter39743882006-01-08 01:00:51 -08001133
KOSAKI Motohiroda0aa132010-03-05 13:41:59 -08001134 /*
1135 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
1136 * bit in 'to' is not also set in 'tmp'. Clear the found 'source'
1137 * bit in 'tmp', and return that <source, dest> pair for migration.
1138 * The pair of nodemasks 'to' and 'from' define the map.
1139 *
1140 * If no pair of bits is found that way, fallback to picking some
1141 * pair of 'source' and 'dest' bits that are not the same. If the
1142 * 'source' and 'dest' bits are the same, this represents a node
1143 * that will be migrating to itself, so no pages need move.
1144 *
1145 * If no bits are left in 'tmp', or if all remaining bits left
1146 * in 'tmp' correspond to the same bit in 'to', return false
1147 * (nothing left to migrate).
1148 *
1149 * This lets us pick a pair of nodes to migrate between, such that
1150 * if possible the dest node is not already occupied by some other
1151 * source node, minimizing the risk of overloading the memory on a
1152 * node that would happen if we migrated incoming memory to a node
1153 * before migrating outgoing memory source that same node.
1154 *
1155 * A single scan of tmp is sufficient. As we go, we remember the
1156 * most recent <s, d> pair that moved (s != d). If we find a pair
1157 * that not only moved, but what's better, moved to an empty slot
1158 * (d is not set in tmp), then we break out then, with that pair.
Justin P. Mattockae0e47f2011-03-01 15:06:02 +01001159 * Otherwise when we finish scanning from_tmp, we at least have the
KOSAKI Motohiroda0aa132010-03-05 13:41:59 -08001160 * most recent <s, d> pair that moved. If we get all the way through
1161 * the scan of tmp without finding any node that moved, much less
1162 * moved to an empty node, then there is nothing left worth migrating.
1163 */
Christoph Lameterd4984712006-01-08 01:00:55 -08001164
Andrew Morton0ce72d42012-05-29 15:06:24 -07001165 tmp = *from;
Christoph Lameter7e2ab152006-02-01 03:05:40 -08001166 while (!nodes_empty(tmp)) {
1167 int s,d;
Jianguo Wub76ac7e2013-11-12 15:07:39 -08001168 int source = NUMA_NO_NODE;
Christoph Lameter7e2ab152006-02-01 03:05:40 -08001169 int dest = 0;
1170
1171 for_each_node_mask(s, tmp) {
Larry Woodman4a5b18c2012-05-29 15:06:24 -07001172
1173 /*
1174 * do_migrate_pages() tries to maintain the relative
1175 * node relationship of the pages established between
1176 * threads and memory areas.
1177 *
1178 * However if the number of source nodes is not equal to
1179 * the number of destination nodes we can not preserve
1180 * this node relative relationship. In that case, skip
1181 * copying memory from a node that is in the destination
1182 * mask.
1183 *
1184 * Example: [2,3,4] -> [3,4,5] moves everything.
1185 * [0-7] - > [3,4,5] moves only 0,1,2,6,7.
1186 */
1187
Andrew Morton0ce72d42012-05-29 15:06:24 -07001188 if ((nodes_weight(*from) != nodes_weight(*to)) &&
1189 (node_isset(s, *to)))
Larry Woodman4a5b18c2012-05-29 15:06:24 -07001190 continue;
1191
Andrew Morton0ce72d42012-05-29 15:06:24 -07001192 d = node_remap(s, *from, *to);
Christoph Lameter7e2ab152006-02-01 03:05:40 -08001193 if (s == d)
1194 continue;
1195
1196 source = s; /* Node moved. Memorize */
1197 dest = d;
1198
1199 /* dest not in remaining from nodes? */
1200 if (!node_isset(dest, tmp))
1201 break;
1202 }
Jianguo Wub76ac7e2013-11-12 15:07:39 -08001203 if (source == NUMA_NO_NODE)
Christoph Lameter7e2ab152006-02-01 03:05:40 -08001204 break;
1205
1206 node_clear(source, tmp);
1207 err = migrate_to_node(mm, source, dest, flags);
1208 if (err > 0)
1209 busy += err;
1210 if (err < 0)
1211 break;
Christoph Lameter39743882006-01-08 01:00:51 -08001212 }
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07001213 mmap_read_unlock(mm);
Minchan Kim68a47312021-03-19 12:39:51 -07001214
Minchan Kimc6bc13962021-03-19 12:47:33 -07001215 lru_cache_enable();
Christoph Lameter7e2ab152006-02-01 03:05:40 -08001216 if (err < 0)
1217 return err;
1218 return busy;
Christoph Lameterb20a3502006-03-22 00:09:12 -08001219
Christoph Lameter39743882006-01-08 01:00:51 -08001220}
1221
Lee Schermerhorn3ad33b242007-11-14 16:59:10 -08001222/*
1223 * Allocate a new page for page migration based on vma policy.
Hugh Dickinsd05f0cdcb2014-06-23 13:22:07 -07001224 * Start by assuming the page is mapped by the same vma as contains @start.
Lee Schermerhorn3ad33b242007-11-14 16:59:10 -08001225 * Search forward from there, if not. N.B., this assumes that the
1226 * list of pages handed to migrate_pages()--which is how we get here--
1227 * is in virtual address order.
1228 */
Michal Hocko666feb22018-04-10 16:30:03 -07001229static struct page *new_page(struct page *page, unsigned long start)
Christoph Lameter95a402c2006-06-23 02:03:53 -07001230{
Hugh Dickinsd05f0cdcb2014-06-23 13:22:07 -07001231 struct vm_area_struct *vma;
Kees Cook3f649ab2020-06-03 13:09:38 -07001232 unsigned long address;
Christoph Lameter95a402c2006-06-23 02:03:53 -07001233
Hugh Dickinsd05f0cdcb2014-06-23 13:22:07 -07001234 vma = find_vma(current->mm, start);
Lee Schermerhorn3ad33b242007-11-14 16:59:10 -08001235 while (vma) {
1236 address = page_address_in_vma(page, vma);
1237 if (address != -EFAULT)
1238 break;
1239 vma = vma->vm_next;
1240 }
1241
Wanpeng Li11c731e2013-12-18 17:08:56 -08001242 if (PageHuge(page)) {
Michal Hocko389c8172018-01-31 16:21:03 -08001243 return alloc_huge_page_vma(page_hstate(compound_head(page)),
1244 vma, address);
Michal Hocko94723aa2018-04-10 16:30:07 -07001245 } else if (PageTransHuge(page)) {
Naoya Horiguchic8633792017-09-08 16:11:08 -07001246 struct page *thp;
1247
David Rientjes19deb762019-09-04 12:54:20 -07001248 thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address,
1249 HPAGE_PMD_ORDER);
Naoya Horiguchic8633792017-09-08 16:11:08 -07001250 if (!thp)
1251 return NULL;
1252 prep_transhuge_page(thp);
1253 return thp;
Wanpeng Li11c731e2013-12-18 17:08:56 -08001254 }
1255 /*
1256 * if !vma, alloc_page_vma() will use task or system default policy
1257 */
Michal Hocko0f556852017-07-12 14:36:58 -07001258 return alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL,
1259 vma, address);
Christoph Lameter95a402c2006-06-23 02:03:53 -07001260}
Christoph Lameterb20a3502006-03-22 00:09:12 -08001261#else
1262
Yang Shia53190a2019-08-13 15:37:18 -07001263static int migrate_page_add(struct page *page, struct list_head *pagelist,
Christoph Lameterb20a3502006-03-22 00:09:12 -08001264 unsigned long flags)
1265{
Yang Shia53190a2019-08-13 15:37:18 -07001266 return -EIO;
Christoph Lameterb20a3502006-03-22 00:09:12 -08001267}
1268
Andrew Morton0ce72d42012-05-29 15:06:24 -07001269int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1270 const nodemask_t *to, int flags)
Christoph Lameterb20a3502006-03-22 00:09:12 -08001271{
1272 return -ENOSYS;
1273}
Christoph Lameter95a402c2006-06-23 02:03:53 -07001274
Michal Hocko666feb22018-04-10 16:30:03 -07001275static struct page *new_page(struct page *page, unsigned long start)
Christoph Lameter95a402c2006-06-23 02:03:53 -07001276{
1277 return NULL;
1278}
Christoph Lameterb20a3502006-03-22 00:09:12 -08001279#endif
1280
Adrian Bunkdbcb0f12007-10-16 01:26:26 -07001281static long do_mbind(unsigned long start, unsigned long len,
David Rientjes028fec42008-04-28 02:12:25 -07001282 unsigned short mode, unsigned short mode_flags,
1283 nodemask_t *nmask, unsigned long flags)
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001284{
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001285 struct mm_struct *mm = current->mm;
1286 struct mempolicy *new;
1287 unsigned long end;
1288 int err;
Yang Shid8835442019-08-13 15:37:15 -07001289 int ret;
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001290 LIST_HEAD(pagelist);
1291
Lee Schermerhornb24f53a2012-10-25 14:16:32 +02001292 if (flags & ~(unsigned long)MPOL_MF_VALID)
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001293 return -EINVAL;
Christoph Lameter74c00242006-03-14 19:50:21 -08001294 if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001295 return -EPERM;
1296
1297 if (start & ~PAGE_MASK)
1298 return -EINVAL;
1299
1300 if (mode == MPOL_DEFAULT)
1301 flags &= ~MPOL_MF_STRICT;
1302
1303 len = (len + PAGE_SIZE - 1) & PAGE_MASK;
1304 end = start + len;
1305
1306 if (end < start)
1307 return -EINVAL;
1308 if (end == start)
1309 return 0;
1310
David Rientjes028fec42008-04-28 02:12:25 -07001311 new = mpol_new(mode, mode_flags, nmask);
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001312 if (IS_ERR(new))
1313 return PTR_ERR(new);
1314
Lee Schermerhornb24f53a2012-10-25 14:16:32 +02001315 if (flags & MPOL_MF_LAZY)
1316 new->flags |= MPOL_F_MOF;
1317
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001318 /*
1319 * If we are using the default policy then operation
1320 * on discontinuous address spaces is okay after all
1321 */
1322 if (!new)
1323 flags |= MPOL_MF_DISCONTIG_OK;
1324
David Rientjes028fec42008-04-28 02:12:25 -07001325 pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
1326 start, start + len, mode, mode_flags,
David Rientjes00ef2d22013-02-22 16:35:36 -08001327 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001328
Christoph Lameter0aedadf2008-11-06 12:53:30 -08001329 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
1330
Minchan Kimc6bc13962021-03-19 12:47:33 -07001331 lru_cache_disable();
Christoph Lameter0aedadf2008-11-06 12:53:30 -08001332 }
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -07001333 {
1334 NODEMASK_SCRATCH(scratch);
1335 if (scratch) {
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07001336 mmap_write_lock(mm);
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -07001337 err = mpol_set_nodemask(new, nmask, scratch);
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -07001338 if (err)
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07001339 mmap_write_unlock(mm);
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -07001340 } else
1341 err = -ENOMEM;
1342 NODEMASK_SCRATCH_FREE(scratch);
1343 }
KOSAKI Motohirob05ca732009-10-26 16:49:59 -07001344 if (err)
1345 goto mpol_out;
1346
Yang Shid8835442019-08-13 15:37:15 -07001347 ret = queue_pages_range(mm, start, end, nmask,
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001348 flags | MPOL_MF_INVERT, &pagelist);
Yang Shid8835442019-08-13 15:37:15 -07001349
1350 if (ret < 0) {
Yang Shia85dfc32019-11-15 17:34:33 -08001351 err = ret;
Yang Shid8835442019-08-13 15:37:15 -07001352 goto up_out;
1353 }
1354
1355 err = mbind_range(mm, start, end, new);
Christoph Lameter7e2ab152006-02-01 03:05:40 -08001356
Lee Schermerhornb24f53a2012-10-25 14:16:32 +02001357 if (!err) {
1358 int nr_failed = 0;
1359
Minchan Kimcf608ac2010-10-26 14:21:29 -07001360 if (!list_empty(&pagelist)) {
Lee Schermerhornb24f53a2012-10-25 14:16:32 +02001361 WARN_ON_ONCE(flags & MPOL_MF_LAZY);
Hugh Dickinsd05f0cdcb2014-06-23 13:22:07 -07001362 nr_failed = migrate_pages(&pagelist, new_page, NULL,
1363 start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
Minchan Kimcf608ac2010-10-26 14:21:29 -07001364 if (nr_failed)
Naoya Horiguchi74060e42013-09-11 14:22:06 -07001365 putback_movable_pages(&pagelist);
Minchan Kimcf608ac2010-10-26 14:21:29 -07001366 }
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001367
Yang Shid8835442019-08-13 15:37:15 -07001368 if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT)))
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001369 err = -EIO;
Yang Shia85dfc32019-11-15 17:34:33 -08001370 } else {
Yang Shid8835442019-08-13 15:37:15 -07001371up_out:
Yang Shia85dfc32019-11-15 17:34:33 -08001372 if (!list_empty(&pagelist))
1373 putback_movable_pages(&pagelist);
1374 }
1375
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07001376 mmap_write_unlock(mm);
Yang Shid8835442019-08-13 15:37:15 -07001377mpol_out:
Lee Schermerhornf0be3d32008-04-28 02:13:08 -07001378 mpol_put(new);
Minchan Kim68a47312021-03-19 12:39:51 -07001379 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
Minchan Kimc6bc13962021-03-19 12:47:33 -07001380 lru_cache_enable();
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001381 return err;
1382}
1383
Christoph Lameter39743882006-01-08 01:00:51 -08001384/*
Christoph Lameter8bccd852005-10-29 18:16:59 -07001385 * User space interface with variable sized bitmaps for nodelists.
1386 */
1387
1388/* Copy a node mask from user space. */
Christoph Lameter39743882006-01-08 01:00:51 -08001389static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
Christoph Lameter8bccd852005-10-29 18:16:59 -07001390 unsigned long maxnode)
1391{
1392 unsigned long k;
Yisheng Xie56521e72018-01-31 16:16:11 -08001393 unsigned long t;
Christoph Lameter8bccd852005-10-29 18:16:59 -07001394 unsigned long nlongs;
1395 unsigned long endmask;
1396
1397 --maxnode;
1398 nodes_clear(*nodes);
1399 if (maxnode == 0 || !nmask)
1400 return 0;
Andi Kleena9c930b2006-02-20 18:27:59 -08001401 if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
Chris Wright636f13c2006-02-17 13:59:36 -08001402 return -EINVAL;
Christoph Lameter8bccd852005-10-29 18:16:59 -07001403
1404 nlongs = BITS_TO_LONGS(maxnode);
1405 if ((maxnode % BITS_PER_LONG) == 0)
1406 endmask = ~0UL;
1407 else
1408 endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
1409
Yisheng Xie56521e72018-01-31 16:16:11 -08001410 /*
1411 * When the user specified more nodes than supported just check
1412 * if the non supported part is all zero.
1413 *
1414 * If maxnode have more longs than MAX_NUMNODES, check
1415 * the bits in that area first. And then go through to
1416 * check the rest bits which equal or bigger than MAX_NUMNODES.
1417 * Otherwise, just check bits [MAX_NUMNODES, maxnode).
1418 */
Christoph Lameter8bccd852005-10-29 18:16:59 -07001419 if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
Christoph Lameter8bccd852005-10-29 18:16:59 -07001420 for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
Christoph Lameter8bccd852005-10-29 18:16:59 -07001421 if (get_user(t, nmask + k))
1422 return -EFAULT;
1423 if (k == nlongs - 1) {
1424 if (t & endmask)
1425 return -EINVAL;
1426 } else if (t)
1427 return -EINVAL;
1428 }
1429 nlongs = BITS_TO_LONGS(MAX_NUMNODES);
1430 endmask = ~0UL;
1431 }
1432
Yisheng Xie56521e72018-01-31 16:16:11 -08001433 if (maxnode > MAX_NUMNODES && MAX_NUMNODES % BITS_PER_LONG != 0) {
1434 unsigned long valid_mask = endmask;
1435
1436 valid_mask &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1);
1437 if (get_user(t, nmask + nlongs - 1))
1438 return -EFAULT;
1439 if (t & valid_mask)
1440 return -EINVAL;
1441 }
1442
Christoph Lameter8bccd852005-10-29 18:16:59 -07001443 if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
1444 return -EFAULT;
1445 nodes_addr(*nodes)[nlongs-1] &= endmask;
1446 return 0;
1447}
1448
1449/* Copy a kernel node mask to user space */
1450static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
1451 nodemask_t *nodes)
1452{
1453 unsigned long copy = ALIGN(maxnode-1, 64) / 8;
Ralph Campbell050c17f2019-02-20 22:18:58 -08001454 unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long);
Christoph Lameter8bccd852005-10-29 18:16:59 -07001455
1456 if (copy > nbytes) {
1457 if (copy > PAGE_SIZE)
1458 return -EINVAL;
1459 if (clear_user((char __user *)mask + nbytes, copy - nbytes))
1460 return -EFAULT;
1461 copy = nbytes;
1462 }
1463 return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
1464}
1465
Dominik Brodowskie7dc9ad62018-03-17 16:12:22 +01001466static long kernel_mbind(unsigned long start, unsigned long len,
1467 unsigned long mode, const unsigned long __user *nmask,
1468 unsigned long maxnode, unsigned int flags)
Christoph Lameter8bccd852005-10-29 18:16:59 -07001469{
1470 nodemask_t nodes;
1471 int err;
David Rientjes028fec42008-04-28 02:12:25 -07001472 unsigned short mode_flags;
Christoph Lameter8bccd852005-10-29 18:16:59 -07001473
Andrey Konovalov057d33892019-09-25 16:48:30 -07001474 start = untagged_addr(start);
David Rientjes028fec42008-04-28 02:12:25 -07001475 mode_flags = mode & MPOL_MODE_FLAGS;
1476 mode &= ~MPOL_MODE_FLAGS;
David Rientjesa3b51e02008-04-28 02:12:23 -07001477 if (mode >= MPOL_MAX)
1478 return -EINVAL;
David Rientjes4c50bc02008-04-28 02:12:30 -07001479 if ((mode_flags & MPOL_F_STATIC_NODES) &&
1480 (mode_flags & MPOL_F_RELATIVE_NODES))
1481 return -EINVAL;
Christoph Lameter8bccd852005-10-29 18:16:59 -07001482 err = get_nodes(&nodes, nmask, maxnode);
1483 if (err)
1484 return err;
David Rientjes028fec42008-04-28 02:12:25 -07001485 return do_mbind(start, len, mode, mode_flags, &nodes, flags);
Christoph Lameter8bccd852005-10-29 18:16:59 -07001486}
1487
Dominik Brodowskie7dc9ad62018-03-17 16:12:22 +01001488SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1489 unsigned long, mode, const unsigned long __user *, nmask,
1490 unsigned long, maxnode, unsigned int, flags)
1491{
1492 return kernel_mbind(start, len, mode, nmask, maxnode, flags);
1493}
1494
Christoph Lameter8bccd852005-10-29 18:16:59 -07001495/* Set the process memory policy */
Dominik Brodowskiaf03c4a2018-03-17 16:20:01 +01001496static long kernel_set_mempolicy(int mode, const unsigned long __user *nmask,
1497 unsigned long maxnode)
Christoph Lameter8bccd852005-10-29 18:16:59 -07001498{
1499 int err;
1500 nodemask_t nodes;
David Rientjes028fec42008-04-28 02:12:25 -07001501 unsigned short flags;
Christoph Lameter8bccd852005-10-29 18:16:59 -07001502
David Rientjes028fec42008-04-28 02:12:25 -07001503 flags = mode & MPOL_MODE_FLAGS;
1504 mode &= ~MPOL_MODE_FLAGS;
1505 if ((unsigned int)mode >= MPOL_MAX)
Christoph Lameter8bccd852005-10-29 18:16:59 -07001506 return -EINVAL;
David Rientjes4c50bc02008-04-28 02:12:30 -07001507 if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
1508 return -EINVAL;
Christoph Lameter8bccd852005-10-29 18:16:59 -07001509 err = get_nodes(&nodes, nmask, maxnode);
1510 if (err)
1511 return err;
David Rientjes028fec42008-04-28 02:12:25 -07001512 return do_set_mempolicy(mode, flags, &nodes);
Christoph Lameter8bccd852005-10-29 18:16:59 -07001513}
1514
Dominik Brodowskiaf03c4a2018-03-17 16:20:01 +01001515SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
1516 unsigned long, maxnode)
1517{
1518 return kernel_set_mempolicy(mode, nmask, maxnode);
1519}
1520
Dominik Brodowskib6e9b0b2018-03-17 16:00:25 +01001521static int kernel_migrate_pages(pid_t pid, unsigned long maxnode,
1522 const unsigned long __user *old_nodes,
1523 const unsigned long __user *new_nodes)
Christoph Lameter39743882006-01-08 01:00:51 -08001524{
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001525 struct mm_struct *mm = NULL;
Christoph Lameter39743882006-01-08 01:00:51 -08001526 struct task_struct *task;
Christoph Lameter39743882006-01-08 01:00:51 -08001527 nodemask_t task_nodes;
1528 int err;
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001529 nodemask_t *old;
1530 nodemask_t *new;
1531 NODEMASK_SCRATCH(scratch);
Christoph Lameter39743882006-01-08 01:00:51 -08001532
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001533 if (!scratch)
1534 return -ENOMEM;
Christoph Lameter39743882006-01-08 01:00:51 -08001535
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001536 old = &scratch->mask1;
1537 new = &scratch->mask2;
1538
1539 err = get_nodes(old, old_nodes, maxnode);
Christoph Lameter39743882006-01-08 01:00:51 -08001540 if (err)
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001541 goto out;
1542
1543 err = get_nodes(new, new_nodes, maxnode);
1544 if (err)
1545 goto out;
Christoph Lameter39743882006-01-08 01:00:51 -08001546
1547 /* Find the mm_struct */
Zeng Zhaoming55cfaa32010-12-02 14:31:13 -08001548 rcu_read_lock();
Pavel Emelyanov228ebcb2007-10-18 23:40:16 -07001549 task = pid ? find_task_by_vpid(pid) : current;
Christoph Lameter39743882006-01-08 01:00:51 -08001550 if (!task) {
Zeng Zhaoming55cfaa32010-12-02 14:31:13 -08001551 rcu_read_unlock();
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001552 err = -ESRCH;
1553 goto out;
Christoph Lameter39743882006-01-08 01:00:51 -08001554 }
Christoph Lameter3268c632012-03-21 16:34:06 -07001555 get_task_struct(task);
Christoph Lameter39743882006-01-08 01:00:51 -08001556
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001557 err = -EINVAL;
Christoph Lameter39743882006-01-08 01:00:51 -08001558
1559 /*
Otto Ebeling31367462017-11-15 17:38:14 -08001560 * Check if this process has the right to modify the specified process.
1561 * Use the regular "ptrace_may_access()" checks.
Christoph Lameter39743882006-01-08 01:00:51 -08001562 */
Otto Ebeling31367462017-11-15 17:38:14 -08001563 if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
David Howellsc69e8d92008-11-14 10:39:19 +11001564 rcu_read_unlock();
Christoph Lameter39743882006-01-08 01:00:51 -08001565 err = -EPERM;
Christoph Lameter3268c632012-03-21 16:34:06 -07001566 goto out_put;
Christoph Lameter39743882006-01-08 01:00:51 -08001567 }
David Howellsc69e8d92008-11-14 10:39:19 +11001568 rcu_read_unlock();
Christoph Lameter39743882006-01-08 01:00:51 -08001569
1570 task_nodes = cpuset_mems_allowed(task);
1571 /* Is the user allowed to access the target nodes? */
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001572 if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
Christoph Lameter39743882006-01-08 01:00:51 -08001573 err = -EPERM;
Christoph Lameter3268c632012-03-21 16:34:06 -07001574 goto out_put;
Christoph Lameter39743882006-01-08 01:00:51 -08001575 }
1576
Yisheng Xie0486a382018-01-31 16:16:15 -08001577 task_nodes = cpuset_mems_allowed(current);
1578 nodes_and(*new, *new, task_nodes);
1579 if (nodes_empty(*new))
Christoph Lameter3268c632012-03-21 16:34:06 -07001580 goto out_put;
Yisheng Xie0486a382018-01-31 16:16:15 -08001581
David Quigley86c3a762006-06-23 02:04:02 -07001582 err = security_task_movememory(task);
1583 if (err)
Christoph Lameter3268c632012-03-21 16:34:06 -07001584 goto out_put;
David Quigley86c3a762006-06-23 02:04:02 -07001585
Christoph Lameter3268c632012-03-21 16:34:06 -07001586 mm = get_task_mm(task);
1587 put_task_struct(task);
Sasha Levinf2a9ef82012-04-25 16:01:52 -07001588
1589 if (!mm) {
Christoph Lameter3268c632012-03-21 16:34:06 -07001590 err = -EINVAL;
Sasha Levinf2a9ef82012-04-25 16:01:52 -07001591 goto out;
1592 }
1593
1594 err = do_migrate_pages(mm, old, new,
1595 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
Christoph Lameter3268c632012-03-21 16:34:06 -07001596
1597 mmput(mm);
1598out:
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001599 NODEMASK_SCRATCH_FREE(scratch);
1600
Christoph Lameter39743882006-01-08 01:00:51 -08001601 return err;
Christoph Lameter3268c632012-03-21 16:34:06 -07001602
1603out_put:
1604 put_task_struct(task);
1605 goto out;
1606
Christoph Lameter39743882006-01-08 01:00:51 -08001607}
1608
Dominik Brodowskib6e9b0b2018-03-17 16:00:25 +01001609SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1610 const unsigned long __user *, old_nodes,
1611 const unsigned long __user *, new_nodes)
1612{
1613 return kernel_migrate_pages(pid, maxnode, old_nodes, new_nodes);
1614}
1615
Christoph Lameter39743882006-01-08 01:00:51 -08001616
Christoph Lameter8bccd852005-10-29 18:16:59 -07001617/* Retrieve NUMA policy */
Dominik Brodowskiaf03c4a2018-03-17 16:20:01 +01001618static int kernel_get_mempolicy(int __user *policy,
1619 unsigned long __user *nmask,
1620 unsigned long maxnode,
1621 unsigned long addr,
1622 unsigned long flags)
Christoph Lameter8bccd852005-10-29 18:16:59 -07001623{
Adrian Bunkdbcb0f12007-10-16 01:26:26 -07001624 int err;
Kees Cook3f649ab2020-06-03 13:09:38 -07001625 int pval;
Christoph Lameter8bccd852005-10-29 18:16:59 -07001626 nodemask_t nodes;
1627
Ralph Campbell050c17f2019-02-20 22:18:58 -08001628 if (nmask != NULL && maxnode < nr_node_ids)
Christoph Lameter8bccd852005-10-29 18:16:59 -07001629 return -EINVAL;
1630
Wenchao Hao4605f052020-08-11 18:31:16 -07001631 addr = untagged_addr(addr);
1632
Christoph Lameter8bccd852005-10-29 18:16:59 -07001633 err = do_get_mempolicy(&pval, &nodes, addr, flags);
1634
1635 if (err)
1636 return err;
1637
1638 if (policy && put_user(pval, policy))
1639 return -EFAULT;
1640
1641 if (nmask)
1642 err = copy_nodes_to_user(nmask, maxnode, &nodes);
1643
1644 return err;
1645}
1646
Dominik Brodowskiaf03c4a2018-03-17 16:20:01 +01001647SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1648 unsigned long __user *, nmask, unsigned long, maxnode,
1649 unsigned long, addr, unsigned long, flags)
1650{
1651 return kernel_get_mempolicy(policy, nmask, maxnode, addr, flags);
1652}
1653
Linus Torvalds1da177e2005-04-16 15:20:36 -07001654#ifdef CONFIG_COMPAT
1655
Heiko Carstensc93e0f62014-03-03 16:32:26 +01001656COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1657 compat_ulong_t __user *, nmask,
1658 compat_ulong_t, maxnode,
1659 compat_ulong_t, addr, compat_ulong_t, flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001660{
1661 long err;
1662 unsigned long __user *nm = NULL;
1663 unsigned long nr_bits, alloc_size;
1664 DECLARE_BITMAP(bm, MAX_NUMNODES);
1665
Ralph Campbell050c17f2019-02-20 22:18:58 -08001666 nr_bits = min_t(unsigned long, maxnode-1, nr_node_ids);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001667 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1668
1669 if (nmask)
1670 nm = compat_alloc_user_space(alloc_size);
1671
Dominik Brodowskiaf03c4a2018-03-17 16:20:01 +01001672 err = kernel_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001673
1674 if (!err && nmask) {
KAMEZAWA Hiroyuki2bbff6c2011-09-14 16:21:02 -07001675 unsigned long copy_size;
1676 copy_size = min_t(unsigned long, sizeof(bm), alloc_size);
1677 err = copy_from_user(bm, nm, copy_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001678 /* ensure entire bitmap is zeroed */
1679 err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
1680 err |= compat_put_bitmap(nmask, bm, nr_bits);
1681 }
1682
1683 return err;
1684}
1685
Heiko Carstensc93e0f62014-03-03 16:32:26 +01001686COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
1687 compat_ulong_t, maxnode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001688{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001689 unsigned long __user *nm = NULL;
1690 unsigned long nr_bits, alloc_size;
1691 DECLARE_BITMAP(bm, MAX_NUMNODES);
1692
1693 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1694 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1695
1696 if (nmask) {
Chris Sallscf01fb92017-04-07 23:48:11 -07001697 if (compat_get_bitmap(bm, nmask, nr_bits))
1698 return -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001699 nm = compat_alloc_user_space(alloc_size);
Chris Sallscf01fb92017-04-07 23:48:11 -07001700 if (copy_to_user(nm, bm, alloc_size))
1701 return -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001702 }
1703
Dominik Brodowskiaf03c4a2018-03-17 16:20:01 +01001704 return kernel_set_mempolicy(mode, nm, nr_bits+1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001705}
1706
Heiko Carstensc93e0f62014-03-03 16:32:26 +01001707COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
1708 compat_ulong_t, mode, compat_ulong_t __user *, nmask,
1709 compat_ulong_t, maxnode, compat_ulong_t, flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001710{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001711 unsigned long __user *nm = NULL;
1712 unsigned long nr_bits, alloc_size;
Andi Kleendfcd3c02005-10-29 18:15:48 -07001713 nodemask_t bm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001714
1715 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1716 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1717
1718 if (nmask) {
Chris Sallscf01fb92017-04-07 23:48:11 -07001719 if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits))
1720 return -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001721 nm = compat_alloc_user_space(alloc_size);
Chris Sallscf01fb92017-04-07 23:48:11 -07001722 if (copy_to_user(nm, nodes_addr(bm), alloc_size))
1723 return -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001724 }
1725
Dominik Brodowskie7dc9ad62018-03-17 16:12:22 +01001726 return kernel_mbind(start, len, mode, nm, nr_bits+1, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001727}
1728
Dominik Brodowskib6e9b0b2018-03-17 16:00:25 +01001729COMPAT_SYSCALL_DEFINE4(migrate_pages, compat_pid_t, pid,
1730 compat_ulong_t, maxnode,
1731 const compat_ulong_t __user *, old_nodes,
1732 const compat_ulong_t __user *, new_nodes)
1733{
1734 unsigned long __user *old = NULL;
1735 unsigned long __user *new = NULL;
1736 nodemask_t tmp_mask;
1737 unsigned long nr_bits;
1738 unsigned long size;
1739
1740 nr_bits = min_t(unsigned long, maxnode - 1, MAX_NUMNODES);
1741 size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1742 if (old_nodes) {
1743 if (compat_get_bitmap(nodes_addr(tmp_mask), old_nodes, nr_bits))
1744 return -EFAULT;
1745 old = compat_alloc_user_space(new_nodes ? size * 2 : size);
1746 if (new_nodes)
1747 new = old + size / sizeof(unsigned long);
1748 if (copy_to_user(old, nodes_addr(tmp_mask), size))
1749 return -EFAULT;
1750 }
1751 if (new_nodes) {
1752 if (compat_get_bitmap(nodes_addr(tmp_mask), new_nodes, nr_bits))
1753 return -EFAULT;
1754 if (new == NULL)
1755 new = compat_alloc_user_space(size);
1756 if (copy_to_user(new, nodes_addr(tmp_mask), size))
1757 return -EFAULT;
1758 }
1759 return kernel_migrate_pages(pid, nr_bits + 1, old, new);
1760}
1761
1762#endif /* CONFIG_COMPAT */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001763
Li Xinhai20ca87f2020-04-01 21:10:52 -07001764bool vma_migratable(struct vm_area_struct *vma)
1765{
1766 if (vma->vm_flags & (VM_IO | VM_PFNMAP))
1767 return false;
1768
1769 /*
1770 * DAX device mappings require predictable access latency, so avoid
1771 * incurring periodic faults.
1772 */
1773 if (vma_is_dax(vma))
1774 return false;
1775
1776 if (is_vm_hugetlb_page(vma) &&
1777 !hugepage_migration_supported(hstate_vma(vma)))
1778 return false;
1779
1780 /*
1781 * Migration allocates pages in the highest zone. If we cannot
1782 * do so then migration (at least from node to node) is not
1783 * possible.
1784 */
1785 if (vma->vm_file &&
1786 gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
1787 < policy_zone)
1788 return false;
1789 return true;
1790}
1791
Oleg Nesterov74d2c3a2014-10-09 15:27:50 -07001792struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
1793 unsigned long addr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001794{
Laurent Dufour9cfe1682018-04-17 16:33:15 +02001795 struct mempolicy *pol;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001796
Laurent Dufour9cfe1682018-04-17 16:33:15 +02001797 if (!vma)
1798 return NULL;
Mel Gorman00442ad2012-10-08 16:29:20 -07001799
Laurent Dufour9cfe1682018-04-17 16:33:15 +02001800 if (vma->vm_ops && vma->vm_ops->get_policy)
1801 return vma->vm_ops->get_policy(vma, addr);
1802
1803 /*
1804 * This could be called without holding the mmap_sem in the
1805 * speculative page fault handler's path.
1806 */
1807 pol = READ_ONCE(vma->vm_policy);
1808 if (pol) {
1809 /*
1810 * shmem_alloc_page() passes MPOL_F_SHARED policy with
1811 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
1812 * count on these policies which will be dropped by
1813 * mpol_cond_put() later
1814 */
1815 if (mpol_needs_cond_ref(pol))
1816 mpol_get(pol);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001817 }
Oleg Nesterovf15ca782014-10-09 15:27:43 -07001818
Oleg Nesterov74d2c3a2014-10-09 15:27:50 -07001819 return pol;
1820}
1821
1822/*
Oleg Nesterovdd6eecb2014-10-09 15:27:57 -07001823 * get_vma_policy(@vma, @addr)
Oleg Nesterov74d2c3a2014-10-09 15:27:50 -07001824 * @vma: virtual memory area whose policy is sought
1825 * @addr: address in @vma for shared policy lookup
1826 *
1827 * Returns effective policy for a VMA at specified address.
Oleg Nesterovdd6eecb2014-10-09 15:27:57 -07001828 * Falls back to current->mempolicy or system default policy, as necessary.
Oleg Nesterov74d2c3a2014-10-09 15:27:50 -07001829 * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
1830 * count--added by the get_policy() vm_op, as appropriate--to protect against
1831 * freeing by another task. It is the caller's responsibility to free the
1832 * extra reference for shared policies.
1833 */
David Rientjesac79f782019-09-04 12:54:18 -07001834static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
Oleg Nesterovdd6eecb2014-10-09 15:27:57 -07001835 unsigned long addr)
Oleg Nesterov74d2c3a2014-10-09 15:27:50 -07001836{
1837 struct mempolicy *pol = __get_vma_policy(vma, addr);
1838
Oleg Nesterov8d902742014-10-09 15:27:45 -07001839 if (!pol)
Oleg Nesterovdd6eecb2014-10-09 15:27:57 -07001840 pol = get_task_policy(current);
Oleg Nesterov8d902742014-10-09 15:27:45 -07001841
Linus Torvalds1da177e2005-04-16 15:20:36 -07001842 return pol;
1843}
1844
Oleg Nesterov6b6482b2014-10-09 15:27:48 -07001845bool vma_policy_mof(struct vm_area_struct *vma)
Mel Gormanfc3147242013-10-07 11:29:09 +01001846{
Oleg Nesterov6b6482b2014-10-09 15:27:48 -07001847 struct mempolicy *pol;
Oleg Nesterovf15ca782014-10-09 15:27:43 -07001848
Oleg Nesterov6b6482b2014-10-09 15:27:48 -07001849 if (vma->vm_ops && vma->vm_ops->get_policy) {
1850 bool ret = false;
Mel Gormanfc3147242013-10-07 11:29:09 +01001851
Oleg Nesterov6b6482b2014-10-09 15:27:48 -07001852 pol = vma->vm_ops->get_policy(vma, vma->vm_start);
1853 if (pol && (pol->flags & MPOL_F_MOF))
1854 ret = true;
1855 mpol_cond_put(pol);
Mel Gormanfc3147242013-10-07 11:29:09 +01001856
Oleg Nesterov6b6482b2014-10-09 15:27:48 -07001857 return ret;
Mel Gormanfc3147242013-10-07 11:29:09 +01001858 }
1859
Oleg Nesterov6b6482b2014-10-09 15:27:48 -07001860 pol = vma->vm_policy;
Oleg Nesterov8d902742014-10-09 15:27:45 -07001861 if (!pol)
Oleg Nesterov6b6482b2014-10-09 15:27:48 -07001862 pol = get_task_policy(current);
Oleg Nesterov8d902742014-10-09 15:27:45 -07001863
Mel Gormanfc3147242013-10-07 11:29:09 +01001864 return pol->flags & MPOL_F_MOF;
1865}
1866
Lai Jiangshand3eb1572013-02-22 16:33:22 -08001867static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1868{
1869 enum zone_type dynamic_policy_zone = policy_zone;
1870
1871 BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1872
1873 /*
1874 * if policy->v.nodes has movable memory only,
1875 * we apply policy when gfp_zone(gfp) = ZONE_MOVABLE only.
1876 *
1877 * policy->v.nodes is intersect with node_states[N_MEMORY].
1878 * so if the following test faile, it implies
1879 * policy->v.nodes has movable memory only.
1880 */
1881 if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY]))
1882 dynamic_policy_zone = ZONE_MOVABLE;
1883
1884 return zone >= dynamic_policy_zone;
1885}
1886
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001887/*
1888 * Return a nodemask representing a mempolicy for filtering nodes for
1889 * page allocation
1890 */
Muchun Song8ca39e62020-08-11 18:30:32 -07001891nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
Mel Gorman19770b32008-04-28 02:12:18 -07001892{
1893 /* Lower zones don't get a nodemask applied for MPOL_BIND */
Lee Schermerhorn45c47452008-04-28 02:13:12 -07001894 if (unlikely(policy->mode == MPOL_BIND) &&
Lai Jiangshand3eb1572013-02-22 16:33:22 -08001895 apply_policy_zone(policy, gfp_zone(gfp)) &&
Mel Gorman19770b32008-04-28 02:12:18 -07001896 cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
1897 return &policy->v.nodes;
1898
1899 return NULL;
1900}
1901
Vlastimil Babka04ec6262017-07-06 15:40:03 -07001902/* Return the node id preferred by the given mempolicy, or the given id */
Wei Yangf8fd5252020-10-13 16:57:11 -07001903static int policy_node(gfp_t gfp, struct mempolicy *policy, int nd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001904{
Michal Hocko6d840952016-12-12 16:42:23 -08001905 if (policy->mode == MPOL_PREFERRED && !(policy->flags & MPOL_F_LOCAL))
1906 nd = policy->v.preferred_node;
1907 else {
Mel Gorman19770b32008-04-28 02:12:18 -07001908 /*
Michal Hocko6d840952016-12-12 16:42:23 -08001909 * __GFP_THISNODE shouldn't even be used with the bind policy
1910 * because we might easily break the expectation to stay on the
1911 * requested node and not break the policy.
Mel Gorman19770b32008-04-28 02:12:18 -07001912 */
Michal Hocko6d840952016-12-12 16:42:23 -08001913 WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001914 }
Michal Hocko6d840952016-12-12 16:42:23 -08001915
Vlastimil Babka04ec6262017-07-06 15:40:03 -07001916 return nd;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001917}
1918
1919/* Do dynamic interleaving for a process */
1920static unsigned interleave_nodes(struct mempolicy *policy)
1921{
Vlastimil Babka45816682017-07-06 15:39:59 -07001922 unsigned next;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001923 struct task_struct *me = current;
1924
Vlastimil Babka45816682017-07-06 15:39:59 -07001925 next = next_node_in(me->il_prev, policy->v.nodes);
David Rientjesf5b087b2008-04-28 02:12:27 -07001926 if (next < MAX_NUMNODES)
Vlastimil Babka45816682017-07-06 15:39:59 -07001927 me->il_prev = next;
1928 return next;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001929}
1930
Christoph Lameterdc85da12006-01-18 17:42:36 -08001931/*
1932 * Depending on the memory policy provide a node from which to allocate the
1933 * next slab entry.
1934 */
David Rientjes2a389612014-04-07 15:37:29 -07001935unsigned int mempolicy_slab_node(void)
Christoph Lameterdc85da12006-01-18 17:42:36 -08001936{
Andi Kleene7b691b2012-06-09 02:40:03 -07001937 struct mempolicy *policy;
David Rientjes2a389612014-04-07 15:37:29 -07001938 int node = numa_mem_id();
Andi Kleene7b691b2012-06-09 02:40:03 -07001939
1940 if (in_interrupt())
David Rientjes2a389612014-04-07 15:37:29 -07001941 return node;
Andi Kleene7b691b2012-06-09 02:40:03 -07001942
1943 policy = current->mempolicy;
Lee Schermerhornfc36b8d2008-04-28 02:13:21 -07001944 if (!policy || policy->flags & MPOL_F_LOCAL)
David Rientjes2a389612014-04-07 15:37:29 -07001945 return node;
Christoph Lameter765c4502006-09-27 01:50:08 -07001946
Lee Schermerhornbea904d2008-04-28 02:13:18 -07001947 switch (policy->mode) {
1948 case MPOL_PREFERRED:
Lee Schermerhornfc36b8d2008-04-28 02:13:21 -07001949 /*
1950 * handled MPOL_F_LOCAL above
1951 */
1952 return policy->v.preferred_node;
Lee Schermerhornbea904d2008-04-28 02:13:18 -07001953
Christoph Lameterdc85da12006-01-18 17:42:36 -08001954 case MPOL_INTERLEAVE:
1955 return interleave_nodes(policy);
1956
Mel Gormandd1a2392008-04-28 02:12:17 -07001957 case MPOL_BIND: {
Mel Gormanc33d6c02016-05-19 17:14:10 -07001958 struct zoneref *z;
1959
Christoph Lameterdc85da12006-01-18 17:42:36 -08001960 /*
1961 * Follow bind policy behavior and start allocation at the
1962 * first node.
1963 */
Mel Gorman19770b32008-04-28 02:12:18 -07001964 struct zonelist *zonelist;
Mel Gorman19770b32008-04-28 02:12:18 -07001965 enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
Aneesh Kumar K.Vc9634cf2016-10-07 16:59:12 -07001966 zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
Mel Gormanc33d6c02016-05-19 17:14:10 -07001967 z = first_zones_zonelist(zonelist, highest_zoneidx,
1968 &policy->v.nodes);
Pavel Tatashinc1093b72018-08-21 21:53:32 -07001969 return z->zone ? zone_to_nid(z->zone) : node;
Mel Gormandd1a2392008-04-28 02:12:17 -07001970 }
Christoph Lameterdc85da12006-01-18 17:42:36 -08001971
Christoph Lameterdc85da12006-01-18 17:42:36 -08001972 default:
Lee Schermerhornbea904d2008-04-28 02:13:18 -07001973 BUG();
Christoph Lameterdc85da12006-01-18 17:42:36 -08001974 }
1975}
1976
Andrew Mortonfee83b32016-05-19 17:11:43 -07001977/*
1978 * Do static interleaving for a VMA with known offset @n. Returns the n'th
1979 * node in pol->v.nodes (starting from n=0), wrapping around if n exceeds the
1980 * number of present nodes.
1981 */
Laurent Dufour98c70ba2017-09-08 16:12:39 -07001982static unsigned offset_il_node(struct mempolicy *pol, unsigned long n)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001983{
Andi Kleendfcd3c02005-10-29 18:15:48 -07001984 unsigned nnodes = nodes_weight(pol->v.nodes);
David Rientjesf5b087b2008-04-28 02:12:27 -07001985 unsigned target;
Andrew Mortonfee83b32016-05-19 17:11:43 -07001986 int i;
1987 int nid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001988
David Rientjesf5b087b2008-04-28 02:12:27 -07001989 if (!nnodes)
1990 return numa_node_id();
Andrew Mortonfee83b32016-05-19 17:11:43 -07001991 target = (unsigned int)n % nnodes;
1992 nid = first_node(pol->v.nodes);
1993 for (i = 0; i < target; i++)
Andi Kleendfcd3c02005-10-29 18:15:48 -07001994 nid = next_node(nid, pol->v.nodes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001995 return nid;
1996}
1997
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001998/* Determine a node number for interleave */
1999static inline unsigned interleave_nid(struct mempolicy *pol,
2000 struct vm_area_struct *vma, unsigned long addr, int shift)
2001{
2002 if (vma) {
2003 unsigned long off;
2004
Nishanth Aravamudan3b98b082006-08-31 21:27:53 -07002005 /*
2006 * for small pages, there is no difference between
2007 * shift and PAGE_SHIFT, so the bit-shift is safe.
2008 * for huge pages, since vm_pgoff is in units of small
2009 * pages, we need to shift off the always 0 bits to get
2010 * a useful offset.
2011 */
2012 BUG_ON(shift < PAGE_SHIFT);
2013 off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
Christoph Lameter5da7ca82006-01-06 00:10:46 -08002014 off += (addr - vma->vm_start) >> shift;
Laurent Dufour98c70ba2017-09-08 16:12:39 -07002015 return offset_il_node(pol, off);
Christoph Lameter5da7ca82006-01-06 00:10:46 -08002016 } else
2017 return interleave_nodes(pol);
2018}
2019
Chen, Kenneth W00ac59a2006-02-03 21:51:14 +01002020#ifdef CONFIG_HUGETLBFS
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07002021/*
Vlastimil Babka04ec6262017-07-06 15:40:03 -07002022 * huge_node(@vma, @addr, @gfp_flags, @mpol)
Fabian Frederickb46e14a2014-06-04 16:08:18 -07002023 * @vma: virtual memory area whose policy is sought
2024 * @addr: address in @vma for shared policy lookup and interleave policy
2025 * @gfp_flags: for requested zone
2026 * @mpol: pointer to mempolicy pointer for reference counted mempolicy
2027 * @nodemask: pointer to nodemask pointer for MPOL_BIND nodemask
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07002028 *
Vlastimil Babka04ec6262017-07-06 15:40:03 -07002029 * Returns a nid suitable for a huge page allocation and a pointer
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07002030 * to the struct mempolicy for conditional unref after allocation.
2031 * If the effective policy is 'BIND, returns a pointer to the mempolicy's
2032 * @nodemask for filtering the zonelist.
Miao Xiec0ff7452010-05-24 14:32:08 -07002033 *
Mel Gormand26914d2014-04-03 14:47:24 -07002034 * Must be protected by read_mems_allowed_begin()
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07002035 */
Vlastimil Babka04ec6262017-07-06 15:40:03 -07002036int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags,
2037 struct mempolicy **mpol, nodemask_t **nodemask)
Christoph Lameter5da7ca82006-01-06 00:10:46 -08002038{
Vlastimil Babka04ec6262017-07-06 15:40:03 -07002039 int nid;
Christoph Lameter5da7ca82006-01-06 00:10:46 -08002040
Oleg Nesterovdd6eecb2014-10-09 15:27:57 -07002041 *mpol = get_vma_policy(vma, addr);
Mel Gorman19770b32008-04-28 02:12:18 -07002042 *nodemask = NULL; /* assume !MPOL_BIND */
Christoph Lameter5da7ca82006-01-06 00:10:46 -08002043
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07002044 if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
Vlastimil Babka04ec6262017-07-06 15:40:03 -07002045 nid = interleave_nid(*mpol, vma, addr,
2046 huge_page_shift(hstate_vma(vma)));
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07002047 } else {
Vlastimil Babka04ec6262017-07-06 15:40:03 -07002048 nid = policy_node(gfp_flags, *mpol, numa_node_id());
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07002049 if ((*mpol)->mode == MPOL_BIND)
2050 *nodemask = &(*mpol)->v.nodes;
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07002051 }
Vlastimil Babka04ec6262017-07-06 15:40:03 -07002052 return nid;
Christoph Lameter5da7ca82006-01-06 00:10:46 -08002053}
Lee Schermerhorn06808b02009-12-14 17:58:21 -08002054
2055/*
2056 * init_nodemask_of_mempolicy
2057 *
2058 * If the current task's mempolicy is "default" [NULL], return 'false'
2059 * to indicate default policy. Otherwise, extract the policy nodemask
2060 * for 'bind' or 'interleave' policy into the argument nodemask, or
2061 * initialize the argument nodemask to contain the single node for
2062 * 'preferred' or 'local' policy and return 'true' to indicate presence
2063 * of non-default mempolicy.
2064 *
2065 * We don't bother with reference counting the mempolicy [mpol_get/put]
2066 * because the current task is examining it's own mempolicy and a task's
2067 * mempolicy is only ever changed by the task itself.
2068 *
2069 * N.B., it is the caller's responsibility to free a returned nodemask.
2070 */
2071bool init_nodemask_of_mempolicy(nodemask_t *mask)
2072{
2073 struct mempolicy *mempolicy;
2074 int nid;
2075
2076 if (!(mask && current->mempolicy))
2077 return false;
2078
Miao Xiec0ff7452010-05-24 14:32:08 -07002079 task_lock(current);
Lee Schermerhorn06808b02009-12-14 17:58:21 -08002080 mempolicy = current->mempolicy;
2081 switch (mempolicy->mode) {
2082 case MPOL_PREFERRED:
2083 if (mempolicy->flags & MPOL_F_LOCAL)
2084 nid = numa_node_id();
2085 else
2086 nid = mempolicy->v.preferred_node;
2087 init_nodemask_of_node(mask, nid);
2088 break;
2089
2090 case MPOL_BIND:
Lee Schermerhorn06808b02009-12-14 17:58:21 -08002091 case MPOL_INTERLEAVE:
2092 *mask = mempolicy->v.nodes;
2093 break;
2094
2095 default:
2096 BUG();
2097 }
Miao Xiec0ff7452010-05-24 14:32:08 -07002098 task_unlock(current);
Lee Schermerhorn06808b02009-12-14 17:58:21 -08002099
2100 return true;
2101}
Chen, Kenneth W00ac59a2006-02-03 21:51:14 +01002102#endif
Christoph Lameter5da7ca82006-01-06 00:10:46 -08002103
David Rientjes6f48d0eb2010-08-09 17:18:52 -07002104/*
2105 * mempolicy_nodemask_intersects
2106 *
2107 * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
2108 * policy. Otherwise, check for intersection between mask and the policy
2109 * nodemask for 'bind' or 'interleave' policy. For 'perferred' or 'local'
2110 * policy, always return true since it may allocate elsewhere on fallback.
2111 *
2112 * Takes task_lock(tsk) to prevent freeing of its mempolicy.
2113 */
2114bool mempolicy_nodemask_intersects(struct task_struct *tsk,
2115 const nodemask_t *mask)
2116{
2117 struct mempolicy *mempolicy;
2118 bool ret = true;
2119
2120 if (!mask)
2121 return ret;
2122 task_lock(tsk);
2123 mempolicy = tsk->mempolicy;
2124 if (!mempolicy)
2125 goto out;
2126
2127 switch (mempolicy->mode) {
2128 case MPOL_PREFERRED:
2129 /*
2130 * MPOL_PREFERRED and MPOL_F_LOCAL are only preferred nodes to
2131 * allocate from, they may fallback to other nodes when oom.
2132 * Thus, it's possible for tsk to have allocated memory from
2133 * nodes in mask.
2134 */
2135 break;
2136 case MPOL_BIND:
2137 case MPOL_INTERLEAVE:
2138 ret = nodes_intersects(mempolicy->v.nodes, *mask);
2139 break;
2140 default:
2141 BUG();
2142 }
2143out:
2144 task_unlock(tsk);
2145 return ret;
2146}
2147
Linus Torvalds1da177e2005-04-16 15:20:36 -07002148/* Allocate a page in interleaved policy.
2149 Own path because it needs to do special accounting. */
Andi Kleen662f3a02005-10-29 18:15:49 -07002150static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
2151 unsigned nid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002152{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002153 struct page *page;
2154
Vlastimil Babka04ec6262017-07-06 15:40:03 -07002155 page = __alloc_pages(gfp, order, nid);
Kemi Wang45180852017-11-15 17:38:22 -08002156 /* skip NUMA_INTERLEAVE_HIT counter update if numa stats is disabled */
2157 if (!static_branch_likely(&vm_numa_stat_key))
2158 return page;
Andrey Ryabininde55c8b2017-10-13 15:57:43 -07002159 if (page && page_to_nid(page) == nid) {
2160 preempt_disable();
2161 __inc_numa_state(page_zone(page), NUMA_INTERLEAVE_HIT);
2162 preempt_enable();
2163 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002164 return page;
2165}
2166
2167/**
Andrea Arcangeli0bbbc0b2011-01-13 15:47:05 -08002168 * alloc_pages_vma - Allocate a page for a VMA.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002169 *
2170 * @gfp:
2171 * %GFP_USER user allocation.
2172 * %GFP_KERNEL kernel allocations,
2173 * %GFP_HIGHMEM highmem/user allocations,
2174 * %GFP_FS allocation should not call back into a file system.
2175 * %GFP_ATOMIC don't sleep.
2176 *
Andrea Arcangeli0bbbc0b2011-01-13 15:47:05 -08002177 * @order:Order of the GFP allocation.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002178 * @vma: Pointer to VMA or NULL if not available.
2179 * @addr: Virtual Address of the allocation. Must be inside the VMA.
Vlastimil Babkabe97a412015-02-11 15:27:15 -08002180 * @node: Which node to prefer for allocation (modulo policy).
David Rientjes19deb762019-09-04 12:54:20 -07002181 * @hugepage: for hugepages try only the preferred node if possible
Linus Torvalds1da177e2005-04-16 15:20:36 -07002182 *
2183 * This function allocates a page from the kernel page pool and applies
2184 * a NUMA policy associated with the VMA or the current process.
Michel Lespinasse3e4e28c2020-06-08 21:33:51 -07002185 * When VMA is not NULL caller must read-lock the mmap_lock of the
Linus Torvalds1da177e2005-04-16 15:20:36 -07002186 * mm_struct of the VMA to prevent it from going away. Should be used for
Vlastimil Babkabe97a412015-02-11 15:27:15 -08002187 * all allocations for pages that will be mapped into user space. Returns
2188 * NULL when no page can be allocated.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002189 */
2190struct page *
Andrea Arcangeli0bbbc0b2011-01-13 15:47:05 -08002191alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
David Rientjes19deb762019-09-04 12:54:20 -07002192 unsigned long addr, int node, bool hugepage)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002193{
Mel Gormancc9a6c82012-03-21 16:34:11 -07002194 struct mempolicy *pol;
Miao Xiec0ff7452010-05-24 14:32:08 -07002195 struct page *page;
Vlastimil Babka04ec6262017-07-06 15:40:03 -07002196 int preferred_nid;
Vlastimil Babkabe97a412015-02-11 15:27:15 -08002197 nodemask_t *nmask;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002198
Oleg Nesterovdd6eecb2014-10-09 15:27:57 -07002199 pol = get_vma_policy(vma, addr);
Mel Gormancc9a6c82012-03-21 16:34:11 -07002200
Vlastimil Babkabe97a412015-02-11 15:27:15 -08002201 if (pol->mode == MPOL_INTERLEAVE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002202 unsigned nid;
Christoph Lameter5da7ca82006-01-06 00:10:46 -08002203
Andi Kleen8eac5632011-02-25 14:44:28 -08002204 nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07002205 mpol_cond_put(pol);
Andrea Arcangeli0bbbc0b2011-01-13 15:47:05 -08002206 page = alloc_page_interleave(gfp, order, nid);
Vlastimil Babkabe97a412015-02-11 15:27:15 -08002207 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002208 }
Vlastimil Babkabe97a412015-02-11 15:27:15 -08002209
David Rientjes19deb762019-09-04 12:54:20 -07002210 if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
2211 int hpage_node = node;
2212
2213 /*
2214 * For hugepage allocation and non-interleave policy which
2215 * allows the current node (or other explicitly preferred
2216 * node) we only try to allocate from the current/preferred
2217 * node and don't fall back to other nodes, as the cost of
2218 * remote accesses would likely offset THP benefits.
2219 *
2220 * If the policy is interleave, or does not allow the current
2221 * node in its nodemask, we allocate the standard way.
2222 */
2223 if (pol->mode == MPOL_PREFERRED && !(pol->flags & MPOL_F_LOCAL))
2224 hpage_node = pol->v.preferred_node;
2225
2226 nmask = policy_nodemask(gfp, pol);
2227 if (!nmask || node_isset(hpage_node, *nmask)) {
2228 mpol_cond_put(pol);
Vlastimil Babkacc638f32020-01-13 16:29:04 -08002229 /*
2230 * First, try to allocate THP only on local node, but
2231 * don't reclaim unnecessarily, just compact.
2232 */
David Rientjes19deb762019-09-04 12:54:20 -07002233 page = __alloc_pages_node(hpage_node,
Vlastimil Babkacc638f32020-01-13 16:29:04 -08002234 gfp | __GFP_THISNODE | __GFP_NORETRY, order);
David Rientjes76e654c2019-09-04 12:54:25 -07002235
2236 /*
2237 * If hugepage allocations are configured to always
2238 * synchronous compact or the vma has been madvised
2239 * to prefer hugepage backing, retry allowing remote
Vlastimil Babkacc638f32020-01-13 16:29:04 -08002240 * memory with both reclaim and compact as well.
David Rientjes76e654c2019-09-04 12:54:25 -07002241 */
2242 if (!page && (gfp & __GFP_DIRECT_RECLAIM))
2243 page = __alloc_pages_node(hpage_node,
Vlastimil Babkacc638f32020-01-13 16:29:04 -08002244 gfp, order);
David Rientjes76e654c2019-09-04 12:54:25 -07002245
David Rientjes19deb762019-09-04 12:54:20 -07002246 goto out;
2247 }
2248 }
2249
Vlastimil Babkabe97a412015-02-11 15:27:15 -08002250 nmask = policy_nodemask(gfp, pol);
Vlastimil Babka04ec6262017-07-06 15:40:03 -07002251 preferred_nid = policy_node(gfp, pol, node);
2252 page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask);
Vlastimil Babkad51e9892017-01-24 15:18:18 -08002253 mpol_cond_put(pol);
Vlastimil Babkabe97a412015-02-11 15:27:15 -08002254out:
Miao Xiec0ff7452010-05-24 14:32:08 -07002255 return page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002256}
Christoph Hellwig69262212019-06-26 14:27:05 +02002257EXPORT_SYMBOL(alloc_pages_vma);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002258
2259/**
2260 * alloc_pages_current - Allocate pages.
2261 *
2262 * @gfp:
2263 * %GFP_USER user allocation,
2264 * %GFP_KERNEL kernel allocation,
2265 * %GFP_HIGHMEM highmem allocation,
2266 * %GFP_FS don't call back into a file system.
2267 * %GFP_ATOMIC don't sleep.
2268 * @order: Power of two of allocation size in pages. 0 is a single page.
2269 *
2270 * Allocate a page from the kernel page pool. When not in
2271 * interrupt context and apply the current process NUMA policy.
2272 * Returns NULL when no page can be allocated.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002273 */
Al Virodd0fc662005-10-07 07:46:04 +01002274struct page *alloc_pages_current(gfp_t gfp, unsigned order)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002275{
Oleg Nesterov8d902742014-10-09 15:27:45 -07002276 struct mempolicy *pol = &default_policy;
Miao Xiec0ff7452010-05-24 14:32:08 -07002277 struct page *page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002278
Oleg Nesterov8d902742014-10-09 15:27:45 -07002279 if (!in_interrupt() && !(gfp & __GFP_THISNODE))
2280 pol = get_task_policy(current);
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07002281
2282 /*
2283 * No reference counting needed for current->mempolicy
2284 * nor system default_policy
2285 */
Lee Schermerhorn45c47452008-04-28 02:13:12 -07002286 if (pol->mode == MPOL_INTERLEAVE)
Miao Xiec0ff7452010-05-24 14:32:08 -07002287 page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
2288 else
2289 page = __alloc_pages_nodemask(gfp, order,
Vlastimil Babka04ec6262017-07-06 15:40:03 -07002290 policy_node(gfp, pol, numa_node_id()),
Andi Kleen5c4b4be2011-03-04 17:36:32 -08002291 policy_nodemask(gfp, pol));
Mel Gormancc9a6c82012-03-21 16:34:11 -07002292
Miao Xiec0ff7452010-05-24 14:32:08 -07002293 return page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002294}
2295EXPORT_SYMBOL(alloc_pages_current);
2296
Oleg Nesterovef0855d2013-09-11 14:20:14 -07002297int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2298{
2299 struct mempolicy *pol = mpol_dup(vma_policy(src));
2300
2301 if (IS_ERR(pol))
2302 return PTR_ERR(pol);
2303 dst->vm_policy = pol;
2304 return 0;
2305}
2306
Paul Jackson42253992006-01-08 01:01:59 -08002307/*
Lee Schermerhorn846a16b2008-04-28 02:13:09 -07002308 * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
Paul Jackson42253992006-01-08 01:01:59 -08002309 * rebinds the mempolicy its copying by calling mpol_rebind_policy()
2310 * with the mems_allowed returned by cpuset_mems_allowed(). This
2311 * keeps mempolicies cpuset relative after its cpuset moves. See
2312 * further kernel/cpuset.c update_nodemask().
Miao Xie708c1bb2010-05-24 14:32:07 -07002313 *
2314 * current's mempolicy may be rebinded by the other task(the task that changes
2315 * cpuset's mems), so we needn't do rebind work for current task.
Paul Jackson42253992006-01-08 01:01:59 -08002316 */
Paul Jackson42253992006-01-08 01:01:59 -08002317
Lee Schermerhorn846a16b2008-04-28 02:13:09 -07002318/* Slow path of a mempolicy duplicate */
2319struct mempolicy *__mpol_dup(struct mempolicy *old)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002320{
2321 struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2322
2323 if (!new)
2324 return ERR_PTR(-ENOMEM);
Miao Xie708c1bb2010-05-24 14:32:07 -07002325
2326 /* task's mempolicy is protected by alloc_lock */
2327 if (old == current->mempolicy) {
2328 task_lock(current);
2329 *new = *old;
2330 task_unlock(current);
2331 } else
2332 *new = *old;
2333
Paul Jackson42253992006-01-08 01:01:59 -08002334 if (current_cpuset_is_being_rebound()) {
2335 nodemask_t mems = cpuset_mems_allowed(current);
Vlastimil Babka213980c2017-07-06 15:40:06 -07002336 mpol_rebind_policy(new, &mems);
Paul Jackson42253992006-01-08 01:01:59 -08002337 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002338 atomic_set(&new->refcnt, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002339 return new;
2340}
2341
2342/* Slow path of a mempolicy comparison */
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002343bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002344{
2345 if (!a || !b)
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002346 return false;
Lee Schermerhorn45c47452008-04-28 02:13:12 -07002347 if (a->mode != b->mode)
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002348 return false;
Bob Liu19800502010-05-24 14:32:01 -07002349 if (a->flags != b->flags)
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002350 return false;
Bob Liu19800502010-05-24 14:32:01 -07002351 if (mpol_store_user_nodemask(a))
2352 if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002353 return false;
Bob Liu19800502010-05-24 14:32:01 -07002354
Lee Schermerhorn45c47452008-04-28 02:13:12 -07002355 switch (a->mode) {
Mel Gorman19770b32008-04-28 02:12:18 -07002356 case MPOL_BIND:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002357 case MPOL_INTERLEAVE:
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002358 return !!nodes_equal(a->v.nodes, b->v.nodes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002359 case MPOL_PREFERRED:
Yisheng Xie8970a632018-03-22 16:17:02 -07002360 /* a's ->flags is the same as b's */
2361 if (a->flags & MPOL_F_LOCAL)
2362 return true;
Namhyung Kim75719662011-03-22 16:33:02 -07002363 return a->v.preferred_node == b->v.preferred_node;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002364 default:
2365 BUG();
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002366 return false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002367 }
2368}
2369
Linus Torvalds1da177e2005-04-16 15:20:36 -07002370/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002371 * Shared memory backing store policy support.
2372 *
2373 * Remember policies even when nobody has shared memory mapped.
2374 * The policies are kept in Red-Black tree linked from the inode.
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002375 * They are protected by the sp->lock rwlock, which should be held
Linus Torvalds1da177e2005-04-16 15:20:36 -07002376 * for any accesses to the tree.
2377 */
2378
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002379/*
2380 * lookup first element intersecting start-end. Caller holds sp->lock for
2381 * reading or for writing
2382 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002383static struct sp_node *
2384sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
2385{
2386 struct rb_node *n = sp->root.rb_node;
2387
2388 while (n) {
2389 struct sp_node *p = rb_entry(n, struct sp_node, nd);
2390
2391 if (start >= p->end)
2392 n = n->rb_right;
2393 else if (end <= p->start)
2394 n = n->rb_left;
2395 else
2396 break;
2397 }
2398 if (!n)
2399 return NULL;
2400 for (;;) {
2401 struct sp_node *w = NULL;
2402 struct rb_node *prev = rb_prev(n);
2403 if (!prev)
2404 break;
2405 w = rb_entry(prev, struct sp_node, nd);
2406 if (w->end <= start)
2407 break;
2408 n = prev;
2409 }
2410 return rb_entry(n, struct sp_node, nd);
2411}
2412
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002413/*
2414 * Insert a new shared policy into the list. Caller holds sp->lock for
2415 * writing.
2416 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002417static void sp_insert(struct shared_policy *sp, struct sp_node *new)
2418{
2419 struct rb_node **p = &sp->root.rb_node;
2420 struct rb_node *parent = NULL;
2421 struct sp_node *nd;
2422
2423 while (*p) {
2424 parent = *p;
2425 nd = rb_entry(parent, struct sp_node, nd);
2426 if (new->start < nd->start)
2427 p = &(*p)->rb_left;
2428 else if (new->end > nd->end)
2429 p = &(*p)->rb_right;
2430 else
2431 BUG();
2432 }
2433 rb_link_node(&new->nd, parent, p);
2434 rb_insert_color(&new->nd, &sp->root);
Paul Mundt140d5a42007-07-15 23:38:16 -07002435 pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
Lee Schermerhorn45c47452008-04-28 02:13:12 -07002436 new->policy ? new->policy->mode : 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002437}
2438
2439/* Find shared policy intersecting idx */
2440struct mempolicy *
2441mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
2442{
2443 struct mempolicy *pol = NULL;
2444 struct sp_node *sn;
2445
2446 if (!sp->root.rb_node)
2447 return NULL;
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002448 read_lock(&sp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002449 sn = sp_lookup(sp, idx, idx+1);
2450 if (sn) {
2451 mpol_get(sn->policy);
2452 pol = sn->policy;
2453 }
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002454 read_unlock(&sp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002455 return pol;
2456}
2457
KOSAKI Motohiro63f74ca2012-10-08 16:29:19 -07002458static void sp_free(struct sp_node *n)
2459{
2460 mpol_put(n->policy);
2461 kmem_cache_free(sn_cache, n);
2462}
2463
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002464/**
2465 * mpol_misplaced - check whether current page node is valid in policy
2466 *
Fabian Frederickb46e14a2014-06-04 16:08:18 -07002467 * @page: page to be checked
2468 * @vma: vm area where page mapped
2469 * @addr: virtual address where page mapped
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002470 *
2471 * Lookup current policy node id for vma,addr and "compare to" page's
2472 * node id.
2473 *
2474 * Returns:
2475 * -1 - not misplaced, page is in the right node
2476 * node - node id where the page should be
2477 *
2478 * Policy determination "mimics" alloc_page_vma().
2479 * Called from fault path where we know the vma and faulting address.
2480 */
2481int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
2482{
2483 struct mempolicy *pol;
Mel Gormanc33d6c02016-05-19 17:14:10 -07002484 struct zoneref *z;
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002485 int curnid = page_to_nid(page);
2486 unsigned long pgoff;
Peter Zijlstra90572892013-10-07 11:29:20 +01002487 int thiscpu = raw_smp_processor_id();
2488 int thisnid = cpu_to_node(thiscpu);
Anshuman Khandual98fa15f2019-03-05 15:42:58 -08002489 int polnid = NUMA_NO_NODE;
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002490 int ret = -1;
2491
Oleg Nesterovdd6eecb2014-10-09 15:27:57 -07002492 pol = get_vma_policy(vma, addr);
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002493 if (!(pol->flags & MPOL_F_MOF))
2494 goto out;
2495
2496 switch (pol->mode) {
2497 case MPOL_INTERLEAVE:
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002498 pgoff = vma->vm_pgoff;
2499 pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
Laurent Dufour98c70ba2017-09-08 16:12:39 -07002500 polnid = offset_il_node(pol, pgoff);
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002501 break;
2502
2503 case MPOL_PREFERRED:
2504 if (pol->flags & MPOL_F_LOCAL)
2505 polnid = numa_node_id();
2506 else
2507 polnid = pol->v.preferred_node;
2508 break;
2509
2510 case MPOL_BIND:
Mel Gormanc33d6c02016-05-19 17:14:10 -07002511
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002512 /*
2513 * allows binding to multiple nodes.
2514 * use current page if in policy nodemask,
2515 * else select nearest allowed node, if any.
2516 * If no allowed nodes, use current [!misplaced].
2517 */
2518 if (node_isset(curnid, pol->v.nodes))
2519 goto out;
Mel Gormanc33d6c02016-05-19 17:14:10 -07002520 z = first_zones_zonelist(
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002521 node_zonelist(numa_node_id(), GFP_HIGHUSER),
2522 gfp_zone(GFP_HIGHUSER),
Mel Gormanc33d6c02016-05-19 17:14:10 -07002523 &pol->v.nodes);
Pavel Tatashinc1093b72018-08-21 21:53:32 -07002524 polnid = zone_to_nid(z->zone);
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002525 break;
2526
2527 default:
2528 BUG();
2529 }
Mel Gorman5606e382012-11-02 18:19:13 +00002530
2531 /* Migrate the page towards the node whose CPU is referencing it */
Mel Gormane42c8ff2012-11-12 09:17:07 +00002532 if (pol->flags & MPOL_F_MORON) {
Peter Zijlstra90572892013-10-07 11:29:20 +01002533 polnid = thisnid;
Mel Gorman5606e382012-11-02 18:19:13 +00002534
Rik van Riel10f39042014-01-27 17:03:44 -05002535 if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
Rik van Rielde1c9ce2013-10-07 11:29:39 +01002536 goto out;
Mel Gormane42c8ff2012-11-12 09:17:07 +00002537 }
2538
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002539 if (curnid != polnid)
2540 ret = polnid;
2541out:
2542 mpol_cond_put(pol);
2543
2544 return ret;
2545}
2546
David Rientjesc11600e2016-09-01 16:15:07 -07002547/*
2548 * Drop the (possibly final) reference to task->mempolicy. It needs to be
2549 * dropped after task->mempolicy is set to NULL so that any allocation done as
2550 * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed
2551 * policy.
2552 */
2553void mpol_put_task_policy(struct task_struct *task)
2554{
2555 struct mempolicy *pol;
2556
2557 task_lock(task);
2558 pol = task->mempolicy;
2559 task->mempolicy = NULL;
2560 task_unlock(task);
2561 mpol_put(pol);
2562}
2563
Linus Torvalds1da177e2005-04-16 15:20:36 -07002564static void sp_delete(struct shared_policy *sp, struct sp_node *n)
2565{
Paul Mundt140d5a42007-07-15 23:38:16 -07002566 pr_debug("deleting %lx-l%lx\n", n->start, n->end);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002567 rb_erase(&n->nd, &sp->root);
KOSAKI Motohiro63f74ca2012-10-08 16:29:19 -07002568 sp_free(n);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002569}
2570
Mel Gorman42288fe2012-12-21 23:10:25 +00002571static void sp_node_init(struct sp_node *node, unsigned long start,
2572 unsigned long end, struct mempolicy *pol)
2573{
2574 node->start = start;
2575 node->end = end;
2576 node->policy = pol;
2577}
2578
Adrian Bunkdbcb0f12007-10-16 01:26:26 -07002579static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2580 struct mempolicy *pol)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002581{
KOSAKI Motohiro869833f2012-10-08 16:29:16 -07002582 struct sp_node *n;
2583 struct mempolicy *newpol;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002584
KOSAKI Motohiro869833f2012-10-08 16:29:16 -07002585 n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002586 if (!n)
2587 return NULL;
KOSAKI Motohiro869833f2012-10-08 16:29:16 -07002588
2589 newpol = mpol_dup(pol);
2590 if (IS_ERR(newpol)) {
2591 kmem_cache_free(sn_cache, n);
2592 return NULL;
2593 }
2594 newpol->flags |= MPOL_F_SHARED;
Mel Gorman42288fe2012-12-21 23:10:25 +00002595 sp_node_init(n, start, end, newpol);
KOSAKI Motohiro869833f2012-10-08 16:29:16 -07002596
Linus Torvalds1da177e2005-04-16 15:20:36 -07002597 return n;
2598}
2599
2600/* Replace a policy range. */
2601static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
2602 unsigned long end, struct sp_node *new)
2603{
Mel Gormanb22d1272012-10-08 16:29:17 -07002604 struct sp_node *n;
Mel Gorman42288fe2012-12-21 23:10:25 +00002605 struct sp_node *n_new = NULL;
2606 struct mempolicy *mpol_new = NULL;
Mel Gormanb22d1272012-10-08 16:29:17 -07002607 int ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002608
Mel Gorman42288fe2012-12-21 23:10:25 +00002609restart:
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002610 write_lock(&sp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002611 n = sp_lookup(sp, start, end);
2612 /* Take care of old policies in the same range. */
2613 while (n && n->start < end) {
2614 struct rb_node *next = rb_next(&n->nd);
2615 if (n->start >= start) {
2616 if (n->end <= end)
2617 sp_delete(sp, n);
2618 else
2619 n->start = end;
2620 } else {
2621 /* Old policy spanning whole new range. */
2622 if (n->end > end) {
Mel Gorman42288fe2012-12-21 23:10:25 +00002623 if (!n_new)
2624 goto alloc_new;
2625
2626 *mpol_new = *n->policy;
2627 atomic_set(&mpol_new->refcnt, 1);
KOSAKI Motohiro78806392013-03-08 12:43:29 -08002628 sp_node_init(n_new, end, n->end, mpol_new);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002629 n->end = start;
Hillf Danton5ca39572013-03-08 12:43:28 -08002630 sp_insert(sp, n_new);
Mel Gorman42288fe2012-12-21 23:10:25 +00002631 n_new = NULL;
2632 mpol_new = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002633 break;
2634 } else
2635 n->end = start;
2636 }
2637 if (!next)
2638 break;
2639 n = rb_entry(next, struct sp_node, nd);
2640 }
2641 if (new)
2642 sp_insert(sp, new);
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002643 write_unlock(&sp->lock);
Mel Gorman42288fe2012-12-21 23:10:25 +00002644 ret = 0;
2645
2646err_out:
2647 if (mpol_new)
2648 mpol_put(mpol_new);
2649 if (n_new)
2650 kmem_cache_free(sn_cache, n_new);
2651
Mel Gormanb22d1272012-10-08 16:29:17 -07002652 return ret;
Mel Gorman42288fe2012-12-21 23:10:25 +00002653
2654alloc_new:
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002655 write_unlock(&sp->lock);
Mel Gorman42288fe2012-12-21 23:10:25 +00002656 ret = -ENOMEM;
2657 n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2658 if (!n_new)
2659 goto err_out;
2660 mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2661 if (!mpol_new)
2662 goto err_out;
2663 goto restart;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002664}
2665
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002666/**
2667 * mpol_shared_policy_init - initialize shared policy for inode
2668 * @sp: pointer to inode shared policy
2669 * @mpol: struct mempolicy to install
2670 *
2671 * Install non-NULL @mpol in inode's shared policy rb-tree.
2672 * On entry, the current task has a reference on a non-NULL @mpol.
2673 * This must be released on exit.
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -07002674 * This is called at get_inode() calls and we can use GFP_KERNEL.
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002675 */
2676void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
Robin Holt7339ff82006-01-14 13:20:48 -08002677{
Miao Xie58568d22009-06-16 15:31:49 -07002678 int ret;
2679
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002680 sp->root = RB_ROOT; /* empty tree == default mempolicy */
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002681 rwlock_init(&sp->lock);
Robin Holt7339ff82006-01-14 13:20:48 -08002682
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002683 if (mpol) {
2684 struct vm_area_struct pvma;
2685 struct mempolicy *new;
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -07002686 NODEMASK_SCRATCH(scratch);
Robin Holt7339ff82006-01-14 13:20:48 -08002687
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -07002688 if (!scratch)
Lee Schermerhorn5c0c1652010-06-29 15:05:30 -07002689 goto put_mpol;
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002690 /* contextualize the tmpfs mount point mempolicy */
2691 new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
Lee Schermerhorn15d77832010-05-24 14:32:04 -07002692 if (IS_ERR(new))
Dan Carpenter0cae3452010-05-25 23:42:58 -07002693 goto free_scratch; /* no valid nodemask intersection */
Miao Xie58568d22009-06-16 15:31:49 -07002694
2695 task_lock(current);
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -07002696 ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
Miao Xie58568d22009-06-16 15:31:49 -07002697 task_unlock(current);
Lee Schermerhorn15d77832010-05-24 14:32:04 -07002698 if (ret)
Lee Schermerhorn5c0c1652010-06-29 15:05:30 -07002699 goto put_new;
Robin Holt7339ff82006-01-14 13:20:48 -08002700
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002701 /* Create pseudo-vma that contains just the policy */
Kirill A. Shutemov2c4541e2018-07-26 16:37:30 -07002702 vma_init(&pvma, NULL);
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002703 pvma.vm_end = TASK_SIZE; /* policy covers entire file */
2704 mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
Lee Schermerhorn15d77832010-05-24 14:32:04 -07002705
Lee Schermerhorn5c0c1652010-06-29 15:05:30 -07002706put_new:
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002707 mpol_put(new); /* drop initial ref */
Dan Carpenter0cae3452010-05-25 23:42:58 -07002708free_scratch:
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -07002709 NODEMASK_SCRATCH_FREE(scratch);
Lee Schermerhorn5c0c1652010-06-29 15:05:30 -07002710put_mpol:
2711 mpol_put(mpol); /* drop our incoming ref on sb mpol */
Robin Holt7339ff82006-01-14 13:20:48 -08002712 }
2713}
2714
Linus Torvalds1da177e2005-04-16 15:20:36 -07002715int mpol_set_shared_policy(struct shared_policy *info,
2716 struct vm_area_struct *vma, struct mempolicy *npol)
2717{
2718 int err;
2719 struct sp_node *new = NULL;
2720 unsigned long sz = vma_pages(vma);
2721
David Rientjes028fec42008-04-28 02:12:25 -07002722 pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002723 vma->vm_pgoff,
Lee Schermerhorn45c47452008-04-28 02:13:12 -07002724 sz, npol ? npol->mode : -1,
David Rientjes028fec42008-04-28 02:12:25 -07002725 npol ? npol->flags : -1,
David Rientjes00ef2d22013-02-22 16:35:36 -08002726 npol ? nodes_addr(npol->v.nodes)[0] : NUMA_NO_NODE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002727
2728 if (npol) {
2729 new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
2730 if (!new)
2731 return -ENOMEM;
2732 }
2733 err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
2734 if (err && new)
KOSAKI Motohiro63f74ca2012-10-08 16:29:19 -07002735 sp_free(new);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002736 return err;
2737}
2738
2739/* Free a backing policy store on inode delete. */
2740void mpol_free_shared_policy(struct shared_policy *p)
2741{
2742 struct sp_node *n;
2743 struct rb_node *next;
2744
2745 if (!p->root.rb_node)
2746 return;
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002747 write_lock(&p->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002748 next = rb_first(&p->root);
2749 while (next) {
2750 n = rb_entry(next, struct sp_node, nd);
2751 next = rb_next(&n->nd);
KOSAKI Motohiro63f74ca2012-10-08 16:29:19 -07002752 sp_delete(p, n);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002753 }
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002754 write_unlock(&p->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002755}
2756
Mel Gorman1a687c22012-11-22 11:16:36 +00002757#ifdef CONFIG_NUMA_BALANCING
Mel Gormanc2976632014-01-29 14:05:42 -08002758static int __initdata numabalancing_override;
Mel Gorman1a687c22012-11-22 11:16:36 +00002759
2760static void __init check_numabalancing_enable(void)
2761{
2762 bool numabalancing_default = false;
2763
2764 if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
2765 numabalancing_default = true;
2766
Mel Gormanc2976632014-01-29 14:05:42 -08002767 /* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
2768 if (numabalancing_override)
2769 set_numabalancing_state(numabalancing_override == 1);
2770
Mel Gormanb0dc2b92015-05-14 15:17:09 -07002771 if (num_online_nodes() > 1 && !numabalancing_override) {
Joe Perches756a0252016-03-17 14:19:47 -07002772 pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n",
Mel Gormanc2976632014-01-29 14:05:42 -08002773 numabalancing_default ? "Enabling" : "Disabling");
Mel Gorman1a687c22012-11-22 11:16:36 +00002774 set_numabalancing_state(numabalancing_default);
2775 }
2776}
2777
2778static int __init setup_numabalancing(char *str)
2779{
2780 int ret = 0;
2781 if (!str)
2782 goto out;
Mel Gorman1a687c22012-11-22 11:16:36 +00002783
2784 if (!strcmp(str, "enable")) {
Mel Gormanc2976632014-01-29 14:05:42 -08002785 numabalancing_override = 1;
Mel Gorman1a687c22012-11-22 11:16:36 +00002786 ret = 1;
2787 } else if (!strcmp(str, "disable")) {
Mel Gormanc2976632014-01-29 14:05:42 -08002788 numabalancing_override = -1;
Mel Gorman1a687c22012-11-22 11:16:36 +00002789 ret = 1;
2790 }
2791out:
2792 if (!ret)
Andrew Morton4a404be2014-01-29 14:05:43 -08002793 pr_warn("Unable to parse numa_balancing=\n");
Mel Gorman1a687c22012-11-22 11:16:36 +00002794
2795 return ret;
2796}
2797__setup("numa_balancing=", setup_numabalancing);
2798#else
2799static inline void __init check_numabalancing_enable(void)
2800{
2801}
2802#endif /* CONFIG_NUMA_BALANCING */
2803
Linus Torvalds1da177e2005-04-16 15:20:36 -07002804/* assumes fs == KERNEL_DS */
2805void __init numa_policy_init(void)
2806{
Paul Mundtb71636e2007-07-15 23:38:15 -07002807 nodemask_t interleave_nodes;
2808 unsigned long largest = 0;
2809 int nid, prefer = 0;
2810
Linus Torvalds1da177e2005-04-16 15:20:36 -07002811 policy_cache = kmem_cache_create("numa_policy",
2812 sizeof(struct mempolicy),
Paul Mundt20c2df82007-07-20 10:11:58 +09002813 0, SLAB_PANIC, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002814
2815 sn_cache = kmem_cache_create("shared_policy_node",
2816 sizeof(struct sp_node),
Paul Mundt20c2df82007-07-20 10:11:58 +09002817 0, SLAB_PANIC, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002818
Mel Gorman5606e382012-11-02 18:19:13 +00002819 for_each_node(nid) {
2820 preferred_node_policy[nid] = (struct mempolicy) {
2821 .refcnt = ATOMIC_INIT(1),
2822 .mode = MPOL_PREFERRED,
2823 .flags = MPOL_F_MOF | MPOL_F_MORON,
2824 .v = { .preferred_node = nid, },
2825 };
2826 }
2827
Paul Mundtb71636e2007-07-15 23:38:15 -07002828 /*
2829 * Set interleaving policy for system init. Interleaving is only
2830 * enabled across suitably sized nodes (default is >= 16MB), or
2831 * fall back to the largest node if they're all smaller.
2832 */
2833 nodes_clear(interleave_nodes);
Lai Jiangshan01f13bd2012-12-12 13:51:33 -08002834 for_each_node_state(nid, N_MEMORY) {
Paul Mundtb71636e2007-07-15 23:38:15 -07002835 unsigned long total_pages = node_present_pages(nid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002836
Paul Mundtb71636e2007-07-15 23:38:15 -07002837 /* Preserve the largest node */
2838 if (largest < total_pages) {
2839 largest = total_pages;
2840 prefer = nid;
2841 }
2842
2843 /* Interleave this node? */
2844 if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2845 node_set(nid, interleave_nodes);
2846 }
2847
2848 /* All too small, use the largest */
2849 if (unlikely(nodes_empty(interleave_nodes)))
2850 node_set(prefer, interleave_nodes);
2851
David Rientjes028fec42008-04-28 02:12:25 -07002852 if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
Mitchel Humpherysb1de0d12014-06-06 14:38:30 -07002853 pr_err("%s: interleaving failed\n", __func__);
Mel Gorman1a687c22012-11-22 11:16:36 +00002854
2855 check_numabalancing_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002856}
2857
Christoph Lameter8bccd852005-10-29 18:16:59 -07002858/* Reset policy of current process to default */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002859void numa_default_policy(void)
2860{
David Rientjes028fec42008-04-28 02:12:25 -07002861 do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002862}
Paul Jackson68860ec2005-10-30 15:02:36 -08002863
Paul Jackson42253992006-01-08 01:01:59 -08002864/*
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002865 * Parse and format mempolicy from/to strings
2866 */
2867
2868/*
Hugh Dickinsf2a07f42013-01-02 02:01:33 -08002869 * "local" is implemented internally by MPOL_PREFERRED with MPOL_F_LOCAL flag.
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002870 */
Lee Schermerhorn345ace92010-05-24 14:32:04 -07002871static const char * const policy_modes[] =
2872{
2873 [MPOL_DEFAULT] = "default",
2874 [MPOL_PREFERRED] = "prefer",
2875 [MPOL_BIND] = "bind",
2876 [MPOL_INTERLEAVE] = "interleave",
Lee Schermerhornd3a71032012-10-25 14:16:29 +02002877 [MPOL_LOCAL] = "local",
Lee Schermerhorn345ace92010-05-24 14:32:04 -07002878};
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002879
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002880
2881#ifdef CONFIG_TMPFS
2882/**
Hugh Dickinsf2a07f42013-01-02 02:01:33 -08002883 * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002884 * @str: string containing mempolicy to parse
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002885 * @mpol: pointer to struct mempolicy pointer, returned on success.
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002886 *
2887 * Format of input:
2888 * <mode>[=<flags>][:<nodelist>]
2889 *
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002890 * On success, returns 0, else 1
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002891 */
Hugh Dickinsa7a88b22013-01-02 02:04:23 -08002892int mpol_parse_str(char *str, struct mempolicy **mpol)
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002893{
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002894 struct mempolicy *new = NULL;
Hugh Dickinsf2a07f42013-01-02 02:01:33 -08002895 unsigned short mode_flags;
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002896 nodemask_t nodes;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002897 char *nodelist = strchr(str, ':');
2898 char *flags = strchr(str, '=');
zhong jiangdedf2c72018-10-26 15:06:57 -07002899 int err = 1, mode;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002900
Dan Carpenterc7a91bc2020-01-30 22:11:07 -08002901 if (flags)
2902 *flags++ = '\0'; /* terminate mode string */
2903
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002904 if (nodelist) {
2905 /* NUL-terminate mode or flags string */
2906 *nodelist++ = '\0';
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002907 if (nodelist_parse(nodelist, nodes))
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002908 goto out;
Lai Jiangshan01f13bd2012-12-12 13:51:33 -08002909 if (!nodes_subset(nodes, node_states[N_MEMORY]))
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002910 goto out;
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002911 } else
2912 nodes_clear(nodes);
2913
zhong jiangdedf2c72018-10-26 15:06:57 -07002914 mode = match_string(policy_modes, MPOL_MAX, str);
2915 if (mode < 0)
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002916 goto out;
2917
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002918 switch (mode) {
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002919 case MPOL_PREFERRED:
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002920 /*
Randy Dunlapaa9f7d52020-04-01 21:10:58 -07002921 * Insist on a nodelist of one node only, although later
2922 * we use first_node(nodes) to grab a single node, so here
2923 * nodelist (or nodes) cannot be empty.
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002924 */
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002925 if (nodelist) {
2926 char *rest = nodelist;
2927 while (isdigit(*rest))
2928 rest++;
KOSAKI Motohiro926f2ae2010-03-23 13:35:32 -07002929 if (*rest)
2930 goto out;
Randy Dunlapaa9f7d52020-04-01 21:10:58 -07002931 if (nodes_empty(nodes))
2932 goto out;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002933 }
2934 break;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002935 case MPOL_INTERLEAVE:
2936 /*
2937 * Default to online nodes with memory if no nodelist
2938 */
2939 if (!nodelist)
Lai Jiangshan01f13bd2012-12-12 13:51:33 -08002940 nodes = node_states[N_MEMORY];
Lee Schermerhorn3f226aa2008-04-28 02:13:24 -07002941 break;
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002942 case MPOL_LOCAL:
Lee Schermerhorn3f226aa2008-04-28 02:13:24 -07002943 /*
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002944 * Don't allow a nodelist; mpol_new() checks flags
Lee Schermerhorn3f226aa2008-04-28 02:13:24 -07002945 */
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002946 if (nodelist)
Lee Schermerhorn3f226aa2008-04-28 02:13:24 -07002947 goto out;
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002948 mode = MPOL_PREFERRED;
Lee Schermerhorn3f226aa2008-04-28 02:13:24 -07002949 break;
Ravikiran G Thirumalai413b43d2010-03-23 13:35:28 -07002950 case MPOL_DEFAULT:
2951 /*
2952 * Insist on a empty nodelist
2953 */
2954 if (!nodelist)
2955 err = 0;
2956 goto out;
KOSAKI Motohirod69b2e62010-03-23 13:35:30 -07002957 case MPOL_BIND:
2958 /*
2959 * Insist on a nodelist
2960 */
2961 if (!nodelist)
2962 goto out;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002963 }
2964
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002965 mode_flags = 0;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002966 if (flags) {
2967 /*
2968 * Currently, we only support two mutually exclusive
2969 * mode flags.
2970 */
2971 if (!strcmp(flags, "static"))
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002972 mode_flags |= MPOL_F_STATIC_NODES;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002973 else if (!strcmp(flags, "relative"))
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002974 mode_flags |= MPOL_F_RELATIVE_NODES;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002975 else
KOSAKI Motohiro926f2ae2010-03-23 13:35:32 -07002976 goto out;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002977 }
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002978
2979 new = mpol_new(mode, mode_flags, &nodes);
2980 if (IS_ERR(new))
KOSAKI Motohiro926f2ae2010-03-23 13:35:32 -07002981 goto out;
2982
Hugh Dickinsf2a07f42013-01-02 02:01:33 -08002983 /*
2984 * Save nodes for mpol_to_str() to show the tmpfs mount options
2985 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
2986 */
2987 if (mode != MPOL_PREFERRED)
2988 new->v.nodes = nodes;
2989 else if (nodelist)
2990 new->v.preferred_node = first_node(nodes);
2991 else
2992 new->flags |= MPOL_F_LOCAL;
2993
2994 /*
2995 * Save nodes for contextualization: this will be used to "clone"
2996 * the mempolicy in a specific context [cpuset] at a later time.
2997 */
2998 new->w.user_nodemask = nodes;
2999
KOSAKI Motohiro926f2ae2010-03-23 13:35:32 -07003000 err = 0;
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07003001
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07003002out:
3003 /* Restore string for error message */
3004 if (nodelist)
3005 *--nodelist = ':';
3006 if (flags)
3007 *--flags = '=';
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07003008 if (!err)
3009 *mpol = new;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07003010 return err;
3011}
3012#endif /* CONFIG_TMPFS */
3013
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07003014/**
3015 * mpol_to_str - format a mempolicy structure for printing
3016 * @buffer: to contain formatted mempolicy string
3017 * @maxlen: length of @buffer
3018 * @pol: pointer to mempolicy to be formatted
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07003019 *
David Rientjes948927e2013-11-12 15:07:28 -08003020 * Convert @pol into a string. If @buffer is too short, truncate the string.
3021 * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the
3022 * longest flag, "relative", and to display at least a few node ids.
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08003023 */
David Rientjes948927e2013-11-12 15:07:28 -08003024void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08003025{
3026 char *p = buffer;
David Rientjes948927e2013-11-12 15:07:28 -08003027 nodemask_t nodes = NODE_MASK_NONE;
3028 unsigned short mode = MPOL_DEFAULT;
3029 unsigned short flags = 0;
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08003030
David Rientjes8790c71a2014-01-30 15:46:08 -08003031 if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
Lee Schermerhornbea904d2008-04-28 02:13:18 -07003032 mode = pol->mode;
David Rientjes948927e2013-11-12 15:07:28 -08003033 flags = pol->flags;
3034 }
Lee Schermerhornbea904d2008-04-28 02:13:18 -07003035
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08003036 switch (mode) {
3037 case MPOL_DEFAULT:
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08003038 break;
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08003039 case MPOL_PREFERRED:
Lee Schermerhornfc36b8d2008-04-28 02:13:21 -07003040 if (flags & MPOL_F_LOCAL)
Hugh Dickinsf2a07f42013-01-02 02:01:33 -08003041 mode = MPOL_LOCAL;
Lee Schermerhorn53f25562008-04-28 02:13:20 -07003042 else
Lee Schermerhornfc36b8d2008-04-28 02:13:21 -07003043 node_set(pol->v.preferred_node, nodes);
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08003044 break;
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08003045 case MPOL_BIND:
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08003046 case MPOL_INTERLEAVE:
Hugh Dickinsf2a07f42013-01-02 02:01:33 -08003047 nodes = pol->v.nodes;
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08003048 break;
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08003049 default:
David Rientjes948927e2013-11-12 15:07:28 -08003050 WARN_ON_ONCE(1);
3051 snprintf(p, maxlen, "unknown");
3052 return;
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08003053 }
3054
David Rientjesb7a9f422013-11-21 14:32:06 -08003055 p += snprintf(p, maxlen, "%s", policy_modes[mode]);
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08003056
Lee Schermerhornfc36b8d2008-04-28 02:13:21 -07003057 if (flags & MPOL_MODE_FLAGS) {
David Rientjes948927e2013-11-12 15:07:28 -08003058 p += snprintf(p, buffer + maxlen - p, "=");
David Rientjesf5b087b2008-04-28 02:12:27 -07003059
Lee Schermerhorn22919902008-04-28 02:13:22 -07003060 /*
3061 * Currently, the only defined flags are mutually exclusive
3062 */
David Rientjesf5b087b2008-04-28 02:12:27 -07003063 if (flags & MPOL_F_STATIC_NODES)
Lee Schermerhorn22919902008-04-28 02:13:22 -07003064 p += snprintf(p, buffer + maxlen - p, "static");
3065 else if (flags & MPOL_F_RELATIVE_NODES)
3066 p += snprintf(p, buffer + maxlen - p, "relative");
David Rientjesf5b087b2008-04-28 02:12:27 -07003067 }
3068
Tejun Heo9e763e02015-02-13 14:38:02 -08003069 if (!nodes_empty(nodes))
3070 p += scnprintf(p, buffer + maxlen - p, ":%*pbl",
3071 nodemask_pr_args(&nodes));
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08003072}