/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 * Subject to the GNU Public License, version 2.
 *
 * NUMA policy allows the user to give hints in which node(s) memory should
 * be allocated.
 *
 * Support four policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave     Allocate memory interleaved over a set of nodes,
 *                with normal fallback if it fails.
 *                For VMA based allocations this interleaves based on the
 *                offset into the backing object or offset into the mapping
 *                for anonymous memory. For process policy a process counter
 *                is used.
 *
 * bind           Only allocate memory on a specific set of nodes,
 *                no fallback.
 *                FIXME: memory is allocated starting with the first node
 *                to the last. It would be better if bind would truly restrict
 *                the allocation to memory nodes instead
 *
 * preferred      Try a specific node first before normal fallback.
 *                As a special case NUMA_NO_NODE here means do the allocation
 *                on the local CPU. This is normally identical to default,
 *                but useful to set in a VMA when you have a non default
 *                process policy.
 *
 * default        Allocate on the local node first, or when on a VMA
 *                use the process policy. This is what Linux always did
 *                in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem, kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */

/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always grateful with that.
*/
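
/*
 * Editor's illustration (not part of this file): a minimal userspace
 * sketch of how the policies documented above are selected.  It assumes
 * a libnuma-style environment providing <numaif.h>; error handling is
 * elided and the nodemask constants are examples only.
 */
#if 0
#include <numaif.h>             /* set_mempolicy(), mbind(); link with -lnuma */
#include <sys/mman.h>

static void mempolicy_example(void)
{
        unsigned long interleave_mask = 0x3;    /* nodes 0 and 1 */
        unsigned long bind_mask = 0x1;          /* node 0 only */
        void *p;

        /* Process policy: interleave future allocations over nodes 0-1. */
        set_mempolicy(MPOL_INTERLEAVE, &interleave_mask,
                      8 * sizeof(interleave_mask));

        /* VMA policy: bind one anonymous mapping to node 0, no fallback. */
        p = mmap(NULL, 1 << 20, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        mbind(p, 1 << 20, MPOL_BIND, &bind_mask, 8 * sizeof(bind_mask), 0);
}
#endif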

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/task.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/printk.h>

#include <asm/tlbflush.h>
#include <linux/uaccess.h>

#include "internal.h"

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)    /* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)          /* Invert check for nodemask */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

/*
 * run-time system-wide default policy => local allocation
 */
static struct mempolicy default_policy = {
        .refcnt = ATOMIC_INIT(1), /* never free it */
        .mode = MPOL_PREFERRED,
        .flags = MPOL_F_LOCAL,
};

static struct mempolicy preferred_node_policy[MAX_NUMNODES];

struct mempolicy *get_task_policy(struct task_struct *p)
{
        struct mempolicy *pol = p->mempolicy;
        int node;

        if (pol)
                return pol;

        node = numa_node_id();
        if (node != NUMA_NO_NODE) {
                pol = &preferred_node_policy[node];
                /* preferred_node_policy is not initialised early in boot */
                if (pol->mode)
                        return pol;
        }

        return &default_policy;
}

static const struct mempolicy_operations {
        int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
        void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
} mpol_ops[MPOL_MAX];

static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
{
        return pol->flags & MPOL_MODE_FLAGS;
}

static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
                                   const nodemask_t *rel)
{
        nodemask_t tmp;
        nodes_fold(tmp, *orig, nodes_weight(*rel));
        nodes_onto(*ret, tmp, *rel);
}
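
/*
 * Worked example (editor's note): with a relative mask of {0,2} and an
 * allowed mask of {4,5,6}, nodes_fold() wraps the relative mask modulo
 * the 3 allowed bits (still {0,2}) and nodes_onto() maps those bit
 * positions onto the allowed set, yielding {4,6}.
 */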

static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
{
        if (nodes_empty(*nodes))
                return -EINVAL;
        pol->v.nodes = *nodes;
        return 0;
}

static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
{
        if (!nodes)
                pol->flags |= MPOL_F_LOCAL;     /* local allocation */
        else if (nodes_empty(*nodes))
                return -EINVAL;                 /* no allowed nodes */
        else
                pol->v.preferred_node = first_node(*nodes);
        return 0;
}

static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
{
        if (nodes_empty(*nodes))
                return -EINVAL;
        pol->v.nodes = *nodes;
        return 0;
}

/*
 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
 * any, for the new policy. mpol_new() has already validated the nodes
 * parameter with respect to the policy mode and flags. But, we need to
 * handle an empty nodemask with MPOL_PREFERRED here.
 *
 * Must be called holding task's alloc_lock to protect task's mems_allowed
 * and mempolicy. May also be called holding the mmap_semaphore for write.
 */
static int mpol_set_nodemask(struct mempolicy *pol,
                     const nodemask_t *nodes, struct nodemask_scratch *nsc)
{
        int ret;

        /* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
        if (pol == NULL)
                return 0;
        /* Check N_MEMORY */
        nodes_and(nsc->mask1,
                  cpuset_current_mems_allowed, node_states[N_MEMORY]);

        VM_BUG_ON(!nodes);
        if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
                nodes = NULL;   /* explicit local allocation */
        else {
                if (pol->flags & MPOL_F_RELATIVE_NODES)
                        mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
                else
                        nodes_and(nsc->mask2, *nodes, nsc->mask1);

                if (mpol_store_user_nodemask(pol))
                        pol->w.user_nodemask = *nodes;
                else
                        pol->w.cpuset_mems_allowed =
                                                cpuset_current_mems_allowed;
        }

        if (nodes)
                ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
        else
                ret = mpol_ops[pol->mode].create(pol, NULL);
        return ret;
}

/*
 * This function just creates a new policy, does some check and simple
 * initialization. You must invoke mpol_set_nodemask() to set nodes.
 */
static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
                                  nodemask_t *nodes)
{
        struct mempolicy *policy;

        pr_debug("setting mode %d flags %d nodes[0] %lx\n",
                 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);

        if (mode == MPOL_DEFAULT) {
                if (nodes && !nodes_empty(*nodes))
                        return ERR_PTR(-EINVAL);
                return NULL;
        }
        VM_BUG_ON(!nodes);

        /*
         * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
         * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
         * All other modes require a valid pointer to a non-empty nodemask.
         */
        if (mode == MPOL_PREFERRED) {
                if (nodes_empty(*nodes)) {
                        if (((flags & MPOL_F_STATIC_NODES) ||
                             (flags & MPOL_F_RELATIVE_NODES)))
                                return ERR_PTR(-EINVAL);
                }
        } else if (mode == MPOL_LOCAL) {
                if (!nodes_empty(*nodes) ||
                    (flags & MPOL_F_STATIC_NODES) ||
                    (flags & MPOL_F_RELATIVE_NODES))
                        return ERR_PTR(-EINVAL);
                mode = MPOL_PREFERRED;
        } else if (nodes_empty(*nodes))
                return ERR_PTR(-EINVAL);
        policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
        if (!policy)
                return ERR_PTR(-ENOMEM);
        atomic_set(&policy->refcnt, 1);
        policy->mode = mode;
        policy->flags = flags;

        return policy;
}

/* Slow path of a mpol destructor. */
void __mpol_put(struct mempolicy *p)
{
        if (!atomic_dec_and_test(&p->refcnt))
                return;
        kmem_cache_free(policy_cache, p);
}

static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
{
}

static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
{
        nodemask_t tmp;

        if (pol->flags & MPOL_F_STATIC_NODES)
                nodes_and(tmp, pol->w.user_nodemask, *nodes);
        else if (pol->flags & MPOL_F_RELATIVE_NODES)
                mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
        else {
                nodes_remap(tmp, pol->v.nodes, pol->w.cpuset_mems_allowed,
                            *nodes);
                pol->w.cpuset_mems_allowed = tmp;
        }

        if (nodes_empty(tmp))
                tmp = *nodes;

        pol->v.nodes = tmp;
}
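
/*
 * Worked example (editor's note): take an MPOL_INTERLEAVE policy created
 * over nodes {0,1} while mems_allowed was {0,1}, then rebound to {1,2}:
 *  - MPOL_F_STATIC_NODES:   the stored user mask is intersected with the
 *    new mask, giving {1}; the user mask itself is never remapped.
 *  - MPOL_F_RELATIVE_NODES: the user mask is re-folded onto {1,2},
 *    giving {1,2}.
 *  - neither flag:          nodes_remap() shifts {0,1} onto the new
 *    allowed set, also giving {1,2}.
 */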

static void mpol_rebind_preferred(struct mempolicy *pol,
                                                const nodemask_t *nodes)
{
        nodemask_t tmp;

        if (pol->flags & MPOL_F_STATIC_NODES) {
                int node = first_node(pol->w.user_nodemask);

                if (node_isset(node, *nodes)) {
                        pol->v.preferred_node = node;
                        pol->flags &= ~MPOL_F_LOCAL;
                } else
                        pol->flags |= MPOL_F_LOCAL;
        } else if (pol->flags & MPOL_F_RELATIVE_NODES) {
                mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
                pol->v.preferred_node = first_node(tmp);
        } else if (!(pol->flags & MPOL_F_LOCAL)) {
                pol->v.preferred_node = node_remap(pol->v.preferred_node,
                                                   pol->w.cpuset_mems_allowed,
                                                   *nodes);
                pol->w.cpuset_mems_allowed = *nodes;
        }
}

/*
 * mpol_rebind_policy - Migrate a policy to a different set of nodes
 *
 * Per-vma policies are protected by mmap_sem. Allocations using per-task
 * policies are protected by task->mems_allowed_seq to prevent a premature
 * OOM/allocation failure due to parallel nodemask modification.
 */
static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
{
        if (!pol)
                return;
        if (!mpol_store_user_nodemask(pol) &&
            nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
                return;

        mpol_ops[pol->mode].rebind(pol, newmask);
}

/*
 * Wrapper for mpol_rebind_policy() that just requires task
 * pointer, and updates task mempolicy.
 *
 * Called with task's alloc_lock held.
 */

void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
{
        mpol_rebind_policy(tsk->mempolicy, new);
}

/*
 * Rebind each vma in mm to new nodemask.
 *
 * Call holding a reference to mm.  Takes mm->mmap_sem during call.
 */

void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
        struct vm_area_struct *vma;

        down_write(&mm->mmap_sem);
        for (vma = mm->mmap; vma; vma = vma->vm_next)
                mpol_rebind_policy(vma->vm_policy, new);
        up_write(&mm->mmap_sem);
}

static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
        [MPOL_DEFAULT] = {
                .rebind = mpol_rebind_default,
        },
        [MPOL_INTERLEAVE] = {
                .create = mpol_new_interleave,
                .rebind = mpol_rebind_nodemask,
        },
        [MPOL_PREFERRED] = {
                .create = mpol_new_preferred,
                .rebind = mpol_rebind_preferred,
        },
        [MPOL_BIND] = {
                .create = mpol_new_bind,
                .rebind = mpol_rebind_nodemask,
        },
};

static void migrate_page_add(struct page *page, struct list_head *pagelist,
                                unsigned long flags);

struct queue_pages {
        struct list_head *pagelist;
        unsigned long flags;
        nodemask_t *nmask;
        struct vm_area_struct *prev;
};

/*
 * Scan through pages checking if pages follow certain conditions,
 * and move them to the pagelist if they do.
 */
static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
                        unsigned long end, struct mm_walk *walk)
{
        struct vm_area_struct *vma = walk->vma;
        struct page *page;
        struct queue_pages *qp = walk->private;
        unsigned long flags = qp->flags;
        int nid, ret;
        pte_t *pte;
        spinlock_t *ptl;

        if (pmd_trans_huge(*pmd)) {
                ptl = pmd_lock(walk->mm, pmd);
                if (pmd_trans_huge(*pmd)) {
                        page = pmd_page(*pmd);
                        if (is_huge_zero_page(page)) {
                                spin_unlock(ptl);
                                __split_huge_pmd(vma, pmd, addr, false, NULL);
                        } else {
                                get_page(page);
                                spin_unlock(ptl);
                                lock_page(page);
                                ret = split_huge_page(page);
                                unlock_page(page);
                                put_page(page);
                                if (ret)
                                        return 0;
                        }
                } else {
                        spin_unlock(ptl);
                }
        }

        if (pmd_trans_unstable(pmd))
                return 0;
retry:
        pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
        for (; addr != end; pte++, addr += PAGE_SIZE) {
                if (!pte_present(*pte))
                        continue;
                page = vm_normal_page(vma, addr, *pte);
                if (!page)
                        continue;
                /*
                 * vm_normal_page() filters out zero pages, but there might
                 * still be PageReserved pages to skip, perhaps in a VDSO.
                 */
                if (PageReserved(page))
                        continue;
                nid = page_to_nid(page);
                if (node_isset(nid, *qp->nmask) == !!(flags & MPOL_MF_INVERT))
                        continue;
                if (PageTransCompound(page)) {
                        get_page(page);
                        pte_unmap_unlock(pte, ptl);
                        lock_page(page);
                        ret = split_huge_page(page);
                        unlock_page(page);
                        put_page(page);
                        /* Failed to split -- skip. */
                        if (ret) {
                                pte = pte_offset_map_lock(walk->mm, pmd,
                                                addr, &ptl);
                                continue;
                        }
                        goto retry;
                }

                migrate_page_add(page, qp->pagelist, flags);
        }
        pte_unmap_unlock(pte - 1, ptl);
        cond_resched();
        return 0;
}

static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
                               unsigned long addr, unsigned long end,
                               struct mm_walk *walk)
{
#ifdef CONFIG_HUGETLB_PAGE
        struct queue_pages *qp = walk->private;
        unsigned long flags = qp->flags;
        int nid;
        struct page *page;
        spinlock_t *ptl;
        pte_t entry;

        ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
        entry = huge_ptep_get(pte);
        if (!pte_present(entry))
                goto unlock;
        page = pte_page(entry);
        nid = page_to_nid(page);
        if (node_isset(nid, *qp->nmask) == !!(flags & MPOL_MF_INVERT))
                goto unlock;
        /* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
        if (flags & (MPOL_MF_MOVE_ALL) ||
            (flags & MPOL_MF_MOVE && page_mapcount(page) == 1))
                isolate_huge_page(page, qp->pagelist);
unlock:
        spin_unlock(ptl);
#else
        BUG();
#endif
        return 0;
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * This is used to mark a range of virtual addresses to be inaccessible.
 * These are later cleared by a NUMA hinting fault. Depending on these
 * faults, pages may be migrated for better NUMA placement.
 *
 * This is assuming that NUMA faults are handled using PROT_NONE. If
 * an architecture makes a different choice, it will need further
 * changes to the core.
 */
unsigned long change_prot_numa(struct vm_area_struct *vma,
                        unsigned long addr, unsigned long end)
{
        int nr_updated;

        nr_updated = change_protection(vma, addr, end, PAGE_NONE, 0, 1);
        if (nr_updated)
                count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);

        return nr_updated;
}
#else
static unsigned long change_prot_numa(struct vm_area_struct *vma,
                        unsigned long addr, unsigned long end)
{
        return 0;
}
#endif /* CONFIG_NUMA_BALANCING */

static int queue_pages_test_walk(unsigned long start, unsigned long end,
                                struct mm_walk *walk)
{
        struct vm_area_struct *vma = walk->vma;
        struct queue_pages *qp = walk->private;
        unsigned long endvma = vma->vm_end;
        unsigned long flags = qp->flags;

        if (!vma_migratable(vma))
                return 1;

        if (endvma > end)
                endvma = end;
        if (vma->vm_start > start)
                start = vma->vm_start;

        if (!(flags & MPOL_MF_DISCONTIG_OK)) {
                if (!vma->vm_next && vma->vm_end < end)
                        return -EFAULT;
                if (qp->prev && qp->prev->vm_end < vma->vm_start)
                        return -EFAULT;
        }

        qp->prev = vma;

        if (flags & MPOL_MF_LAZY) {
                /* Similar to task_numa_work, skip inaccessible VMAs */
                if (!is_vm_hugetlb_page(vma) &&
                        (vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)) &&
                        !(vma->vm_flags & VM_MIXEDMAP))
                        change_prot_numa(vma, start, endvma);
                return 1;
        }

        /* queue pages from current vma */
        if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
                return 0;
        return 1;
}

/*
 * Walk through page tables and collect pages to be migrated.
 *
 * If pages found in a given range are on a set of nodes (determined by
 * @nodes and @flags), they are isolated and queued to the pagelist which
 * is passed via @private.
 */
static int
queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
                nodemask_t *nodes, unsigned long flags,
                struct list_head *pagelist)
{
        struct queue_pages qp = {
                .pagelist = pagelist,
                .flags = flags,
                .nmask = nodes,
                .prev = NULL,
        };
        struct mm_walk queue_pages_walk = {
                .hugetlb_entry = queue_pages_hugetlb,
                .pmd_entry = queue_pages_pte_range,
                .test_walk = queue_pages_test_walk,
                .mm = mm,
                .private = &qp,
        };

        return walk_page_range(start, end, &queue_pages_walk);
}

/*
 * Apply policy to a single VMA
 * This must be called with the mmap_sem held for writing.
 */
static int vma_replace_policy(struct vm_area_struct *vma,
                                                struct mempolicy *pol)
{
        int err;
        struct mempolicy *old;
        struct mempolicy *new;

        pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
                 vma->vm_start, vma->vm_end, vma->vm_pgoff,
                 vma->vm_ops, vma->vm_file,
                 vma->vm_ops ? vma->vm_ops->set_policy : NULL);

        new = mpol_dup(pol);
        if (IS_ERR(new))
                return PTR_ERR(new);

        if (vma->vm_ops && vma->vm_ops->set_policy) {
                err = vma->vm_ops->set_policy(vma, new);
                if (err)
                        goto err_out;
        }

        old = vma->vm_policy;
        vma->vm_policy = new; /* protected by mmap_sem */
        mpol_put(old);

        return 0;
 err_out:
        mpol_put(new);
        return err;
}

/* Step 2: apply policy to a range and do splits. */
static int mbind_range(struct mm_struct *mm, unsigned long start,
                       unsigned long end, struct mempolicy *new_pol)
{
        struct vm_area_struct *next;
        struct vm_area_struct *prev;
        struct vm_area_struct *vma;
        int err = 0;
        pgoff_t pgoff;
        unsigned long vmstart;
        unsigned long vmend;

        vma = find_vma(mm, start);
        if (!vma || vma->vm_start > start)
                return -EFAULT;

        prev = vma->vm_prev;
        if (start > vma->vm_start)
                prev = vma;

        for (; vma && vma->vm_start < end; prev = vma, vma = next) {
                next = vma->vm_next;
                vmstart = max(start, vma->vm_start);
                vmend   = min(end, vma->vm_end);

                if (mpol_equal(vma_policy(vma), new_pol))
                        continue;

                pgoff = vma->vm_pgoff +
                        ((vmstart - vma->vm_start) >> PAGE_SHIFT);
                prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
                                 vma->anon_vma, vma->vm_file, pgoff,
                                 new_pol, vma->vm_userfaultfd_ctx);
                if (prev) {
                        vma = prev;
                        next = vma->vm_next;
                        if (mpol_equal(vma_policy(vma), new_pol))
                                continue;
                        /* vma_merge() joined vma && vma->next, case 8 */
                        goto replace;
                }
                if (vma->vm_start != vmstart) {
                        err = split_vma(vma->vm_mm, vma, vmstart, 1);
                        if (err)
                                goto out;
                }
                if (vma->vm_end != vmend) {
                        err = split_vma(vma->vm_mm, vma, vmend, 0);
                        if (err)
                                goto out;
                }
 replace:
                err = vma_replace_policy(vma, new_pol);
                if (err)
                        goto out;
        }

 out:
        return err;
}

/* Set the process memory policy */
static long do_set_mempolicy(unsigned short mode, unsigned short flags,
                             nodemask_t *nodes)
{
        struct mempolicy *new, *old;
        NODEMASK_SCRATCH(scratch);
        int ret;

        if (!scratch)
                return -ENOMEM;

        new = mpol_new(mode, flags, nodes);
        if (IS_ERR(new)) {
                ret = PTR_ERR(new);
                goto out;
        }

        task_lock(current);
        ret = mpol_set_nodemask(new, nodes, scratch);
        if (ret) {
                task_unlock(current);
                mpol_put(new);
                goto out;
        }
        old = current->mempolicy;
        current->mempolicy = new;
        if (new && new->mode == MPOL_INTERLEAVE)
                current->il_prev = MAX_NUMNODES-1;
        task_unlock(current);
        mpol_put(old);
        ret = 0;
out:
        NODEMASK_SCRATCH_FREE(scratch);
        return ret;
}

/*
 * Return nodemask for policy for get_mempolicy() query
 *
 * Called with task's alloc_lock held
 */
static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
{
        nodes_clear(*nodes);
        if (p == &default_policy)
                return;

        switch (p->mode) {
        case MPOL_BIND:
                /* Fall through */
        case MPOL_INTERLEAVE:
                *nodes = p->v.nodes;
                break;
        case MPOL_PREFERRED:
                if (!(p->flags & MPOL_F_LOCAL))
                        node_set(p->v.preferred_node, *nodes);
                /* else return empty node mask for local allocation */
                break;
        default:
                BUG();
        }
}

static int lookup_node(unsigned long addr)
{
        struct page *p;
        int err;

        err = get_user_pages(addr & PAGE_MASK, 1, 0, &p, NULL);
        if (err >= 0) {
                err = page_to_nid(p);
                put_page(p);
        }
        return err;
}

/* Retrieve NUMA policy */
static long do_get_mempolicy(int *policy, nodemask_t *nmask,
                             unsigned long addr, unsigned long flags)
{
        int err;
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma = NULL;
        struct mempolicy *pol = current->mempolicy;

        if (flags &
                ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
                return -EINVAL;

        if (flags & MPOL_F_MEMS_ALLOWED) {
                if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
                        return -EINVAL;
                *policy = 0;    /* just so it's initialized */
                task_lock(current);
                *nmask = cpuset_current_mems_allowed;
                task_unlock(current);
                return 0;
        }

        if (flags & MPOL_F_ADDR) {
                /*
                 * Do NOT fall back to task policy if the
                 * vma/shared policy at addr is NULL.  We
                 * want to return MPOL_DEFAULT in this case.
                 */
                down_read(&mm->mmap_sem);
                vma = find_vma_intersection(mm, addr, addr+1);
                if (!vma) {
                        up_read(&mm->mmap_sem);
                        return -EFAULT;
                }
                if (vma->vm_ops && vma->vm_ops->get_policy)
                        pol = vma->vm_ops->get_policy(vma, addr);
                else
                        pol = vma->vm_policy;
        } else if (addr)
                return -EINVAL;

        if (!pol)
                pol = &default_policy;  /* indicates default behavior */

        if (flags & MPOL_F_NODE) {
                if (flags & MPOL_F_ADDR) {
                        err = lookup_node(addr);
                        if (err < 0)
                                goto out;
                        *policy = err;
                } else if (pol == current->mempolicy &&
                                pol->mode == MPOL_INTERLEAVE) {
                        *policy = next_node_in(current->il_prev, pol->v.nodes);
                } else {
                        err = -EINVAL;
                        goto out;
                }
        } else {
                *policy = pol == &default_policy ? MPOL_DEFAULT :
                                                pol->mode;
                /*
                 * Internal mempolicy flags must be masked off before exposing
                 * the policy to userspace.
                 */
                *policy |= (pol->flags & MPOL_MODE_FLAGS);
        }

        if (vma) {
                up_read(&current->mm->mmap_sem);
                vma = NULL;
        }

        err = 0;
        if (nmask) {
                if (mpol_store_user_nodemask(pol)) {
                        *nmask = pol->w.user_nodemask;
                } else {
                        task_lock(current);
                        get_policy_nodemask(pol, nmask);
                        task_unlock(current);
                }
        }

 out:
        mpol_cond_put(pol);
        if (vma)
                up_read(&current->mm->mmap_sem);
        return err;
}

#ifdef CONFIG_MIGRATION
/*
 * page migration
 */
static void migrate_page_add(struct page *page, struct list_head *pagelist,
                                unsigned long flags)
{
        /*
         * Avoid migrating a page that is shared with others.
         */
        if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
                if (!isolate_lru_page(page)) {
                        list_add_tail(&page->lru, pagelist);
                        inc_node_page_state(page, NR_ISOLATED_ANON +
                                            page_is_file_cache(page));
                }
        }
}

static struct page *new_node_page(struct page *page, unsigned long node, int **x)
{
        if (PageHuge(page))
                return alloc_huge_page_node(page_hstate(compound_head(page)),
                                        node);
        else
                return __alloc_pages_node(node, GFP_HIGHUSER_MOVABLE |
                                                    __GFP_THISNODE, 0);
}

/*
 * Migrate pages from one node to a target node.
 * Returns error or the number of pages not migrated.
 */
static int migrate_to_node(struct mm_struct *mm, int source, int dest,
                           int flags)
{
        nodemask_t nmask;
        LIST_HEAD(pagelist);
        int err = 0;

        nodes_clear(nmask);
        node_set(source, nmask);

        /*
         * This does not "check" the range but isolates all pages that
         * need migration.  Between passing in the full user address
         * space range and MPOL_MF_DISCONTIG_OK, this call can not fail.
         */
        VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
        queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
                        flags | MPOL_MF_DISCONTIG_OK, &pagelist);

        if (!list_empty(&pagelist)) {
                err = migrate_pages(&pagelist, new_node_page, NULL, dest,
                                        MIGRATE_SYNC, MR_SYSCALL);
                if (err)
                        putback_movable_pages(&pagelist);
        }

        return err;
}

/*
 * Move pages between the two nodesets so as to preserve the physical
 * layout as much as possible.
 *
 * Returns the number of pages that could not be moved.
 */
int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
                     const nodemask_t *to, int flags)
{
        int busy = 0;
        int err;
        nodemask_t tmp;

        err = migrate_prep();
        if (err)
                return err;

        down_read(&mm->mmap_sem);

        /*
         * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
         * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
         * bit in 'tmp', and return that <source, dest> pair for migration.
         * The pair of nodemasks 'to' and 'from' define the map.
         *
         * If no pair of bits is found that way, fallback to picking some
         * pair of 'source' and 'dest' bits that are not the same.  If the
         * 'source' and 'dest' bits are the same, this represents a node
         * that will be migrating to itself, so no pages need move.
         *
         * If no bits are left in 'tmp', or if all remaining bits left
         * in 'tmp' correspond to the same bit in 'to', return false
         * (nothing left to migrate).
         *
         * This lets us pick a pair of nodes to migrate between, such that
         * if possible the dest node is not already occupied by some other
         * source node, minimizing the risk of overloading the memory on a
         * node that would happen if we migrated incoming memory to a node
         * before migrating outgoing memory source that same node.
         *
         * A single scan of tmp is sufficient.  As we go, we remember the
         * most recent <s, d> pair that moved (s != d).  If we find a pair
         * that not only moved, but what's better, moved to an empty slot
         * (d is not set in tmp), then we break out then, with that pair.
         * Otherwise when we finish scanning from_tmp, we at least have the
         * most recent <s, d> pair that moved.  If we get all the way through
         * the scan of tmp without finding any node that moved, much less
         * moved to an empty node, then there is nothing left worth migrating.
         */

        tmp = *from;
        while (!nodes_empty(tmp)) {
                int s, d;
                int source = NUMA_NO_NODE;
                int dest = 0;

                for_each_node_mask(s, tmp) {

                        /*
                         * do_migrate_pages() tries to maintain the relative
                         * node relationship of the pages established between
                         * threads and memory areas.
                         *
                         * However if the number of source nodes is not equal to
                         * the number of destination nodes we can not preserve
                         * this node relative relationship.  In that case, skip
                         * copying memory from a node that is in the destination
                         * mask.
                         *
                         * Example: [2,3,4] -> [3,4,5] moves everything.
                         *          [0-7] -> [3,4,5] moves only 0,1,2,6,7.
                         */

                        if ((nodes_weight(*from) != nodes_weight(*to)) &&
                                                (node_isset(s, *to)))
                                continue;

                        d = node_remap(s, *from, *to);
                        if (s == d)
                                continue;

                        source = s;     /* Node moved. Memorize */
                        dest = d;

                        /* dest not in remaining from nodes? */
                        if (!node_isset(dest, tmp))
                                break;
                }
                if (source == NUMA_NO_NODE)
                        break;

                node_clear(source, tmp);
                err = migrate_to_node(mm, source, dest, flags);
                if (err > 0)
                        busy += err;
                if (err < 0)
                        break;
        }
        up_read(&mm->mmap_sem);
        if (err < 0)
                return err;
        return busy;
}
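
/*
 * Worked example (editor's note): do_migrate_pages(mm, from={0,1},
 * to={1,2}, ...) first remembers the pair 0->1 but keeps scanning,
 * because node 1 is still a pending source; it then finds 1->2, whose
 * destination is an empty slot, and migrates node 1 to node 2 before
 * migrating node 0 to node 1 on the next pass.
 */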
1053
Lee Schermerhorn3ad33b242007-11-14 16:59:10 -08001054/*
1055 * Allocate a new page for page migration based on vma policy.
Hugh Dickinsd05f0cdcb2014-06-23 13:22:07 -07001056 * Start by assuming the page is mapped by the same vma as contains @start.
Lee Schermerhorn3ad33b242007-11-14 16:59:10 -08001057 * Search forward from there, if not. N.B., this assumes that the
1058 * list of pages handed to migrate_pages()--which is how we get here--
1059 * is in virtual address order.
1060 */
Hugh Dickinsd05f0cdcb2014-06-23 13:22:07 -07001061static struct page *new_page(struct page *page, unsigned long start, int **x)
Christoph Lameter95a402c2006-06-23 02:03:53 -07001062{
Hugh Dickinsd05f0cdcb2014-06-23 13:22:07 -07001063 struct vm_area_struct *vma;
Lee Schermerhorn3ad33b242007-11-14 16:59:10 -08001064 unsigned long uninitialized_var(address);
Christoph Lameter95a402c2006-06-23 02:03:53 -07001065
Hugh Dickinsd05f0cdcb2014-06-23 13:22:07 -07001066 vma = find_vma(current->mm, start);
Lee Schermerhorn3ad33b242007-11-14 16:59:10 -08001067 while (vma) {
1068 address = page_address_in_vma(page, vma);
1069 if (address != -EFAULT)
1070 break;
1071 vma = vma->vm_next;
1072 }
1073
Wanpeng Li11c731e2013-12-18 17:08:56 -08001074 if (PageHuge(page)) {
Michal Hockocc817172014-01-23 15:53:15 -08001075 BUG_ON(!vma);
1076 return alloc_huge_page_noerr(vma, address, 1);
Wanpeng Li11c731e2013-12-18 17:08:56 -08001077 }
1078 /*
1079 * if !vma, alloc_page_vma() will use task or system default policy
1080 */
Lee Schermerhorn3ad33b242007-11-14 16:59:10 -08001081 return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
Christoph Lameter95a402c2006-06-23 02:03:53 -07001082}
Christoph Lameterb20a3502006-03-22 00:09:12 -08001083#else
1084
1085static void migrate_page_add(struct page *page, struct list_head *pagelist,
1086 unsigned long flags)
1087{
1088}
1089
Andrew Morton0ce72d42012-05-29 15:06:24 -07001090int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1091 const nodemask_t *to, int flags)
Christoph Lameterb20a3502006-03-22 00:09:12 -08001092{
1093 return -ENOSYS;
1094}
Christoph Lameter95a402c2006-06-23 02:03:53 -07001095
Hugh Dickinsd05f0cdcb2014-06-23 13:22:07 -07001096static struct page *new_page(struct page *page, unsigned long start, int **x)
Christoph Lameter95a402c2006-06-23 02:03:53 -07001097{
1098 return NULL;
1099}
Christoph Lameterb20a3502006-03-22 00:09:12 -08001100#endif
1101
Adrian Bunkdbcb0f12007-10-16 01:26:26 -07001102static long do_mbind(unsigned long start, unsigned long len,
David Rientjes028fec42008-04-28 02:12:25 -07001103 unsigned short mode, unsigned short mode_flags,
1104 nodemask_t *nmask, unsigned long flags)
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001105{
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001106 struct mm_struct *mm = current->mm;
1107 struct mempolicy *new;
1108 unsigned long end;
1109 int err;
1110 LIST_HEAD(pagelist);
1111
Lee Schermerhornb24f53a2012-10-25 14:16:32 +02001112 if (flags & ~(unsigned long)MPOL_MF_VALID)
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001113 return -EINVAL;
Christoph Lameter74c00242006-03-14 19:50:21 -08001114 if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001115 return -EPERM;
1116
1117 if (start & ~PAGE_MASK)
1118 return -EINVAL;
1119
1120 if (mode == MPOL_DEFAULT)
1121 flags &= ~MPOL_MF_STRICT;
1122
1123 len = (len + PAGE_SIZE - 1) & PAGE_MASK;
1124 end = start + len;
1125
1126 if (end < start)
1127 return -EINVAL;
1128 if (end == start)
1129 return 0;
1130
David Rientjes028fec42008-04-28 02:12:25 -07001131 new = mpol_new(mode, mode_flags, nmask);
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001132 if (IS_ERR(new))
1133 return PTR_ERR(new);
1134
Lee Schermerhornb24f53a2012-10-25 14:16:32 +02001135 if (flags & MPOL_MF_LAZY)
1136 new->flags |= MPOL_F_MOF;
1137
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001138 /*
1139 * If we are using the default policy then operation
1140 * on discontinuous address spaces is okay after all
1141 */
1142 if (!new)
1143 flags |= MPOL_MF_DISCONTIG_OK;
1144
David Rientjes028fec42008-04-28 02:12:25 -07001145 pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
1146 start, start + len, mode, mode_flags,
David Rientjes00ef2d22013-02-22 16:35:36 -08001147 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001148
Christoph Lameter0aedadf2008-11-06 12:53:30 -08001149 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
1150
1151 err = migrate_prep();
1152 if (err)
KOSAKI Motohirob05ca732009-10-26 16:49:59 -07001153 goto mpol_out;
Christoph Lameter0aedadf2008-11-06 12:53:30 -08001154 }
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -07001155 {
1156 NODEMASK_SCRATCH(scratch);
1157 if (scratch) {
1158 down_write(&mm->mmap_sem);
1159 task_lock(current);
1160 err = mpol_set_nodemask(new, nmask, scratch);
1161 task_unlock(current);
1162 if (err)
1163 up_write(&mm->mmap_sem);
1164 } else
1165 err = -ENOMEM;
1166 NODEMASK_SCRATCH_FREE(scratch);
1167 }
KOSAKI Motohirob05ca732009-10-26 16:49:59 -07001168 if (err)
1169 goto mpol_out;
1170
Hugh Dickinsd05f0cdcb2014-06-23 13:22:07 -07001171 err = queue_pages_range(mm, start, end, nmask,
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001172 flags | MPOL_MF_INVERT, &pagelist);
Hugh Dickinsd05f0cdcb2014-06-23 13:22:07 -07001173 if (!err)
KOSAKI Motohiro9d8cebd2010-03-05 13:41:57 -08001174 err = mbind_range(mm, start, end, new);
Christoph Lameter7e2ab152006-02-01 03:05:40 -08001175
Lee Schermerhornb24f53a2012-10-25 14:16:32 +02001176 if (!err) {
1177 int nr_failed = 0;
1178
Minchan Kimcf608ac2010-10-26 14:21:29 -07001179 if (!list_empty(&pagelist)) {
Lee Schermerhornb24f53a2012-10-25 14:16:32 +02001180 WARN_ON_ONCE(flags & MPOL_MF_LAZY);
Hugh Dickinsd05f0cdcb2014-06-23 13:22:07 -07001181 nr_failed = migrate_pages(&pagelist, new_page, NULL,
1182 start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
Minchan Kimcf608ac2010-10-26 14:21:29 -07001183 if (nr_failed)
Naoya Horiguchi74060e42013-09-11 14:22:06 -07001184 putback_movable_pages(&pagelist);
Minchan Kimcf608ac2010-10-26 14:21:29 -07001185 }
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001186
Lee Schermerhornb24f53a2012-10-25 14:16:32 +02001187 if (nr_failed && (flags & MPOL_MF_STRICT))
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001188 err = -EIO;
KOSAKI Motohiroab8a3e12009-10-26 16:49:58 -07001189 } else
Joonsoo Kimb0e5fd72013-12-18 17:08:51 -08001190 putback_movable_pages(&pagelist);
Christoph Lameterb20a3502006-03-22 00:09:12 -08001191
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001192 up_write(&mm->mmap_sem);
KOSAKI Motohirob05ca732009-10-26 16:49:59 -07001193 mpol_out:
Lee Schermerhornf0be3d32008-04-28 02:13:08 -07001194 mpol_put(new);
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001195 return err;
1196}
1197
Christoph Lameter39743882006-01-08 01:00:51 -08001198/*
Christoph Lameter8bccd852005-10-29 18:16:59 -07001199 * User space interface with variable sized bitmaps for nodelists.
1200 */
1201
1202/* Copy a node mask from user space. */
Christoph Lameter39743882006-01-08 01:00:51 -08001203static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
Christoph Lameter8bccd852005-10-29 18:16:59 -07001204 unsigned long maxnode)
1205{
1206 unsigned long k;
1207 unsigned long nlongs;
1208 unsigned long endmask;
1209
1210 --maxnode;
1211 nodes_clear(*nodes);
1212 if (maxnode == 0 || !nmask)
1213 return 0;
Andi Kleena9c930b2006-02-20 18:27:59 -08001214 if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
Chris Wright636f13c2006-02-17 13:59:36 -08001215 return -EINVAL;
Christoph Lameter8bccd852005-10-29 18:16:59 -07001216
1217 nlongs = BITS_TO_LONGS(maxnode);
1218 if ((maxnode % BITS_PER_LONG) == 0)
1219 endmask = ~0UL;
1220 else
1221 endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
1222
1223	/* When the user specifies more nodes than supported, just check
1224	   that the unsupported part is all zero. */
1225 if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
1226 if (nlongs > PAGE_SIZE/sizeof(long))
1227 return -EINVAL;
1228 for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
1229 unsigned long t;
1230 if (get_user(t, nmask + k))
1231 return -EFAULT;
1232 if (k == nlongs - 1) {
1233 if (t & endmask)
1234 return -EINVAL;
1235 } else if (t)
1236 return -EINVAL;
1237 }
1238 nlongs = BITS_TO_LONGS(MAX_NUMNODES);
1239 endmask = ~0UL;
1240 }
1241
1242 if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
1243 return -EFAULT;
1244 nodes_addr(*nodes)[nlongs-1] &= endmask;
1245 return 0;
1246}
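/*
 * Worked example of the masking above (illustrative, not compiled):
 * a caller passing maxnode == 19 describes bits 0..17, because the
 * syscall convention counts one past the highest bit:
 *
 *	--maxnode;				// 19 -> 18 significant bits
 *	nlongs  = BITS_TO_LONGS(18);		// == 1
 *	endmask = (1UL << (18 % BITS_PER_LONG)) - 1;	// 0x3ffff on 64-bit
 *
 * After copy_from_user(), bits 18..63 of the last long are cleared by
 * the final "&= endmask", so stray high bits never reach the nodemask.
 */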
1247
1248/* Copy a kernel node mask to user space */
1249static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
1250 nodemask_t *nodes)
1251{
1252 unsigned long copy = ALIGN(maxnode-1, 64) / 8;
1253 const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
1254
1255 if (copy > nbytes) {
1256 if (copy > PAGE_SIZE)
1257 return -EINVAL;
1258 if (clear_user((char __user *)mask + nbytes, copy - nbytes))
1259 return -EFAULT;
1260 copy = nbytes;
1261 }
1262 return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
1263}
1264
Heiko Carstens938bb9f2009-01-14 14:14:30 +01001265SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
Rasmus Villemoesf7f28ca2014-06-04 16:07:57 -07001266 unsigned long, mode, const unsigned long __user *, nmask,
Heiko Carstens938bb9f2009-01-14 14:14:30 +01001267 unsigned long, maxnode, unsigned, flags)
Christoph Lameter8bccd852005-10-29 18:16:59 -07001268{
1269 nodemask_t nodes;
1270 int err;
David Rientjes028fec42008-04-28 02:12:25 -07001271 unsigned short mode_flags;
Christoph Lameter8bccd852005-10-29 18:16:59 -07001272
David Rientjes028fec42008-04-28 02:12:25 -07001273 mode_flags = mode & MPOL_MODE_FLAGS;
1274 mode &= ~MPOL_MODE_FLAGS;
David Rientjesa3b51e02008-04-28 02:12:23 -07001275 if (mode >= MPOL_MAX)
1276 return -EINVAL;
David Rientjes4c50bc02008-04-28 02:12:30 -07001277 if ((mode_flags & MPOL_F_STATIC_NODES) &&
1278 (mode_flags & MPOL_F_RELATIVE_NODES))
1279 return -EINVAL;
Christoph Lameter8bccd852005-10-29 18:16:59 -07001280 err = get_nodes(&nodes, nmask, maxnode);
1281 if (err)
1282 return err;
David Rientjes028fec42008-04-28 02:12:25 -07001283 return do_mbind(start, len, mode, mode_flags, &nodes, flags);
Christoph Lameter8bccd852005-10-29 18:16:59 -07001284}
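/*
 * A minimal userspace sketch of this syscall (illustrative only, via
 * libnuma's <numaif.h> wrapper; error handling trimmed and the node
 * numbers assumed to exist on the running machine):
 *
 *	#include <numaif.h>
 *	#include <sys/mman.h>
 *	#include <stdio.h>
 *
 *	unsigned long nodes = 0x3;		// nodes 0 and 1
 *	size_t len = 1 << 20;
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	if (mbind(p, len, MPOL_BIND, &nodes, sizeof(nodes) * 8,
 *		  MPOL_MF_STRICT))
 *		perror("mbind");
 */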
1285
1286/* Set the process memory policy */
Rasmus Villemoes23c89022014-06-04 16:07:58 -07001287SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
Heiko Carstens938bb9f2009-01-14 14:14:30 +01001288 unsigned long, maxnode)
Christoph Lameter8bccd852005-10-29 18:16:59 -07001289{
1290 int err;
1291 nodemask_t nodes;
David Rientjes028fec42008-04-28 02:12:25 -07001292 unsigned short flags;
Christoph Lameter8bccd852005-10-29 18:16:59 -07001293
David Rientjes028fec42008-04-28 02:12:25 -07001294 flags = mode & MPOL_MODE_FLAGS;
1295 mode &= ~MPOL_MODE_FLAGS;
1296 if ((unsigned int)mode >= MPOL_MAX)
Christoph Lameter8bccd852005-10-29 18:16:59 -07001297 return -EINVAL;
David Rientjes4c50bc02008-04-28 02:12:30 -07001298 if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
1299 return -EINVAL;
Christoph Lameter8bccd852005-10-29 18:16:59 -07001300 err = get_nodes(&nodes, nmask, maxnode);
1301 if (err)
1302 return err;
David Rientjes028fec42008-04-28 02:12:25 -07001303 return do_set_mempolicy(mode, flags, &nodes);
Christoph Lameter8bccd852005-10-29 18:16:59 -07001304}
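/*
 * Userspace sketch (illustrative; assumes libnuma's <numaif.h> and
 * that nodes 0 and 2 are online): interleave all future allocations
 * of the calling task over nodes 0 and 2.
 *
 *	unsigned long nodes = (1UL << 0) | (1UL << 2);
 *	if (set_mempolicy(MPOL_INTERLEAVE, &nodes, sizeof(nodes) * 8))
 *		perror("set_mempolicy");
 */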
1305
Heiko Carstens938bb9f2009-01-14 14:14:30 +01001306SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1307 const unsigned long __user *, old_nodes,
1308 const unsigned long __user *, new_nodes)
Christoph Lameter39743882006-01-08 01:00:51 -08001309{
David Howellsc69e8d92008-11-14 10:39:19 +11001310 const struct cred *cred = current_cred(), *tcred;
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001311 struct mm_struct *mm = NULL;
Christoph Lameter39743882006-01-08 01:00:51 -08001312 struct task_struct *task;
Christoph Lameter39743882006-01-08 01:00:51 -08001313 nodemask_t task_nodes;
1314 int err;
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001315 nodemask_t *old;
1316 nodemask_t *new;
1317 NODEMASK_SCRATCH(scratch);
Christoph Lameter39743882006-01-08 01:00:51 -08001318
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001319 if (!scratch)
1320 return -ENOMEM;
Christoph Lameter39743882006-01-08 01:00:51 -08001321
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001322 old = &scratch->mask1;
1323 new = &scratch->mask2;
1324
1325 err = get_nodes(old, old_nodes, maxnode);
Christoph Lameter39743882006-01-08 01:00:51 -08001326 if (err)
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001327 goto out;
1328
1329 err = get_nodes(new, new_nodes, maxnode);
1330 if (err)
1331 goto out;
Christoph Lameter39743882006-01-08 01:00:51 -08001332
1333 /* Find the mm_struct */
Zeng Zhaoming55cfaa32010-12-02 14:31:13 -08001334 rcu_read_lock();
Pavel Emelyanov228ebcb2007-10-18 23:40:16 -07001335 task = pid ? find_task_by_vpid(pid) : current;
Christoph Lameter39743882006-01-08 01:00:51 -08001336 if (!task) {
Zeng Zhaoming55cfaa32010-12-02 14:31:13 -08001337 rcu_read_unlock();
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001338 err = -ESRCH;
1339 goto out;
Christoph Lameter39743882006-01-08 01:00:51 -08001340 }
Christoph Lameter3268c632012-03-21 16:34:06 -07001341 get_task_struct(task);
Christoph Lameter39743882006-01-08 01:00:51 -08001342
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001343 err = -EINVAL;
Christoph Lameter39743882006-01-08 01:00:51 -08001344
1345 /*
1346 * Check if this process has the right to modify the specified
1347 * process. The right exists if the process has administrative
Alexey Dobriyan7f927fc2006-03-28 01:56:53 -08001348 * capabilities, superuser privileges or the same
Christoph Lameter39743882006-01-08 01:00:51 -08001349 * userid as the target process.
1350 */
David Howellsc69e8d92008-11-14 10:39:19 +11001351 tcred = __task_cred(task);
Eric W. Biedermanb38a86e2012-03-12 15:48:24 -07001352 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
1353 !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
Christoph Lameter74c00242006-03-14 19:50:21 -08001354 !capable(CAP_SYS_NICE)) {
David Howellsc69e8d92008-11-14 10:39:19 +11001355 rcu_read_unlock();
Christoph Lameter39743882006-01-08 01:00:51 -08001356 err = -EPERM;
Christoph Lameter3268c632012-03-21 16:34:06 -07001357 goto out_put;
Christoph Lameter39743882006-01-08 01:00:51 -08001358 }
David Howellsc69e8d92008-11-14 10:39:19 +11001359 rcu_read_unlock();
Christoph Lameter39743882006-01-08 01:00:51 -08001360
1361 task_nodes = cpuset_mems_allowed(task);
1362 /* Is the user allowed to access the target nodes? */
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001363 if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
Christoph Lameter39743882006-01-08 01:00:51 -08001364 err = -EPERM;
Christoph Lameter3268c632012-03-21 16:34:06 -07001365 goto out_put;
Christoph Lameter39743882006-01-08 01:00:51 -08001366 }
1367
Lai Jiangshan01f13bd2012-12-12 13:51:33 -08001368 if (!nodes_subset(*new, node_states[N_MEMORY])) {
Christoph Lameter3b42d282007-08-31 00:12:08 -07001369 err = -EINVAL;
Christoph Lameter3268c632012-03-21 16:34:06 -07001370 goto out_put;
Christoph Lameter3b42d282007-08-31 00:12:08 -07001371 }
1372
David Quigley86c3a762006-06-23 02:04:02 -07001373 err = security_task_movememory(task);
1374 if (err)
Christoph Lameter3268c632012-03-21 16:34:06 -07001375 goto out_put;
David Quigley86c3a762006-06-23 02:04:02 -07001376
Christoph Lameter3268c632012-03-21 16:34:06 -07001377 mm = get_task_mm(task);
1378 put_task_struct(task);
Sasha Levinf2a9ef82012-04-25 16:01:52 -07001379
1380 if (!mm) {
Christoph Lameter3268c632012-03-21 16:34:06 -07001381 err = -EINVAL;
Sasha Levinf2a9ef82012-04-25 16:01:52 -07001382 goto out;
1383 }
1384
1385 err = do_migrate_pages(mm, old, new,
1386 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
Christoph Lameter3268c632012-03-21 16:34:06 -07001387
1388 mmput(mm);
1389out:
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001390 NODEMASK_SCRATCH_FREE(scratch);
1391
Christoph Lameter39743882006-01-08 01:00:51 -08001392 return err;
Christoph Lameter3268c632012-03-21 16:34:06 -07001393
1394out_put:
1395 put_task_struct(task);
1396 goto out;
1397
Christoph Lameter39743882006-01-08 01:00:51 -08001398}
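/*
 * Userspace sketch (illustrative; assumes libnuma's <numaif.h> and
 * sufficient privileges as checked above): move a task's pages from
 * node 0 to node 1. The call returns the number of pages that could
 * not be moved, or -1 on error.
 *
 *	unsigned long old_mask = 1UL << 0;
 *	unsigned long new_mask = 1UL << 1;
 *	long left = migrate_pages(pid, sizeof(old_mask) * 8,
 *				  &old_mask, &new_mask);
 *	if (left < 0)
 *		perror("migrate_pages");
 */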
1399
1400
Christoph Lameter8bccd852005-10-29 18:16:59 -07001401/* Retrieve NUMA policy */
Heiko Carstens938bb9f2009-01-14 14:14:30 +01001402SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1403 unsigned long __user *, nmask, unsigned long, maxnode,
1404 unsigned long, addr, unsigned long, flags)
Christoph Lameter8bccd852005-10-29 18:16:59 -07001405{
Adrian Bunkdbcb0f12007-10-16 01:26:26 -07001406 int err;
1407 int uninitialized_var(pval);
Christoph Lameter8bccd852005-10-29 18:16:59 -07001408 nodemask_t nodes;
1409
1410 if (nmask != NULL && maxnode < MAX_NUMNODES)
1411 return -EINVAL;
1412
1413 err = do_get_mempolicy(&pval, &nodes, addr, flags);
1414
1415 if (err)
1416 return err;
1417
1418 if (policy && put_user(pval, policy))
1419 return -EFAULT;
1420
1421 if (nmask)
1422 err = copy_nodes_to_user(nmask, maxnode, &nodes);
1423
1424 return err;
1425}
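/*
 * Userspace sketch (illustrative; assumes libnuma's <numaif.h>):
 * query the policy governing the page at addr. Per the check above,
 * a non-NULL nodemask requires maxnode >= MAX_NUMNODES, so pass a
 * generously sized buffer (1024 bits covers common configurations).
 *
 *	int mode;
 *	unsigned long mask[16] = { 0 };		// 16 * 64 = 1024 bits
 *	if (get_mempolicy(&mode, mask, sizeof(mask) * 8, addr,
 *			  MPOL_F_ADDR))
 *		perror("get_mempolicy");
 */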
1426
Linus Torvalds1da177e2005-04-16 15:20:36 -07001427#ifdef CONFIG_COMPAT
1428
Heiko Carstensc93e0f62014-03-03 16:32:26 +01001429COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1430 compat_ulong_t __user *, nmask,
1431 compat_ulong_t, maxnode,
1432 compat_ulong_t, addr, compat_ulong_t, flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001433{
1434 long err;
1435 unsigned long __user *nm = NULL;
1436 unsigned long nr_bits, alloc_size;
1437 DECLARE_BITMAP(bm, MAX_NUMNODES);
1438
1439 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1440 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1441
1442 if (nmask)
1443 nm = compat_alloc_user_space(alloc_size);
1444
1445 err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
1446
1447 if (!err && nmask) {
KAMEZAWA Hiroyuki2bbff6c2011-09-14 16:21:02 -07001448 unsigned long copy_size;
1449 copy_size = min_t(unsigned long, sizeof(bm), alloc_size);
1450 err = copy_from_user(bm, nm, copy_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001451 /* ensure entire bitmap is zeroed */
1452 err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
1453 err |= compat_put_bitmap(nmask, bm, nr_bits);
1454 }
1455
1456 return err;
1457}
1458
Heiko Carstensc93e0f62014-03-03 16:32:26 +01001459COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
1460 compat_ulong_t, maxnode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001461{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001462 unsigned long __user *nm = NULL;
1463 unsigned long nr_bits, alloc_size;
1464 DECLARE_BITMAP(bm, MAX_NUMNODES);
1465
1466 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1467 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1468
1469 if (nmask) {
Chris Sallscf01fb92017-04-07 23:48:11 -07001470 if (compat_get_bitmap(bm, nmask, nr_bits))
1471 return -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001472 nm = compat_alloc_user_space(alloc_size);
Chris Sallscf01fb92017-04-07 23:48:11 -07001473 if (copy_to_user(nm, bm, alloc_size))
1474 return -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001475 }
1476
Linus Torvalds1da177e2005-04-16 15:20:36 -07001477 return sys_set_mempolicy(mode, nm, nr_bits+1);
1478}
1479
Heiko Carstensc93e0f62014-03-03 16:32:26 +01001480COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
1481 compat_ulong_t, mode, compat_ulong_t __user *, nmask,
1482 compat_ulong_t, maxnode, compat_ulong_t, flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001483{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001484 unsigned long __user *nm = NULL;
1485 unsigned long nr_bits, alloc_size;
Andi Kleendfcd3c02005-10-29 18:15:48 -07001486 nodemask_t bm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001487
1488 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1489 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1490
1491 if (nmask) {
Chris Sallscf01fb92017-04-07 23:48:11 -07001492 if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits))
1493 return -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001494 nm = compat_alloc_user_space(alloc_size);
Chris Sallscf01fb92017-04-07 23:48:11 -07001495 if (copy_to_user(nm, nodes_addr(bm), alloc_size))
1496 return -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001497 }
1498
Linus Torvalds1da177e2005-04-16 15:20:36 -07001499 return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
1500}
1501
1502#endif
1503
Oleg Nesterov74d2c3a2014-10-09 15:27:50 -07001504struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
1505 unsigned long addr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001506{
Oleg Nesterov8d902742014-10-09 15:27:45 -07001507 struct mempolicy *pol = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001508
1509 if (vma) {
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001510 if (vma->vm_ops && vma->vm_ops->get_policy) {
Oleg Nesterov8d902742014-10-09 15:27:45 -07001511 pol = vma->vm_ops->get_policy(vma, addr);
Mel Gorman00442ad2012-10-08 16:29:20 -07001512 } else if (vma->vm_policy) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001513 pol = vma->vm_policy;
Mel Gorman00442ad2012-10-08 16:29:20 -07001514
1515 /*
1516 * shmem_alloc_page() passes MPOL_F_SHARED policy with
1517 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
1518 * count on these policies which will be dropped by
1519 * mpol_cond_put() later
1520 */
1521 if (mpol_needs_cond_ref(pol))
1522 mpol_get(pol);
1523 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001524 }
Oleg Nesterovf15ca782014-10-09 15:27:43 -07001525
Oleg Nesterov74d2c3a2014-10-09 15:27:50 -07001526 return pol;
1527}
1528
1529/*
Oleg Nesterovdd6eecb2014-10-09 15:27:57 -07001530 * get_vma_policy(@vma, @addr)
Oleg Nesterov74d2c3a2014-10-09 15:27:50 -07001531 * @vma: virtual memory area whose policy is sought
1532 * @addr: address in @vma for shared policy lookup
1533 *
1534 * Returns effective policy for a VMA at specified address.
Oleg Nesterovdd6eecb2014-10-09 15:27:57 -07001535 * Falls back to current->mempolicy or system default policy, as necessary.
Oleg Nesterov74d2c3a2014-10-09 15:27:50 -07001536 * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
1537 * count--added by the get_policy() vm_op, as appropriate--to protect against
1538 * freeing by another task. It is the caller's responsibility to free the
1539 * extra reference for shared policies.
1540 */
Oleg Nesterovdd6eecb2014-10-09 15:27:57 -07001541static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
1542 unsigned long addr)
Oleg Nesterov74d2c3a2014-10-09 15:27:50 -07001543{
1544 struct mempolicy *pol = __get_vma_policy(vma, addr);
1545
Oleg Nesterov8d902742014-10-09 15:27:45 -07001546 if (!pol)
Oleg Nesterovdd6eecb2014-10-09 15:27:57 -07001547 pol = get_task_policy(current);
Oleg Nesterov8d902742014-10-09 15:27:45 -07001548
Linus Torvalds1da177e2005-04-16 15:20:36 -07001549 return pol;
1550}
1551
Oleg Nesterov6b6482b2014-10-09 15:27:48 -07001552bool vma_policy_mof(struct vm_area_struct *vma)
Mel Gormanfc3147242013-10-07 11:29:09 +01001553{
Oleg Nesterov6b6482b2014-10-09 15:27:48 -07001554 struct mempolicy *pol;
Oleg Nesterovf15ca782014-10-09 15:27:43 -07001555
Oleg Nesterov6b6482b2014-10-09 15:27:48 -07001556 if (vma->vm_ops && vma->vm_ops->get_policy) {
1557 bool ret = false;
Mel Gormanfc3147242013-10-07 11:29:09 +01001558
Oleg Nesterov6b6482b2014-10-09 15:27:48 -07001559 pol = vma->vm_ops->get_policy(vma, vma->vm_start);
1560 if (pol && (pol->flags & MPOL_F_MOF))
1561 ret = true;
1562 mpol_cond_put(pol);
Mel Gormanfc3147242013-10-07 11:29:09 +01001563
Oleg Nesterov6b6482b2014-10-09 15:27:48 -07001564 return ret;
Mel Gormanfc3147242013-10-07 11:29:09 +01001565 }
1566
Oleg Nesterov6b6482b2014-10-09 15:27:48 -07001567 pol = vma->vm_policy;
Oleg Nesterov8d902742014-10-09 15:27:45 -07001568 if (!pol)
Oleg Nesterov6b6482b2014-10-09 15:27:48 -07001569 pol = get_task_policy(current);
Oleg Nesterov8d902742014-10-09 15:27:45 -07001570
Mel Gormanfc3147242013-10-07 11:29:09 +01001571 return pol->flags & MPOL_F_MOF;
1572}
1573
Lai Jiangshand3eb1572013-02-22 16:33:22 -08001574static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1575{
1576 enum zone_type dynamic_policy_zone = policy_zone;
1577
1578 BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1579
1580 /*
1581	 * If policy->v.nodes contains movable memory only,
1582	 * we apply the policy only when gfp_zone(gfp) == ZONE_MOVABLE.
1583	 *
1584	 * policy->v.nodes has already been intersected with node_states[N_MEMORY],
1585	 * so if the following test fails, it implies that
1586	 * policy->v.nodes contains movable memory only.
1587 */
1588 if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY]))
1589 dynamic_policy_zone = ZONE_MOVABLE;
1590
1591 return zone >= dynamic_policy_zone;
1592}
1593
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001594/*
1595 * Return a nodemask representing a mempolicy for filtering nodes for
1596 * page allocation
1597 */
1598static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
Mel Gorman19770b32008-04-28 02:12:18 -07001599{
1600 /* Lower zones don't get a nodemask applied for MPOL_BIND */
Lee Schermerhorn45c47452008-04-28 02:13:12 -07001601 if (unlikely(policy->mode == MPOL_BIND) &&
Lai Jiangshand3eb1572013-02-22 16:33:22 -08001602 apply_policy_zone(policy, gfp_zone(gfp)) &&
Mel Gorman19770b32008-04-28 02:12:18 -07001603 cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
1604 return &policy->v.nodes;
1605
1606 return NULL;
1607}
1608
Vlastimil Babka04ec6262017-07-06 15:40:03 -07001609/* Return the node id preferred by the given mempolicy, or the given id */
1610static int policy_node(gfp_t gfp, struct mempolicy *policy,
1611 int nd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001612{
Michal Hocko6d840952016-12-12 16:42:23 -08001613 if (policy->mode == MPOL_PREFERRED && !(policy->flags & MPOL_F_LOCAL))
1614 nd = policy->v.preferred_node;
1615 else {
Mel Gorman19770b32008-04-28 02:12:18 -07001616 /*
Michal Hocko6d840952016-12-12 16:42:23 -08001617 * __GFP_THISNODE shouldn't even be used with the bind policy
1618 * because we might easily break the expectation to stay on the
1619 * requested node and not break the policy.
Mel Gorman19770b32008-04-28 02:12:18 -07001620 */
Michal Hocko6d840952016-12-12 16:42:23 -08001621 WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001622 }
Michal Hocko6d840952016-12-12 16:42:23 -08001623
Vlastimil Babka04ec6262017-07-06 15:40:03 -07001624 return nd;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001625}
1626
1627/* Do dynamic interleaving for a process */
1628static unsigned interleave_nodes(struct mempolicy *policy)
1629{
Vlastimil Babka45816682017-07-06 15:39:59 -07001630 unsigned next;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001631 struct task_struct *me = current;
1632
Vlastimil Babka45816682017-07-06 15:39:59 -07001633 next = next_node_in(me->il_prev, policy->v.nodes);
David Rientjesf5b087b2008-04-28 02:12:27 -07001634 if (next < MAX_NUMNODES)
Vlastimil Babka45816682017-07-06 15:39:59 -07001635 me->il_prev = next;
1636 return next;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001637}
1638
Christoph Lameterdc85da12006-01-18 17:42:36 -08001639/*
1640 * Depending on the memory policy provide a node from which to allocate the
1641 * next slab entry.
1642 */
David Rientjes2a389612014-04-07 15:37:29 -07001643unsigned int mempolicy_slab_node(void)
Christoph Lameterdc85da12006-01-18 17:42:36 -08001644{
Andi Kleene7b691b2012-06-09 02:40:03 -07001645 struct mempolicy *policy;
David Rientjes2a389612014-04-07 15:37:29 -07001646 int node = numa_mem_id();
Andi Kleene7b691b2012-06-09 02:40:03 -07001647
1648 if (in_interrupt())
David Rientjes2a389612014-04-07 15:37:29 -07001649 return node;
Andi Kleene7b691b2012-06-09 02:40:03 -07001650
1651 policy = current->mempolicy;
Lee Schermerhornfc36b8d2008-04-28 02:13:21 -07001652 if (!policy || policy->flags & MPOL_F_LOCAL)
David Rientjes2a389612014-04-07 15:37:29 -07001653 return node;
Christoph Lameter765c4502006-09-27 01:50:08 -07001654
Lee Schermerhornbea904d2008-04-28 02:13:18 -07001655 switch (policy->mode) {
1656 case MPOL_PREFERRED:
Lee Schermerhornfc36b8d2008-04-28 02:13:21 -07001657 /*
1658 * handled MPOL_F_LOCAL above
1659 */
1660 return policy->v.preferred_node;
Lee Schermerhornbea904d2008-04-28 02:13:18 -07001661
Christoph Lameterdc85da12006-01-18 17:42:36 -08001662 case MPOL_INTERLEAVE:
1663 return interleave_nodes(policy);
1664
Mel Gormandd1a2392008-04-28 02:12:17 -07001665 case MPOL_BIND: {
Mel Gormanc33d6c02016-05-19 17:14:10 -07001666 struct zoneref *z;
1667
Christoph Lameterdc85da12006-01-18 17:42:36 -08001668 /*
1669 * Follow bind policy behavior and start allocation at the
1670 * first node.
1671 */
Mel Gorman19770b32008-04-28 02:12:18 -07001672 struct zonelist *zonelist;
Mel Gorman19770b32008-04-28 02:12:18 -07001673 enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
Aneesh Kumar K.Vc9634cf2016-10-07 16:59:12 -07001674 zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
Mel Gormanc33d6c02016-05-19 17:14:10 -07001675 z = first_zones_zonelist(zonelist, highest_zoneidx,
1676 &policy->v.nodes);
1677 return z->zone ? z->zone->node : node;
Mel Gormandd1a2392008-04-28 02:12:17 -07001678 }
Christoph Lameterdc85da12006-01-18 17:42:36 -08001679
Christoph Lameterdc85da12006-01-18 17:42:36 -08001680 default:
Lee Schermerhornbea904d2008-04-28 02:13:18 -07001681 BUG();
Christoph Lameterdc85da12006-01-18 17:42:36 -08001682 }
1683}
1684
Andrew Mortonfee83b32016-05-19 17:11:43 -07001685/*
1686 * Do static interleaving for a VMA with known offset @n. Returns the n'th
1687 * node in pol->v.nodes (starting from n=0), wrapping around if n exceeds the
1688 * number of present nodes.
1689 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001690static unsigned offset_il_node(struct mempolicy *pol,
Andrew Mortonfee83b32016-05-19 17:11:43 -07001691 struct vm_area_struct *vma, unsigned long n)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001692{
Andi Kleendfcd3c02005-10-29 18:15:48 -07001693 unsigned nnodes = nodes_weight(pol->v.nodes);
David Rientjesf5b087b2008-04-28 02:12:27 -07001694 unsigned target;
Andrew Mortonfee83b32016-05-19 17:11:43 -07001695 int i;
1696 int nid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001697
David Rientjesf5b087b2008-04-28 02:12:27 -07001698 if (!nnodes)
1699 return numa_node_id();
Andrew Mortonfee83b32016-05-19 17:11:43 -07001700 target = (unsigned int)n % nnodes;
1701 nid = first_node(pol->v.nodes);
1702 for (i = 0; i < target; i++)
Andi Kleendfcd3c02005-10-29 18:15:48 -07001703 nid = next_node(nid, pol->v.nodes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001704 return nid;
1705}
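/*
 * Worked example (a sketch): with pol->v.nodes == {0, 2, 5} and
 * n == 7, nnodes == 3 and target == 7 % 3 == 1, so the walk starts
 * at node 0 and takes one next_node() step, returning node 2.
 * Because the result depends only on the offset, faults at the same
 * offset always land on the same node, whichever task faults first.
 */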
1706
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001707/* Determine a node number for interleave */
1708static inline unsigned interleave_nid(struct mempolicy *pol,
1709 struct vm_area_struct *vma, unsigned long addr, int shift)
1710{
1711 if (vma) {
1712 unsigned long off;
1713
Nishanth Aravamudan3b98b082006-08-31 21:27:53 -07001714 /*
1715 * for small pages, there is no difference between
1716 * shift and PAGE_SHIFT, so the bit-shift is safe.
1717 * for huge pages, since vm_pgoff is in units of small
1718 * pages, we need to shift off the always 0 bits to get
1719 * a useful offset.
1720 */
1721 BUG_ON(shift < PAGE_SHIFT);
1722 off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001723 off += (addr - vma->vm_start) >> shift;
1724 return offset_il_node(pol, vma, off);
1725 } else
1726 return interleave_nodes(pol);
1727}
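/*
 * Worked example for the shift above (assuming 4K base pages and 2M
 * huge pages, i.e. shift == 21): vm_pgoff is kept in 4K units, so
 * off = vm_pgoff >> (21 - 12) converts it to 2M units, and
 * (addr - vma->vm_start) >> 21 adds the huge-page index within the
 * mapping. Interleaving thus steps per huge page, not per 4K page.
 */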
1728
Chen, Kenneth W00ac59a2006-02-03 21:51:14 +01001729#ifdef CONFIG_HUGETLBFS
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001730/*
Vlastimil Babka04ec6262017-07-06 15:40:03 -07001731 * huge_node(@vma, @addr, @gfp_flags, @mpol)
Fabian Frederickb46e14a2014-06-04 16:08:18 -07001732 * @vma: virtual memory area whose policy is sought
1733 * @addr: address in @vma for shared policy lookup and interleave policy
1734 * @gfp_flags: for requested zone
1735 * @mpol: pointer to mempolicy pointer for reference counted mempolicy
1736 * @nodemask: pointer to nodemask pointer for MPOL_BIND nodemask
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001737 *
Vlastimil Babka04ec6262017-07-06 15:40:03 -07001738 * Returns a nid suitable for a huge page allocation and a pointer
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001739 * to the struct mempolicy for conditional unref after allocation.
1740	 * If the effective policy is 'bind', returns a pointer to the mempolicy's
1741 * @nodemask for filtering the zonelist.
Miao Xiec0ff7452010-05-24 14:32:08 -07001742 *
Mel Gormand26914d2014-04-03 14:47:24 -07001743 * Must be protected by read_mems_allowed_begin()
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001744 */
Vlastimil Babka04ec6262017-07-06 15:40:03 -07001745int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags,
1746 struct mempolicy **mpol, nodemask_t **nodemask)
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001747{
Vlastimil Babka04ec6262017-07-06 15:40:03 -07001748 int nid;
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001749
Oleg Nesterovdd6eecb2014-10-09 15:27:57 -07001750 *mpol = get_vma_policy(vma, addr);
Mel Gorman19770b32008-04-28 02:12:18 -07001751 *nodemask = NULL; /* assume !MPOL_BIND */
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001752
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001753 if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
Vlastimil Babka04ec6262017-07-06 15:40:03 -07001754 nid = interleave_nid(*mpol, vma, addr,
1755 huge_page_shift(hstate_vma(vma)));
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001756 } else {
Vlastimil Babka04ec6262017-07-06 15:40:03 -07001757 nid = policy_node(gfp_flags, *mpol, numa_node_id());
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001758 if ((*mpol)->mode == MPOL_BIND)
1759 *nodemask = &(*mpol)->v.nodes;
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001760 }
Vlastimil Babka04ec6262017-07-06 15:40:03 -07001761 return nid;
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001762}
Lee Schermerhorn06808b02009-12-14 17:58:21 -08001763
1764/*
1765 * init_nodemask_of_mempolicy
1766 *
1767 * If the current task's mempolicy is "default" [NULL], return 'false'
1768 * to indicate default policy. Otherwise, extract the policy nodemask
1769 * for 'bind' or 'interleave' policy into the argument nodemask, or
1770 * initialize the argument nodemask to contain the single node for
1771 * 'preferred' or 'local' policy and return 'true' to indicate presence
1772 * of non-default mempolicy.
1773 *
1774 * We don't bother with reference counting the mempolicy [mpol_get/put]
1775	 * because the current task is examining its own mempolicy and a task's
1776 * mempolicy is only ever changed by the task itself.
1777 *
1778 * N.B., it is the caller's responsibility to free a returned nodemask.
1779 */
1780bool init_nodemask_of_mempolicy(nodemask_t *mask)
1781{
1782 struct mempolicy *mempolicy;
1783 int nid;
1784
1785 if (!(mask && current->mempolicy))
1786 return false;
1787
Miao Xiec0ff7452010-05-24 14:32:08 -07001788 task_lock(current);
Lee Schermerhorn06808b02009-12-14 17:58:21 -08001789 mempolicy = current->mempolicy;
1790 switch (mempolicy->mode) {
1791 case MPOL_PREFERRED:
1792 if (mempolicy->flags & MPOL_F_LOCAL)
1793 nid = numa_node_id();
1794 else
1795 nid = mempolicy->v.preferred_node;
1796 init_nodemask_of_node(mask, nid);
1797 break;
1798
1799 case MPOL_BIND:
1800 /* Fall through */
1801 case MPOL_INTERLEAVE:
1802 *mask = mempolicy->v.nodes;
1803 break;
1804
1805 default:
1806 BUG();
1807 }
Miao Xiec0ff7452010-05-24 14:32:08 -07001808 task_unlock(current);
Lee Schermerhorn06808b02009-12-14 17:58:21 -08001809
1810 return true;
1811}
Chen, Kenneth W00ac59a2006-02-03 21:51:14 +01001812#endif
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001813
David Rientjes6f48d0eb2010-08-09 17:18:52 -07001814/*
1815 * mempolicy_nodemask_intersects
1816 *
1817 * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
1818 * policy. Otherwise, check for intersection between mask and the policy
1819	 * nodemask for 'bind' or 'interleave' policy. For 'preferred' or 'local'
1820 * policy, always return true since it may allocate elsewhere on fallback.
1821 *
1822 * Takes task_lock(tsk) to prevent freeing of its mempolicy.
1823 */
1824bool mempolicy_nodemask_intersects(struct task_struct *tsk,
1825 const nodemask_t *mask)
1826{
1827 struct mempolicy *mempolicy;
1828 bool ret = true;
1829
1830 if (!mask)
1831 return ret;
1832 task_lock(tsk);
1833 mempolicy = tsk->mempolicy;
1834 if (!mempolicy)
1835 goto out;
1836
1837 switch (mempolicy->mode) {
1838 case MPOL_PREFERRED:
1839 /*
1840 * MPOL_PREFERRED and MPOL_F_LOCAL are only preferred nodes to
1841 * allocate from, they may fallback to other nodes when oom.
1842 * Thus, it's possible for tsk to have allocated memory from
1843 * nodes in mask.
1844 */
1845 break;
1846 case MPOL_BIND:
1847 case MPOL_INTERLEAVE:
1848 ret = nodes_intersects(mempolicy->v.nodes, *mask);
1849 break;
1850 default:
1851 BUG();
1852 }
1853out:
1854 task_unlock(tsk);
1855 return ret;
1856}
1857
Linus Torvalds1da177e2005-04-16 15:20:36 -07001858/* Allocate a page following the interleave policy.
1859 Own path because it needs to do special accounting. */
Andi Kleen662f3a02005-10-29 18:15:49 -07001860static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
1861 unsigned nid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001862{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001863 struct page *page;
1864
Vlastimil Babka04ec6262017-07-06 15:40:03 -07001865 page = __alloc_pages(gfp, order, nid);
1866 if (page && page_to_nid(page) == nid)
Christoph Lameterca889e62006-06-30 01:55:44 -07001867 inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001868 return page;
1869}
1870
1871/**
Andrea Arcangeli0bbbc0b2011-01-13 15:47:05 -08001872 * alloc_pages_vma - Allocate a page for a VMA.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001873 *
1874 * @gfp:
1875 * %GFP_USER user allocation.
1876 * %GFP_KERNEL kernel allocations,
1877 * %GFP_HIGHMEM highmem/user allocations,
1878 * %GFP_FS allocation should not call back into a file system.
1879 * %GFP_ATOMIC don't sleep.
1880 *
Andrea Arcangeli0bbbc0b2011-01-13 15:47:05 -08001881 * @order: Order of the GFP allocation.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001882 * @vma: Pointer to VMA or NULL if not available.
1883 * @addr: Virtual Address of the allocation. Must be inside the VMA.
Vlastimil Babkabe97a412015-02-11 15:27:15 -08001884 * @node: Which node to prefer for allocation (modulo policy).
1885 * @hugepage: for hugepages try only the preferred node if possible
Linus Torvalds1da177e2005-04-16 15:20:36 -07001886 *
1887 * This function allocates a page from the kernel page pool and applies
1888 * a NUMA policy associated with the VMA or the current process.
1889	 * When VMA is not NULL, the caller must hold down_read on the mmap_sem of the
1890 * mm_struct of the VMA to prevent it from going away. Should be used for
Vlastimil Babkabe97a412015-02-11 15:27:15 -08001891 * all allocations for pages that will be mapped into user space. Returns
1892 * NULL when no page can be allocated.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001893 */
1894struct page *
Andrea Arcangeli0bbbc0b2011-01-13 15:47:05 -08001895alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
Vlastimil Babkabe97a412015-02-11 15:27:15 -08001896 unsigned long addr, int node, bool hugepage)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001897{
Mel Gormancc9a6c82012-03-21 16:34:11 -07001898 struct mempolicy *pol;
Miao Xiec0ff7452010-05-24 14:32:08 -07001899 struct page *page;
Vlastimil Babka04ec6262017-07-06 15:40:03 -07001900 int preferred_nid;
Vlastimil Babkabe97a412015-02-11 15:27:15 -08001901 nodemask_t *nmask;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001902
Oleg Nesterovdd6eecb2014-10-09 15:27:57 -07001903 pol = get_vma_policy(vma, addr);
Mel Gormancc9a6c82012-03-21 16:34:11 -07001904
Vlastimil Babkabe97a412015-02-11 15:27:15 -08001905 if (pol->mode == MPOL_INTERLEAVE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001906 unsigned nid;
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001907
Andi Kleen8eac5632011-02-25 14:44:28 -08001908 nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001909 mpol_cond_put(pol);
Andrea Arcangeli0bbbc0b2011-01-13 15:47:05 -08001910 page = alloc_page_interleave(gfp, order, nid);
Vlastimil Babkabe97a412015-02-11 15:27:15 -08001911 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001912 }
Vlastimil Babkabe97a412015-02-11 15:27:15 -08001913
Vlastimil Babka0867a572015-06-24 16:58:48 -07001914 if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
1915 int hpage_node = node;
1916
1917 /*
1918 * For hugepage allocation and non-interleave policy which
1919 * allows the current node (or other explicitly preferred
1920 * node) we only try to allocate from the current/preferred
1921 * node and don't fall back to other nodes, as the cost of
1922 * remote accesses would likely offset THP benefits.
1923 *
1924 * If the policy is interleave, or does not allow the current
1925 * node in its nodemask, we allocate the standard way.
1926 */
1927 if (pol->mode == MPOL_PREFERRED &&
1928 !(pol->flags & MPOL_F_LOCAL))
1929 hpage_node = pol->v.preferred_node;
1930
1931 nmask = policy_nodemask(gfp, pol);
1932 if (!nmask || node_isset(hpage_node, *nmask)) {
1933 mpol_cond_put(pol);
Vlastimil Babka96db8002015-09-08 15:03:50 -07001934 page = __alloc_pages_node(hpage_node,
Vlastimil Babka0867a572015-06-24 16:58:48 -07001935 gfp | __GFP_THISNODE, order);
1936 goto out;
1937 }
1938 }
1939
Vlastimil Babkabe97a412015-02-11 15:27:15 -08001940 nmask = policy_nodemask(gfp, pol);
Vlastimil Babka04ec6262017-07-06 15:40:03 -07001941 preferred_nid = policy_node(gfp, pol, node);
1942 page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask);
Vlastimil Babkad51e9892017-01-24 15:18:18 -08001943 mpol_cond_put(pol);
Vlastimil Babkabe97a412015-02-11 15:27:15 -08001944out:
Miao Xiec0ff7452010-05-24 14:32:08 -07001945 return page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001946}
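/*
 * Sketch of a typical call site in an anonymous fault path
 * (illustrative only): allocate one movable user page at the
 * faulting address, honouring the VMA's policy.
 *
 *	page = alloc_pages_vma(GFP_HIGHUSER_MOVABLE, 0, vma, address,
 *			       numa_node_id(), false);
 *	if (!page)
 *		return VM_FAULT_OOM;
 */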
1947
1948/**
1949 * alloc_pages_current - Allocate pages.
1950 *
1951 * @gfp:
1952 * %GFP_USER user allocation,
1953 * %GFP_KERNEL kernel allocation,
1954 * %GFP_HIGHMEM highmem allocation,
1955 * %GFP_FS don't call back into a file system.
1956 * %GFP_ATOMIC don't sleep.
1957 * @order: Power of two of allocation size in pages. 0 is a single page.
1958 *
1959	 * Allocate a page from the kernel page pool, applying the current
1960	 * process' NUMA policy when not in interrupt context.
1961 * Returns NULL when no page can be allocated.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001962 */
Al Virodd0fc662005-10-07 07:46:04 +01001963struct page *alloc_pages_current(gfp_t gfp, unsigned order)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001964{
Oleg Nesterov8d902742014-10-09 15:27:45 -07001965 struct mempolicy *pol = &default_policy;
Miao Xiec0ff7452010-05-24 14:32:08 -07001966 struct page *page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001967
Oleg Nesterov8d902742014-10-09 15:27:45 -07001968 if (!in_interrupt() && !(gfp & __GFP_THISNODE))
1969 pol = get_task_policy(current);
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001970
1971 /*
1972 * No reference counting needed for current->mempolicy
1973 * nor system default_policy
1974 */
Lee Schermerhorn45c47452008-04-28 02:13:12 -07001975 if (pol->mode == MPOL_INTERLEAVE)
Miao Xiec0ff7452010-05-24 14:32:08 -07001976 page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
1977 else
1978 page = __alloc_pages_nodemask(gfp, order,
Vlastimil Babka04ec6262017-07-06 15:40:03 -07001979 policy_node(gfp, pol, numa_node_id()),
Andi Kleen5c4b4be2011-03-04 17:36:32 -08001980 policy_nodemask(gfp, pol));
Mel Gormancc9a6c82012-03-21 16:34:11 -07001981
Miao Xiec0ff7452010-05-24 14:32:08 -07001982 return page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001983}
1984EXPORT_SYMBOL(alloc_pages_current);
1985
Oleg Nesterovef0855d2013-09-11 14:20:14 -07001986int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
1987{
1988 struct mempolicy *pol = mpol_dup(vma_policy(src));
1989
1990 if (IS_ERR(pol))
1991 return PTR_ERR(pol);
1992 dst->vm_policy = pol;
1993 return 0;
1994}
1995
Paul Jackson42253992006-01-08 01:01:59 -08001996/*
Lee Schermerhorn846a16b2008-04-28 02:13:09 -07001997 * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
Paul Jackson42253992006-01-08 01:01:59 -08001998 * rebinds the mempolicy its copying by calling mpol_rebind_policy()
1999 * with the mems_allowed returned by cpuset_mems_allowed(). This
2000 * keeps mempolicies cpuset relative after its cpuset moves. See
2001 * further kernel/cpuset.c update_nodemask().
Miao Xie708c1bb2010-05-24 14:32:07 -07002002 *
2003	 * current's mempolicy may be rebound by another task (the task that changes
2004	 * the cpuset's mems), so we needn't do rebind work for the current task.
Paul Jackson42253992006-01-08 01:01:59 -08002005 */
Paul Jackson42253992006-01-08 01:01:59 -08002006
Lee Schermerhorn846a16b2008-04-28 02:13:09 -07002007/* Slow path of a mempolicy duplicate */
2008struct mempolicy *__mpol_dup(struct mempolicy *old)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002009{
2010 struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2011
2012 if (!new)
2013 return ERR_PTR(-ENOMEM);
Miao Xie708c1bb2010-05-24 14:32:07 -07002014
2015 /* task's mempolicy is protected by alloc_lock */
2016 if (old == current->mempolicy) {
2017 task_lock(current);
2018 *new = *old;
2019 task_unlock(current);
2020 } else
2021 *new = *old;
2022
Paul Jackson42253992006-01-08 01:01:59 -08002023 if (current_cpuset_is_being_rebound()) {
2024 nodemask_t mems = cpuset_mems_allowed(current);
Vlastimil Babka213980c2017-07-06 15:40:06 -07002025 mpol_rebind_policy(new, &mems);
Paul Jackson42253992006-01-08 01:01:59 -08002026 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002027 atomic_set(&new->refcnt, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002028 return new;
2029}
2030
2031/* Slow path of a mempolicy comparison */
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002032bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002033{
2034 if (!a || !b)
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002035 return false;
Lee Schermerhorn45c47452008-04-28 02:13:12 -07002036 if (a->mode != b->mode)
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002037 return false;
Bob Liu19800502010-05-24 14:32:01 -07002038 if (a->flags != b->flags)
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002039 return false;
Bob Liu19800502010-05-24 14:32:01 -07002040 if (mpol_store_user_nodemask(a))
2041 if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002042 return false;
Bob Liu19800502010-05-24 14:32:01 -07002043
Lee Schermerhorn45c47452008-04-28 02:13:12 -07002044 switch (a->mode) {
Mel Gorman19770b32008-04-28 02:12:18 -07002045 case MPOL_BIND:
2046 /* Fall through */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002047 case MPOL_INTERLEAVE:
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002048 return !!nodes_equal(a->v.nodes, b->v.nodes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002049 case MPOL_PREFERRED:
Namhyung Kim75719662011-03-22 16:33:02 -07002050 return a->v.preferred_node == b->v.preferred_node;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002051 default:
2052 BUG();
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002053 return false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002054 }
2055}
2056
Linus Torvalds1da177e2005-04-16 15:20:36 -07002057/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002058 * Shared memory backing store policy support.
2059 *
2060 * Remember policies even when nobody has shared memory mapped.
2061	 * The policies are kept in a red-black tree linked from the inode.
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002062 * They are protected by the sp->lock rwlock, which should be held
Linus Torvalds1da177e2005-04-16 15:20:36 -07002063 * for any accesses to the tree.
2064 */
2065
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002066/*
2067 * lookup first element intersecting start-end. Caller holds sp->lock for
2068 * reading or for writing
2069 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002070static struct sp_node *
2071sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
2072{
2073 struct rb_node *n = sp->root.rb_node;
2074
2075 while (n) {
2076 struct sp_node *p = rb_entry(n, struct sp_node, nd);
2077
2078 if (start >= p->end)
2079 n = n->rb_right;
2080 else if (end <= p->start)
2081 n = n->rb_left;
2082 else
2083 break;
2084 }
2085 if (!n)
2086 return NULL;
2087 for (;;) {
2088 struct sp_node *w = NULL;
2089 struct rb_node *prev = rb_prev(n);
2090 if (!prev)
2091 break;
2092 w = rb_entry(prev, struct sp_node, nd);
2093 if (w->end <= start)
2094 break;
2095 n = prev;
2096 }
2097 return rb_entry(n, struct sp_node, nd);
2098}
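/*
 * Worked example (a sketch): with ranges [0,4) and [6,10) in the
 * tree, sp_lookup(sp, 3, 7) stops its descent on an overlapping
 * node, and the backward rb_prev() walk then rewinds to [0,4), the
 * lowest range still ending after 3. That node is returned as the
 * first intersecting entry; callers continue with rb_next().
 */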
2099
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002100/*
2101 * Insert a new shared policy into the list. Caller holds sp->lock for
2102 * writing.
2103 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002104static void sp_insert(struct shared_policy *sp, struct sp_node *new)
2105{
2106 struct rb_node **p = &sp->root.rb_node;
2107 struct rb_node *parent = NULL;
2108 struct sp_node *nd;
2109
2110 while (*p) {
2111 parent = *p;
2112 nd = rb_entry(parent, struct sp_node, nd);
2113 if (new->start < nd->start)
2114 p = &(*p)->rb_left;
2115 else if (new->end > nd->end)
2116 p = &(*p)->rb_right;
2117 else
2118 BUG();
2119 }
2120 rb_link_node(&new->nd, parent, p);
2121 rb_insert_color(&new->nd, &sp->root);
Paul Mundt140d5a42007-07-15 23:38:16 -07002122 pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
Lee Schermerhorn45c47452008-04-28 02:13:12 -07002123 new->policy ? new->policy->mode : 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002124}
2125
2126/* Find shared policy intersecting idx */
2127struct mempolicy *
2128mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
2129{
2130 struct mempolicy *pol = NULL;
2131 struct sp_node *sn;
2132
2133 if (!sp->root.rb_node)
2134 return NULL;
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002135 read_lock(&sp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002136 sn = sp_lookup(sp, idx, idx+1);
2137 if (sn) {
2138 mpol_get(sn->policy);
2139 pol = sn->policy;
2140 }
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002141 read_unlock(&sp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002142 return pol;
2143}
2144
KOSAKI Motohiro63f74ca2012-10-08 16:29:19 -07002145static void sp_free(struct sp_node *n)
2146{
2147 mpol_put(n->policy);
2148 kmem_cache_free(sn_cache, n);
2149}
2150
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002151/**
2152 * mpol_misplaced - check whether current page node is valid in policy
2153 *
Fabian Frederickb46e14a2014-06-04 16:08:18 -07002154 * @page: page to be checked
2155 * @vma: vm area where page mapped
2156 * @addr: virtual address where page mapped
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002157 *
2158 * Lookup current policy node id for vma,addr and "compare to" page's
2159 * node id.
2160 *
2161 * Returns:
2162 * -1 - not misplaced, page is in the right node
2163 * node - node id where the page should be
2164 *
2165 * Policy determination "mimics" alloc_page_vma().
2166 * Called from fault path where we know the vma and faulting address.
2167 */
2168int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
2169{
2170 struct mempolicy *pol;
Mel Gormanc33d6c02016-05-19 17:14:10 -07002171 struct zoneref *z;
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002172 int curnid = page_to_nid(page);
2173 unsigned long pgoff;
Peter Zijlstra90572892013-10-07 11:29:20 +01002174 int thiscpu = raw_smp_processor_id();
2175 int thisnid = cpu_to_node(thiscpu);
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002176 int polnid = -1;
2177 int ret = -1;
2178
2179 BUG_ON(!vma);
2180
Oleg Nesterovdd6eecb2014-10-09 15:27:57 -07002181 pol = get_vma_policy(vma, addr);
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002182 if (!(pol->flags & MPOL_F_MOF))
2183 goto out;
2184
2185 switch (pol->mode) {
2186 case MPOL_INTERLEAVE:
2187 BUG_ON(addr >= vma->vm_end);
2188 BUG_ON(addr < vma->vm_start);
2189
2190 pgoff = vma->vm_pgoff;
2191 pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
2192 polnid = offset_il_node(pol, vma, pgoff);
2193 break;
2194
2195 case MPOL_PREFERRED:
2196 if (pol->flags & MPOL_F_LOCAL)
2197 polnid = numa_node_id();
2198 else
2199 polnid = pol->v.preferred_node;
2200 break;
2201
2202 case MPOL_BIND:
Mel Gormanc33d6c02016-05-19 17:14:10 -07002203
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002204 /*
2205 * allows binding to multiple nodes.
2206 * use current page if in policy nodemask,
2207 * else select nearest allowed node, if any.
2208 * If no allowed nodes, use current [!misplaced].
2209 */
2210 if (node_isset(curnid, pol->v.nodes))
2211 goto out;
Mel Gormanc33d6c02016-05-19 17:14:10 -07002212 z = first_zones_zonelist(
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002213 node_zonelist(numa_node_id(), GFP_HIGHUSER),
2214 gfp_zone(GFP_HIGHUSER),
Mel Gormanc33d6c02016-05-19 17:14:10 -07002215 &pol->v.nodes);
2216 polnid = z->zone->node;
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002217 break;
2218
2219 default:
2220 BUG();
2221 }
Mel Gorman5606e382012-11-02 18:19:13 +00002222
2223 /* Migrate the page towards the node whose CPU is referencing it */
Mel Gormane42c8ff2012-11-12 09:17:07 +00002224 if (pol->flags & MPOL_F_MORON) {
Peter Zijlstra90572892013-10-07 11:29:20 +01002225 polnid = thisnid;
Mel Gorman5606e382012-11-02 18:19:13 +00002226
Rik van Riel10f39042014-01-27 17:03:44 -05002227 if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
Rik van Rielde1c9ce2013-10-07 11:29:39 +01002228 goto out;
Mel Gormane42c8ff2012-11-12 09:17:07 +00002229 }
2230
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002231 if (curnid != polnid)
2232 ret = polnid;
2233out:
2234 mpol_cond_put(pol);
2235
2236 return ret;
2237}
2238
David Rientjesc11600e2016-09-01 16:15:07 -07002239/*
2240 * Drop the (possibly final) reference to task->mempolicy. It needs to be
2241 * dropped after task->mempolicy is set to NULL so that any allocation done as
2242 * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed
2243 * policy.
2244 */
2245void mpol_put_task_policy(struct task_struct *task)
2246{
2247 struct mempolicy *pol;
2248
2249 task_lock(task);
2250 pol = task->mempolicy;
2251 task->mempolicy = NULL;
2252 task_unlock(task);
2253 mpol_put(pol);
2254}
2255
Linus Torvalds1da177e2005-04-16 15:20:36 -07002256static void sp_delete(struct shared_policy *sp, struct sp_node *n)
2257{
Paul Mundt140d5a42007-07-15 23:38:16 -07002258	pr_debug("deleting %lx-%lx\n", n->start, n->end);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002259 rb_erase(&n->nd, &sp->root);
KOSAKI Motohiro63f74ca2012-10-08 16:29:19 -07002260 sp_free(n);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002261}
2262
Mel Gorman42288fe2012-12-21 23:10:25 +00002263static void sp_node_init(struct sp_node *node, unsigned long start,
2264 unsigned long end, struct mempolicy *pol)
2265{
2266 node->start = start;
2267 node->end = end;
2268 node->policy = pol;
2269}
2270
Adrian Bunkdbcb0f12007-10-16 01:26:26 -07002271static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2272 struct mempolicy *pol)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002273{
KOSAKI Motohiro869833f2012-10-08 16:29:16 -07002274 struct sp_node *n;
2275 struct mempolicy *newpol;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002276
KOSAKI Motohiro869833f2012-10-08 16:29:16 -07002277 n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002278 if (!n)
2279 return NULL;
KOSAKI Motohiro869833f2012-10-08 16:29:16 -07002280
2281 newpol = mpol_dup(pol);
2282 if (IS_ERR(newpol)) {
2283 kmem_cache_free(sn_cache, n);
2284 return NULL;
2285 }
2286 newpol->flags |= MPOL_F_SHARED;
Mel Gorman42288fe2012-12-21 23:10:25 +00002287 sp_node_init(n, start, end, newpol);
KOSAKI Motohiro869833f2012-10-08 16:29:16 -07002288
Linus Torvalds1da177e2005-04-16 15:20:36 -07002289 return n;
2290}
2291
2292/* Replace a policy range. */
2293static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
2294 unsigned long end, struct sp_node *new)
2295{
Mel Gormanb22d1272012-10-08 16:29:17 -07002296 struct sp_node *n;
Mel Gorman42288fe2012-12-21 23:10:25 +00002297 struct sp_node *n_new = NULL;
2298 struct mempolicy *mpol_new = NULL;
Mel Gormanb22d1272012-10-08 16:29:17 -07002299 int ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002300
Mel Gorman42288fe2012-12-21 23:10:25 +00002301restart:
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002302 write_lock(&sp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002303 n = sp_lookup(sp, start, end);
2304 /* Take care of old policies in the same range. */
2305 while (n && n->start < end) {
2306 struct rb_node *next = rb_next(&n->nd);
2307 if (n->start >= start) {
2308 if (n->end <= end)
2309 sp_delete(sp, n);
2310 else
2311 n->start = end;
2312 } else {
2313 /* Old policy spanning whole new range. */
2314 if (n->end > end) {
Mel Gorman42288fe2012-12-21 23:10:25 +00002315 if (!n_new)
2316 goto alloc_new;
2317
2318 *mpol_new = *n->policy;
2319 atomic_set(&mpol_new->refcnt, 1);
KOSAKI Motohiro78806392013-03-08 12:43:29 -08002320 sp_node_init(n_new, end, n->end, mpol_new);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002321 n->end = start;
Hillf Danton5ca39572013-03-08 12:43:28 -08002322 sp_insert(sp, n_new);
Mel Gorman42288fe2012-12-21 23:10:25 +00002323 n_new = NULL;
2324 mpol_new = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002325 break;
2326 } else
2327 n->end = start;
2328 }
2329 if (!next)
2330 break;
2331 n = rb_entry(next, struct sp_node, nd);
2332 }
2333 if (new)
2334 sp_insert(sp, new);
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002335 write_unlock(&sp->lock);
Mel Gorman42288fe2012-12-21 23:10:25 +00002336 ret = 0;
2337
2338err_out:
2339 if (mpol_new)
2340 mpol_put(mpol_new);
2341 if (n_new)
2342 kmem_cache_free(sn_cache, n_new);
2343
Mel Gormanb22d1272012-10-08 16:29:17 -07002344 return ret;
Mel Gorman42288fe2012-12-21 23:10:25 +00002345
2346alloc_new:
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002347 write_unlock(&sp->lock);
Mel Gorman42288fe2012-12-21 23:10:25 +00002348 ret = -ENOMEM;
2349 n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2350 if (!n_new)
2351 goto err_out;
2352 mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2353 if (!mpol_new)
2354 goto err_out;
2355 goto restart;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002356}
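/*
 * Worked example of the split above (a sketch): if the tree holds a
 * single node [0,10) with policy A and the caller installs [4,6)
 * with policy B, the old node is truncated to [0,4), a fresh copy
 * of A is inserted as [6,10) via the preallocated n_new/mpol_new,
 * and [4,6)->B lands in between. The restart label re-takes the
 * lock after the GFP_KERNEL allocations, which may sleep.
 */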
2357
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002358/**
2359 * mpol_shared_policy_init - initialize shared policy for inode
2360 * @sp: pointer to inode shared policy
2361 * @mpol: struct mempolicy to install
2362 *
2363 * Install non-NULL @mpol in inode's shared policy rb-tree.
2364 * On entry, the current task has a reference on a non-NULL @mpol.
2365 * This must be released on exit.
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -07002366 * This is called at inode instantiation time, so we can use GFP_KERNEL.
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002367 */
2368void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
Robin Holt7339ff82006-01-14 13:20:48 -08002369{
Miao Xie58568d22009-06-16 15:31:49 -07002370 int ret;
2371
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002372 sp->root = RB_ROOT; /* empty tree == default mempolicy */
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002373 rwlock_init(&sp->lock);
Robin Holt7339ff82006-01-14 13:20:48 -08002374
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002375 if (mpol) {
2376 struct vm_area_struct pvma;
2377 struct mempolicy *new;
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -07002378 NODEMASK_SCRATCH(scratch);
Robin Holt7339ff82006-01-14 13:20:48 -08002379
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -07002380 if (!scratch)
Lee Schermerhorn5c0c1652010-06-29 15:05:30 -07002381 goto put_mpol;
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002382 /* contextualize the tmpfs mount point mempolicy */
2383 new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
Lee Schermerhorn15d77832010-05-24 14:32:04 -07002384 if (IS_ERR(new))
Dan Carpenter0cae3452010-05-25 23:42:58 -07002385 goto free_scratch; /* no valid nodemask intersection */
Miao Xie58568d22009-06-16 15:31:49 -07002386
2387 task_lock(current);
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -07002388 ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
Miao Xie58568d22009-06-16 15:31:49 -07002389 task_unlock(current);
Lee Schermerhorn15d77832010-05-24 14:32:04 -07002390 if (ret)
Lee Schermerhorn5c0c1652010-06-29 15:05:30 -07002391 goto put_new;
Robin Holt7339ff82006-01-14 13:20:48 -08002392
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002393 /* Create pseudo-vma that contains just the policy */
2394 memset(&pvma, 0, sizeof(struct vm_area_struct));
2395 pvma.vm_end = TASK_SIZE; /* policy covers entire file */
2396 mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
Lee Schermerhorn15d77832010-05-24 14:32:04 -07002397
Lee Schermerhorn5c0c1652010-06-29 15:05:30 -07002398put_new:
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002399 mpol_put(new); /* drop initial ref */
Dan Carpenter0cae3452010-05-25 23:42:58 -07002400free_scratch:
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -07002401 NODEMASK_SCRATCH_FREE(scratch);
Lee Schermerhorn5c0c1652010-06-29 15:05:30 -07002402put_mpol:
2403 mpol_put(mpol); /* drop our incoming ref on sb mpol */
Robin Holt7339ff82006-01-14 13:20:48 -08002404 }
2405}
2406
Linus Torvalds1da177e2005-04-16 15:20:36 -07002407int mpol_set_shared_policy(struct shared_policy *info,
2408 struct vm_area_struct *vma, struct mempolicy *npol)
2409{
2410 int err;
2411 struct sp_node *new = NULL;
2412 unsigned long sz = vma_pages(vma);
2413
David Rientjes028fec42008-04-28 02:12:25 -07002414 pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002415 vma->vm_pgoff,
Lee Schermerhorn45c47452008-04-28 02:13:12 -07002416 sz, npol ? npol->mode : -1,
David Rientjes028fec42008-04-28 02:12:25 -07002417 npol ? npol->flags : -1,
David Rientjes00ef2d22013-02-22 16:35:36 -08002418 npol ? nodes_addr(npol->v.nodes)[0] : NUMA_NO_NODE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002419
2420 if (npol) {
2421 new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
2422 if (!new)
2423 return -ENOMEM;
2424 }
2425 err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
2426 if (err && new)
KOSAKI Motohiro63f74ca2012-10-08 16:29:19 -07002427 sp_free(new);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002428 return err;
2429}
2430
2431/* Free a backing policy store on inode delete. */
2432void mpol_free_shared_policy(struct shared_policy *p)
2433{
2434 struct sp_node *n;
2435 struct rb_node *next;
2436
2437 if (!p->root.rb_node)
2438 return;
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002439 write_lock(&p->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002440 next = rb_first(&p->root);
2441 while (next) {
2442 n = rb_entry(next, struct sp_node, nd);
2443 next = rb_next(&n->nd);
KOSAKI Motohiro63f74ca2012-10-08 16:29:19 -07002444 sp_delete(p, n);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002445 }
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002446 write_unlock(&p->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002447}

#ifdef CONFIG_NUMA_BALANCING
static int __initdata numabalancing_override;

static void __init check_numabalancing_enable(void)
{
	bool numabalancing_default = false;

	if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
		numabalancing_default = true;

	/* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
	if (numabalancing_override)
		set_numabalancing_state(numabalancing_override == 1);

	if (num_online_nodes() > 1 && !numabalancing_override) {
		pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n",
			numabalancing_default ? "Enabling" : "Disabling");
		set_numabalancing_state(numabalancing_default);
	}
}

static int __init setup_numabalancing(char *str)
{
	int ret = 0;

	if (!str)
		goto out;

	if (!strcmp(str, "enable")) {
		numabalancing_override = 1;
		ret = 1;
	} else if (!strcmp(str, "disable")) {
		numabalancing_override = -1;
		ret = 1;
	}
out:
	if (!ret)
		pr_warn("Unable to parse numa_balancing=\n");

	return ret;
}
__setup("numa_balancing=", setup_numabalancing);
#else
static inline void __init check_numabalancing_enable(void)
{
}
#endif /* CONFIG_NUMA_BALANCING */

/* assumes fs == KERNEL_DS */
void __init numa_policy_init(void)
{
	nodemask_t interleave_nodes;
	unsigned long largest = 0;
	int nid, prefer = 0;

	policy_cache = kmem_cache_create("numa_policy",
					 sizeof(struct mempolicy),
					 0, SLAB_PANIC, NULL);

	sn_cache = kmem_cache_create("shared_policy_node",
				     sizeof(struct sp_node),
				     0, SLAB_PANIC, NULL);

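	/*
	 * Per-node "preferred" policies consumed by automatic NUMA
	 * balancing: MPOL_F_MOF | MPOL_F_MORON makes faults on pages
	 * governed by them act as migrate-on-fault hints toward the
	 * local node.
	 */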
	for_each_node(nid) {
		preferred_node_policy[nid] = (struct mempolicy) {
			.refcnt = ATOMIC_INIT(1),
			.mode = MPOL_PREFERRED,
			.flags = MPOL_F_MOF | MPOL_F_MORON,
			.v = { .preferred_node = nid, },
		};
	}

	/*
	 * Set interleaving policy for system init. Interleaving is only
	 * enabled across suitably sized nodes (default is >= 16MB of
	 * present memory); if all nodes are smaller, fall back to the
	 * largest node.
	 */
	nodes_clear(interleave_nodes);
	for_each_node_state(nid, N_MEMORY) {
		unsigned long total_pages = node_present_pages(nid);

		/* Preserve the largest node */
		if (largest < total_pages) {
			largest = total_pages;
			prefer = nid;
		}

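		/*
		 * Worked example (illustrative): with 4 KiB pages the
		 * check below requires total_pages >= 4096, i.e. at
		 * least 16 MiB present, before a node joins
		 * interleave_nodes.
		 */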
		/* Interleave this node? */
		if ((total_pages << PAGE_SHIFT) >= (16 << 20))
			node_set(nid, interleave_nodes);
	}

	/* All too small, use the largest */
	if (unlikely(nodes_empty(interleave_nodes)))
		node_set(prefer, interleave_nodes);

	if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
		pr_err("%s: interleaving failed\n", __func__);

	check_numabalancing_enable();
}

/* Reset policy of current process to default */
void numa_default_policy(void)
{
	do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
}

/*
 * Parse and format mempolicy from/to strings
 */

/*
 * "local" is implemented internally by MPOL_PREFERRED with the
 * MPOL_F_LOCAL flag.
 */
static const char * const policy_modes[] =
{
	[MPOL_DEFAULT]    = "default",
	[MPOL_PREFERRED]  = "prefer",
	[MPOL_BIND]       = "bind",
	[MPOL_INTERLEAVE] = "interleave",
	[MPOL_LOCAL]      = "local",
};

#ifdef CONFIG_TMPFS
/**
 * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
 * @str:  string containing mempolicy to parse
 * @mpol:  pointer to struct mempolicy pointer, returned on success.
 *
 * Format of input:
 *	<mode>[=<flags>][:<nodelist>]
 *
 * On success, returns 0, else 1
 */
int mpol_parse_str(char *str, struct mempolicy **mpol)
{
	struct mempolicy *new = NULL;
	unsigned short mode;
	unsigned short mode_flags;
	nodemask_t nodes;
	char *nodelist = strchr(str, ':');
	char *flags = strchr(str, '=');
	int err = 1;

	if (nodelist) {
		/* NUL-terminate mode or flags string */
		*nodelist++ = '\0';
		if (nodelist_parse(nodelist, nodes))
			goto out;
		if (!nodes_subset(nodes, node_states[N_MEMORY]))
			goto out;
	} else
		nodes_clear(nodes);

	if (flags)
		*flags++ = '\0';	/* terminate mode string */

	for (mode = 0; mode < MPOL_MAX; mode++)
		if (!strcmp(str, policy_modes[mode]))
			break;
	if (mode >= MPOL_MAX)
		goto out;

	switch (mode) {
	case MPOL_PREFERRED:
		/*
		 * Insist on a nodelist of one node only
		 */
		if (nodelist) {
			char *rest = nodelist;
			while (isdigit(*rest))
				rest++;
			if (*rest)
				goto out;
		}
		break;
	case MPOL_INTERLEAVE:
		/*
		 * Default to online nodes with memory if no nodelist
		 */
		if (!nodelist)
			nodes = node_states[N_MEMORY];
		break;
	case MPOL_LOCAL:
		/*
		 * Don't allow a nodelist; mpol_new() checks flags
		 */
		if (nodelist)
			goto out;
		mode = MPOL_PREFERRED;
		break;
	case MPOL_DEFAULT:
		/*
		 * Insist on an empty nodelist
		 */
		if (!nodelist)
			err = 0;
		goto out;
	case MPOL_BIND:
		/*
		 * Insist on a nodelist
		 */
		if (!nodelist)
			goto out;
	}

	mode_flags = 0;
	if (flags) {
		/*
		 * Currently, we only support two mutually exclusive
		 * mode flags.
		 */
		if (!strcmp(flags, "static"))
			mode_flags |= MPOL_F_STATIC_NODES;
		else if (!strcmp(flags, "relative"))
			mode_flags |= MPOL_F_RELATIVE_NODES;
		else
			goto out;
	}

	new = mpol_new(mode, mode_flags, &nodes);
	if (IS_ERR(new))
		goto out;

	/*
	 * Save nodes for mpol_to_str() to show the tmpfs mount options
	 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
	 */
	if (mode != MPOL_PREFERRED)
		new->v.nodes = nodes;
	else if (nodelist)
		new->v.preferred_node = first_node(nodes);
	else
		new->flags |= MPOL_F_LOCAL;

	/*
	 * Save nodes for contextualization: this will be used to "clone"
	 * the mempolicy in a specific context [cpuset] at a later time.
	 */
	new->w.user_nodemask = nodes;

	err = 0;

out:
	/* Restore string for error message */
	if (nodelist)
		*--nodelist = ':';
	if (flags)
		*--flags = '=';
	if (!err)
		*mpol = new;
	return err;
}
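
/*
 * Example option strings accepted by mpol_parse_str() above (node
 * numbers are illustrative only):
 *
 *	"default"		no nodelist allowed
 *	"prefer:2"		exactly one node
 *	"bind=static:1,3"	nodelist required
 *	"interleave:0-3"	nodelist defaults to all memory nodes
 *	"local"			no nodelist allowed
 */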
#endif /* CONFIG_TMPFS */

/**
 * mpol_to_str - format a mempolicy structure for printing
 * @buffer:  to contain formatted mempolicy string
 * @maxlen:  length of @buffer
 * @pol:  pointer to mempolicy to be formatted
 *
 * Convert @pol into a string.  If @buffer is too short, truncate the string.
 * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the
 * longest flag, "relative", and to display at least a few node ids.
 */
void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
{
	char *p = buffer;
	nodemask_t nodes = NODE_MASK_NONE;
	unsigned short mode = MPOL_DEFAULT;
	unsigned short flags = 0;

	if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
		mode = pol->mode;
		flags = pol->flags;
	}

	switch (mode) {
	case MPOL_DEFAULT:
		break;
	case MPOL_PREFERRED:
		if (flags & MPOL_F_LOCAL)
			mode = MPOL_LOCAL;
		else
			node_set(pol->v.preferred_node, nodes);
		break;
	case MPOL_BIND:
	case MPOL_INTERLEAVE:
		nodes = pol->v.nodes;
		break;
	default:
		WARN_ON_ONCE(1);
		snprintf(p, maxlen, "unknown");
		return;
	}

	p += snprintf(p, maxlen, "%s", policy_modes[mode]);

	if (flags & MPOL_MODE_FLAGS) {
		p += snprintf(p, buffer + maxlen - p, "=");

		/*
		 * Currently, the only defined flags are mutually exclusive
		 */
		if (flags & MPOL_F_STATIC_NODES)
			p += snprintf(p, buffer + maxlen - p, "static");
		else if (flags & MPOL_F_RELATIVE_NODES)
			p += snprintf(p, buffer + maxlen - p, "relative");
	}

	if (!nodes_empty(nodes))
		p += scnprintf(p, buffer + maxlen - p, ":%*pbl",
			       nodemask_pr_args(&nodes));
}
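
/*
 * Example formatted results (illustrative): "default", "prefer:2",
 * "local", "bind=static:1,3" and "interleave:0-3", i.e. the same forms
 * that mpol_parse_str() accepts above.
 */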