// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/memory.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * demand-loading started 01.12.91 - seems it is high on the list of
 * things wanted, and it should be easy to implement. - Linus
 */

/*
 * Ok, demand-loading was easy, shared pages a little bit trickier. Shared
 * pages started 02.12.91, seems to work. - Linus.
 *
 * Tested sharing by executing about 30 /bin/sh: under the old kernel it
 * would have taken more than the 6M I have free, but it worked well as
 * far as I could see.
 *
 * Also corrected some "invalidate()"s - I wasn't doing enough of them.
 */

/*
 * Real VM (paging to/from disk) started 18.12.91. Much more work and
 * thought has to go into this. Oh, well..
 * 19.12.91  -  works, somewhat. Sometimes I get faults, don't know why.
 *		Found it. Everything seems to work now.
 * 20.12.91  -  Ok, making the swap-device changeable like the root.
 */

/*
 * 05.04.94  -  Multi-page memory management added for v1.1.
 *              Idea by Alex Bligh (alex@cconcepts.co.uk)
 *
 * 16.07.99  -  Support of BIGMEM added by Gerhard Wichert, Siemens AG
 *		(Gerhard.Wichert@pdb.siemens.de)
 *
 * Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
 */

#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/task.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/memremap.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/export.h>
#include <linux/delayacct.h>
#include <linux/init.h>
#include <linux/pfn_t.h>
#include <linux/writeback.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/swapops.h>
#include <linux/elf.h>
#include <linux/gfp.h>
#include <linux/migrate.h>
#include <linux/string.h>
#include <linux/debugfs.h>
#include <linux/userfaultfd_k.h>
#include <linux/dax.h>
#include <linux/oom.h>
#include <linux/numa.h>
#include <linux/perf_event.h>
#include <linux/ptrace.h>
#include <linux/vmalloc.h>
#include <trace/hooks/mm.h>

#include <trace/events/kmem.h>

#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <linux/uaccess.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

#include "pgalloc-track.h"
#include "internal.h"

#if defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS) && !defined(CONFIG_COMPILE_TEST)
#warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid.
#endif

#ifndef CONFIG_NEED_MULTIPLE_NODES
/* use the per-pgdat data instead for discontigmem - mbligh */
unsigned long max_mapnr;
EXPORT_SYMBOL(max_mapnr);

struct page *mem_map;
EXPORT_SYMBOL(mem_map);
#endif

/*
 * A number of key systems in x86 including ioremap() rely on the assumption
 * that high_memory defines the upper bound on direct map memory, the end
 * of ZONE_NORMAL. Under CONFIG_DISCONTIG this means that max_low_pfn and
 * highstart_pfn must be the same; there must be no gap between ZONE_NORMAL
 * and ZONE_HIGHMEM.
 */
void *high_memory;
EXPORT_SYMBOL(high_memory);

/*
 * Randomize the address space (stacks, mmaps, brk, etc.).
 *
 * ( When CONFIG_COMPAT_BRK=y we exclude brk from randomization,
 *   as ancient (libc5 based) binaries can segfault. )
 */
int randomize_va_space __read_mostly =
#ifdef CONFIG_COMPAT_BRK
					1;
#else
					2;
#endif

#ifndef arch_faults_on_old_pte
static inline bool arch_faults_on_old_pte(void)
{
	/*
	 * Architectures without a hardware access-flag feature need to
	 * implement their own helper. By default, "true" means a page
	 * fault will be taken on an old pte.
	 */
	return true;
}
#endif

static int __init disable_randmaps(char *s)
{
	randomize_va_space = 0;
	return 1;
}
__setup("norandmaps", disable_randmaps);

unsigned long zero_pfn __read_mostly;
EXPORT_SYMBOL(zero_pfn);

unsigned long highest_memmap_pfn __read_mostly;

/*
 * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
 */
static int __init init_zero_pfn(void)
{
	zero_pfn = page_to_pfn(ZERO_PAGE(0));
	return 0;
}
core_initcall(init_zero_pfn);

/*
 * Only trace rss_stat when the counter crosses a 512KB boundary.
 * Smaller changes may be lost unless every small change itself
 * crosses into or back across a 512KB boundary.
 */
#define TRACE_MM_COUNTER_THRESHOLD 128

void mm_trace_rss_stat(struct mm_struct *mm, int member, long count,
		       long value)
{
	long thresh_mask = ~(TRACE_MM_COUNTER_THRESHOLD - 1);

	/* Threshold roll-over, trace it */
	if ((count & thresh_mask) != ((count - value) & thresh_mask))
		trace_rss_stat(mm, member, count);
}
EXPORT_SYMBOL_GPL(mm_trace_rss_stat);

#if defined(SPLIT_RSS_COUNTING)

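/*
 * Fold the calling task's per-thread rss_stat deltas into the mm's
 * counters and reset the per-thread values.
 */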
void sync_mm_rss(struct mm_struct *mm)
{
	int i;

	for (i = 0; i < NR_MM_COUNTERS; i++) {
		if (current->rss_stat.count[i]) {
			add_mm_counter(mm, i, current->rss_stat.count[i]);
			current->rss_stat.count[i] = 0;
		}
	}
	current->rss_stat.events = 0;
}

static void add_mm_counter_fast(struct mm_struct *mm, int member, int val)
{
	struct task_struct *task = current;

	if (likely(task->mm == mm))
		task->rss_stat.count[member] += val;
	else
		add_mm_counter(mm, member, val);
}
#define inc_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, 1)
#define dec_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, -1)

/* sync counter once per 64 page faults */
#define TASK_RSS_EVENTS_THRESH	(64)
static void check_sync_rss_stat(struct task_struct *task)
{
	if (unlikely(task != current))
		return;
	if (unlikely(task->rss_stat.events++ > TASK_RSS_EVENTS_THRESH))
		sync_mm_rss(task->mm);
}
#else /* SPLIT_RSS_COUNTING */

#define inc_mm_counter_fast(mm, member) inc_mm_counter(mm, member)
#define dec_mm_counter_fast(mm, member) dec_mm_counter(mm, member)

static void check_sync_rss_stat(struct task_struct *task)
{
}

#endif /* SPLIT_RSS_COUNTING */

/*
 * Note: this doesn't free the actual pages themselves. That
 * has been handled earlier when unmapping all the memory regions.
 */
static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
			   unsigned long addr)
{
	pgtable_t token = pmd_pgtable(*pmd);
	pmd_clear(pmd);
	pte_free_tlb(tlb, token, addr);
	mm_dec_nr_ptes(tlb->mm);
}

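/*
 * Clear and free the pte tables under one pud for [addr, end), then free
 * the pmd table itself if no part of [floor, ceiling) still needs it.
 */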
static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		free_pte_range(tlb, pmd, addr);
	} while (pmd++, addr = next, addr != end);

	start &= PUD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PUD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd, start);
	mm_dec_nr_pmds(tlb->mm);
}

static inline void free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;

	start = addr;
	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		free_pmd_range(tlb, pud, addr, next, floor, ceiling);
	} while (pud++, addr = next, addr != end);

	start &= P4D_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= P4D_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(p4d, start);
	p4d_clear(p4d);
	pud_free_tlb(tlb, pud, start);
	mm_dec_nr_puds(tlb->mm);
}

static inline void free_p4d_range(struct mmu_gather *tlb, pgd_t *pgd,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	p4d_t *p4d;
	unsigned long next;
	unsigned long start;

	start = addr;
	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none_or_clear_bad(p4d))
			continue;
		free_pud_range(tlb, p4d, addr, next, floor, ceiling);
	} while (p4d++, addr = next, addr != end);

	start &= PGDIR_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PGDIR_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	p4d = p4d_offset(pgd, start);
	pgd_clear(pgd);
	p4d_free_tlb(tlb, p4d, start);
}

/*
 * This function frees user-level page tables of a process.
 */
void free_pgd_range(struct mmu_gather *tlb,
			unsigned long addr, unsigned long end,
			unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	unsigned long next;

	/*
	 * The next few lines have given us lots of grief...
	 *
	 * Why are we testing PMD* at this top level?  Because often
	 * there will be no work to do at all, and we'd prefer not to
	 * go all the way down to the bottom just to discover that.
	 *
	 * Why all these "- 1"s?  Because 0 represents both the bottom
	 * of the address space and the top of it (using -1 for the
	 * top wouldn't help much: the masks would do the wrong thing).
	 * The rule is that addr 0 and floor 0 refer to the bottom of
	 * the address space, but end 0 and ceiling 0 refer to the top.
	 * Comparisons need to use "end - 1" and "ceiling - 1" (though
	 * that end 0 case should be mythical).
	 *
	 * Wherever addr is brought up or ceiling brought down, we must
	 * be careful to reject "the opposite 0" before it confuses the
	 * subsequent tests.  But what about where end is brought down
	 * by PMD_SIZE below? no, end can't go down to 0 there.
	 *
	 * Whereas we round start (addr) and ceiling down, by different
	 * masks at different levels, in order to test whether a table
	 * now has no other vmas using it, so can be freed, we don't
	 * bother to round floor or end up - the tests don't need that.
	 */

	addr &= PMD_MASK;
	if (addr < floor) {
		addr += PMD_SIZE;
		if (!addr)
			return;
	}
	if (ceiling) {
		ceiling &= PMD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		end -= PMD_SIZE;
	if (addr > end - 1)
		return;
	/*
	 * We add page table cache pages with PAGE_SIZE
	 * (see pte_free_tlb()), so flush the tlb if we need to.
	 */
	tlb_change_page_size(tlb, PAGE_SIZE);
	pgd = pgd_offset(tlb->mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		free_p4d_range(tlb, pgd, addr, next, floor, ceiling);
	} while (pgd++, addr = next, addr != end);
}

void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
		unsigned long floor, unsigned long ceiling)
{
	while (vma) {
		struct vm_area_struct *next = vma->vm_next;
		unsigned long addr = vma->vm_start;

		/*
		 * Hide vma from rmap and truncate_pagecache before freeing
		 * pgtables
		 */
		unlink_anon_vmas(vma);
		unlink_file_vma(vma);

		if (is_vm_hugetlb_page(vma)) {
			hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
				floor, next ? next->vm_start : ceiling);
		} else {
			/*
			 * Optimization: gather nearby vmas into one call down
			 */
			while (next && next->vm_start <= vma->vm_end + PMD_SIZE
			       && !is_vm_hugetlb_page(next)) {
				vma = next;
				next = vma->vm_next;
				unlink_anon_vmas(vma);
				unlink_file_vma(vma);
			}
			free_pgd_range(tlb, addr, vma->vm_end,
				floor, next ? next->vm_start : ceiling);
		}
		vma = next;
	}
}

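/*
 * Allocate a new pte page table and install it in *pmd, unless another
 * thread has populated the pmd first.
 */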
int __pte_alloc(struct mm_struct *mm, pmd_t *pmd)
{
	spinlock_t *ptl;
	pgtable_t new = pte_alloc_one(mm);
	if (!new)
		return -ENOMEM;

	/*
	 * Ensure all pte setup (eg. pte page lock and page clearing) are
	 * visible before the pte is made visible to other CPUs by being
	 * put into page tables.
	 *
	 * The other side of the story is the pointer chasing in the page
	 * table walking code (when walking the page table without locking;
	 * ie. most of the time). Fortunately, these data accesses consist
	 * of a chain of data-dependent loads, meaning most CPUs (alpha
	 * being the notable exception) will already guarantee loads are
	 * seen in-order. See the alpha page table accessors for the
	 * smp_rmb() barriers in page table walking code.
	 */
	smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */

	ptl = pmd_lock(mm, pmd);
	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
		mm_inc_nr_ptes(mm);
		pmd_populate(mm, pmd, new);
		new = NULL;
	}
	spin_unlock(ptl);
	if (new)
		pte_free(mm, new);
	return 0;
}

int __pte_alloc_kernel(pmd_t *pmd)
{
	pte_t *new = pte_alloc_one_kernel(&init_mm);
	if (!new)
		return -ENOMEM;

	smp_wmb(); /* See comment in __pte_alloc */

	spin_lock(&init_mm.page_table_lock);
	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
		pmd_populate_kernel(&init_mm, pmd, new);
		new = NULL;
	}
	spin_unlock(&init_mm.page_table_lock);
	if (new)
		pte_free_kernel(&init_mm, new);
	return 0;
}

static inline void init_rss_vec(int *rss)
{
	memset(rss, 0, sizeof(int) * NR_MM_COUNTERS);
}

static inline void add_mm_rss_vec(struct mm_struct *mm, int *rss)
{
	int i;

	if (current->mm == mm)
		sync_mm_rss(mm);
	for (i = 0; i < NR_MM_COUNTERS; i++)
		if (rss[i])
			add_mm_counter(mm, i, rss[i]);
}

/*
 * This function is called to print an error when a bad pte
 * is found. For example, we might have a PFN-mapped pte in
 * a region that doesn't allow it.
 *
 * The calling function must still handle the error.
 */
static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
			  pte_t pte, struct page *page)
{
	pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
	p4d_t *p4d = p4d_offset(pgd, addr);
	pud_t *pud = pud_offset(p4d, addr);
	pmd_t *pmd = pmd_offset(pud, addr);
	struct address_space *mapping;
	pgoff_t index;
	static unsigned long resume;
	static unsigned long nr_shown;
	static unsigned long nr_unshown;

	/*
	 * Allow a burst of 60 reports, then keep quiet for that minute;
	 * or allow a steady drip of one report per second.
	 */
	if (nr_shown == 60) {
		if (time_before(jiffies, resume)) {
			nr_unshown++;
			return;
		}
		if (nr_unshown) {
			pr_alert("BUG: Bad page map: %lu messages suppressed\n",
				 nr_unshown);
			nr_unshown = 0;
		}
		nr_shown = 0;
	}
	if (nr_shown++ == 0)
		resume = jiffies + 60 * HZ;

	mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL;
	index = linear_page_index(vma, addr);

	pr_alert("BUG: Bad page map in process %s  pte:%08llx pmd:%08llx\n",
		 current->comm,
		 (long long)pte_val(pte), (long long)pmd_val(*pmd));
	if (page)
		dump_page(page, "bad pte");
	pr_alert("addr:%px vm_flags:%08lx anon_vma:%px mapping:%px index:%lx\n",
		 (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);
	pr_alert("file:%pD fault:%ps mmap:%ps readpage:%ps\n",
		 vma->vm_file,
		 vma->vm_ops ? vma->vm_ops->fault : NULL,
		 vma->vm_file ? vma->vm_file->f_op->mmap : NULL,
		 mapping ? mapping->a_ops->readpage : NULL);
	dump_stack();
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}

/*
 * vm_normal_page -- This function gets the "struct page" associated with a pte.
 *
 * "Special" mappings do not wish to be associated with a "struct page" (either
 * it doesn't exist, or it exists but they don't want to touch it). In this
 * case, NULL is returned here. "Normal" mappings do have a struct page.
 *
 * There are 2 broad cases. Firstly, an architecture may define a pte_special()
 * pte bit, in which case this function is trivial. Secondly, an architecture
 * may not have a spare pte bit, which requires a more complicated scheme,
 * described below.
 *
 * A raw VM_PFNMAP mapping (ie. one that is not COWed) is always considered a
 * special mapping (even if there are underlying and valid "struct pages").
 * COWed pages of a VM_PFNMAP are always normal.
 *
 * The way we recognize COWed pages within VM_PFNMAP mappings is through the
 * rules set up by "remap_pfn_range()": the vma will have the VM_PFNMAP bit
 * set, and the vm_pgoff will point to the first PFN mapped: thus every special
 * mapping will always honor the rule
 *
 *	pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
 *
 * And for normal mappings this is false.
 *
 * This restricts such mappings to be a linear translation from virtual address
 * to pfn. To get around this restriction, we allow arbitrary mappings so long
 * as the vma is not a COW mapping; in that case, we know that all ptes are
 * special (because none can have been COWed).
 *
 *
 * In order to support COW of arbitrary special mappings, we have VM_MIXEDMAP.
 *
 * VM_MIXEDMAP mappings can likewise contain memory with or without "struct
 * page" backing, however the difference is that _all_ pages with a struct
 * page (that is, those where pfn_valid is true) are refcounted and considered
 * normal pages by the VM. The disadvantage is that pages are refcounted
 * (which can be slower and simply not an option for some PFNMAP users). The
 * advantage is that we don't have to follow the strict linearity rule of
 * PFNMAP mappings in order to support COWable mappings.
 *
 */
struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
			    pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);

	if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL)) {
		if (likely(!pte_special(pte)))
			goto check_pfn;
		if (vma->vm_ops && vma->vm_ops->find_special_page)
			return vma->vm_ops->find_special_page(vma, addr);
		if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
			return NULL;
		if (is_zero_pfn(pfn))
			return NULL;
		if (pte_devmap(pte))
			return NULL;

		print_bad_pte(vma, addr, pte, NULL);
		return NULL;
	}

	/* !CONFIG_ARCH_HAS_PTE_SPECIAL case follows: */

	if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
		if (vma->vm_flags & VM_MIXEDMAP) {
			if (!pfn_valid(pfn))
				return NULL;
			goto out;
		} else {
			unsigned long off;
			off = (addr - vma->vm_start) >> PAGE_SHIFT;
			if (pfn == vma->vm_pgoff + off)
				return NULL;
			if (!is_cow_mapping(vma->vm_flags))
				return NULL;
		}
	}

	if (is_zero_pfn(pfn))
		return NULL;

check_pfn:
	if (unlikely(pfn > highest_memmap_pfn)) {
		print_bad_pte(vma, addr, pte, NULL);
		return NULL;
	}

	/*
	 * NOTE! We still have PageReserved() pages in the page tables.
	 * eg. VDSO mappings can cause them to exist.
	 */
out:
	return pfn_to_page(pfn);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
				pmd_t pmd)
{
	unsigned long pfn = pmd_pfn(pmd);

	/*
	 * There is no pmd_special() but there may be special pmds, e.g.
	 * in a direct-access (dax) mapping, so let's just replicate the
	 * !CONFIG_ARCH_HAS_PTE_SPECIAL case from vm_normal_page() here.
	 */
	if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
		if (vma->vm_flags & VM_MIXEDMAP) {
			if (!pfn_valid(pfn))
				return NULL;
			goto out;
		} else {
			unsigned long off;
			off = (addr - vma->vm_start) >> PAGE_SHIFT;
			if (pfn == vma->vm_pgoff + off)
				return NULL;
			if (!is_cow_mapping(vma->vm_flags))
				return NULL;
		}
	}

	if (pmd_devmap(pmd))
		return NULL;
	if (is_huge_zero_pmd(pmd))
		return NULL;
	if (unlikely(pfn > highest_memmap_pfn))
		return NULL;

	/*
	 * NOTE! We still have PageReserved() pages in the page tables.
	 * eg. VDSO mappings can cause them to exist.
	 */
out:
	return pfn_to_page(pfn);
}
#endif

/*
 * copy one vm_area from one task to the other. Assumes the page tables
 * already present in the new task are cleared in the whole range
 * covered by this vma.
 */

static unsigned long
copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma,
		unsigned long addr, int *rss)
{
	unsigned long vm_flags = vma->vm_flags;
	pte_t pte = *src_pte;
	struct page *page;
	swp_entry_t entry = pte_to_swp_entry(pte);

	if (likely(!non_swap_entry(entry))) {
		if (swap_duplicate(entry) < 0)
			return entry.val;

		/* make sure dst_mm is on swapoff's mmlist. */
		if (unlikely(list_empty(&dst_mm->mmlist))) {
			spin_lock(&mmlist_lock);
			if (list_empty(&dst_mm->mmlist))
				list_add(&dst_mm->mmlist,
						&src_mm->mmlist);
			spin_unlock(&mmlist_lock);
		}
		rss[MM_SWAPENTS]++;
	} else if (is_migration_entry(entry)) {
		page = migration_entry_to_page(entry);

		rss[mm_counter(page)]++;

		if (is_write_migration_entry(entry) &&
				is_cow_mapping(vm_flags)) {
			/*
			 * COW mappings require pages in both
			 * parent and child to be set to read.
			 */
			make_migration_entry_read(&entry);
			pte = swp_entry_to_pte(entry);
			if (pte_swp_soft_dirty(*src_pte))
				pte = pte_swp_mksoft_dirty(pte);
			if (pte_swp_uffd_wp(*src_pte))
				pte = pte_swp_mkuffd_wp(pte);
			set_pte_at(src_mm, addr, src_pte, pte);
		}
	} else if (is_device_private_entry(entry)) {
		page = device_private_entry_to_page(entry);

		/*
		 * Update rss count even for unaddressable pages, as
		 * they should be treated just like normal pages in this
		 * respect.
		 *
		 * We will likely want to have some new rss counters
		 * for unaddressable pages, at some point. But for now
		 * keep things as they are.
		 */
		get_page(page);
		rss[mm_counter(page)]++;
		page_dup_rmap(page, false);

		/*
		 * We do not preserve soft-dirty information, because so
		 * far, checkpoint/restore is the only feature that
		 * requires that. And checkpoint/restore does not work
		 * when a device driver is involved (you cannot easily
		 * save and restore device driver state).
		 */
		if (is_write_device_private_entry(entry) &&
				is_cow_mapping(vm_flags)) {
			make_device_private_entry_read(&entry);
			pte = swp_entry_to_pte(entry);
			if (pte_swp_uffd_wp(*src_pte))
				pte = pte_swp_mkuffd_wp(pte);
			set_pte_at(src_mm, addr, src_pte, pte);
		}
	}
	set_pte_at(dst_mm, addr, dst_pte, pte);
	return 0;
}

/*
 * Copy a present and normal page if necessary.
 *
 * NOTE! The usual case is that this doesn't need to do
 * anything, and can just return a positive value. That
 * will let the caller know that it can just increase
 * the page refcount and re-use the pte the traditional
 * way.
 *
 * But _if_ we need to copy it because it needs to be
 * pinned in the parent (and the child should get its own
 * copy rather than just a reference to the same page),
 * we'll do that here and return zero to let the caller
 * know we're done.
 *
 * And if we need a pre-allocated page but don't yet have
 * one, return a negative error to let the preallocation
 * code know so that it can do so outside the page table
 * lock.
 */
static inline int
copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
		  pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss,
		  struct page **prealloc, pte_t pte, struct page *page)
{
	struct mm_struct *src_mm = src_vma->vm_mm;
	struct page *new_page;

	if (!is_cow_mapping(src_vma->vm_flags))
		return 1;

	/*
	 * What we want to do is to check whether this page may
	 * have been pinned by the parent process.  If so,
	 * instead of wrprotect the pte on both sides, we copy
	 * the page immediately so that we'll always guarantee
	 * the pinned page won't be randomly replaced in the
	 * future.
	 *
	 * The page pinning checks are just "has this mm ever
	 * seen pinning", along with the (inexact) check of
	 * the page count. That might give false positives for
	 * pinning, but it will work correctly.
	 */
	if (likely(!atomic_read(&src_mm->has_pinned)))
		return 1;
	if (likely(!page_maybe_dma_pinned(page)))
		return 1;

	new_page = *prealloc;
	if (!new_page)
		return -EAGAIN;

	/*
	 * We have a prealloc page, all good!  Take it
	 * over and copy the page & arm it.
	 */
	*prealloc = NULL;
	copy_user_highpage(new_page, page, addr, src_vma);
	__SetPageUptodate(new_page);
	page_add_new_anon_rmap(new_page, dst_vma, addr, false);
	lru_cache_add_inactive_or_unevictable(new_page, dst_vma);
	rss[mm_counter(new_page)]++;

	/* All done, just insert the new page copy in the child */
	pte = mk_pte(new_page, dst_vma->vm_page_prot);
	pte = maybe_mkwrite(pte_mkdirty(pte), dst_vma);
	set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);
	return 0;
}

/*
 * Copy one pte. Returns 0 if succeeded, or -EAGAIN if one preallocated page
 * is required to copy this pte.
 */
static inline int
copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
		 pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss,
		 struct page **prealloc)
{
	struct mm_struct *src_mm = src_vma->vm_mm;
	unsigned long vm_flags = src_vma->vm_flags;
	pte_t pte = *src_pte;
	struct page *page;

	page = vm_normal_page(src_vma, addr, pte);
	if (page) {
		int retval;

		retval = copy_present_page(dst_vma, src_vma, dst_pte, src_pte,
					   addr, rss, prealloc, pte, page);
		if (retval <= 0)
			return retval;

		get_page(page);
		page_dup_rmap(page, false);
		rss[mm_counter(page)]++;
	}

	/*
	 * If it's a COW mapping, write protect it both
	 * in the parent and the child
	 */
	if (is_cow_mapping(vm_flags) && pte_write(pte)) {
		ptep_set_wrprotect(src_mm, addr, src_pte);
		pte = pte_wrprotect(pte);
	}

	/*
	 * If it's a shared mapping, mark it clean in
	 * the child
	 */
	if (vm_flags & VM_SHARED)
		pte = pte_mkclean(pte);
	pte = pte_mkold(pte);

	/*
	 * Make sure the _PAGE_UFFD_WP bit is cleared if the new VMA
	 * does not have the VM_UFFD_WP, which means that the uffd
	 * fork event is not enabled.
	 */
	if (!(vm_flags & VM_UFFD_WP))
		pte = pte_clear_uffd_wp(pte);

	set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);
	return 0;
}

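/*
 * Preallocate and charge a page for copy_present_pte(), outside the page
 * table locks; returns NULL if allocation or the memcg charge fails.
 */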
static inline struct page *
page_copy_prealloc(struct mm_struct *src_mm, struct vm_area_struct *vma,
		   unsigned long addr)
{
	struct page *new_page;

	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, addr);
	if (!new_page)
		return NULL;

	if (mem_cgroup_charge(new_page, src_mm, GFP_KERNEL)) {
		put_page(new_page);
		return NULL;
	}
	cgroup_throttle_swaprate(new_page, GFP_KERNEL);

	return new_page;
}

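/*
 * Copy one pte page's worth of mappings from the parent to the child,
 * dropping the page table locks to preallocate a page or add a swap count
 * continuation when needed, then retrying from where it left off.
 */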
static int
copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
	       pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
	       unsigned long end)
{
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	struct mm_struct *src_mm = src_vma->vm_mm;
	pte_t *orig_src_pte, *orig_dst_pte;
	pte_t *src_pte, *dst_pte;
	spinlock_t *src_ptl, *dst_ptl;
	int progress, ret = 0;
	int rss[NR_MM_COUNTERS];
	swp_entry_t entry = (swp_entry_t){0};
	struct page *prealloc = NULL;

again:
	progress = 0;
	init_rss_vec(rss);

	dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
	if (!dst_pte) {
		ret = -ENOMEM;
		goto out;
	}
	src_pte = pte_offset_map(src_pmd, addr);
	src_ptl = pte_lockptr(src_mm, src_pmd);
	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
	orig_src_pte = src_pte;
	orig_dst_pte = dst_pte;
	arch_enter_lazy_mmu_mode();

	do {
		/*
		 * We are holding two locks at this point - either of them
		 * could generate latencies in another task on another CPU.
		 */
		if (progress >= 32) {
			progress = 0;
			if (need_resched() ||
			    spin_needbreak(src_ptl) || spin_needbreak(dst_ptl))
				break;
		}
		if (pte_none(*src_pte)) {
			progress++;
			continue;
		}
		if (unlikely(!pte_present(*src_pte))) {
			entry.val = copy_nonpresent_pte(dst_mm, src_mm,
							dst_pte, src_pte,
							src_vma, addr, rss);
			if (entry.val)
				break;
			progress += 8;
			continue;
		}
		/* copy_present_pte() will clear `*prealloc' if consumed */
		ret = copy_present_pte(dst_vma, src_vma, dst_pte, src_pte,
				       addr, rss, &prealloc);
		/*
		 * If we need a pre-allocated page for this pte, drop the
		 * locks, allocate, and try again.
		 */
		if (unlikely(ret == -EAGAIN))
			break;
		if (unlikely(prealloc)) {
			/*
			 * pre-alloc page cannot be reused by next time so as
			 * to strictly follow mempolicy (e.g., alloc_page_vma()
			 * will allocate page according to address).  This
			 * could only happen if one pinned pte changed.
			 */
			put_page(prealloc);
			prealloc = NULL;
		}
		progress += 8;
	} while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);

	arch_leave_lazy_mmu_mode();
	spin_unlock(src_ptl);
	pte_unmap(orig_src_pte);
	add_mm_rss_vec(dst_mm, rss);
	pte_unmap_unlock(orig_dst_pte, dst_ptl);
	cond_resched();

	if (entry.val) {
		if (add_swap_count_continuation(entry, GFP_KERNEL) < 0) {
			ret = -ENOMEM;
			goto out;
		}
		entry.val = 0;
	} else if (ret) {
		WARN_ON_ONCE(ret != -EAGAIN);
		prealloc = page_copy_prealloc(src_mm, src_vma, addr);
		if (!prealloc)
			return -ENOMEM;
		/* We've captured and resolved the error. Reset, try again. */
		ret = 0;
	}
	if (addr != end)
		goto again;
out:
	if (unlikely(prealloc))
		put_page(prealloc);
	return ret;
}

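/*
 * Copy the pmd entries covering [addr, end) from the parent to the child,
 * handling huge/devmap pmds and descending into the pte tables.
 */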
static inline int
copy_pmd_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
	       pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
	       unsigned long end)
{
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	struct mm_struct *src_mm = src_vma->vm_mm;
	pmd_t *src_pmd, *dst_pmd;
	unsigned long next;

	dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
	if (!dst_pmd)
		return -ENOMEM;
	src_pmd = pmd_offset(src_pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (is_swap_pmd(*src_pmd) || pmd_trans_huge(*src_pmd)
			|| pmd_devmap(*src_pmd)) {
			int err;
			VM_BUG_ON_VMA(next-addr != HPAGE_PMD_SIZE, src_vma);
			err = copy_huge_pmd(dst_mm, src_mm,
					    dst_pmd, src_pmd, addr, src_vma);
			if (err == -ENOMEM)
				return -ENOMEM;
			if (!err)
				continue;
			/* fall through */
		}
		if (pmd_none_or_clear_bad(src_pmd))
			continue;
		if (copy_pte_range(dst_vma, src_vma, dst_pmd, src_pmd,
				   addr, next))
			return -ENOMEM;
	} while (dst_pmd++, src_pmd++, addr = next, addr != end);
	return 0;
}

static inline int
copy_pud_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
	       p4d_t *dst_p4d, p4d_t *src_p4d, unsigned long addr,
	       unsigned long end)
{
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	struct mm_struct *src_mm = src_vma->vm_mm;
	pud_t *src_pud, *dst_pud;
	unsigned long next;

	dst_pud = pud_alloc(dst_mm, dst_p4d, addr);
	if (!dst_pud)
		return -ENOMEM;
	src_pud = pud_offset(src_p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_trans_huge(*src_pud) || pud_devmap(*src_pud)) {
			int err;

			VM_BUG_ON_VMA(next-addr != HPAGE_PUD_SIZE, src_vma);
			err = copy_huge_pud(dst_mm, src_mm,
					    dst_pud, src_pud, addr, src_vma);
			if (err == -ENOMEM)
				return -ENOMEM;
			if (!err)
				continue;
			/* fall through */
		}
		if (pud_none_or_clear_bad(src_pud))
			continue;
		if (copy_pmd_range(dst_vma, src_vma, dst_pud, src_pud,
				   addr, next))
			return -ENOMEM;
	} while (dst_pud++, src_pud++, addr = next, addr != end);
	return 0;
}

static inline int
copy_p4d_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
	       pgd_t *dst_pgd, pgd_t *src_pgd, unsigned long addr,
	       unsigned long end)
{
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	p4d_t *src_p4d, *dst_p4d;
	unsigned long next;

	dst_p4d = p4d_alloc(dst_mm, dst_pgd, addr);
	if (!dst_p4d)
		return -ENOMEM;
	src_p4d = p4d_offset(src_pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none_or_clear_bad(src_p4d))
			continue;
		if (copy_pud_range(dst_vma, src_vma, dst_p4d, src_p4d,
				   addr, next))
			return -ENOMEM;
	} while (dst_p4d++, src_p4d++, addr = next, addr != end);
	return 0;
}

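/*
 * Copy the page tables covering @src_vma from the parent mm to the child
 * mm at fork time; vmas whose ptes can simply be refilled by page faults
 * are skipped.
 */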
int
copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
{
	pgd_t *src_pgd, *dst_pgd;
	unsigned long next;
	unsigned long addr = src_vma->vm_start;
	unsigned long end = src_vma->vm_end;
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	struct mm_struct *src_mm = src_vma->vm_mm;
	struct mmu_notifier_range range;
	bool is_cow;
	int ret;

	/*
	 * Don't copy ptes where a page fault will fill them correctly.
	 * Fork becomes much lighter when there are big shared or private
	 * readonly mappings. The tradeoff is that copy_page_range is more
	 * efficient than faulting.
	 */
	if (!(src_vma->vm_flags & (VM_HUGETLB | VM_PFNMAP | VM_MIXEDMAP)) &&
	    !src_vma->anon_vma)
		return 0;

	if (is_vm_hugetlb_page(src_vma))
		return copy_hugetlb_page_range(dst_mm, src_mm, src_vma);

	if (unlikely(src_vma->vm_flags & VM_PFNMAP)) {
		/*
		 * We do not free on error cases below as remove_vma
		 * gets called on error from higher level routine
		 */
		ret = track_pfn_copy(src_vma);
		if (ret)
			return ret;
	}

	/*
	 * We need to invalidate the secondary MMU mappings only when
	 * there could be a permission downgrade on the ptes of the
	 * parent mm. And a permission downgrade will only happen if
	 * is_cow_mapping() returns true.
	 */
	is_cow = is_cow_mapping(src_vma->vm_flags);

	if (is_cow) {
		mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
					0, src_vma, src_mm, addr, end);
		mmu_notifier_invalidate_range_start(&range);
	}

	ret = 0;
	dst_pgd = pgd_offset(dst_mm, addr);
	src_pgd = pgd_offset(src_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(src_pgd))
			continue;
		if (unlikely(copy_p4d_range(dst_vma, src_vma, dst_pgd, src_pgd,
					    addr, next))) {
			ret = -ENOMEM;
			break;
		}
	} while (dst_pgd++, src_pgd++, addr = next, addr != end);

	if (is_cow)
		mmu_notifier_invalidate_range_end(&range);
	return ret;
}

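/*
 * Unmap the pte entries in [addr, end), accumulating rss updates and
 * batching page frees through the mmu_gather.
 */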
static unsigned long zap_pte_range(struct mmu_gather *tlb,
				struct vm_area_struct *vma, pmd_t *pmd,
				unsigned long addr, unsigned long end,
				struct zap_details *details)
{
	struct mm_struct *mm = tlb->mm;
	int force_flush = 0;
	int rss[NR_MM_COUNTERS];
	spinlock_t *ptl;
	pte_t *start_pte;
	pte_t *pte;
	swp_entry_t entry;

	tlb_change_page_size(tlb, PAGE_SIZE);
again:
	init_rss_vec(rss);
	start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	pte = start_pte;
	flush_tlb_batched_pending(mm);
	arch_enter_lazy_mmu_mode();
	do {
		pte_t ptent = *pte;
		if (pte_none(ptent))
			continue;

		if (need_resched())
			break;

		if (pte_present(ptent)) {
			struct page *page;

			page = vm_normal_page(vma, addr, ptent);
			if (unlikely(details) && page) {
				/*
				 * unmap_shared_mapping_pages() wants to
				 * invalidate cache without truncating:
				 * unmap shared but keep private pages.
				 */
				if (details->check_mapping &&
				    details->check_mapping != page_rmapping(page))
					continue;
			}
			ptent = ptep_get_and_clear_full(mm, addr, pte,
							tlb->fullmm);
			tlb_remove_tlb_entry(tlb, pte, addr);
			if (unlikely(!page))
				continue;

			if (!PageAnon(page)) {
				if (pte_dirty(ptent)) {
					force_flush = 1;
					set_page_dirty(page);
				}
				if (pte_young(ptent) &&
				    likely(!(vma->vm_flags & VM_SEQ_READ)))
					mark_page_accessed(page);
			}
			rss[mm_counter(page)]--;
			page_remove_rmap(page, false);
			if (unlikely(page_mapcount(page) < 0))
				print_bad_pte(vma, addr, ptent, page);
			if (unlikely(__tlb_remove_page(tlb, page))) {
				force_flush = 1;
				addr += PAGE_SIZE;
				break;
			}
			continue;
		}

		entry = pte_to_swp_entry(ptent);
		if (is_device_private_entry(entry)) {
			struct page *page = device_private_entry_to_page(entry);

			if (unlikely(details && details->check_mapping)) {
1283 /*
1284 * unmap_shared_mapping_pages() wants to
1285 * invalidate cache without truncating:
1286 * unmap shared but keep private pages.
1287 */
1288 if (details->check_mapping !=
1289 page_rmapping(page))
1290 continue;
1291 }
1292
1293 pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
1294 rss[mm_counter(page)]--;
1295 page_remove_rmap(page, false);
1296 put_page(page);
1297 continue;
1298 }
1299
Kirill A. Shutemov3e8715f2017-02-22 15:46:34 -08001300 /* If details->check_mapping, we leave swap entries. */
1301 if (unlikely(details))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001302 continue;
KAMEZAWA Hiroyukib084d432010-03-05 13:41:42 -08001303
Kirill A. Shutemov8a5f14a2015-02-10 14:09:49 -08001304 if (!non_swap_entry(entry))
1305 rss[MM_SWAPENTS]--;
1306 else if (is_migration_entry(entry)) {
1307 struct page *page;
Konstantin Khlebnikov9f9f1ac2012-01-20 14:34:24 -08001308
Kirill A. Shutemov8a5f14a2015-02-10 14:09:49 -08001309 page = migration_entry_to_page(entry);
Jerome Marchandeca56ff2016-01-14 15:19:26 -08001310 rss[mm_counter(page)]--;
KAMEZAWA Hiroyukib084d432010-03-05 13:41:42 -08001311 }
Kirill A. Shutemov8a5f14a2015-02-10 14:09:49 -08001312 if (unlikely(!free_swap_and_cache(entry)))
1313 print_bad_pte(vma, addr, ptent, NULL);
Zachary Amsden9888a1c2006-09-30 23:29:31 -07001314 pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
Peter Zijlstra97a89412011-05-24 17:12:04 -07001315 } while (pte++, addr += PAGE_SIZE, addr != end);
Hugh Dickinsae859762005-10-29 18:16:05 -07001316
KAMEZAWA Hiroyukid559db02010-03-05 13:41:39 -08001317 add_mm_rss_vec(mm, rss);
Zachary Amsden6606c3e2006-09-30 23:29:33 -07001318 arch_leave_lazy_mmu_mode();
Robin Holt51c6f662005-11-13 16:06:42 -08001319
Linus Torvalds1cf35d42014-04-25 16:05:40 -07001320 /* Do the actual TLB flush before dropping ptl */
Will Deaconfb7332a2014-10-29 10:03:09 +00001321 if (force_flush)
Linus Torvalds1cf35d42014-04-25 16:05:40 -07001322 tlb_flush_mmu_tlbonly(tlb);
Linus Torvalds1cf35d42014-04-25 16:05:40 -07001323 pte_unmap_unlock(start_pte, ptl);
1324
1325 /*
1326 * If we forced a TLB flush (either due to running out of
1327 * batch buffers or because we needed to flush dirty TLB
1328 * entries before releasing the ptl), free the batched
1329 * memory too. Restart if we didn't do everything.
1330 */
1331 if (force_flush) {
1332 force_flush = 0;
Peter Zijlstrafa0aafb2018-09-20 10:54:04 +02001333 tlb_flush_mmu(tlb);
Minchan Kim7b167b62019-09-24 00:02:24 +00001334 }
1335
1336 if (addr != end) {
1337 cond_resched();
1338 goto again;
Peter Zijlstrad16dfc52011-05-24 17:11:45 -07001339 }
1340
Robin Holt51c6f662005-11-13 16:06:42 -08001341 return addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001342}
1343
Robin Holt51c6f662005-11-13 16:06:42 -08001344static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
Nick Pigginb5810032005-10-29 18:16:12 -07001345 struct vm_area_struct *vma, pud_t *pud,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001346 unsigned long addr, unsigned long end,
Peter Zijlstra97a89412011-05-24 17:12:04 -07001347 struct zap_details *details)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001348{
1349 pmd_t *pmd;
1350 unsigned long next;
1351
1352 pmd = pmd_offset(pud, addr);
1353 do {
1354 next = pmd_addr_end(addr, end);
Zi Yan84c3fc42017-09-08 16:11:01 -07001355 if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
Hugh Dickins53406ed2018-08-01 11:31:52 -07001356 if (next - addr != HPAGE_PMD_SIZE)
David Rientjesfd607752016-12-12 16:42:20 -08001357 __split_huge_pmd(vma, pmd, addr, false, NULL);
Hugh Dickins53406ed2018-08-01 11:31:52 -07001358 else if (zap_huge_pmd(tlb, vma, pmd, addr))
Andrea Arcangeli1a5a9902012-03-21 16:33:42 -07001359 goto next;
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001360 /* fall through */
1361 }
Andrea Arcangeli1a5a9902012-03-21 16:33:42 -07001362 /*
1363 * Here there can be other concurrent MADV_DONTNEED or
1364 * trans huge page faults running, and if the pmd is
1365 * none or trans huge it can change under us. This is
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07001366 * because MADV_DONTNEED holds the mmap_lock in read
Andrea Arcangeli1a5a9902012-03-21 16:33:42 -07001367 * mode.
1368 */
1369 if (pmd_none_or_trans_huge_or_clear_bad(pmd))
1370 goto next;
Peter Zijlstra97a89412011-05-24 17:12:04 -07001371 next = zap_pte_range(tlb, vma, pmd, addr, next, details);
Andrea Arcangeli1a5a9902012-03-21 16:33:42 -07001372next:
Peter Zijlstra97a89412011-05-24 17:12:04 -07001373 cond_resched();
1374 } while (pmd++, addr = next, addr != end);
Robin Holt51c6f662005-11-13 16:06:42 -08001375
1376 return addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001377}
1378
Robin Holt51c6f662005-11-13 16:06:42 -08001379static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03001380 struct vm_area_struct *vma, p4d_t *p4d,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001381 unsigned long addr, unsigned long end,
Peter Zijlstra97a89412011-05-24 17:12:04 -07001382 struct zap_details *details)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001383{
1384 pud_t *pud;
1385 unsigned long next;
1386
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03001387 pud = pud_offset(p4d, addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001388 do {
1389 next = pud_addr_end(addr, end);
Matthew Wilcoxa00cc7d2017-02-24 14:57:02 -08001390 if (pud_trans_huge(*pud) || pud_devmap(*pud)) {
1391 if (next - addr != HPAGE_PUD_SIZE) {
Michel Lespinasse42fc5412020-06-08 21:33:44 -07001392 mmap_assert_locked(tlb->mm);
Matthew Wilcoxa00cc7d2017-02-24 14:57:02 -08001393 split_huge_pud(vma, pud, addr);
1394 } else if (zap_huge_pud(tlb, vma, pud, addr))
1395 goto next;
1396 /* fall through */
1397 }
Peter Zijlstra97a89412011-05-24 17:12:04 -07001398 if (pud_none_or_clear_bad(pud))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001399 continue;
Peter Zijlstra97a89412011-05-24 17:12:04 -07001400 next = zap_pmd_range(tlb, vma, pud, addr, next, details);
Matthew Wilcoxa00cc7d2017-02-24 14:57:02 -08001401next:
1402 cond_resched();
Peter Zijlstra97a89412011-05-24 17:12:04 -07001403 } while (pud++, addr = next, addr != end);
Robin Holt51c6f662005-11-13 16:06:42 -08001404
1405 return addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001406}
1407
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03001408static inline unsigned long zap_p4d_range(struct mmu_gather *tlb,
1409 struct vm_area_struct *vma, pgd_t *pgd,
1410 unsigned long addr, unsigned long end,
1411 struct zap_details *details)
1412{
1413 p4d_t *p4d;
1414 unsigned long next;
1415
1416 p4d = p4d_offset(pgd, addr);
1417 do {
1418 next = p4d_addr_end(addr, end);
1419 if (p4d_none_or_clear_bad(p4d))
1420 continue;
1421 next = zap_pud_range(tlb, vma, p4d, addr, next, details);
1422 } while (p4d++, addr = next, addr != end);
1423
1424 return addr;
1425}
1426
Michal Hockoaac45362016-03-25 14:20:24 -07001427void unmap_page_range(struct mmu_gather *tlb,
Al Viro038c7aa2012-03-05 13:25:09 -05001428 struct vm_area_struct *vma,
1429 unsigned long addr, unsigned long end,
1430 struct zap_details *details)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001431{
1432 pgd_t *pgd;
1433 unsigned long next;
1434
Linus Torvalds1da177e2005-04-16 15:20:36 -07001435 BUG_ON(addr >= end);
1436 tlb_start_vma(tlb, vma);
1437 pgd = pgd_offset(vma->vm_mm, addr);
1438 do {
1439 next = pgd_addr_end(addr, end);
Peter Zijlstra97a89412011-05-24 17:12:04 -07001440 if (pgd_none_or_clear_bad(pgd))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001441 continue;
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03001442 next = zap_p4d_range(tlb, vma, pgd, addr, next, details);
Peter Zijlstra97a89412011-05-24 17:12:04 -07001443 } while (pgd++, addr = next, addr != end);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001444 tlb_end_vma(tlb, vma);
1445}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001446
Al Virof5cc4ee2012-03-05 14:14:20 -05001447
1448static void unmap_single_vma(struct mmu_gather *tlb,
1449 struct vm_area_struct *vma, unsigned long start_addr,
Linus Torvalds4f74d2c2012-05-06 13:54:06 -07001450 unsigned long end_addr,
Al Virof5cc4ee2012-03-05 14:14:20 -05001451 struct zap_details *details)
1452{
1453 unsigned long start = max(vma->vm_start, start_addr);
1454 unsigned long end;
1455
1456 if (start >= vma->vm_end)
1457 return;
1458 end = min(vma->vm_end, end_addr);
1459 if (end <= vma->vm_start)
1460 return;
1461
Srikar Dronamrajucbc91f72012-04-11 16:05:27 +05301462 if (vma->vm_file)
1463 uprobe_munmap(vma, start, end);
1464
Konstantin Khlebnikovb3b9c292012-10-08 16:28:34 -07001465 if (unlikely(vma->vm_flags & VM_PFNMAP))
Suresh Siddha5180da42012-10-08 16:28:29 -07001466 untrack_pfn(vma, 0, 0);
Al Virof5cc4ee2012-03-05 14:14:20 -05001467
1468 if (start != end) {
1469 if (unlikely(is_vm_hugetlb_page(vma))) {
1470 /*
1471 * It is undesirable to test vma->vm_file as it
	1472			 * should be non-null for a valid hugetlb area.
1473 * However, vm_file will be NULL in the error
Davidlohr Bueso7aa6b4a2014-04-07 15:37:01 -07001474 * cleanup path of mmap_region. When
Al Virof5cc4ee2012-03-05 14:14:20 -05001475 * hugetlbfs ->mmap method fails,
Davidlohr Bueso7aa6b4a2014-04-07 15:37:01 -07001476 * mmap_region() nullifies vma->vm_file
Al Virof5cc4ee2012-03-05 14:14:20 -05001477 * before calling this function to clean up.
	1478			 * Since no pte has actually been set up, it is
1479 * safe to do nothing in this case.
1480 */
Aneesh Kumar K.V24669e52012-07-31 16:42:03 -07001481 if (vma->vm_file) {
Davidlohr Bueso83cde9e2014-12-12 16:54:21 -08001482 i_mmap_lock_write(vma->vm_file->f_mapping);
Mel Gormand8333522012-07-31 16:46:20 -07001483 __unmap_hugepage_range_final(tlb, vma, start, end, NULL);
Davidlohr Bueso83cde9e2014-12-12 16:54:21 -08001484 i_mmap_unlock_write(vma->vm_file->f_mapping);
Aneesh Kumar K.V24669e52012-07-31 16:42:03 -07001485 }
Al Virof5cc4ee2012-03-05 14:14:20 -05001486 } else
1487 unmap_page_range(tlb, vma, start, end, details);
1488 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001489}
1490
Linus Torvalds1da177e2005-04-16 15:20:36 -07001491/**
1492 * unmap_vmas - unmap a range of memory covered by a list of vma's
Randy Dunlap0164f692011-06-15 15:08:09 -07001493 * @tlb: address of the caller's struct mmu_gather
Linus Torvalds1da177e2005-04-16 15:20:36 -07001494 * @vma: the starting vma
1495 * @start_addr: virtual address at which to start unmapping
1496 * @end_addr: virtual address at which to end unmapping
Linus Torvalds1da177e2005-04-16 15:20:36 -07001497 *
Hugh Dickins508034a2005-10-29 18:16:30 -07001498 * Unmap all pages in the vma list.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001499 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07001500 * Only addresses between `start' and `end' will be unmapped.
1501 *
1502 * The VMA list must be sorted in ascending virtual address order.
1503 *
1504 * unmap_vmas() assumes that the caller will flush the whole unmapped address
1505 * range after unmap_vmas() returns. So the only responsibility here is to
1506 * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
1507 * drops the lock and schedules.
1508 */
Al Viro6e8bb012012-03-05 13:41:15 -05001509void unmap_vmas(struct mmu_gather *tlb,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001510 struct vm_area_struct *vma, unsigned long start_addr,
Linus Torvalds4f74d2c2012-05-06 13:54:06 -07001511 unsigned long end_addr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001512{
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08001513 struct mmu_notifier_range range;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001514
Jérôme Glisse6f4f13e2019-05-13 17:20:49 -07001515 mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm,
1516 start_addr, end_addr);
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08001517 mmu_notifier_invalidate_range_start(&range);
Al Virof5cc4ee2012-03-05 14:14:20 -05001518 for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next)
Linus Torvalds4f74d2c2012-05-06 13:54:06 -07001519 unmap_single_vma(tlb, vma, start_addr, end_addr, NULL);
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08001520 mmu_notifier_invalidate_range_end(&range);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001521}
1522
1523/**
1524 * zap_page_range - remove user pages in a given range
1525 * @vma: vm_area_struct holding the applicable pages
Randy Dunlapeb4546b2012-06-20 12:53:02 -07001526 * @start: starting address of pages to zap
Linus Torvalds1da177e2005-04-16 15:20:36 -07001527 * @size: number of bytes to zap
Al Virof5cc4ee2012-03-05 14:14:20 -05001528 *
1529 * Caller must protect the VMA list
Linus Torvalds1da177e2005-04-16 15:20:36 -07001530 */
Linus Torvalds7e027b12012-05-06 13:43:15 -07001531void zap_page_range(struct vm_area_struct *vma, unsigned long start,
Kirill A. Shutemovecf13852017-02-22 15:46:37 -08001532 unsigned long size)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001533{
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08001534 struct mmu_notifier_range range;
Peter Zijlstrad16dfc52011-05-24 17:11:45 -07001535 struct mmu_gather tlb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001536
Linus Torvalds1da177e2005-04-16 15:20:36 -07001537 lru_add_drain();
Jérôme Glisse7269f992019-05-13 17:20:53 -07001538 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
Jérôme Glisse6f4f13e2019-05-13 17:20:49 -07001539 start, start + size);
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08001540 tlb_gather_mmu(&tlb, vma->vm_mm, start, range.end);
1541 update_hiwater_rss(vma->vm_mm);
1542 mmu_notifier_invalidate_range_start(&range);
1543 for ( ; vma && vma->vm_start < range.end; vma = vma->vm_next)
1544 unmap_single_vma(&tlb, vma, start, range.end, NULL);
1545 mmu_notifier_invalidate_range_end(&range);
1546 tlb_finish_mmu(&tlb, start, range.end);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001547}
1548
Jack Steinerc627f9c2008-07-29 22:33:53 -07001549/**
Al Virof5cc4ee2012-03-05 14:14:20 -05001550 * zap_page_range_single - remove user pages in a given range
1551 * @vma: vm_area_struct holding the applicable pages
1552 * @address: starting address of pages to zap
1553 * @size: number of bytes to zap
Kirill A. Shutemov8a5f14a2015-02-10 14:09:49 -08001554 * @details: details of shared cache invalidation
Al Virof5cc4ee2012-03-05 14:14:20 -05001555 *
1556 * The range must fit into one VMA.
1557 */
1558static void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
1559 unsigned long size, struct zap_details *details)
1560{
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08001561 struct mmu_notifier_range range;
Al Virof5cc4ee2012-03-05 14:14:20 -05001562 struct mmu_gather tlb;
Al Virof5cc4ee2012-03-05 14:14:20 -05001563
1564 lru_add_drain();
Jérôme Glisse7269f992019-05-13 17:20:53 -07001565 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
Jérôme Glisse6f4f13e2019-05-13 17:20:49 -07001566 address, address + size);
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08001567 tlb_gather_mmu(&tlb, vma->vm_mm, address, range.end);
1568 update_hiwater_rss(vma->vm_mm);
1569 mmu_notifier_invalidate_range_start(&range);
1570 unmap_single_vma(&tlb, vma, address, range.end, details);
1571 mmu_notifier_invalidate_range_end(&range);
1572 tlb_finish_mmu(&tlb, address, range.end);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001573}
1574
Jack Steinerc627f9c2008-07-29 22:33:53 -07001575/**
1576 * zap_vma_ptes - remove ptes mapping the vma
1577 * @vma: vm_area_struct holding ptes to be zapped
1578 * @address: starting address of pages to zap
1579 * @size: number of bytes to zap
1580 *
1581 * This function only unmaps ptes assigned to VM_PFNMAP vmas.
1582 *
1583 * The entire address range must be fully contained within the vma.
1584 *
Jack Steinerc627f9c2008-07-29 22:33:53 -07001585 */
Leon Romanovsky27d036e2018-05-29 15:14:07 +03001586void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
Jack Steinerc627f9c2008-07-29 22:33:53 -07001587 unsigned long size)
1588{
1589 if (address < vma->vm_start || address + size > vma->vm_end ||
1590 !(vma->vm_flags & VM_PFNMAP))
Leon Romanovsky27d036e2018-05-29 15:14:07 +03001591 return;
1592
Al Virof5cc4ee2012-03-05 14:14:20 -05001593 zap_page_range_single(vma, address, size, NULL);
Jack Steinerc627f9c2008-07-29 22:33:53 -07001594}
1595EXPORT_SYMBOL_GPL(zap_vma_ptes);
1596
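/*
 * Illustrative sketch (not from this file): how a driver might tear down the
 * PTEs it previously established in a VM_PFNMAP vma, e.g. when the backing
 * device window is revoked.  "mydrv_vma" and "MYDRV_WIN_SIZE" are
 * hypothetical names used only for this example.
 *
 *	static void mydrv_revoke_mapping(struct vm_area_struct *mydrv_vma)
 *	{
 *		// The vma must be VM_PFNMAP and the range fully inside it;
 *		// otherwise zap_vma_ptes() silently does nothing.
 *		zap_vma_ptes(mydrv_vma, mydrv_vma->vm_start, MYDRV_WIN_SIZE);
 *	}
 */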
Arjun Roy8cd39842020-04-10 14:33:01 -07001597static pmd_t *walk_to_pmd(struct mm_struct *mm, unsigned long addr)
Linus Torvaldsc9cfcdd2005-11-29 14:03:14 -08001598{
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03001599 pgd_t *pgd;
1600 p4d_t *p4d;
1601 pud_t *pud;
1602 pmd_t *pmd;
1603
1604 pgd = pgd_offset(mm, addr);
1605 p4d = p4d_alloc(mm, pgd, addr);
1606 if (!p4d)
1607 return NULL;
1608 pud = pud_alloc(mm, p4d, addr);
1609 if (!pud)
1610 return NULL;
1611 pmd = pmd_alloc(mm, pud, addr);
1612 if (!pmd)
1613 return NULL;
1614
1615 VM_BUG_ON(pmd_trans_huge(*pmd));
Arjun Roy8cd39842020-04-10 14:33:01 -07001616 return pmd;
1617}
1618
1619pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
1620 spinlock_t **ptl)
1621{
1622 pmd_t *pmd = walk_to_pmd(mm, addr);
1623
1624 if (!pmd)
1625 return NULL;
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03001626 return pte_alloc_map_lock(mm, pmd, addr, ptl);
Linus Torvaldsc9cfcdd2005-11-29 14:03:14 -08001627}
1628
Arjun Roy8efd6f5b2020-04-10 14:32:51 -07001629static int validate_page_before_insert(struct page *page)
1630{
1631 if (PageAnon(page) || PageSlab(page) || page_has_type(page))
1632 return -EINVAL;
1633 flush_dcache_page(page);
1634 return 0;
1635}
1636
1637static int insert_page_into_pte_locked(struct mm_struct *mm, pte_t *pte,
1638 unsigned long addr, struct page *page, pgprot_t prot)
1639{
1640 if (!pte_none(*pte))
1641 return -EBUSY;
1642 /* Ok, finally just insert the thing.. */
1643 get_page(page);
1644 inc_mm_counter_fast(mm, mm_counter_file(page));
1645 page_add_file_rmap(page, false);
1646 set_pte_at(mm, addr, pte, mk_pte(page, prot));
1647 return 0;
1648}
1649
Linus Torvalds1da177e2005-04-16 15:20:36 -07001650/*
Linus Torvalds238f58d2005-11-29 13:01:56 -08001651 * This is the old fallback for page remapping.
1652 *
1653 * For historical reasons, it only allows reserved pages. Only
1654 * old drivers should use this, and they needed to mark their
1655 * pages reserved for the old functions anyway.
1656 */
Nick Piggin423bad602008-04-28 02:13:01 -07001657static int insert_page(struct vm_area_struct *vma, unsigned long addr,
1658 struct page *page, pgprot_t prot)
Linus Torvalds238f58d2005-11-29 13:01:56 -08001659{
Nick Piggin423bad602008-04-28 02:13:01 -07001660 struct mm_struct *mm = vma->vm_mm;
Linus Torvalds238f58d2005-11-29 13:01:56 -08001661 int retval;
Linus Torvaldsc9cfcdd2005-11-29 14:03:14 -08001662 pte_t *pte;
Balbir Singh8a9f3cc2008-02-07 00:13:53 -08001663 spinlock_t *ptl;
1664
Arjun Roy8efd6f5b2020-04-10 14:32:51 -07001665 retval = validate_page_before_insert(page);
1666 if (retval)
KAMEZAWA Hiroyuki5b4e6552008-10-18 20:28:10 -07001667 goto out;
Linus Torvalds238f58d2005-11-29 13:01:56 -08001668 retval = -ENOMEM;
Linus Torvaldsc9cfcdd2005-11-29 14:03:14 -08001669 pte = get_locked_pte(mm, addr, &ptl);
Linus Torvalds238f58d2005-11-29 13:01:56 -08001670 if (!pte)
KAMEZAWA Hiroyuki5b4e6552008-10-18 20:28:10 -07001671 goto out;
Arjun Roy8efd6f5b2020-04-10 14:32:51 -07001672 retval = insert_page_into_pte_locked(mm, pte, addr, page, prot);
Linus Torvalds238f58d2005-11-29 13:01:56 -08001673 pte_unmap_unlock(pte, ptl);
1674out:
1675 return retval;
1676}
1677
Arjun Roy8cd39842020-04-10 14:33:01 -07001678#ifdef pte_index
Arjun Roy7f70c2a2020-06-25 20:30:01 -07001679static int insert_page_in_batch_locked(struct mm_struct *mm, pte_t *pte,
Arjun Roy8cd39842020-04-10 14:33:01 -07001680 unsigned long addr, struct page *page, pgprot_t prot)
1681{
1682 int err;
1683
1684 if (!page_count(page))
1685 return -EINVAL;
1686 err = validate_page_before_insert(page);
Arjun Roy7f70c2a2020-06-25 20:30:01 -07001687 if (err)
1688 return err;
1689 return insert_page_into_pte_locked(mm, pte, addr, page, prot);
Arjun Roy8cd39842020-04-10 14:33:01 -07001690}
1691
1692/* insert_pages() amortizes the cost of spinlock operations
1693 * when inserting pages in a loop. Arch *must* define pte_index.
1694 */
1695static int insert_pages(struct vm_area_struct *vma, unsigned long addr,
1696 struct page **pages, unsigned long *num, pgprot_t prot)
1697{
1698 pmd_t *pmd = NULL;
Arjun Roy7f70c2a2020-06-25 20:30:01 -07001699 pte_t *start_pte, *pte;
1700 spinlock_t *pte_lock;
Arjun Roy8cd39842020-04-10 14:33:01 -07001701 struct mm_struct *const mm = vma->vm_mm;
1702 unsigned long curr_page_idx = 0;
1703 unsigned long remaining_pages_total = *num;
1704 unsigned long pages_to_write_in_pmd;
1705 int ret;
1706more:
1707 ret = -EFAULT;
1708 pmd = walk_to_pmd(mm, addr);
1709 if (!pmd)
1710 goto out;
1711
1712 pages_to_write_in_pmd = min_t(unsigned long,
1713 remaining_pages_total, PTRS_PER_PTE - pte_index(addr));
1714
1715 /* Allocate the PTE if necessary; takes PMD lock once only. */
1716 ret = -ENOMEM;
1717 if (pte_alloc(mm, pmd))
1718 goto out;
Arjun Roy8cd39842020-04-10 14:33:01 -07001719
1720 while (pages_to_write_in_pmd) {
1721 int pte_idx = 0;
1722 const int batch_size = min_t(int, pages_to_write_in_pmd, 8);
1723
Arjun Roy7f70c2a2020-06-25 20:30:01 -07001724 start_pte = pte_offset_map_lock(mm, pmd, addr, &pte_lock);
1725 for (pte = start_pte; pte_idx < batch_size; ++pte, ++pte_idx) {
1726 int err = insert_page_in_batch_locked(mm, pte,
Arjun Roy8cd39842020-04-10 14:33:01 -07001727 addr, pages[curr_page_idx], prot);
1728 if (unlikely(err)) {
Arjun Roy7f70c2a2020-06-25 20:30:01 -07001729 pte_unmap_unlock(start_pte, pte_lock);
Arjun Roy8cd39842020-04-10 14:33:01 -07001730 ret = err;
1731 remaining_pages_total -= pte_idx;
1732 goto out;
1733 }
1734 addr += PAGE_SIZE;
1735 ++curr_page_idx;
1736 }
Arjun Roy7f70c2a2020-06-25 20:30:01 -07001737 pte_unmap_unlock(start_pte, pte_lock);
Arjun Roy8cd39842020-04-10 14:33:01 -07001738 pages_to_write_in_pmd -= batch_size;
1739 remaining_pages_total -= batch_size;
1740 }
1741 if (remaining_pages_total)
1742 goto more;
1743 ret = 0;
1744out:
1745 *num = remaining_pages_total;
1746 return ret;
1747}
1748#endif /* ifdef pte_index */
1749
1750/**
1751 * vm_insert_pages - insert multiple pages into user vma, batching the pmd lock.
1752 * @vma: user vma to map to
1753 * @addr: target start user address of these pages
1754 * @pages: source kernel pages
1755 * @num: in: number of pages to map. out: number of pages that were *not*
1756 * mapped. (0 means all pages were successfully mapped).
1757 *
1758 * Preferred over vm_insert_page() when inserting multiple pages.
1759 *
1760 * In case of error, we may have mapped a subset of the provided
1761 * pages. It is the caller's responsibility to account for this case.
1762 *
1763 * The same restrictions apply as in vm_insert_page().
1764 */
1765int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
1766 struct page **pages, unsigned long *num)
1767{
1768#ifdef pte_index
1769 const unsigned long end_addr = addr + (*num * PAGE_SIZE) - 1;
1770
1771 if (addr < vma->vm_start || end_addr >= vma->vm_end)
1772 return -EFAULT;
1773 if (!(vma->vm_flags & VM_MIXEDMAP)) {
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07001774 BUG_ON(mmap_read_trylock(vma->vm_mm));
Arjun Roy8cd39842020-04-10 14:33:01 -07001775 BUG_ON(vma->vm_flags & VM_PFNMAP);
1776 vma->vm_flags |= VM_MIXEDMAP;
1777 }
1778 /* Defer page refcount checking till we're about to map that page. */
1779 return insert_pages(vma, addr, pages, num, vma->vm_page_prot);
1780#else
1781 unsigned long idx = 0, pgcount = *num;
Tom Rix45779b02020-07-23 21:15:18 -07001782 int err = -EINVAL;
Arjun Roy8cd39842020-04-10 14:33:01 -07001783
1784 for (; idx < pgcount; ++idx) {
1785 err = vm_insert_page(vma, addr + (PAGE_SIZE * idx), pages[idx]);
1786 if (err)
1787 break;
1788 }
1789 *num = pgcount - idx;
1790 return err;
1791#endif /* ifdef pte_index */
1792}
1793EXPORT_SYMBOL(vm_insert_pages);
1794
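/*
 * Illustrative sketch (not from this file): mapping a batch of kernel pages
 * from an ->mmap handler with one call, and accounting for pages that were
 * *not* mapped on error.  "mydrv_pages" and "mydrv_npages" are hypothetical.
 *
 *	static int mydrv_mmap_batch(struct vm_area_struct *vma,
 *				    struct page **mydrv_pages,
 *				    unsigned long mydrv_npages)
 *	{
 *		unsigned long num = mydrv_npages;
 *		int err;
 *
 *		err = vm_insert_pages(vma, vma->vm_start, mydrv_pages, &num);
 *		if (err)
 *			// "num" now holds how many pages were NOT mapped.
 *			// From an ->mmap handler it is fine to just return the
 *			// error; mmap_region() will unmap whatever was inserted.
 *			return err;
 *		return 0;
 *	}
 */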
Rolf Eike Beerbfa5bf62006-09-25 23:31:22 -07001795/**
1796 * vm_insert_page - insert single page into user vma
1797 * @vma: user vma to map to
1798 * @addr: target user address of this page
1799 * @page: source kernel page
1800 *
Linus Torvaldsa145dd42005-11-30 09:35:19 -08001801 * This allows drivers to insert individual pages they've allocated
1802 * into a user vma.
1803 *
1804 * The page has to be a nice clean _individual_ kernel allocation.
1805 * If you allocate a compound page, you need to have marked it as
1806 * such (__GFP_COMP), or manually just split the page up yourself
Nick Piggin8dfcc9b2006-03-22 00:08:05 -08001807 * (see split_page()).
Linus Torvaldsa145dd42005-11-30 09:35:19 -08001808 *
1809 * NOTE! Traditionally this was done with "remap_pfn_range()" which
1810 * took an arbitrary page protection parameter. This doesn't allow
1811 * that. Your vma protection will have to be set up correctly, which
1812 * means that if you want a shared writable mapping, you'd better
1813 * ask for a shared writable mapping!
1814 *
1815 * The page does not need to be reserved.
Konstantin Khlebnikov4b6e1e32012-10-08 16:28:40 -07001816 *
1817 * Usually this function is called from f_op->mmap() handler
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07001818 * under mm->mmap_lock write-lock, so it can change vma->vm_flags.
Konstantin Khlebnikov4b6e1e32012-10-08 16:28:40 -07001819 * Caller must set VM_MIXEDMAP on vma if it wants to call this
1820 * function from other places, for example from page-fault handler.
Mike Rapoporta862f682019-03-05 15:48:42 -08001821 *
1822 * Return: %0 on success, negative error code otherwise.
Linus Torvaldsa145dd42005-11-30 09:35:19 -08001823 */
Nick Piggin423bad602008-04-28 02:13:01 -07001824int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
1825 struct page *page)
Linus Torvaldsa145dd42005-11-30 09:35:19 -08001826{
1827 if (addr < vma->vm_start || addr >= vma->vm_end)
1828 return -EFAULT;
1829 if (!page_count(page))
1830 return -EINVAL;
Konstantin Khlebnikov4b6e1e32012-10-08 16:28:40 -07001831 if (!(vma->vm_flags & VM_MIXEDMAP)) {
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07001832 BUG_ON(mmap_read_trylock(vma->vm_mm));
Konstantin Khlebnikov4b6e1e32012-10-08 16:28:40 -07001833 BUG_ON(vma->vm_flags & VM_PFNMAP);
1834 vma->vm_flags |= VM_MIXEDMAP;
1835 }
Nick Piggin423bad602008-04-28 02:13:01 -07001836 return insert_page(vma, addr, page, vma->vm_page_prot);
Linus Torvaldsa145dd42005-11-30 09:35:19 -08001837}
Linus Torvaldse3c33742005-12-03 20:48:11 -08001838EXPORT_SYMBOL(vm_insert_page);
Linus Torvaldsa145dd42005-11-30 09:35:19 -08001839
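/*
 * Illustrative sketch (not from this file): an ->mmap handler exposing a
 * single shared kernel page (e.g. a ring-buffer header) to userspace.
 * "mydrv_info_page" is hypothetical and assumed to be a normal, order-0
 * kernel allocation.
 *
 *	static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		if (vma->vm_end - vma->vm_start != PAGE_SIZE)
 *			return -EINVAL;
 *		// Called under the mmap_lock write-lock, so vm_insert_page()
 *		// may set VM_MIXEDMAP on the vma for us.
 *		return vm_insert_page(vma, vma->vm_start, mydrv_info_page);
 *	}
 */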
Souptick Joardera667d742019-05-13 17:21:56 -07001840/*
1841 * __vm_map_pages - maps range of kernel pages into user vma
1842 * @vma: user vma to map to
1843 * @pages: pointer to array of source kernel pages
1844 * @num: number of pages in page array
1845 * @offset: user's requested vm_pgoff
1846 *
1847 * This allows drivers to map range of kernel pages into a user vma.
1848 *
1849 * Return: 0 on success and error code otherwise.
1850 */
1851static int __vm_map_pages(struct vm_area_struct *vma, struct page **pages,
1852 unsigned long num, unsigned long offset)
1853{
1854 unsigned long count = vma_pages(vma);
1855 unsigned long uaddr = vma->vm_start;
1856 int ret, i;
1857
1858 /* Fail if the user requested offset is beyond the end of the object */
Miguel Ojeda96756fc2019-07-11 20:58:47 -07001859 if (offset >= num)
Souptick Joardera667d742019-05-13 17:21:56 -07001860 return -ENXIO;
1861
1862 /* Fail if the user requested size exceeds available object size */
1863 if (count > num - offset)
1864 return -ENXIO;
1865
1866 for (i = 0; i < count; i++) {
1867 ret = vm_insert_page(vma, uaddr, pages[offset + i]);
1868 if (ret < 0)
1869 return ret;
1870 uaddr += PAGE_SIZE;
1871 }
1872
1873 return 0;
1874}
1875
1876/**
1877 * vm_map_pages - maps range of kernel pages starts with non zero offset
1878 * @vma: user vma to map to
1879 * @pages: pointer to array of source kernel pages
1880 * @num: number of pages in page array
1881 *
1882 * Maps an object consisting of @num pages, catering for the user's
	1883	 * requested vm_pgoff.
1884 *
1885 * If we fail to insert any page into the vma, the function will return
1886 * immediately leaving any previously inserted pages present. Callers
1887 * from the mmap handler may immediately return the error as their caller
1888 * will destroy the vma, removing any successfully inserted pages. Other
1889 * callers should make their own arrangements for calling unmap_region().
1890 *
1891 * Context: Process context. Called by mmap handlers.
1892 * Return: 0 on success and error code otherwise.
1893 */
1894int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
1895 unsigned long num)
1896{
1897 return __vm_map_pages(vma, pages, num, vma->vm_pgoff);
1898}
1899EXPORT_SYMBOL(vm_map_pages);
1900
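/*
 * Illustrative sketch (not from this file): an ->mmap handler for an object
 * backed by an array of kernel pages; vm_map_pages() honours the user's
 * vm_pgoff and rejects out-of-range requests itself.  "struct mydrv_obj"
 * with its "pages"/"npages" members is hypothetical.
 *
 *	static int mydrv_obj_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct mydrv_obj *obj = file->private_data;
 *
 *		return vm_map_pages(vma, obj->pages, obj->npages);
 *	}
 */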
1901/**
1902 * vm_map_pages_zero - map range of kernel pages starts with zero offset
1903 * @vma: user vma to map to
1904 * @pages: pointer to array of source kernel pages
1905 * @num: number of pages in page array
1906 *
1907 * Similar to vm_map_pages(), except that it explicitly sets the offset
1908 * to 0. This function is intended for the drivers that did not consider
1909 * vm_pgoff.
1910 *
1911 * Context: Process context. Called by mmap handlers.
1912 * Return: 0 on success and error code otherwise.
1913 */
1914int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
1915 unsigned long num)
1916{
1917 return __vm_map_pages(vma, pages, num, 0);
1918}
1919EXPORT_SYMBOL(vm_map_pages_zero);
1920
Matthew Wilcox9b5a8e02018-10-26 15:04:40 -07001921static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr,
Ross Zwislerb2770da62017-09-06 16:18:35 -07001922 pfn_t pfn, pgprot_t prot, bool mkwrite)
Nick Piggin423bad602008-04-28 02:13:01 -07001923{
1924 struct mm_struct *mm = vma->vm_mm;
Nick Piggin423bad602008-04-28 02:13:01 -07001925 pte_t *pte, entry;
1926 spinlock_t *ptl;
1927
Nick Piggin423bad602008-04-28 02:13:01 -07001928 pte = get_locked_pte(mm, addr, &ptl);
1929 if (!pte)
Matthew Wilcox9b5a8e02018-10-26 15:04:40 -07001930 return VM_FAULT_OOM;
Ross Zwislerb2770da62017-09-06 16:18:35 -07001931 if (!pte_none(*pte)) {
1932 if (mkwrite) {
1933 /*
1934 * For read faults on private mappings the PFN passed
1935 * in may not match the PFN we have mapped if the
1936 * mapped PFN is a writeable COW page. In the mkwrite
1937 * case we are creating a writable PTE for a shared
Jan Karaf2c57d92018-10-30 15:10:47 -07001938 * mapping and we expect the PFNs to match. If they
1939 * don't match, we are likely racing with block
1940 * allocation and mapping invalidation so just skip the
1941 * update.
Ross Zwislerb2770da62017-09-06 16:18:35 -07001942 */
Jan Karaf2c57d92018-10-30 15:10:47 -07001943 if (pte_pfn(*pte) != pfn_t_to_pfn(pfn)) {
1944 WARN_ON_ONCE(!is_zero_pfn(pte_pfn(*pte)));
Ross Zwislerb2770da62017-09-06 16:18:35 -07001945 goto out_unlock;
Jan Karaf2c57d92018-10-30 15:10:47 -07001946 }
Jan Karacae85cb2019-03-28 20:43:19 -07001947 entry = pte_mkyoung(*pte);
1948 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
1949 if (ptep_set_access_flags(vma, addr, pte, entry, 1))
1950 update_mmu_cache(vma, addr, pte);
1951 }
1952 goto out_unlock;
Ross Zwislerb2770da62017-09-06 16:18:35 -07001953 }
Nick Piggin423bad602008-04-28 02:13:01 -07001954
1955 /* Ok, finally just insert the thing.. */
Dan Williams01c8f1c2016-01-15 16:56:40 -08001956 if (pfn_t_devmap(pfn))
1957 entry = pte_mkdevmap(pfn_t_pte(pfn, prot));
1958 else
1959 entry = pte_mkspecial(pfn_t_pte(pfn, prot));
Ross Zwislerb2770da62017-09-06 16:18:35 -07001960
Ross Zwislerb2770da62017-09-06 16:18:35 -07001961 if (mkwrite) {
1962 entry = pte_mkyoung(entry);
1963 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
1964 }
1965
Nick Piggin423bad602008-04-28 02:13:01 -07001966 set_pte_at(mm, addr, pte, entry);
Russell King4b3073e2009-12-18 16:40:18 +00001967 update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */
Nick Piggin423bad602008-04-28 02:13:01 -07001968
Nick Piggin423bad602008-04-28 02:13:01 -07001969out_unlock:
1970 pte_unmap_unlock(pte, ptl);
Matthew Wilcox9b5a8e02018-10-26 15:04:40 -07001971 return VM_FAULT_NOPAGE;
Nick Piggin423bad602008-04-28 02:13:01 -07001972}
1973
Matthew Wilcoxf5e6d1d2018-10-26 15:04:13 -07001974/**
1975 * vmf_insert_pfn_prot - insert single pfn into user vma with specified pgprot
1976 * @vma: user vma to map to
1977 * @addr: target user address of this page
1978 * @pfn: source kernel pfn
1979 * @pgprot: pgprot flags for the inserted page
1980 *
Randy Dunlapa1a0aea2020-08-11 18:33:05 -07001981 * This is exactly like vmf_insert_pfn(), except that it allows drivers
Matthew Wilcoxf5e6d1d2018-10-26 15:04:13 -07001982 * to override pgprot on a per-page basis.
1983 *
1984 * This only makes sense for IO mappings, and it makes no sense for
1985 * COW mappings. In general, using multiple vmas is preferable;
Matthew Wilcoxae2b01f2018-10-26 15:04:29 -07001986 * vmf_insert_pfn_prot should only be used if using multiple VMAs is
Matthew Wilcoxf5e6d1d2018-10-26 15:04:13 -07001987 * impractical.
1988 *
Thomas Hellstrom574c5b32019-11-22 09:25:12 +01001989 * See vmf_insert_mixed_prot() for a discussion of the implication of using
1990 * a value of @pgprot different from that of @vma->vm_page_prot.
1991 *
Matthew Wilcoxae2b01f2018-10-26 15:04:29 -07001992 * Context: Process context. May allocate using %GFP_KERNEL.
Matthew Wilcoxf5e6d1d2018-10-26 15:04:13 -07001993 * Return: vm_fault_t value.
1994 */
1995vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
1996 unsigned long pfn, pgprot_t pgprot)
1997{
Matthew Wilcox6d958542018-10-26 15:04:33 -07001998 /*
1999 * Technically, architectures with pte_special can avoid all these
	2000	 * restrictions (same for remap_pfn_range). However, we would like
	2001	 * consistency in testing and feature parity among all architectures, so we should
2002 * try to keep these invariants in place for everybody.
2003 */
2004 BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
2005 BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
2006 (VM_PFNMAP|VM_MIXEDMAP));
2007 BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
2008 BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));
2009
2010 if (addr < vma->vm_start || addr >= vma->vm_end)
2011 return VM_FAULT_SIGBUS;
2012
2013 if (!pfn_modify_allowed(pfn, pgprot))
2014 return VM_FAULT_SIGBUS;
2015
2016 track_pfn_insert(vma, &pgprot, __pfn_to_pfn_t(pfn, PFN_DEV));
2017
Matthew Wilcox9b5a8e02018-10-26 15:04:40 -07002018 return insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot,
Matthew Wilcox6d958542018-10-26 15:04:33 -07002019 false);
Matthew Wilcoxf5e6d1d2018-10-26 15:04:13 -07002020}
2021EXPORT_SYMBOL(vmf_insert_pfn_prot);
Nick Piggine0dc0d82007-02-12 00:51:36 -08002022
Matthew Wilcoxae2b01f2018-10-26 15:04:29 -07002023/**
2024 * vmf_insert_pfn - insert single pfn into user vma
2025 * @vma: user vma to map to
2026 * @addr: target user address of this page
2027 * @pfn: source kernel pfn
2028 *
2029 * Similar to vm_insert_page, this allows drivers to insert individual pages
2030 * they've allocated into a user vma. Same comments apply.
2031 *
2032 * This function should only be called from a vm_ops->fault handler, and
2033 * in that case the handler should return the result of this function.
2034 *
2035 * vma cannot be a COW mapping.
2036 *
2037 * As this is called only for pages that do not currently exist, we
2038 * do not need to flush old virtual caches or the TLB.
2039 *
2040 * Context: Process context. May allocate using %GFP_KERNEL.
2041 * Return: vm_fault_t value.
2042 */
2043vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
2044 unsigned long pfn)
2045{
2046 return vmf_insert_pfn_prot(vma, addr, pfn, vma->vm_page_prot);
2047}
2048EXPORT_SYMBOL(vmf_insert_pfn);
2049
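/*
 * Illustrative sketch (not from this file): a vm_ops->fault handler for a
 * vma that was set up as VM_PFNMAP at mmap() time, inserting one pfn per
 * fault and returning the vm_fault_t directly.  "mydrv_base_pfn" is
 * hypothetical.
 *
 *	static vm_fault_t mydrv_fault(struct vm_fault *vmf)
 *	{
 *		unsigned long pfn = mydrv_base_pfn + vmf->pgoff;
 *
 *		return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
 *	}
 */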
Dan Williams785a3fa2017-10-23 07:20:00 -07002050static bool vm_mixed_ok(struct vm_area_struct *vma, pfn_t pfn)
2051{
2052 /* these checks mirror the abort conditions in vm_normal_page */
2053 if (vma->vm_flags & VM_MIXEDMAP)
2054 return true;
2055 if (pfn_t_devmap(pfn))
2056 return true;
2057 if (pfn_t_special(pfn))
2058 return true;
2059 if (is_zero_pfn(pfn_t_to_pfn(pfn)))
2060 return true;
2061 return false;
2062}
2063
Matthew Wilcox79f3aa52018-10-26 15:04:37 -07002064static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma,
Thomas Hellstrom574c5b32019-11-22 09:25:12 +01002065 unsigned long addr, pfn_t pfn, pgprot_t pgprot,
2066 bool mkwrite)
Nick Piggin423bad602008-04-28 02:13:01 -07002067{
Matthew Wilcox79f3aa52018-10-26 15:04:37 -07002068 int err;
Dan Williams87744ab2016-10-07 17:00:18 -07002069
Dan Williams785a3fa2017-10-23 07:20:00 -07002070 BUG_ON(!vm_mixed_ok(vma, pfn));
Nick Piggin423bad602008-04-28 02:13:01 -07002071
2072 if (addr < vma->vm_start || addr >= vma->vm_end)
Matthew Wilcox79f3aa52018-10-26 15:04:37 -07002073 return VM_FAULT_SIGBUS;
Borislav Petkov308a0472016-10-26 19:43:43 +02002074
2075 track_pfn_insert(vma, &pgprot, pfn);
Nick Piggin423bad602008-04-28 02:13:01 -07002076
Andi Kleen42e40892018-06-13 15:48:27 -07002077 if (!pfn_modify_allowed(pfn_t_to_pfn(pfn), pgprot))
Matthew Wilcox79f3aa52018-10-26 15:04:37 -07002078 return VM_FAULT_SIGBUS;
Andi Kleen42e40892018-06-13 15:48:27 -07002079
Nick Piggin423bad602008-04-28 02:13:01 -07002080 /*
2081 * If we don't have pte special, then we have to use the pfn_valid()
2082 * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must*
2083 * refcount the page if pfn_valid is true (hence insert_page rather
Hugh Dickins62eede62009-09-21 17:03:34 -07002084 * than insert_pfn). If a zero_pfn were inserted into a VM_MIXEDMAP
	2085	 * without pte special, it would then be refcounted as a normal page.
Nick Piggin423bad602008-04-28 02:13:01 -07002086 */
Laurent Dufour00b3a332018-06-07 17:06:12 -07002087 if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) &&
2088 !pfn_t_devmap(pfn) && pfn_t_valid(pfn)) {
Nick Piggin423bad602008-04-28 02:13:01 -07002089 struct page *page;
2090
Dan Williams03fc2da2016-01-26 09:48:05 -08002091 /*
2092 * At this point we are committed to insert_page()
2093 * regardless of whether the caller specified flags that
2094 * result in pfn_t_has_page() == false.
2095 */
2096 page = pfn_to_page(pfn_t_to_pfn(pfn));
Matthew Wilcox79f3aa52018-10-26 15:04:37 -07002097 err = insert_page(vma, addr, page, pgprot);
2098 } else {
Matthew Wilcox9b5a8e02018-10-26 15:04:40 -07002099 return insert_pfn(vma, addr, pfn, pgprot, mkwrite);
Nick Piggin423bad602008-04-28 02:13:01 -07002100 }
Ross Zwislerb2770da62017-09-06 16:18:35 -07002101
Matthew Wilcox5d747632018-10-26 15:04:10 -07002102 if (err == -ENOMEM)
2103 return VM_FAULT_OOM;
2104 if (err < 0 && err != -EBUSY)
2105 return VM_FAULT_SIGBUS;
2106
2107 return VM_FAULT_NOPAGE;
Nick Piggin423bad602008-04-28 02:13:01 -07002108}
Matthew Wilcox79f3aa52018-10-26 15:04:37 -07002109
Thomas Hellstrom574c5b32019-11-22 09:25:12 +01002110/**
2111 * vmf_insert_mixed_prot - insert single pfn into user vma with specified pgprot
2112 * @vma: user vma to map to
2113 * @addr: target user address of this page
2114 * @pfn: source kernel pfn
2115 * @pgprot: pgprot flags for the inserted page
2116 *
Randy Dunlapa1a0aea2020-08-11 18:33:05 -07002117 * This is exactly like vmf_insert_mixed(), except that it allows drivers
Thomas Hellstrom574c5b32019-11-22 09:25:12 +01002118 * to override pgprot on a per-page basis.
2119 *
2120 * Typically this function should be used by drivers to set caching- and
2121 * encryption bits different than those of @vma->vm_page_prot, because
2122 * the caching- or encryption mode may not be known at mmap() time.
2123 * This is ok as long as @vma->vm_page_prot is not used by the core vm
2124 * to set caching and encryption bits for those vmas (except for COW pages).
2125 * This is ensured by core vm only modifying these page table entries using
2126 * functions that don't touch caching- or encryption bits, using pte_modify()
2127 * if needed. (See for example mprotect()).
2128 * Also when new page-table entries are created, this is only done using the
2129 * fault() callback, and never using the value of vma->vm_page_prot,
2130 * except for page-table entries that point to anonymous pages as the result
2131 * of COW.
2132 *
2133 * Context: Process context. May allocate using %GFP_KERNEL.
2134 * Return: vm_fault_t value.
2135 */
2136vm_fault_t vmf_insert_mixed_prot(struct vm_area_struct *vma, unsigned long addr,
2137 pfn_t pfn, pgprot_t pgprot)
2138{
2139 return __vm_insert_mixed(vma, addr, pfn, pgprot, false);
2140}
Thomas Hellstrom5379e4d2019-11-22 09:34:35 +01002141EXPORT_SYMBOL(vmf_insert_mixed_prot);
Thomas Hellstrom574c5b32019-11-22 09:25:12 +01002142
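/*
 * Illustrative sketch (not from this file): a fault handler that inserts a
 * pfn with write-combined caching, overriding vma->vm_page_prot on a
 * per-page basis as discussed above.  "mydrv_pfn_for" is a hypothetical
 * helper returning the pfn_t for the faulting offset.
 *
 *	static vm_fault_t mydrv_wc_fault(struct vm_fault *vmf)
 *	{
 *		pfn_t pfn = mydrv_pfn_for(vmf->pgoff);
 *
 *		return vmf_insert_mixed_prot(vmf->vma, vmf->address, pfn,
 *				pgprot_writecombine(vmf->vma->vm_page_prot));
 *	}
 */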
Matthew Wilcox79f3aa52018-10-26 15:04:37 -07002143vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
2144 pfn_t pfn)
2145{
Thomas Hellstrom574c5b32019-11-22 09:25:12 +01002146 return __vm_insert_mixed(vma, addr, pfn, vma->vm_page_prot, false);
Matthew Wilcox79f3aa52018-10-26 15:04:37 -07002147}
Matthew Wilcox5d747632018-10-26 15:04:10 -07002148EXPORT_SYMBOL(vmf_insert_mixed);
Nick Piggin423bad602008-04-28 02:13:01 -07002149
Souptick Joarderab77dab2018-06-07 17:04:29 -07002150/*
2151 * If the insertion of PTE failed because someone else already added a
2152 * different entry in the mean time, we treat that as success as we assume
2153 * the same entry was actually inserted.
2154 */
Souptick Joarderab77dab2018-06-07 17:04:29 -07002155vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
2156 unsigned long addr, pfn_t pfn)
Ross Zwislerb2770da62017-09-06 16:18:35 -07002157{
Thomas Hellstrom574c5b32019-11-22 09:25:12 +01002158 return __vm_insert_mixed(vma, addr, pfn, vma->vm_page_prot, true);
Ross Zwislerb2770da62017-09-06 16:18:35 -07002159}
Souptick Joarderab77dab2018-06-07 17:04:29 -07002160EXPORT_SYMBOL(vmf_insert_mixed_mkwrite);
Ross Zwislerb2770da62017-09-06 16:18:35 -07002161
Linus Torvaldsa145dd42005-11-30 09:35:19 -08002162/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002163 * Maps a range of physical memory into the requested pages. The old
	2164 * mappings are removed. Any references to nonexistent pages result
	2165 * in null mappings (currently treated as "copy-on-access").
2166 */
2167static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
2168 unsigned long addr, unsigned long end,
2169 unsigned long pfn, pgprot_t prot)
2170{
2171 pte_t *pte;
Hugh Dickinsc74df322005-10-29 18:16:23 -07002172 spinlock_t *ptl;
Andi Kleen42e40892018-06-13 15:48:27 -07002173 int err = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002174
Hugh Dickinsc74df322005-10-29 18:16:23 -07002175 pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002176 if (!pte)
2177 return -ENOMEM;
Zachary Amsden6606c3e2006-09-30 23:29:33 -07002178 arch_enter_lazy_mmu_mode();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002179 do {
2180 BUG_ON(!pte_none(*pte));
Andi Kleen42e40892018-06-13 15:48:27 -07002181 if (!pfn_modify_allowed(pfn, prot)) {
2182 err = -EACCES;
2183 break;
2184 }
Nick Piggin7e675132008-04-28 02:13:00 -07002185 set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002186 pfn++;
2187 } while (pte++, addr += PAGE_SIZE, addr != end);
Zachary Amsden6606c3e2006-09-30 23:29:33 -07002188 arch_leave_lazy_mmu_mode();
Hugh Dickinsc74df322005-10-29 18:16:23 -07002189 pte_unmap_unlock(pte - 1, ptl);
Andi Kleen42e40892018-06-13 15:48:27 -07002190 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002191}
2192
2193static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
2194 unsigned long addr, unsigned long end,
2195 unsigned long pfn, pgprot_t prot)
2196{
2197 pmd_t *pmd;
2198 unsigned long next;
Andi Kleen42e40892018-06-13 15:48:27 -07002199 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002200
2201 pfn -= addr >> PAGE_SHIFT;
2202 pmd = pmd_alloc(mm, pud, addr);
2203 if (!pmd)
2204 return -ENOMEM;
Andrea Arcangelif66055ab2011-01-13 15:46:54 -08002205 VM_BUG_ON(pmd_trans_huge(*pmd));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002206 do {
2207 next = pmd_addr_end(addr, end);
Andi Kleen42e40892018-06-13 15:48:27 -07002208 err = remap_pte_range(mm, pmd, addr, next,
2209 pfn + (addr >> PAGE_SHIFT), prot);
2210 if (err)
2211 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002212 } while (pmd++, addr = next, addr != end);
2213 return 0;
2214}
2215
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03002216static inline int remap_pud_range(struct mm_struct *mm, p4d_t *p4d,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002217 unsigned long addr, unsigned long end,
2218 unsigned long pfn, pgprot_t prot)
2219{
2220 pud_t *pud;
2221 unsigned long next;
Andi Kleen42e40892018-06-13 15:48:27 -07002222 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002223
2224 pfn -= addr >> PAGE_SHIFT;
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03002225 pud = pud_alloc(mm, p4d, addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002226 if (!pud)
2227 return -ENOMEM;
2228 do {
2229 next = pud_addr_end(addr, end);
Andi Kleen42e40892018-06-13 15:48:27 -07002230 err = remap_pmd_range(mm, pud, addr, next,
2231 pfn + (addr >> PAGE_SHIFT), prot);
2232 if (err)
2233 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002234 } while (pud++, addr = next, addr != end);
2235 return 0;
2236}
2237
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03002238static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd,
2239 unsigned long addr, unsigned long end,
2240 unsigned long pfn, pgprot_t prot)
2241{
2242 p4d_t *p4d;
2243 unsigned long next;
Andi Kleen42e40892018-06-13 15:48:27 -07002244 int err;
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03002245
2246 pfn -= addr >> PAGE_SHIFT;
2247 p4d = p4d_alloc(mm, pgd, addr);
2248 if (!p4d)
2249 return -ENOMEM;
2250 do {
2251 next = p4d_addr_end(addr, end);
Andi Kleen42e40892018-06-13 15:48:27 -07002252 err = remap_pud_range(mm, p4d, addr, next,
2253 pfn + (addr >> PAGE_SHIFT), prot);
2254 if (err)
2255 return err;
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03002256 } while (p4d++, addr = next, addr != end);
2257 return 0;
2258}
2259
Rolf Eike Beerbfa5bf62006-09-25 23:31:22 -07002260/**
2261 * remap_pfn_range - remap kernel memory to userspace
2262 * @vma: user vma to map to
Alex Zhang0c4123e2020-08-06 23:22:24 -07002263 * @addr: target page aligned user address to start at
WANG Wenhu86a76332020-04-01 21:09:03 -07002264 * @pfn: page frame number of kernel physical memory address
chenqiwu552657b2020-04-06 20:08:33 -07002265 * @size: size of mapping area
Rolf Eike Beerbfa5bf62006-09-25 23:31:22 -07002266 * @prot: page protection flags for this mapping
2267 *
Mike Rapoporta862f682019-03-05 15:48:42 -08002268 * Note: this is only safe if the mm semaphore is held when called.
2269 *
2270 * Return: %0 on success, negative error code otherwise.
Rolf Eike Beerbfa5bf62006-09-25 23:31:22 -07002271 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002272int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
2273 unsigned long pfn, unsigned long size, pgprot_t prot)
2274{
2275 pgd_t *pgd;
2276 unsigned long next;
Hugh Dickins2d15cab2005-06-25 14:54:33 -07002277 unsigned long end = addr + PAGE_ALIGN(size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002278 struct mm_struct *mm = vma->vm_mm;
Yongji Xied5957d22016-05-20 16:57:41 -07002279 unsigned long remap_pfn = pfn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002280 int err;
2281
Alex Zhang0c4123e2020-08-06 23:22:24 -07002282 if (WARN_ON_ONCE(!PAGE_ALIGNED(addr)))
2283 return -EINVAL;
2284
Linus Torvalds1da177e2005-04-16 15:20:36 -07002285 /*
2286 * Physically remapped pages are special. Tell the
2287 * rest of the world about it:
2288 * VM_IO tells people not to look at these pages
2289 * (accesses can have side effects).
Linus Torvalds6aab3412005-11-28 14:34:23 -08002290 * VM_PFNMAP tells the core MM that the base pages are just
2291 * raw PFN mappings, and do not have a "struct page" associated
2292 * with them.
Konstantin Khlebnikov314e51b2012-10-08 16:29:02 -07002293 * VM_DONTEXPAND
2294 * Disable vma merging and expanding with mremap().
2295 * VM_DONTDUMP
2296 * Omit vma from core dump, even when VM_IO turned off.
Linus Torvaldsfb155c12005-12-11 19:46:02 -08002297 *
2298 * There's a horrible special case to handle copy-on-write
2299 * behaviour that some programs depend on. We mark the "original"
2300 * un-COW'ed pages by matching them up with "vma->vm_pgoff".
Konstantin Khlebnikovb3b9c292012-10-08 16:28:34 -07002301 * See vm_normal_page() for details.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002302 */
Konstantin Khlebnikovb3b9c292012-10-08 16:28:34 -07002303 if (is_cow_mapping(vma->vm_flags)) {
2304 if (addr != vma->vm_start || end != vma->vm_end)
2305 return -EINVAL;
Linus Torvaldsfb155c12005-12-11 19:46:02 -08002306 vma->vm_pgoff = pfn;
Konstantin Khlebnikovb3b9c292012-10-08 16:28:34 -07002307 }
2308
Yongji Xied5957d22016-05-20 16:57:41 -07002309 err = track_pfn_remap(vma, &prot, remap_pfn, addr, PAGE_ALIGN(size));
Konstantin Khlebnikovb3b9c292012-10-08 16:28:34 -07002310 if (err)
venkatesh.pallipadi@intel.com3c8bb732008-12-18 11:41:27 -08002311 return -EINVAL;
Linus Torvaldsfb155c12005-12-11 19:46:02 -08002312
Konstantin Khlebnikov314e51b2012-10-08 16:29:02 -07002313 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002314
2315 BUG_ON(addr >= end);
2316 pfn -= addr >> PAGE_SHIFT;
2317 pgd = pgd_offset(mm, addr);
2318 flush_cache_range(vma, addr, end);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002319 do {
2320 next = pgd_addr_end(addr, end);
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03002321 err = remap_p4d_range(mm, pgd, addr, next,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002322 pfn + (addr >> PAGE_SHIFT), prot);
2323 if (err)
2324 break;
2325 } while (pgd++, addr = next, addr != end);
venkatesh.pallipadi@intel.com2ab64032008-12-18 11:41:29 -08002326
2327 if (err)
Yongji Xied5957d22016-05-20 16:57:41 -07002328 untrack_pfn(vma, remap_pfn, PAGE_ALIGN(size));
venkatesh.pallipadi@intel.com2ab64032008-12-18 11:41:29 -08002329
Linus Torvalds1da177e2005-04-16 15:20:36 -07002330 return err;
2331}
2332EXPORT_SYMBOL(remap_pfn_range);
2333
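/*
 * Illustrative sketch (not from this file): the classic ->mmap handler that
 * maps a physically contiguous, page-aligned buffer in one go.
 * "mydrv_buf_phys" and "MYDRV_BUF_SIZE" are hypothetical.
 *
 *	static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		unsigned long size = vma->vm_end - vma->vm_start;
 *		unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
 *
 *		if (off + size > MYDRV_BUF_SIZE)
 *			return -EINVAL;
 *		return remap_pfn_range(vma, vma->vm_start,
 *				       (mydrv_buf_phys + off) >> PAGE_SHIFT,
 *				       size, vma->vm_page_prot);
 *	}
 */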
Linus Torvaldsb4cbb192013-04-16 13:45:37 -07002334/**
2335 * vm_iomap_memory - remap memory to userspace
2336 * @vma: user vma to map to
Wang Wenhuabd69b92020-04-01 21:09:07 -07002337 * @start: start of the physical memory to be mapped
Linus Torvaldsb4cbb192013-04-16 13:45:37 -07002338 * @len: size of area
2339 *
2340 * This is a simplified io_remap_pfn_range() for common driver use. The
2341 * driver just needs to give us the physical memory range to be mapped,
2342 * we'll figure out the rest from the vma information.
2343 *
	2344 * NOTE! Some drivers might want to tweak vma->vm_page_prot first to set
	2345 * up write-combining or similar caching attributes.
Mike Rapoporta862f682019-03-05 15:48:42 -08002346 *
2347 * Return: %0 on success, negative error code otherwise.
Linus Torvaldsb4cbb192013-04-16 13:45:37 -07002348 */
2349int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
2350{
2351 unsigned long vm_len, pfn, pages;
2352
2353 /* Check that the physical memory area passed in looks valid */
2354 if (start + len < start)
2355 return -EINVAL;
2356 /*
2357 * You *really* shouldn't map things that aren't page-aligned,
2358 * but we've historically allowed it because IO memory might
2359 * just have smaller alignment.
2360 */
2361 len += start & ~PAGE_MASK;
2362 pfn = start >> PAGE_SHIFT;
2363 pages = (len + ~PAGE_MASK) >> PAGE_SHIFT;
2364 if (pfn + pages < pfn)
2365 return -EINVAL;
2366
2367 /* We start the mapping 'vm_pgoff' pages into the area */
2368 if (vma->vm_pgoff > pages)
2369 return -EINVAL;
2370 pfn += vma->vm_pgoff;
2371 pages -= vma->vm_pgoff;
2372
2373 /* Can we fit all of the mapping? */
2374 vm_len = vma->vm_end - vma->vm_start;
2375 if (vm_len >> PAGE_SHIFT > pages)
2376 return -EINVAL;
2377
2378 /* Ok, let it rip */
2379 return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
2380}
2381EXPORT_SYMBOL(vm_iomap_memory);
2382
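/*
 * Illustrative sketch (not from this file): mapping a device BAR with
 * vm_iomap_memory(), which derives the pfn, size and offset checks from the
 * vma itself.  "mydrv_bar_start" and "mydrv_bar_len" are hypothetical
 * (e.g. obtained from pci_resource_start()/pci_resource_len()).
 *
 *	static int mydrv_bar_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *		return vm_iomap_memory(vma, mydrv_bar_start, mydrv_bar_len);
 *	}
 */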
Jeremy Fitzhardingeaee16b32007-05-06 14:48:54 -07002383static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
2384 unsigned long addr, unsigned long end,
Joerg Roedele80d3902020-09-04 16:35:43 -07002385 pte_fn_t fn, void *data, bool create,
2386 pgtbl_mod_mask *mask)
Jeremy Fitzhardingeaee16b32007-05-06 14:48:54 -07002387{
2388 pte_t *pte;
Daniel Axtensbe1db472019-12-17 20:51:41 -08002389 int err = 0;
Kees Cook3f649ab2020-06-03 13:09:38 -07002390 spinlock_t *ptl;
Jeremy Fitzhardingeaee16b32007-05-06 14:48:54 -07002391
Daniel Axtensbe1db472019-12-17 20:51:41 -08002392 if (create) {
2393 pte = (mm == &init_mm) ?
Joerg Roedele80d3902020-09-04 16:35:43 -07002394 pte_alloc_kernel_track(pmd, addr, mask) :
Daniel Axtensbe1db472019-12-17 20:51:41 -08002395 pte_alloc_map_lock(mm, pmd, addr, &ptl);
2396 if (!pte)
2397 return -ENOMEM;
2398 } else {
2399 pte = (mm == &init_mm) ?
2400 pte_offset_kernel(pmd, addr) :
2401 pte_offset_map_lock(mm, pmd, addr, &ptl);
2402 }
Jeremy Fitzhardingeaee16b32007-05-06 14:48:54 -07002403
2404 BUG_ON(pmd_huge(*pmd));
2405
Jeremy Fitzhardinge38e0edb2009-01-06 14:39:21 -08002406 arch_enter_lazy_mmu_mode();
2407
Christoph Hellwigeeb4a052020-10-17 16:15:14 -07002408 if (fn) {
2409 do {
2410 if (create || !pte_none(*pte)) {
2411 err = fn(pte++, addr, data);
2412 if (err)
2413 break;
2414 }
2415 } while (addr += PAGE_SIZE, addr != end);
2416 }
Joerg Roedele80d3902020-09-04 16:35:43 -07002417 *mask |= PGTBL_PTE_MODIFIED;
Jeremy Fitzhardingeaee16b32007-05-06 14:48:54 -07002418
Jeremy Fitzhardinge38e0edb2009-01-06 14:39:21 -08002419 arch_leave_lazy_mmu_mode();
2420
Jeremy Fitzhardingeaee16b32007-05-06 14:48:54 -07002421 if (mm != &init_mm)
2422 pte_unmap_unlock(pte-1, ptl);
2423 return err;
2424}
2425
2426static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
2427 unsigned long addr, unsigned long end,
Joerg Roedele80d3902020-09-04 16:35:43 -07002428 pte_fn_t fn, void *data, bool create,
2429 pgtbl_mod_mask *mask)
Jeremy Fitzhardingeaee16b32007-05-06 14:48:54 -07002430{
2431 pmd_t *pmd;
2432 unsigned long next;
Daniel Axtensbe1db472019-12-17 20:51:41 -08002433 int err = 0;
Jeremy Fitzhardingeaee16b32007-05-06 14:48:54 -07002434
Andi Kleenceb86872008-07-23 21:27:50 -07002435 BUG_ON(pud_huge(*pud));
2436
Daniel Axtensbe1db472019-12-17 20:51:41 -08002437 if (create) {
Joerg Roedele80d3902020-09-04 16:35:43 -07002438 pmd = pmd_alloc_track(mm, pud, addr, mask);
Daniel Axtensbe1db472019-12-17 20:51:41 -08002439 if (!pmd)
2440 return -ENOMEM;
2441 } else {
2442 pmd = pmd_offset(pud, addr);
2443 }
Jeremy Fitzhardingeaee16b32007-05-06 14:48:54 -07002444 do {
2445 next = pmd_addr_end(addr, end);
Daniel Axtensbe1db472019-12-17 20:51:41 -08002446 if (create || !pmd_none_or_clear_bad(pmd)) {
2447 err = apply_to_pte_range(mm, pmd, addr, next, fn, data,
Joerg Roedele80d3902020-09-04 16:35:43 -07002448 create, mask);
Daniel Axtensbe1db472019-12-17 20:51:41 -08002449 if (err)
2450 break;
2451 }
Jeremy Fitzhardingeaee16b32007-05-06 14:48:54 -07002452 } while (pmd++, addr = next, addr != end);
2453 return err;
2454}
2455
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03002456static int apply_to_pud_range(struct mm_struct *mm, p4d_t *p4d,
Jeremy Fitzhardingeaee16b32007-05-06 14:48:54 -07002457 unsigned long addr, unsigned long end,
Joerg Roedele80d3902020-09-04 16:35:43 -07002458 pte_fn_t fn, void *data, bool create,
2459 pgtbl_mod_mask *mask)
Jeremy Fitzhardingeaee16b32007-05-06 14:48:54 -07002460{
2461 pud_t *pud;
2462 unsigned long next;
Daniel Axtensbe1db472019-12-17 20:51:41 -08002463 int err = 0;
Jeremy Fitzhardingeaee16b32007-05-06 14:48:54 -07002464
Daniel Axtensbe1db472019-12-17 20:51:41 -08002465 if (create) {
Joerg Roedele80d3902020-09-04 16:35:43 -07002466 pud = pud_alloc_track(mm, p4d, addr, mask);
Daniel Axtensbe1db472019-12-17 20:51:41 -08002467 if (!pud)
2468 return -ENOMEM;
2469 } else {
2470 pud = pud_offset(p4d, addr);
2471 }
Jeremy Fitzhardingeaee16b32007-05-06 14:48:54 -07002472 do {
2473 next = pud_addr_end(addr, end);
Daniel Axtensbe1db472019-12-17 20:51:41 -08002474 if (create || !pud_none_or_clear_bad(pud)) {
2475 err = apply_to_pmd_range(mm, pud, addr, next, fn, data,
Joerg Roedele80d3902020-09-04 16:35:43 -07002476 create, mask);
Daniel Axtensbe1db472019-12-17 20:51:41 -08002477 if (err)
2478 break;
2479 }
Jeremy Fitzhardingeaee16b32007-05-06 14:48:54 -07002480 } while (pud++, addr = next, addr != end);
2481 return err;
2482}
2483
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03002484static int apply_to_p4d_range(struct mm_struct *mm, pgd_t *pgd,
2485 unsigned long addr, unsigned long end,
Joerg Roedele80d3902020-09-04 16:35:43 -07002486 pte_fn_t fn, void *data, bool create,
2487 pgtbl_mod_mask *mask)
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03002488{
2489 p4d_t *p4d;
2490 unsigned long next;
Daniel Axtensbe1db472019-12-17 20:51:41 -08002491 int err = 0;
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03002492
Daniel Axtensbe1db472019-12-17 20:51:41 -08002493 if (create) {
Joerg Roedele80d3902020-09-04 16:35:43 -07002494 p4d = p4d_alloc_track(mm, pgd, addr, mask);
Daniel Axtensbe1db472019-12-17 20:51:41 -08002495 if (!p4d)
2496 return -ENOMEM;
2497 } else {
2498 p4d = p4d_offset(pgd, addr);
2499 }
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03002500 do {
2501 next = p4d_addr_end(addr, end);
Daniel Axtensbe1db472019-12-17 20:51:41 -08002502 if (create || !p4d_none_or_clear_bad(p4d)) {
2503 err = apply_to_pud_range(mm, p4d, addr, next, fn, data,
Joerg Roedele80d3902020-09-04 16:35:43 -07002504 create, mask);
Daniel Axtensbe1db472019-12-17 20:51:41 -08002505 if (err)
2506 break;
2507 }
2508 } while (p4d++, addr = next, addr != end);
2509 return err;
2510}
2511
2512static int __apply_to_page_range(struct mm_struct *mm, unsigned long addr,
2513 unsigned long size, pte_fn_t fn,
2514 void *data, bool create)
2515{
2516 pgd_t *pgd;
Joerg Roedele80d3902020-09-04 16:35:43 -07002517 unsigned long start = addr, next;
Daniel Axtensbe1db472019-12-17 20:51:41 -08002518 unsigned long end = addr + size;
Joerg Roedele80d3902020-09-04 16:35:43 -07002519 pgtbl_mod_mask mask = 0;
Daniel Axtensbe1db472019-12-17 20:51:41 -08002520 int err = 0;
2521
2522 if (WARN_ON(addr >= end))
2523 return -EINVAL;
2524
2525 pgd = pgd_offset(mm, addr);
2526 do {
2527 next = pgd_addr_end(addr, end);
2528 if (!create && pgd_none_or_clear_bad(pgd))
2529 continue;
Joerg Roedele80d3902020-09-04 16:35:43 -07002530 err = apply_to_p4d_range(mm, pgd, addr, next, fn, data, create, &mask);
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03002531 if (err)
2532 break;
Daniel Axtensbe1db472019-12-17 20:51:41 -08002533 } while (pgd++, addr = next, addr != end);
2534
Joerg Roedele80d3902020-09-04 16:35:43 -07002535 if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
2536 arch_sync_kernel_mappings(start, start + size);
2537
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03002538 return err;
2539}
2540
Jeremy Fitzhardingeaee16b32007-05-06 14:48:54 -07002541/*
2542 * Scan a region of virtual memory, filling in page tables as necessary
2543 * and calling a provided function on each leaf page table.
2544 */
2545int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
2546 unsigned long size, pte_fn_t fn, void *data)
2547{
Daniel Axtensbe1db472019-12-17 20:51:41 -08002548 return __apply_to_page_range(mm, addr, size, fn, data, true);
Jeremy Fitzhardingeaee16b32007-05-06 14:48:54 -07002549}
2550EXPORT_SYMBOL_GPL(apply_to_page_range);
2551
Linus Torvalds1da177e2005-04-16 15:20:36 -07002552/*
Daniel Axtensbe1db472019-12-17 20:51:41 -08002553 * Scan a region of virtual memory, calling a provided function on
2554 * each leaf page table where it exists.
2555 *
2556 * Unlike apply_to_page_range, this does _not_ fill in page tables
2557 * where they are absent.
2558 */
2559int apply_to_existing_page_range(struct mm_struct *mm, unsigned long addr,
2560 unsigned long size, pte_fn_t fn, void *data)
2561{
2562 return __apply_to_page_range(mm, addr, size, fn, data, false);
2563}
2564EXPORT_SYMBOL_GPL(apply_to_existing_page_range);
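/*
 * Illustrative sketch (editor's addition, not part of this file): a minimal
 * pte_fn_t callback and a walk over an existing kernel-mapped range. The
 * callback just counts populated PTEs; "count_pte" and "count_mapped_ptes"
 * are hypothetical names. apply_to_existing_page_range() is used so that no
 * page tables are allocated as a side effect. Guarded by #if 0.
 */
#if 0
static int count_pte(pte_t *pte, unsigned long addr, void *data)
{
	unsigned long *nr_present = data;

	if (!pte_none(*pte))
		(*nr_present)++;
	return 0;
}

static unsigned long count_mapped_ptes(unsigned long addr, unsigned long size)
{
	unsigned long nr_present = 0;

	/* Visit only page tables that already exist for [addr, addr + size). */
	apply_to_existing_page_range(&init_mm, addr, size,
				     count_pte, &nr_present);
	return nr_present;
}
#endif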
2565
2566/*
Kirill A. Shutemov9b4bdd22015-02-10 14:09:51 -08002567 * handle_pte_fault chooses page fault handler according to an entry which was
2568 * read non-atomically. Before making any commitment, on those architectures
2569 * or configurations (e.g. i386 with PAE) which might give a mix of unmatched
2570 * parts, do_swap_page must check under lock before unmapping the pte and
2571 * proceeding (but do_wp_page is only called after already making such a check;
Ryota Ozakia335b2e2011-02-10 13:56:28 +09002572 * and do_anonymous_page can safely check later on).
Hugh Dickins8f4e2102005-10-29 18:16:26 -07002573 */
Hugh Dickins4c21e2f2005-10-29 18:16:40 -07002574static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
Hugh Dickins8f4e2102005-10-29 18:16:26 -07002575 pte_t *page_table, pte_t orig_pte)
2576{
2577 int same = 1;
Thomas Gleixner923717c2019-10-15 21:18:12 +02002578#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPTION)
Hugh Dickins8f4e2102005-10-29 18:16:26 -07002579 if (sizeof(pte_t) > sizeof(unsigned long)) {
Hugh Dickins4c21e2f2005-10-29 18:16:40 -07002580 spinlock_t *ptl = pte_lockptr(mm, pmd);
2581 spin_lock(ptl);
Hugh Dickins8f4e2102005-10-29 18:16:26 -07002582 same = pte_same(*page_table, orig_pte);
Hugh Dickins4c21e2f2005-10-29 18:16:40 -07002583 spin_unlock(ptl);
Hugh Dickins8f4e2102005-10-29 18:16:26 -07002584 }
2585#endif
2586 pte_unmap(page_table);
2587 return same;
2588}
2589
Jia He83d116c2019-10-11 22:09:39 +08002590static inline bool cow_user_page(struct page *dst, struct page *src,
2591 struct vm_fault *vmf)
Linus Torvalds6aab3412005-11-28 14:34:23 -08002592{
Jia He83d116c2019-10-11 22:09:39 +08002593 bool ret;
2594 void *kaddr;
2595 void __user *uaddr;
Kirill A. Shutemovc3e5ea62020-03-05 22:28:32 -08002596 bool locked = false;
Jia He83d116c2019-10-11 22:09:39 +08002597 struct vm_area_struct *vma = vmf->vma;
2598 struct mm_struct *mm = vma->vm_mm;
2599 unsigned long addr = vmf->address;
2600
Jia He83d116c2019-10-11 22:09:39 +08002601 if (likely(src)) {
2602 copy_user_highpage(dst, src, addr, vma);
2603 return true;
2604 }
2605
Linus Torvalds6aab3412005-11-28 14:34:23 -08002606 /*
2607 * If the source page was a PFN mapping, we don't have
2608 * a "struct page" for it. We do a best-effort copy by
2609 * just copying from the original user address. If that
2610 * fails, we just zero-fill it. Live with it.
2611 */
Jia He83d116c2019-10-11 22:09:39 +08002612 kaddr = kmap_atomic(dst);
2613 uaddr = (void __user *)(addr & PAGE_MASK);
Linus Torvalds5d2a2dbbc2005-11-29 14:07:55 -08002614
Jia He83d116c2019-10-11 22:09:39 +08002615 /*
2616 * On architectures with software "accessed" bits, we would
2617 * take a double page fault, so mark it accessed here.
2618 */
Kirill A. Shutemovc3e5ea62020-03-05 22:28:32 -08002619 if (arch_faults_on_old_pte() && !pte_young(vmf->orig_pte)) {
Jia He83d116c2019-10-11 22:09:39 +08002620 pte_t entry;
2621
2622 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
Kirill A. Shutemovc3e5ea62020-03-05 22:28:32 -08002623 locked = true;
Jia He83d116c2019-10-11 22:09:39 +08002624 if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) {
2625 /*
2626 * Another thread has already handled the fault;
Bibo Mao7df67692020-05-27 10:25:18 +08002627 * just update the local TLB here.
Jia He83d116c2019-10-11 22:09:39 +08002628 */
Bibo Mao7df67692020-05-27 10:25:18 +08002629 update_mmu_tlb(vma, addr, vmf->pte);
Jia He83d116c2019-10-11 22:09:39 +08002630 ret = false;
2631 goto pte_unlock;
2632 }
2633
2634 entry = pte_mkyoung(vmf->orig_pte);
2635 if (ptep_set_access_flags(vma, addr, vmf->pte, entry, 0))
2636 update_mmu_cache(vma, addr, vmf->pte);
2637 }
2638
2639 /*
2640 * This really shouldn't fail, because the page is there
2641 * in the page tables. But it might just be unreadable,
2642 * in which case we just give up and fill the result with
2643 * zeroes.
2644 */
2645 if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
Kirill A. Shutemovc3e5ea62020-03-05 22:28:32 -08002646 if (locked)
2647 goto warn;
2648
2649 /* Re-validate under PTL if the page is still mapped */
2650 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
2651 locked = true;
2652 if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) {
Bibo Mao7df67692020-05-27 10:25:18 +08002653 /* The PTE changed under us; just update the local TLB */
2654 update_mmu_tlb(vma, addr, vmf->pte);
Kirill A. Shutemovc3e5ea62020-03-05 22:28:32 -08002655 ret = false;
2656 goto pte_unlock;
2657 }
2658
Linus Torvalds5d2a2dbbc2005-11-29 14:07:55 -08002659 /*
Ethon Paul985ba002020-06-04 16:49:43 -07002660 * The same page may have been mapped back in since the last copy
Kirill A. Shutemovc3e5ea62020-03-05 22:28:32 -08002661 * attempt. Try copying again while holding the PTL.
Linus Torvalds5d2a2dbbc2005-11-29 14:07:55 -08002662 */
Kirill A. Shutemovc3e5ea62020-03-05 22:28:32 -08002663 if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
2664 /*
2665 * Warn in case some obscure use-case ever hits
2666 * this path.
2667 */
2668warn:
2669 WARN_ON_ONCE(1);
2670 clear_page(kaddr);
2671 }
Jia He83d116c2019-10-11 22:09:39 +08002672 }
2673
2674 ret = true;
2675
2676pte_unlock:
Kirill A. Shutemovc3e5ea62020-03-05 22:28:32 -08002677 if (locked)
Jia He83d116c2019-10-11 22:09:39 +08002678 pte_unmap_unlock(vmf->pte, vmf->ptl);
2679 kunmap_atomic(kaddr);
2680 flush_dcache_page(dst);
2681
2682 return ret;
Linus Torvalds6aab3412005-11-28 14:34:23 -08002683}
2684
Michal Hockoc20cd452016-01-14 15:20:12 -08002685static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma)
2686{
2687 struct file *vm_file = vma->vm_file;
2688
2689 if (vm_file)
2690 return mapping_gfp_mask(vm_file->f_mapping) | __GFP_FS | __GFP_IO;
2691
2692 /*
2693 * Special mappings (e.g. VDSO) do not have any file so fake
2694 * a default GFP_KERNEL for them.
2695 */
2696 return GFP_KERNEL;
2697}
2698
Linus Torvalds1da177e2005-04-16 15:20:36 -07002699/*
Kirill A. Shutemovfb09a462014-04-03 14:48:15 -07002700 * Notify the address space that the page is about to become writable so that
2701 * it can prohibit this or wait for the page to get into an appropriate state.
2702 *
2703 * We do this without the lock held, so that it can sleep if it needs to.
2704 */
Souptick Joarder2b740302018-08-23 17:01:36 -07002705static vm_fault_t do_page_mkwrite(struct vm_fault *vmf)
Kirill A. Shutemovfb09a462014-04-03 14:48:15 -07002706{
Souptick Joarder2b740302018-08-23 17:01:36 -07002707 vm_fault_t ret;
Jan Kara38b8cb72016-12-14 15:07:30 -08002708 struct page *page = vmf->page;
2709 unsigned int old_flags = vmf->flags;
Kirill A. Shutemovfb09a462014-04-03 14:48:15 -07002710
Jan Kara38b8cb72016-12-14 15:07:30 -08002711 vmf->flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
Kirill A. Shutemovfb09a462014-04-03 14:48:15 -07002712
Darrick J. Wongdc617f22019-08-20 07:55:16 -07002713 if (vmf->vma->vm_file &&
2714 IS_SWAPFILE(vmf->vma->vm_file->f_mapping->host))
2715 return VM_FAULT_SIGBUS;
2716
Dave Jiang11bac802017-02-24 14:56:41 -08002717 ret = vmf->vma->vm_ops->page_mkwrite(vmf);
Jan Kara38b8cb72016-12-14 15:07:30 -08002718 /* Restore original flags so that caller is not surprised */
2719 vmf->flags = old_flags;
Kirill A. Shutemovfb09a462014-04-03 14:48:15 -07002720 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
2721 return ret;
2722 if (unlikely(!(ret & VM_FAULT_LOCKED))) {
2723 lock_page(page);
2724 if (!page->mapping) {
2725 unlock_page(page);
2726 return 0; /* retry */
2727 }
2728 ret |= VM_FAULT_LOCKED;
2729 } else
2730 VM_BUG_ON_PAGE(!PageLocked(page), page);
2731 return ret;
2732}
2733
2734/*
Jan Kara97ba0c22016-12-14 15:07:27 -08002735 * Handle dirtying of a page in shared file mapping on a write fault.
2736 *
2737 * The function expects the page to be locked and unlocks it.
2738 */
Johannes Weiner89b15332019-11-30 17:50:22 -08002739static vm_fault_t fault_dirty_shared_page(struct vm_fault *vmf)
Jan Kara97ba0c22016-12-14 15:07:27 -08002740{
Johannes Weiner89b15332019-11-30 17:50:22 -08002741 struct vm_area_struct *vma = vmf->vma;
Jan Kara97ba0c22016-12-14 15:07:27 -08002742 struct address_space *mapping;
Johannes Weiner89b15332019-11-30 17:50:22 -08002743 struct page *page = vmf->page;
Jan Kara97ba0c22016-12-14 15:07:27 -08002744 bool dirtied;
2745 bool page_mkwrite = vma->vm_ops && vma->vm_ops->page_mkwrite;
2746
2747 dirtied = set_page_dirty(page);
2748 VM_BUG_ON_PAGE(PageAnon(page), page);
2749 /*
2750 * Take a local copy of the address_space - page.mapping may be zeroed
2751 * by truncate after unlock_page(). The address_space itself remains
2752 * pinned by vma->vm_file's reference. We rely on unlock_page()'s
2753 * release semantics to prevent the compiler from undoing this copying.
2754 */
2755 mapping = page_rmapping(page);
2756 unlock_page(page);
2757
Jan Kara97ba0c22016-12-14 15:07:27 -08002758 if (!page_mkwrite)
2759 file_update_time(vma->vm_file);
Johannes Weiner89b15332019-11-30 17:50:22 -08002760
2761 /*
2762 * Throttle page dirtying rate down to writeback speed.
2763 *
2764 * mapping may be NULL here because some device drivers do not
2765 * set page.mapping but still dirty their pages
2766 *
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07002767 * Drop the mmap_lock before waiting on IO, if we can. The file
Johannes Weiner89b15332019-11-30 17:50:22 -08002768 * is pinning the mapping, as per above.
2769 */
2770 if ((dirtied || page_mkwrite) && mapping) {
2771 struct file *fpin;
2772
2773 fpin = maybe_unlock_mmap_for_io(vmf, NULL);
2774 balance_dirty_pages_ratelimited(mapping);
2775 if (fpin) {
2776 fput(fpin);
2777 return VM_FAULT_RETRY;
2778 }
2779 }
2780
2781 return 0;
Jan Kara97ba0c22016-12-14 15:07:27 -08002782}
2783
2784/*
Shachar Raindel4e047f82015-04-14 15:46:25 -07002785 * Handle write page faults for pages that can be reused in the current vma
2786 *
2787 * This can happen either due to the mapping having the VM_SHARED flag,
2788 * or due to us being the last reference standing to the page. In either
2789 * case, all we need to do here is to mark the page as writable and update
2790 * any related book-keeping.
2791 */
Jan Kara997dd982016-12-14 15:07:36 -08002792static inline void wp_page_reuse(struct vm_fault *vmf)
Jan Kara82b0f8c2016-12-14 15:06:58 -08002793 __releases(vmf->ptl)
Shachar Raindel4e047f82015-04-14 15:46:25 -07002794{
Jan Kara82b0f8c2016-12-14 15:06:58 -08002795 struct vm_area_struct *vma = vmf->vma;
Jan Karaa41b70d2016-12-14 15:07:33 -08002796 struct page *page = vmf->page;
Shachar Raindel4e047f82015-04-14 15:46:25 -07002797 pte_t entry;
2798 /*
2799 * Clear the page's cpupid information as the existing
2800 * information potentially belongs to a now completely
2801 * unrelated process.
2802 */
2803 if (page)
2804 page_cpupid_xchg_last(page, (1 << LAST_CPUPID_SHIFT) - 1);
2805
Jan Kara29943022016-12-14 15:07:16 -08002806 flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
2807 entry = pte_mkyoung(vmf->orig_pte);
Shachar Raindel4e047f82015-04-14 15:46:25 -07002808 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
Jan Kara82b0f8c2016-12-14 15:06:58 -08002809 if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1))
2810 update_mmu_cache(vma, vmf->address, vmf->pte);
2811 pte_unmap_unlock(vmf->pte, vmf->ptl);
Peter Xu798a6b82020-08-21 19:49:58 -04002812 count_vm_event(PGREUSE);
Shachar Raindel4e047f82015-04-14 15:46:25 -07002813}
2814
2815/*
Shachar Raindel2f38ab22015-04-14 15:46:32 -07002816 * Handle the case of a page which we actually need to copy to a new page.
2817 *
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07002818 * Called with mmap_lock locked and the old page referenced, but
Shachar Raindel2f38ab22015-04-14 15:46:32 -07002819 * without the ptl held.
2820 *
2821 * High level logic flow:
2822 *
2823 * - Allocate a page, copy the content of the old page to the new one.
2824 * - Handle book keeping and accounting - cgroups, mmu-notifiers, etc.
2825 * - Take the PTL. If the pte changed, bail out and release the allocated page
2826 * - If the pte is still the way we remember it, update the page table and all
2827 * relevant references. This includes dropping the reference the page-table
2828 * held to the old page, as well as updating the rmap.
2829 * - In any case, unlock the PTL and drop the reference we took to the old page.
2830 */
Souptick Joarder2b740302018-08-23 17:01:36 -07002831static vm_fault_t wp_page_copy(struct vm_fault *vmf)
Shachar Raindel2f38ab22015-04-14 15:46:32 -07002832{
Jan Kara82b0f8c2016-12-14 15:06:58 -08002833 struct vm_area_struct *vma = vmf->vma;
Kirill A. Shutemovbae473a2016-07-26 15:25:20 -07002834 struct mm_struct *mm = vma->vm_mm;
Jan Karaa41b70d2016-12-14 15:07:33 -08002835 struct page *old_page = vmf->page;
Shachar Raindel2f38ab22015-04-14 15:46:32 -07002836 struct page *new_page = NULL;
Shachar Raindel2f38ab22015-04-14 15:46:32 -07002837 pte_t entry;
2838 int page_copied = 0;
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08002839 struct mmu_notifier_range range;
Shachar Raindel2f38ab22015-04-14 15:46:32 -07002840
2841 if (unlikely(anon_vma_prepare(vma)))
2842 goto oom;
2843
Jan Kara29943022016-12-14 15:07:16 -08002844 if (is_zero_pfn(pte_pfn(vmf->orig_pte))) {
Jan Kara82b0f8c2016-12-14 15:06:58 -08002845 new_page = alloc_zeroed_user_highpage_movable(vma,
2846 vmf->address);
Shachar Raindel2f38ab22015-04-14 15:46:32 -07002847 if (!new_page)
2848 goto oom;
2849 } else {
Kirill A. Shutemovbae473a2016-07-26 15:25:20 -07002850 new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
Jan Kara82b0f8c2016-12-14 15:06:58 -08002851 vmf->address);
Shachar Raindel2f38ab22015-04-14 15:46:32 -07002852 if (!new_page)
2853 goto oom;
Jia He83d116c2019-10-11 22:09:39 +08002854
2855 if (!cow_user_page(new_page, old_page, vmf)) {
2856 /*
2857 * COW failed, if the fault was solved by other,
2858 * it's fine. If not, userspace would re-fault on
2859 * the same address and we will handle the fault
2860 * from the second attempt.
2861 */
2862 put_page(new_page);
2863 if (old_page)
2864 put_page(old_page);
2865 return 0;
2866 }
Shachar Raindel2f38ab22015-04-14 15:46:32 -07002867 }
Shachar Raindel2f38ab22015-04-14 15:46:32 -07002868
Johannes Weinerd9eb1ea2020-06-03 16:02:24 -07002869 if (mem_cgroup_charge(new_page, mm, GFP_KERNEL))
Shachar Raindel2f38ab22015-04-14 15:46:32 -07002870 goto oom_free_new;
Johannes Weiner9d82c692020-06-03 16:02:04 -07002871 cgroup_throttle_swaprate(new_page, GFP_KERNEL);
Shachar Raindel2f38ab22015-04-14 15:46:32 -07002872
Mel Gormaneb3c24f2015-06-24 16:57:27 -07002873 __SetPageUptodate(new_page);
2874
Jérôme Glisse7269f992019-05-13 17:20:53 -07002875 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm,
Jérôme Glisse6f4f13e2019-05-13 17:20:49 -07002876 vmf->address & PAGE_MASK,
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08002877 (vmf->address & PAGE_MASK) + PAGE_SIZE);
2878 mmu_notifier_invalidate_range_start(&range);
Shachar Raindel2f38ab22015-04-14 15:46:32 -07002879
2880 /*
2881 * Re-check the pte - we dropped the lock
2882 */
Jan Kara82b0f8c2016-12-14 15:06:58 -08002883 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl);
Jan Kara29943022016-12-14 15:07:16 -08002884 if (likely(pte_same(*vmf->pte, vmf->orig_pte))) {
Shachar Raindel2f38ab22015-04-14 15:46:32 -07002885 if (old_page) {
2886 if (!PageAnon(old_page)) {
Jerome Marchandeca56ff2016-01-14 15:19:26 -08002887 dec_mm_counter_fast(mm,
2888 mm_counter_file(old_page));
Shachar Raindel2f38ab22015-04-14 15:46:32 -07002889 inc_mm_counter_fast(mm, MM_ANONPAGES);
2890 }
2891 } else {
2892 inc_mm_counter_fast(mm, MM_ANONPAGES);
2893 }
Jan Kara29943022016-12-14 15:07:16 -08002894 flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
Shachar Raindel2f38ab22015-04-14 15:46:32 -07002895 entry = mk_pte(new_page, vma->vm_page_prot);
Bibo Mao44bf4312020-05-27 10:25:19 +08002896 entry = pte_sw_mkyoung(entry);
Shachar Raindel2f38ab22015-04-14 15:46:32 -07002897 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
2898 /*
2899 * Clear the pte entry and flush it first, before updating the
2900 * pte with the new entry. This will avoid a race condition
2901 * seen in the presence of one thread doing SMC and another
2902 * thread doing COW.
2903 */
Jan Kara82b0f8c2016-12-14 15:06:58 -08002904 ptep_clear_flush_notify(vma, vmf->address, vmf->pte);
2905 page_add_new_anon_rmap(new_page, vma, vmf->address, false);
Joonsoo Kimb5181542020-08-11 18:30:40 -07002906 lru_cache_add_inactive_or_unevictable(new_page, vma);
Shachar Raindel2f38ab22015-04-14 15:46:32 -07002907 /*
2908 * We call the notify macro here because, when using secondary
2909 * mmu page tables (such as kvm shadow page tables), we want the
2910 * new page to be mapped directly into the secondary page table.
2911 */
Jan Kara82b0f8c2016-12-14 15:06:58 -08002912 set_pte_at_notify(mm, vmf->address, vmf->pte, entry);
2913 update_mmu_cache(vma, vmf->address, vmf->pte);
Shachar Raindel2f38ab22015-04-14 15:46:32 -07002914 if (old_page) {
2915 /*
2916 * Only after switching the pte to the new page may
2917 * we remove the mapcount here. Otherwise another
2918 * process may come and find the rmap count decremented
2919 * before the pte is switched to the new page, and
2920 * "reuse" the old page writing into it while our pte
2921 * here still points into it and can be read by other
2922 * threads.
2923 *
2924 * The critical issue is to order this
2925 * page_remove_rmap with the ptep_clear_flush above.
2926 * Those stores are ordered by (if nothing else,)
2927 * the barrier present in the atomic_add_negative
2928 * in page_remove_rmap.
2929 *
2930 * Then the TLB flush in ptep_clear_flush ensures that
2931 * no process can access the old page before the
2932 * decremented mapcount is visible. And the old page
2933 * cannot be reused until after the decremented
2934 * mapcount is visible. So transitively, TLBs to
2935 * old page will be flushed before it can be reused.
2936 */
Kirill A. Shutemovd281ee62016-01-15 16:52:16 -08002937 page_remove_rmap(old_page, false);
Shachar Raindel2f38ab22015-04-14 15:46:32 -07002938 }
2939
2940 /* Free the old page.. */
2941 new_page = old_page;
2942 page_copied = 1;
2943 } else {
Bibo Mao7df67692020-05-27 10:25:18 +08002944 update_mmu_tlb(vma, vmf->address, vmf->pte);
Shachar Raindel2f38ab22015-04-14 15:46:32 -07002945 }
2946
2947 if (new_page)
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002948 put_page(new_page);
Shachar Raindel2f38ab22015-04-14 15:46:32 -07002949
Jan Kara82b0f8c2016-12-14 15:06:58 -08002950 pte_unmap_unlock(vmf->pte, vmf->ptl);
Jérôme Glisse4645b9f2017-11-15 17:34:11 -08002951 /*
2952 * No need to double call mmu_notifier->invalidate_range() callback as
2953 * the above ptep_clear_flush_notify() did already call it.
2954 */
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08002955 mmu_notifier_invalidate_range_only_end(&range);
Shachar Raindel2f38ab22015-04-14 15:46:32 -07002956 if (old_page) {
2957 /*
2958 * Don't let another task, with possibly unlocked vma,
2959 * keep the mlocked page.
2960 */
2961 if (page_copied && (vma->vm_flags & VM_LOCKED)) {
2962 lock_page(old_page); /* LRU manipulation */
Kirill A. Shutemove90309c2016-01-15 16:54:33 -08002963 if (PageMlocked(old_page))
2964 munlock_vma_page(old_page);
Shachar Raindel2f38ab22015-04-14 15:46:32 -07002965 unlock_page(old_page);
2966 }
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002967 put_page(old_page);
Shachar Raindel2f38ab22015-04-14 15:46:32 -07002968 }
2969 return page_copied ? VM_FAULT_WRITE : 0;
2970oom_free_new:
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002971 put_page(new_page);
Shachar Raindel2f38ab22015-04-14 15:46:32 -07002972oom:
2973 if (old_page)
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002974 put_page(old_page);
Shachar Raindel2f38ab22015-04-14 15:46:32 -07002975 return VM_FAULT_OOM;
2976}
2977
Jan Kara66a61972016-12-14 15:07:39 -08002978/**
2979 * finish_mkwrite_fault - finish page fault for a shared mapping, making PTE
2980 * writeable once the page is prepared
2981 *
2982 * @vmf: structure describing the fault
2983 *
2984 * This function handles all that is needed to finish a write page fault in a
2985 * shared mapping due to the PTE being read-only once the mapped page is prepared.
Mike Rapoporta862f682019-03-05 15:48:42 -08002986 * It handles locking the PTE and modifying it.
Jan Kara66a61972016-12-14 15:07:39 -08002987 *
2988 * The function expects the page to be locked or other protection against
2989 * concurrent faults / writeback (such as DAX radix tree locks).
Mike Rapoporta862f682019-03-05 15:48:42 -08002990 *
2991 * Return: %VM_FAULT_WRITE on success, %0 when the PTE changed before
2992 * we acquired the PTE lock.
Jan Kara66a61972016-12-14 15:07:39 -08002993 */
Souptick Joarder2b740302018-08-23 17:01:36 -07002994vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf)
Jan Kara66a61972016-12-14 15:07:39 -08002995{
2996 WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED));
2997 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address,
2998 &vmf->ptl);
2999 /*
3000 * We might have raced with another page fault while we released the
3001 * pte_offset_map_lock.
3002 */
3003 if (!pte_same(*vmf->pte, vmf->orig_pte)) {
Bibo Mao7df67692020-05-27 10:25:18 +08003004 update_mmu_tlb(vmf->vma, vmf->address, vmf->pte);
Jan Kara66a61972016-12-14 15:07:39 -08003005 pte_unmap_unlock(vmf->pte, vmf->ptl);
Jan Karaa19e2552016-12-14 15:07:42 -08003006 return VM_FAULT_NOPAGE;
Jan Kara66a61972016-12-14 15:07:39 -08003007 }
3008 wp_page_reuse(vmf);
Jan Karaa19e2552016-12-14 15:07:42 -08003009 return 0;
Jan Kara66a61972016-12-14 15:07:39 -08003010}
3011
Boaz Harroshdd906182015-04-15 16:15:11 -07003012/*
3013 * Handle write page faults for VM_MIXEDMAP or VM_PFNMAP for a VM_SHARED
3014 * mapping
3015 */
Souptick Joarder2b740302018-08-23 17:01:36 -07003016static vm_fault_t wp_pfn_shared(struct vm_fault *vmf)
Boaz Harroshdd906182015-04-15 16:15:11 -07003017{
Jan Kara82b0f8c2016-12-14 15:06:58 -08003018 struct vm_area_struct *vma = vmf->vma;
Kirill A. Shutemovbae473a2016-07-26 15:25:20 -07003019
Boaz Harroshdd906182015-04-15 16:15:11 -07003020 if (vma->vm_ops && vma->vm_ops->pfn_mkwrite) {
Souptick Joarder2b740302018-08-23 17:01:36 -07003021 vm_fault_t ret;
Boaz Harroshdd906182015-04-15 16:15:11 -07003022
Jan Kara82b0f8c2016-12-14 15:06:58 -08003023 pte_unmap_unlock(vmf->pte, vmf->ptl);
Jan Karafe822212016-12-14 15:07:13 -08003024 vmf->flags |= FAULT_FLAG_MKWRITE;
Dave Jiang11bac802017-02-24 14:56:41 -08003025 ret = vma->vm_ops->pfn_mkwrite(vmf);
Jan Kara2f89dc12016-12-14 15:07:50 -08003026 if (ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))
Boaz Harroshdd906182015-04-15 16:15:11 -07003027 return ret;
Jan Kara66a61972016-12-14 15:07:39 -08003028 return finish_mkwrite_fault(vmf);
Boaz Harroshdd906182015-04-15 16:15:11 -07003029 }
Jan Kara997dd982016-12-14 15:07:36 -08003030 wp_page_reuse(vmf);
3031 return VM_FAULT_WRITE;
Boaz Harroshdd906182015-04-15 16:15:11 -07003032}
3033
Souptick Joarder2b740302018-08-23 17:01:36 -07003034static vm_fault_t wp_page_shared(struct vm_fault *vmf)
Jan Kara82b0f8c2016-12-14 15:06:58 -08003035 __releases(vmf->ptl)
Shachar Raindel93e478d2015-04-14 15:46:35 -07003036{
Jan Kara82b0f8c2016-12-14 15:06:58 -08003037 struct vm_area_struct *vma = vmf->vma;
Johannes Weiner89b15332019-11-30 17:50:22 -08003038 vm_fault_t ret = VM_FAULT_WRITE;
Shachar Raindel93e478d2015-04-14 15:46:35 -07003039
Jan Karaa41b70d2016-12-14 15:07:33 -08003040 get_page(vmf->page);
Shachar Raindel93e478d2015-04-14 15:46:35 -07003041
Shachar Raindel93e478d2015-04-14 15:46:35 -07003042 if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
Souptick Joarder2b740302018-08-23 17:01:36 -07003043 vm_fault_t tmp;
Shachar Raindel93e478d2015-04-14 15:46:35 -07003044
Jan Kara82b0f8c2016-12-14 15:06:58 -08003045 pte_unmap_unlock(vmf->pte, vmf->ptl);
Jan Kara38b8cb72016-12-14 15:07:30 -08003046 tmp = do_page_mkwrite(vmf);
Shachar Raindel93e478d2015-04-14 15:46:35 -07003047 if (unlikely(!tmp || (tmp &
3048 (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
Jan Karaa41b70d2016-12-14 15:07:33 -08003049 put_page(vmf->page);
Shachar Raindel93e478d2015-04-14 15:46:35 -07003050 return tmp;
3051 }
Jan Kara66a61972016-12-14 15:07:39 -08003052 tmp = finish_mkwrite_fault(vmf);
Jan Karaa19e2552016-12-14 15:07:42 -08003053 if (unlikely(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
Jan Karaa41b70d2016-12-14 15:07:33 -08003054 unlock_page(vmf->page);
Jan Karaa41b70d2016-12-14 15:07:33 -08003055 put_page(vmf->page);
Jan Kara66a61972016-12-14 15:07:39 -08003056 return tmp;
Shachar Raindel93e478d2015-04-14 15:46:35 -07003057 }
Jan Kara66a61972016-12-14 15:07:39 -08003058 } else {
3059 wp_page_reuse(vmf);
Jan Kara997dd982016-12-14 15:07:36 -08003060 lock_page(vmf->page);
Shachar Raindel93e478d2015-04-14 15:46:35 -07003061 }
Johannes Weiner89b15332019-11-30 17:50:22 -08003062 ret |= fault_dirty_shared_page(vmf);
Jan Kara997dd982016-12-14 15:07:36 -08003063 put_page(vmf->page);
Shachar Raindel93e478d2015-04-14 15:46:35 -07003064
Johannes Weiner89b15332019-11-30 17:50:22 -08003065 return ret;
Shachar Raindel93e478d2015-04-14 15:46:35 -07003066}
3067
Shachar Raindel2f38ab22015-04-14 15:46:32 -07003068/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07003069 * This routine handles present pages, when users try to write
3070 * to a shared page. It is done by copying the page to a new address
3071 * and decrementing the shared-page counter for the old page.
3072 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07003073 * Note that this routine assumes that the protection checks have been
3074 * done by the caller (the low-level page fault routine in most cases).
3075 * Thus we can safely just mark it writable once we've done any necessary
3076 * COW.
3077 *
3078 * We also mark the page dirty at this point even though the page will
3079 * change only once the write actually happens. This avoids a few races,
3080 * and potentially makes it more efficient.
3081 *
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07003082 * We enter with non-exclusive mmap_lock (to exclude vma changes,
Hugh Dickins8f4e2102005-10-29 18:16:26 -07003083 * but allow concurrent faults), with pte both mapped and locked.
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07003084 * We return with mmap_lock still held, but pte unmapped and unlocked.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003085 */
Souptick Joarder2b740302018-08-23 17:01:36 -07003086static vm_fault_t do_wp_page(struct vm_fault *vmf)
Jan Kara82b0f8c2016-12-14 15:06:58 -08003087 __releases(vmf->ptl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003088{
Jan Kara82b0f8c2016-12-14 15:06:58 -08003089 struct vm_area_struct *vma = vmf->vma;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003090
Peter Xu292924b2020-04-06 20:05:49 -07003091 if (userfaultfd_pte_wp(vma, *vmf->pte)) {
Andrea Arcangeli529b9302020-04-06 20:05:29 -07003092 pte_unmap_unlock(vmf->pte, vmf->ptl);
3093 return handle_userfault(vmf, VM_UFFD_WP);
3094 }
3095
Jan Karaa41b70d2016-12-14 15:07:33 -08003096 vmf->page = vm_normal_page(vma, vmf->address, vmf->orig_pte);
3097 if (!vmf->page) {
Peter Zijlstra251b97f2008-07-04 09:59:24 -07003098 /*
Peter Feiner64e455072014-10-13 15:55:46 -07003099 * VM_MIXEDMAP !pfn_valid() case, or VM_SOFTDIRTY clear on a
3100 * VM_PFNMAP VMA.
Peter Zijlstra251b97f2008-07-04 09:59:24 -07003101 *
3102 * We should not cow pages in a shared writeable mapping.
Boaz Harroshdd906182015-04-15 16:15:11 -07003103 * Just mark the pages writable and/or call ops->pfn_mkwrite.
Peter Zijlstra251b97f2008-07-04 09:59:24 -07003104 */
3105 if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
3106 (VM_WRITE|VM_SHARED))
Jan Kara29943022016-12-14 15:07:16 -08003107 return wp_pfn_shared(vmf);
Shachar Raindel2f38ab22015-04-14 15:46:32 -07003108
Jan Kara82b0f8c2016-12-14 15:06:58 -08003109 pte_unmap_unlock(vmf->pte, vmf->ptl);
Jan Karaa41b70d2016-12-14 15:07:33 -08003110 return wp_page_copy(vmf);
Peter Zijlstra251b97f2008-07-04 09:59:24 -07003111 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003112
Peter Zijlstrad08b3852006-09-25 23:30:57 -07003113 /*
Peter Zijlstraee6a6452006-09-25 23:31:00 -07003114 * Take out anonymous pages first; anonymous shared vmas are
3115 * not dirty accountable.
Peter Zijlstrad08b3852006-09-25 23:30:57 -07003116 */
Kirill Tkhai52d1e602019-03-05 15:43:06 -08003117 if (PageAnon(vmf->page)) {
Linus Torvalds09854ba2020-08-21 19:49:55 -04003118 struct page *page = vmf->page;
3119
3120 /* PageKsm() doesn't necessarily raise the page refcount */
3121 if (PageKsm(page) || page_count(page) != 1)
Kirill Tkhai52d1e602019-03-05 15:43:06 -08003122 goto copy;
Linus Torvalds09854ba2020-08-21 19:49:55 -04003123 if (!trylock_page(page))
3124 goto copy;
3125 if (PageKsm(page) || page_mapcount(page) != 1 || page_count(page) != 1) {
3126 unlock_page(page);
3127 goto copy;
Peter Zijlstraee6a6452006-09-25 23:31:00 -07003128 }
Linus Torvalds09854ba2020-08-21 19:49:55 -04003129 /*
3130 * Ok, we've got the only map reference, and the only
3131 * page count reference, and the page is locked,
3132 * it's dark out, and we're wearing sunglasses. Hit it.
3133 */
Linus Torvalds09854ba2020-08-21 19:49:55 -04003134 unlock_page(page);
Linus Torvaldsbe068f22020-09-24 08:41:32 -07003135 wp_page_reuse(vmf);
Linus Torvalds09854ba2020-08-21 19:49:55 -04003136 return VM_FAULT_WRITE;
Peter Zijlstraee6a6452006-09-25 23:31:00 -07003137 } else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
Peter Zijlstrad08b3852006-09-25 23:30:57 -07003138 (VM_WRITE|VM_SHARED))) {
Jan Karaa41b70d2016-12-14 15:07:33 -08003139 return wp_page_shared(vmf);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003140 }
Kirill Tkhai52d1e602019-03-05 15:43:06 -08003141copy:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003142 /*
3143 * Ok, we need to copy. Oh, well..
3144 */
Jan Karaa41b70d2016-12-14 15:07:33 -08003145 get_page(vmf->page);
Shachar Raindel28766802015-04-14 15:46:29 -07003146
Jan Kara82b0f8c2016-12-14 15:06:58 -08003147 pte_unmap_unlock(vmf->pte, vmf->ptl);
Jan Karaa41b70d2016-12-14 15:07:33 -08003148 return wp_page_copy(vmf);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003149}
3150
Peter Zijlstra97a89412011-05-24 17:12:04 -07003151static void unmap_mapping_range_vma(struct vm_area_struct *vma,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003152 unsigned long start_addr, unsigned long end_addr,
3153 struct zap_details *details)
3154{
Al Virof5cc4ee2012-03-05 14:14:20 -05003155 zap_page_range_single(vma, start_addr, end_addr - start_addr, details);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003156}
3157
Davidlohr Buesof808c132017-09-08 16:15:08 -07003158static inline void unmap_mapping_range_tree(struct rb_root_cached *root,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003159 struct zap_details *details)
3160{
3161 struct vm_area_struct *vma;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003162 pgoff_t vba, vea, zba, zea;
3163
Michel Lespinasse6b2dbba2012-10-08 16:31:25 -07003164 vma_interval_tree_foreach(vma, root,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003165 details->first_index, details->last_index) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003166
3167 vba = vma->vm_pgoff;
Libind6e93212013-07-03 15:01:26 -07003168 vea = vba + vma_pages(vma) - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003169 zba = details->first_index;
3170 if (zba < vba)
3171 zba = vba;
3172 zea = details->last_index;
3173 if (zea > vea)
3174 zea = vea;
3175
Peter Zijlstra97a89412011-05-24 17:12:04 -07003176 unmap_mapping_range_vma(vma,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003177 ((zba - vba) << PAGE_SHIFT) + vma->vm_start,
3178 ((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start,
Peter Zijlstra97a89412011-05-24 17:12:04 -07003179 details);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003180 }
3181}
3182
Linus Torvalds1da177e2005-04-16 15:20:36 -07003183/**
Matthew Wilcox977fbdc2018-01-31 16:17:36 -08003184 * unmap_mapping_pages() - Unmap pages from processes.
3185 * @mapping: The address space containing pages to be unmapped.
3186 * @start: Index of first page to be unmapped.
3187 * @nr: Number of pages to be unmapped. 0 to unmap to end of file.
3188 * @even_cows: Whether to unmap even private COWed pages.
3189 *
3190 * Unmap the pages in this address space from any userspace process which
3191 * has them mmaped. Generally, you want to remove COWed pages as well when
3192 * a file is being truncated, but not when invalidating pages from the page
3193 * cache.
3194 */
3195void unmap_mapping_pages(struct address_space *mapping, pgoff_t start,
3196 pgoff_t nr, bool even_cows)
3197{
3198 struct zap_details details = { };
3199
3200 details.check_mapping = even_cows ? NULL : mapping;
3201 details.first_index = start;
3202 details.last_index = start + nr - 1;
3203 if (details.last_index < details.first_index)
3204 details.last_index = ULONG_MAX;
3205
3206 i_mmap_lock_write(mapping);
3207 if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
3208 unmap_mapping_range_tree(&mapping->i_mmap, &details);
3209 i_mmap_unlock_write(mapping);
3210}
3211
3212/**
Kirill A. Shutemov8a5f14a2015-02-10 14:09:49 -08003213 * unmap_mapping_range - unmap the portion of all mmaps in the specified
Matthew Wilcox977fbdc2018-01-31 16:17:36 -08003214 * address_space corresponding to the specified byte range in the underlying
Kirill A. Shutemov8a5f14a2015-02-10 14:09:49 -08003215 * file.
3216 *
Martin Waitz3d410882005-06-23 22:05:21 -07003217 * @mapping: the address space containing mmaps to be unmapped.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003218 * @holebegin: byte in first page to unmap, relative to the start of
3219 * the underlying file. This will be rounded down to a PAGE_SIZE
npiggin@suse.de25d9e2d2009-08-21 02:35:05 +10003220 * boundary. Note that this is different from truncate_pagecache(), which
Linus Torvalds1da177e2005-04-16 15:20:36 -07003221 * must keep the partial page. In contrast, we must get rid of
3222 * partial pages.
3223 * @holelen: size of prospective hole in bytes. This will be rounded
3224 * up to a PAGE_SIZE boundary. A holelen of zero truncates to the
3225 * end of the file.
3226 * @even_cows: 1 when truncating a file, unmap even private COWed pages;
3227 * but 0 when invalidating pagecache, don't throw away private data.
3228 */
3229void unmap_mapping_range(struct address_space *mapping,
3230 loff_t const holebegin, loff_t const holelen, int even_cows)
3231{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003232 pgoff_t hba = holebegin >> PAGE_SHIFT;
3233 pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
3234
3235 /* Check for overflow. */
3236 if (sizeof(holelen) > sizeof(hlen)) {
3237 long long holeend =
3238 (holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
3239 if (holeend & ~(long long)ULONG_MAX)
3240 hlen = ULONG_MAX - hba + 1;
3241 }
3242
Matthew Wilcox977fbdc2018-01-31 16:17:36 -08003243 unmap_mapping_pages(mapping, hba, hlen, even_cows);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003244}
3245EXPORT_SYMBOL(unmap_mapping_range);
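/*
 * Illustrative sketch (editor's addition, not part of this file): a
 * filesystem punching a hole would typically drop every user mapping of
 * the affected byte range, including private COWed pages, before freeing
 * the underlying blocks. "example_punch_hole" is a hypothetical helper.
 * Guarded by #if 0.
 */
#if 0
static void example_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	/* even_cows == 1: stale private copies must go away as well. */
	unmap_mapping_range(inode->i_mapping, offset, len, 1);
}
#endif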
3246
Linus Torvalds1da177e2005-04-16 15:20:36 -07003247/*
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07003248 * We enter with non-exclusive mmap_lock (to exclude vma changes,
Hugh Dickins8f4e2102005-10-29 18:16:26 -07003249 * but allow concurrent faults), and pte mapped but not yet locked.
Paul Cassella9a95f3c2014-08-06 16:07:24 -07003250 * We return with pte unmapped and unlocked.
3251 *
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07003252 * We return with the mmap_lock locked or unlocked in the same cases
Paul Cassella9a95f3c2014-08-06 16:07:24 -07003253 * as does filemap_fault().
Linus Torvalds1da177e2005-04-16 15:20:36 -07003254 */
Souptick Joarder2b740302018-08-23 17:01:36 -07003255vm_fault_t do_swap_page(struct vm_fault *vmf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003256{
Jan Kara82b0f8c2016-12-14 15:06:58 -08003257 struct vm_area_struct *vma = vmf->vma;
Minchan Kimeaf649eb2018-04-05 16:23:39 -07003258 struct page *page = NULL, *swapcache;
Hugh Dickins65500d22005-10-29 18:15:59 -07003259 swp_entry_t entry;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003260 pte_t pte;
Michel Lespinassed065bd82010-10-26 14:21:57 -07003261 int locked;
Rik van Rielad8c2ee2010-08-09 17:19:48 -07003262 int exclusive = 0;
Souptick Joarder2b740302018-08-23 17:01:36 -07003263 vm_fault_t ret = 0;
Joonsoo Kimaae466b2020-08-11 18:30:50 -07003264 void *shadow = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003265
Minchan Kimeaf649eb2018-04-05 16:23:39 -07003266 if (!pte_unmap_same(vma->vm_mm, vmf->pmd, vmf->pte, vmf->orig_pte))
Hugh Dickins8f4e2102005-10-29 18:16:26 -07003267 goto out;
Hugh Dickins65500d22005-10-29 18:15:59 -07003268
Jan Kara29943022016-12-14 15:07:16 -08003269 entry = pte_to_swp_entry(vmf->orig_pte);
Andi Kleend1737fd2009-09-16 11:50:06 +02003270 if (unlikely(non_swap_entry(entry))) {
3271 if (is_migration_entry(entry)) {
Jan Kara82b0f8c2016-12-14 15:06:58 -08003272 migration_entry_wait(vma->vm_mm, vmf->pmd,
3273 vmf->address);
Jérôme Glisse5042db42017-09-08 16:11:43 -07003274 } else if (is_device_private_entry(entry)) {
Christoph Hellwig897e6362019-06-26 14:27:11 +02003275 vmf->page = device_private_entry_to_page(entry);
3276 ret = vmf->page->pgmap->ops->migrate_to_ram(vmf);
Andi Kleend1737fd2009-09-16 11:50:06 +02003277 } else if (is_hwpoison_entry(entry)) {
3278 ret = VM_FAULT_HWPOISON;
3279 } else {
Jan Kara29943022016-12-14 15:07:16 -08003280 print_bad_pte(vma, vmf->address, vmf->orig_pte, NULL);
Hugh Dickinsd99be1a2009-12-14 17:59:04 -08003281 ret = VM_FAULT_SIGBUS;
Andi Kleend1737fd2009-09-16 11:50:06 +02003282 }
Christoph Lameter06972122006-06-23 02:03:35 -07003283 goto out;
3284 }
Minchan Kim0bcac062017-11-15 17:33:07 -08003285
3286
Shailabh Nagar0ff92242006-07-14 00:24:37 -07003287 delayacct_set_flag(DELAYACCT_PF_SWAPIN);
Minchan Kimeaf649eb2018-04-05 16:23:39 -07003288 page = lookup_swap_cache(entry, vma, vmf->address);
3289 swapcache = page;
Minchan Kimf8020772018-01-18 16:33:50 -08003290
Linus Torvalds1da177e2005-04-16 15:20:36 -07003291 if (!page) {
Minchan Kim0bcac062017-11-15 17:33:07 -08003292 struct swap_info_struct *si = swp_swap_info(entry);
3293
Qian Caia449bf52020-08-14 17:31:31 -07003294 if (data_race(si->flags & SWP_SYNCHRONOUS_IO) &&
3295 __swap_count(entry) == 1) {
Minchan Kim0bcac062017-11-15 17:33:07 -08003296 /* skip swapcache */
Chris Goldsworthy62e32cf2020-11-09 22:26:47 -08003297 gfp_t flags = GFP_HIGHUSER_MOVABLE;
3298
3299 trace_android_rvh_set_skip_swapcache_flags(&flags);
3300 page = alloc_page_vma(flags, vma, vmf->address);
Minchan Kim0bcac062017-11-15 17:33:07 -08003301 if (page) {
Johannes Weiner4c6355b2020-06-03 16:02:17 -07003302 int err;
3303
Minchan Kim0bcac062017-11-15 17:33:07 -08003304 __SetPageLocked(page);
3305 __SetPageSwapBacked(page);
3306 set_page_private(page, entry.val);
Johannes Weiner4c6355b2020-06-03 16:02:17 -07003307
3308 /* Tell memcg to use swap ownership records */
3309 SetPageSwapCache(page);
3310 err = mem_cgroup_charge(page, vma->vm_mm,
Johannes Weinerd9eb1ea2020-06-03 16:02:24 -07003311 GFP_KERNEL);
Johannes Weiner4c6355b2020-06-03 16:02:17 -07003312 ClearPageSwapCache(page);
Michal Hocko545b1b02020-06-25 20:29:21 -07003313 if (err) {
3314 ret = VM_FAULT_OOM;
Johannes Weiner4c6355b2020-06-03 16:02:17 -07003315 goto out_page;
Michal Hocko545b1b02020-06-25 20:29:21 -07003316 }
Johannes Weiner4c6355b2020-06-03 16:02:17 -07003317
Joonsoo Kimaae466b2020-08-11 18:30:50 -07003318 shadow = get_shadow_from_swap_cache(entry);
3319 if (shadow)
3320 workingset_refault(page, shadow);
Minchan Kim0bcac062017-11-15 17:33:07 -08003321
Johannes Weiner6058eae2020-06-03 16:02:40 -07003322 lru_cache_add(page);
Minchan Kim0bcac062017-11-15 17:33:07 -08003323 swap_readpage(page, true);
3324 }
Minchan Kimaa8d22a2017-11-15 17:33:11 -08003325 } else {
Minchan Kime9e9b7e2018-04-05 16:23:42 -07003326 page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
3327 vmf);
Minchan Kimaa8d22a2017-11-15 17:33:11 -08003328 swapcache = page;
Minchan Kim0bcac062017-11-15 17:33:07 -08003329 }
3330
Linus Torvalds1da177e2005-04-16 15:20:36 -07003331 if (!page) {
3332 /*
Hugh Dickins8f4e2102005-10-29 18:16:26 -07003333 * Back out if somebody else faulted in this pte
3334 * while we released the pte lock.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003335 */
Jan Kara82b0f8c2016-12-14 15:06:58 -08003336 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
3337 vmf->address, &vmf->ptl);
Jan Kara29943022016-12-14 15:07:16 -08003338 if (likely(pte_same(*vmf->pte, vmf->orig_pte)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003339 ret = VM_FAULT_OOM;
Shailabh Nagar0ff92242006-07-14 00:24:37 -07003340 delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
Hugh Dickins65500d22005-10-29 18:15:59 -07003341 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003342 }
3343
3344 /* Had to read the page from swap area: Major fault */
3345 ret = VM_FAULT_MAJOR;
Christoph Lameterf8891e52006-06-30 01:55:45 -07003346 count_vm_event(PGMAJFAULT);
Roman Gushchin22621852017-07-06 15:40:25 -07003347 count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
Andi Kleend1737fd2009-09-16 11:50:06 +02003348 } else if (PageHWPoison(page)) {
Wu Fengguang71f72522009-12-16 12:19:58 +01003349 /*
3350 * hwpoisoned dirty swapcache pages are kept for killing
3351 * owner processes (which may be unknown at hwpoison time)
3352 */
Andi Kleend1737fd2009-09-16 11:50:06 +02003353 ret = VM_FAULT_HWPOISON;
3354 delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
Andi Kleen4779cb32009-10-14 01:51:41 +02003355 goto out_release;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003356 }
3357
Jan Kara82b0f8c2016-12-14 15:06:58 -08003358 locked = lock_page_or_retry(page, vma->vm_mm, vmf->flags);
Rik van Riele709ffd2012-05-29 15:06:18 -07003359
Balbir Singh20a10222007-11-14 17:00:33 -08003360 delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
Michel Lespinassed065bd82010-10-26 14:21:57 -07003361 if (!locked) {
3362 ret |= VM_FAULT_RETRY;
3363 goto out_release;
3364 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003365
Andrea Arcangeli4969c112010-09-09 16:37:52 -07003366 /*
Hugh Dickins31c4a3d2010-09-19 19:40:22 -07003367 * Make sure try_to_free_swap or reuse_swap_page or swapoff did not
3368 * release the swapcache from under us. The page pin, and pte_same
3369 * test below, are not enough to exclude that. Even if it is still
3370 * swapcache, we need to check that the page's swap has not changed.
Andrea Arcangeli4969c112010-09-09 16:37:52 -07003371 */
Minchan Kim0bcac062017-11-15 17:33:07 -08003372 if (unlikely((!PageSwapCache(page) ||
3373 page_private(page) != entry.val)) && swapcache)
Andrea Arcangeli4969c112010-09-09 16:37:52 -07003374 goto out_page;
3375
Jan Kara82b0f8c2016-12-14 15:06:58 -08003376 page = ksm_might_need_to_copy(page, vma, vmf->address);
Hugh Dickinscbf86cf2013-02-22 16:35:08 -08003377 if (unlikely(!page)) {
3378 ret = VM_FAULT_OOM;
3379 page = swapcache;
Hugh Dickinscbf86cf2013-02-22 16:35:08 -08003380 goto out_page;
Hugh Dickins5ad64682009-12-14 17:59:24 -08003381 }
3382
Johannes Weiner9d82c692020-06-03 16:02:04 -07003383 cgroup_throttle_swaprate(page, GFP_KERNEL);
KAMEZAWA Hiroyuki073e5872008-10-18 20:28:08 -07003384
Linus Torvalds1da177e2005-04-16 15:20:36 -07003385 /*
Hugh Dickins8f4e2102005-10-29 18:16:26 -07003386 * Back out if somebody else already faulted in this pte.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003387 */
Jan Kara82b0f8c2016-12-14 15:06:58 -08003388 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
3389 &vmf->ptl);
Jan Kara29943022016-12-14 15:07:16 -08003390 if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte)))
Kirill Korotaevb8107482005-05-16 21:53:50 -07003391 goto out_nomap;
Kirill Korotaevb8107482005-05-16 21:53:50 -07003392
3393 if (unlikely(!PageUptodate(page))) {
3394 ret = VM_FAULT_SIGBUS;
3395 goto out_nomap;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003396 }
3397
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003398 /*
3399 * The page isn't present yet, go ahead with the fault.
3400 *
3401 * Be careful about the sequence of operations here.
3402 * To get its accounting right, reuse_swap_page() must be called
3403 * while the page is counted on swap but not yet in mapcount i.e.
3404 * before page_add_anon_rmap() and swap_free(); try_to_free_swap()
3405 * must be called after the swap_free(), or it will never succeed.
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003406 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003407
Kirill A. Shutemovbae473a2016-07-26 15:25:20 -07003408 inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
3409 dec_mm_counter_fast(vma->vm_mm, MM_SWAPENTS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003410 pte = mk_pte(page, vma->vm_page_prot);
Jan Kara82b0f8c2016-12-14 15:06:58 -08003411 if ((vmf->flags & FAULT_FLAG_WRITE) && reuse_swap_page(page, NULL)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003412 pte = maybe_mkwrite(pte_mkdirty(pte), vma);
Jan Kara82b0f8c2016-12-14 15:06:58 -08003413 vmf->flags &= ~FAULT_FLAG_WRITE;
Andrea Arcangeli9a5b4892010-08-09 17:19:49 -07003414 ret |= VM_FAULT_WRITE;
Kirill A. Shutemovd281ee62016-01-15 16:52:16 -08003415 exclusive = RMAP_EXCLUSIVE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003416 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003417 flush_icache_page(vma, page);
Jan Kara29943022016-12-14 15:07:16 -08003418 if (pte_swp_soft_dirty(vmf->orig_pte))
Cyrill Gorcunov179ef712013-08-13 16:00:49 -07003419 pte = pte_mksoft_dirty(pte);
Peter Xuf45ec5f2020-04-06 20:06:01 -07003420 if (pte_swp_uffd_wp(vmf->orig_pte)) {
3421 pte = pte_mkuffd_wp(pte);
3422 pte = pte_wrprotect(pte);
3423 }
Jan Kara82b0f8c2016-12-14 15:06:58 -08003424 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);
Khalid Azizca827d52018-02-21 10:15:44 -07003425 arch_do_swap_page(vma->vm_mm, vma, vmf->address, pte, vmf->orig_pte);
Jan Kara29943022016-12-14 15:07:16 -08003426 vmf->orig_pte = pte;
Minchan Kim0bcac062017-11-15 17:33:07 -08003427
3428 /* ksm created a completely new copy */
3429 if (unlikely(page != swapcache && swapcache)) {
Jan Kara82b0f8c2016-12-14 15:06:58 -08003430 page_add_new_anon_rmap(page, vma, vmf->address, false);
Joonsoo Kimb5181542020-08-11 18:30:40 -07003431 lru_cache_add_inactive_or_unevictable(page, vma);
Minchan Kim0bcac062017-11-15 17:33:07 -08003432 } else {
3433 do_page_add_anon_rmap(page, vma, vmf->address, exclusive);
Johannes Weiner00501b52014-08-08 14:19:20 -07003434 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003435
Hugh Dickinsc475a8a2005-06-21 17:15:12 -07003436 swap_free(entry);
Vladimir Davydov5ccc5ab2016-01-20 15:03:10 -08003437 if (mem_cgroup_swap_full(page) ||
3438 (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
Hugh Dickinsa2c43ee2009-01-06 14:39:36 -08003439 try_to_free_swap(page);
Hugh Dickinsc475a8a2005-06-21 17:15:12 -07003440 unlock_page(page);
Minchan Kim0bcac062017-11-15 17:33:07 -08003441 if (page != swapcache && swapcache) {
Andrea Arcangeli4969c112010-09-09 16:37:52 -07003442 /*
3443 * Hold the lock to prevent the swap entry from being reused
3444 * until we take the PT lock for the pte_same() check
3445 * (to avoid false positives from pte_same). For
3446 * further safety release the lock after the swap_free
3447 * so that the swap count won't change under a
3448 * parallel locked swapcache.
3449 */
3450 unlock_page(swapcache);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003451 put_page(swapcache);
Andrea Arcangeli4969c112010-09-09 16:37:52 -07003452 }
Hugh Dickinsc475a8a2005-06-21 17:15:12 -07003453
Jan Kara82b0f8c2016-12-14 15:06:58 -08003454 if (vmf->flags & FAULT_FLAG_WRITE) {
Jan Kara29943022016-12-14 15:07:16 -08003455 ret |= do_wp_page(vmf);
Hugh Dickins61469f12008-03-04 14:29:04 -08003456 if (ret & VM_FAULT_ERROR)
3457 ret &= VM_FAULT_ERROR;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003458 goto out;
3459 }
3460
3461 /* No need to invalidate - it was non-present before */
Jan Kara82b0f8c2016-12-14 15:06:58 -08003462 update_mmu_cache(vma, vmf->address, vmf->pte);
Hugh Dickins65500d22005-10-29 18:15:59 -07003463unlock:
Jan Kara82b0f8c2016-12-14 15:06:58 -08003464 pte_unmap_unlock(vmf->pte, vmf->ptl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003465out:
3466 return ret;
Kirill Korotaevb8107482005-05-16 21:53:50 -07003467out_nomap:
Jan Kara82b0f8c2016-12-14 15:06:58 -08003468 pte_unmap_unlock(vmf->pte, vmf->ptl);
Johannes Weinerbc43f752009-04-30 15:08:08 -07003469out_page:
Kirill Korotaevb8107482005-05-16 21:53:50 -07003470 unlock_page(page);
Andi Kleen4779cb32009-10-14 01:51:41 +02003471out_release:
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003472 put_page(page);
Minchan Kim0bcac062017-11-15 17:33:07 -08003473 if (page != swapcache && swapcache) {
Andrea Arcangeli4969c112010-09-09 16:37:52 -07003474 unlock_page(swapcache);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003475 put_page(swapcache);
Andrea Arcangeli4969c112010-09-09 16:37:52 -07003476 }
Hugh Dickins65500d22005-10-29 18:15:59 -07003477 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003478}
3479
3480/*
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07003481 * We enter with non-exclusive mmap_lock (to exclude vma changes,
Hugh Dickins8f4e2102005-10-29 18:16:26 -07003482 * but allow concurrent faults), and pte mapped but not yet locked.
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07003483 * We return with mmap_lock still held, but pte unmapped and unlocked.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003484 */
Souptick Joarder2b740302018-08-23 17:01:36 -07003485static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003486{
Jan Kara82b0f8c2016-12-14 15:06:58 -08003487 struct vm_area_struct *vma = vmf->vma;
Hugh Dickins8f4e2102005-10-29 18:16:26 -07003488 struct page *page;
Souptick Joarder2b740302018-08-23 17:01:36 -07003489 vm_fault_t ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003490 pte_t entry;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003491
Kirill A. Shutemov6b7339f2015-07-06 23:18:37 +03003492 /* File mapping without ->vm_ops ? */
3493 if (vma->vm_flags & VM_SHARED)
3494 return VM_FAULT_SIGBUS;
3495
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07003496 /*
3497 * Use pte_alloc() instead of pte_alloc_map(). We can't run
3498 * pte_offset_map() on pmds where a huge pmd might be created
3499 * from a different thread.
3500 *
Michel Lespinasse3e4e28c2020-06-08 21:33:51 -07003501 * pte_alloc_map() is safe to use under mmap_write_lock(mm) or when
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07003502 * parallel threads are excluded by other means.
3503 *
Michel Lespinasse3e4e28c2020-06-08 21:33:51 -07003504 * Here we only have mmap_read_lock(mm).
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07003505 */
Joel Fernandes (Google)4cf58922019-01-03 15:28:34 -08003506 if (pte_alloc(vma->vm_mm, vmf->pmd))
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07003507 return VM_FAULT_OOM;
3508
3509 /* See the comment in pte_alloc_one_map() */
Jan Kara82b0f8c2016-12-14 15:06:58 -08003510 if (unlikely(pmd_trans_unstable(vmf->pmd)))
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07003511 return 0;
3512
Linus Torvalds11ac5522010-08-14 11:44:56 -07003513 /* Use the zero-page for reads */
Jan Kara82b0f8c2016-12-14 15:06:58 -08003514 if (!(vmf->flags & FAULT_FLAG_WRITE) &&
Kirill A. Shutemovbae473a2016-07-26 15:25:20 -07003515 !mm_forbids_zeropage(vma->vm_mm)) {
Jan Kara82b0f8c2016-12-14 15:06:58 -08003516 entry = pte_mkspecial(pfn_pte(my_zero_pfn(vmf->address),
Hugh Dickins62eede62009-09-21 17:03:34 -07003517 vma->vm_page_prot));
Jan Kara82b0f8c2016-12-14 15:06:58 -08003518 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
3519 vmf->address, &vmf->ptl);
Bibo Mao7df67692020-05-27 10:25:18 +08003520 if (!pte_none(*vmf->pte)) {
3521 update_mmu_tlb(vma, vmf->address, vmf->pte);
Hugh Dickinsa13ea5b2009-09-21 17:03:30 -07003522 goto unlock;
Bibo Mao7df67692020-05-27 10:25:18 +08003523 }
Michal Hocko6b31d592017-08-18 15:16:15 -07003524 ret = check_stable_address_space(vma->vm_mm);
3525 if (ret)
3526 goto unlock;
Andrea Arcangeli6b251fc2015-09-04 15:46:20 -07003527 /* Deliver the page fault to userland, check inside PT lock */
3528 if (userfaultfd_missing(vma)) {
Jan Kara82b0f8c2016-12-14 15:06:58 -08003529 pte_unmap_unlock(vmf->pte, vmf->ptl);
3530 return handle_userfault(vmf, VM_UFFD_MISSING);
Andrea Arcangeli6b251fc2015-09-04 15:46:20 -07003531 }
Hugh Dickinsa13ea5b2009-09-21 17:03:30 -07003532 goto setpte;
3533 }
3534
Nick Piggin557ed1f2007-10-16 01:24:40 -07003535 /* Allocate our own private page. */
Nick Piggin557ed1f2007-10-16 01:24:40 -07003536 if (unlikely(anon_vma_prepare(vma)))
3537 goto oom;
Jan Kara82b0f8c2016-12-14 15:06:58 -08003538 page = alloc_zeroed_user_highpage_movable(vma, vmf->address);
Nick Piggin557ed1f2007-10-16 01:24:40 -07003539 if (!page)
3540 goto oom;
Mel Gormaneb3c24f2015-06-24 16:57:27 -07003541
Johannes Weinerd9eb1ea2020-06-03 16:02:24 -07003542 if (mem_cgroup_charge(page, vma->vm_mm, GFP_KERNEL))
Mel Gormaneb3c24f2015-06-24 16:57:27 -07003543 goto oom_free_page;
Johannes Weiner9d82c692020-06-03 16:02:04 -07003544 cgroup_throttle_swaprate(page, GFP_KERNEL);
Mel Gormaneb3c24f2015-06-24 16:57:27 -07003545
Minchan Kim52f37622013-04-29 15:08:15 -07003546 /*
3547 * The memory barrier inside __SetPageUptodate makes sure that
Wei Yangf4f53292019-11-30 17:58:17 -08003548 * preceding stores to the page contents become visible before
Minchan Kim52f37622013-04-29 15:08:15 -07003549 * the set_pte_at() write.
3550 */
Nick Piggin0ed361d2008-02-04 22:29:34 -08003551 __SetPageUptodate(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003552
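	/*
	 * pte_sw_mkyoung() is a no-op on most architectures; on those that
	 * maintain the accessed bit in software (such as MIPS) it marks the
	 * new pte young up front so the first access does not take an
	 * immediate extra fault.
	 */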
Nick Piggin557ed1f2007-10-16 01:24:40 -07003553 entry = mk_pte(page, vma->vm_page_prot);
Bibo Mao44bf4312020-05-27 10:25:19 +08003554 entry = pte_sw_mkyoung(entry);
Hugh Dickins1ac0cb52009-09-21 17:03:29 -07003555 if (vma->vm_flags & VM_WRITE)
3556 entry = pte_mkwrite(pte_mkdirty(entry));
Hugh Dickins8f4e2102005-10-29 18:16:26 -07003557
Jan Kara82b0f8c2016-12-14 15:06:58 -08003558 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
3559 &vmf->ptl);
Bibo Mao7df67692020-05-27 10:25:18 +08003560 if (!pte_none(*vmf->pte)) {
3561 update_mmu_cache(vma, vmf->address, vmf->pte);
Nick Piggin557ed1f2007-10-16 01:24:40 -07003562 goto release;
Bibo Mao7df67692020-05-27 10:25:18 +08003563 }
Hugh Dickins9ba69292009-09-21 17:02:20 -07003564
Michal Hocko6b31d592017-08-18 15:16:15 -07003565 ret = check_stable_address_space(vma->vm_mm);
3566 if (ret)
3567 goto release;
3568
Andrea Arcangeli6b251fc2015-09-04 15:46:20 -07003569 /* Deliver the page fault to userland, check inside PT lock */
3570 if (userfaultfd_missing(vma)) {
Jan Kara82b0f8c2016-12-14 15:06:58 -08003571 pte_unmap_unlock(vmf->pte, vmf->ptl);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003572 put_page(page);
Jan Kara82b0f8c2016-12-14 15:06:58 -08003573 return handle_userfault(vmf, VM_UFFD_MISSING);
Andrea Arcangeli6b251fc2015-09-04 15:46:20 -07003574 }
3575
Kirill A. Shutemovbae473a2016-07-26 15:25:20 -07003576 inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
Jan Kara82b0f8c2016-12-14 15:06:58 -08003577 page_add_new_anon_rmap(page, vma, vmf->address, false);
Joonsoo Kimb5181542020-08-11 18:30:40 -07003578 lru_cache_add_inactive_or_unevictable(page, vma);
Hugh Dickinsa13ea5b2009-09-21 17:03:30 -07003579setpte:
Jan Kara82b0f8c2016-12-14 15:06:58 -08003580 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003581
3582 /* No need to invalidate - it was non-present before */
Jan Kara82b0f8c2016-12-14 15:06:58 -08003583 update_mmu_cache(vma, vmf->address, vmf->pte);
Hugh Dickins65500d22005-10-29 18:15:59 -07003584unlock:
Jan Kara82b0f8c2016-12-14 15:06:58 -08003585 pte_unmap_unlock(vmf->pte, vmf->ptl);
Michal Hocko6b31d592017-08-18 15:16:15 -07003586 return ret;
Hugh Dickins8f4e2102005-10-29 18:16:26 -07003587release:
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003588 put_page(page);
Hugh Dickins8f4e2102005-10-29 18:16:26 -07003589 goto unlock;
Balbir Singh8a9f3cc2008-02-07 00:13:53 -08003590oom_free_page:
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003591 put_page(page);
Hugh Dickins65500d22005-10-29 18:15:59 -07003592oom:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003593 return VM_FAULT_OOM;
3594}
3595
Paul Cassella9a95f3c2014-08-06 16:07:24 -07003596/*
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07003597 * The mmap_lock must have been held on entry, and may have been
Paul Cassella9a95f3c2014-08-06 16:07:24 -07003598 * released depending on flags and vma->vm_ops->fault() return value.
3599 * See filemap_fault() and __lock_page_or_retry().
3600 */
Souptick Joarder2b740302018-08-23 17:01:36 -07003601static vm_fault_t __do_fault(struct vm_fault *vmf)
Kirill A. Shutemov7eae74a2014-04-03 14:48:10 -07003602{
Jan Kara82b0f8c2016-12-14 15:06:58 -08003603 struct vm_area_struct *vma = vmf->vma;
Souptick Joarder2b740302018-08-23 17:01:36 -07003604 vm_fault_t ret;
Kirill A. Shutemov7eae74a2014-04-03 14:48:10 -07003605
Michal Hocko63f36552019-01-08 15:23:07 -08003606 /*
3607 * Preallocate pte before we take page_lock because this might lead to
3608 * deadlocks for memcg reclaim which waits for pages under writeback:
3609 * lock_page(A)
3610 * SetPageWriteback(A)
3611 * unlock_page(A)
3612 * lock_page(B)
3613 * lock_page(B)
Yanfei Xud3838072020-10-13 16:53:26 -07003614 * pte_alloc_one
Michal Hocko63f36552019-01-08 15:23:07 -08003615 * shrink_page_list
3616 * wait_on_page_writeback(A)
3617 * SetPageWriteback(B)
3618 * unlock_page(B)
3619 * # flush A, B to clear the writeback
3620 */
3621 if (pmd_none(*vmf->pmd) && !vmf->prealloc_pte) {
Yanfei Xua7069ee2020-10-13 16:53:29 -07003622 vmf->prealloc_pte = pte_alloc_one(vma->vm_mm);
Michal Hocko63f36552019-01-08 15:23:07 -08003623 if (!vmf->prealloc_pte)
3624 return VM_FAULT_OOM;
3625 smp_wmb(); /* See comment in __pte_alloc() */
3626 }
3627
Dave Jiang11bac802017-02-24 14:56:41 -08003628 ret = vma->vm_ops->fault(vmf);
Jan Kara39170482016-12-14 15:07:18 -08003629 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY |
Jan Karab1aa8122016-12-14 15:07:24 -08003630 VM_FAULT_DONE_COW)))
Kirill A. Shutemov7eae74a2014-04-03 14:48:10 -07003631 return ret;
3632
Jan Kara667240e2016-12-14 15:07:07 -08003633 if (unlikely(PageHWPoison(vmf->page))) {
Kirill A. Shutemov7eae74a2014-04-03 14:48:10 -07003634 if (ret & VM_FAULT_LOCKED)
Jan Kara667240e2016-12-14 15:07:07 -08003635 unlock_page(vmf->page);
3636 put_page(vmf->page);
Jan Kara936ca802016-12-14 15:07:10 -08003637 vmf->page = NULL;
Kirill A. Shutemov7eae74a2014-04-03 14:48:10 -07003638 return VM_FAULT_HWPOISON;
3639 }
3640
3641 if (unlikely(!(ret & VM_FAULT_LOCKED)))
Jan Kara667240e2016-12-14 15:07:07 -08003642 lock_page(vmf->page);
Kirill A. Shutemov7eae74a2014-04-03 14:48:10 -07003643 else
Jan Kara667240e2016-12-14 15:07:07 -08003644 VM_BUG_ON_PAGE(!PageLocked(vmf->page), vmf->page);
Kirill A. Shutemov7eae74a2014-04-03 14:48:10 -07003645
Kirill A. Shutemov7eae74a2014-04-03 14:48:10 -07003646 return ret;
3647}
3648
Ross Zwislerd0f0931d2017-06-02 14:46:34 -07003649/*
3650 * The ordering of these checks is important for pmds with _PAGE_DEVMAP set.
3651 * If we check pmd_trans_unstable() first we will trip the bad_pmd() check
3652 * inside of pmd_none_or_trans_huge_or_clear_bad(). This will end up correctly
3653 * returning 1 but not before it spams dmesg with the pmd_clear_bad() output.
3654 */
3655static int pmd_devmap_trans_unstable(pmd_t *pmd)
3656{
3657 return pmd_devmap(*pmd) || pmd_trans_unstable(pmd);
3658}
3659
Souptick Joarder2b740302018-08-23 17:01:36 -07003660static vm_fault_t pte_alloc_one_map(struct vm_fault *vmf)
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07003661{
Jan Kara82b0f8c2016-12-14 15:06:58 -08003662 struct vm_area_struct *vma = vmf->vma;
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07003663
Jan Kara82b0f8c2016-12-14 15:06:58 -08003664 if (!pmd_none(*vmf->pmd))
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07003665 goto map_pte;
Jan Kara82b0f8c2016-12-14 15:06:58 -08003666 if (vmf->prealloc_pte) {
3667 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
3668 if (unlikely(!pmd_none(*vmf->pmd))) {
3669 spin_unlock(vmf->ptl);
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07003670 goto map_pte;
3671 }
3672
Kirill A. Shutemovc4812902017-11-15 17:35:37 -08003673 mm_inc_nr_ptes(vma->vm_mm);
Jan Kara82b0f8c2016-12-14 15:06:58 -08003674 pmd_populate(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
3675 spin_unlock(vmf->ptl);
Tobin C Harding7f2b6ce2017-02-24 14:58:59 -08003676 vmf->prealloc_pte = NULL;
Joel Fernandes (Google)4cf58922019-01-03 15:28:34 -08003677 } else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd))) {
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07003678 return VM_FAULT_OOM;
3679 }
3680map_pte:
3681 /*
3682	 * If a huge pmd materialized under us, just retry later. Use
Ross Zwislerd0f0931d2017-06-02 14:46:34 -07003683 * pmd_trans_unstable() via pmd_devmap_trans_unstable() instead of
3684 * pmd_trans_huge() to ensure the pmd didn't become pmd_trans_huge
3685 * under us and then back to pmd_none, as a result of MADV_DONTNEED
3686 * running immediately after a huge pmd fault in a different thread of
3687 * this mm, in turn leading to a misleading pmd_trans_huge() retval.
3688 * All we have to ensure is that it is a regular pmd that we can walk
3689 * with pte_offset_map() and we can do that through an atomic read in
3690 * C, which is what pmd_trans_unstable() provides.
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07003691 */
Ross Zwislerd0f0931d2017-06-02 14:46:34 -07003692 if (pmd_devmap_trans_unstable(vmf->pmd))
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07003693 return VM_FAULT_NOPAGE;
3694
Ross Zwislerd0f0931d2017-06-02 14:46:34 -07003695 /*
3696 * At this point we know that our vmf->pmd points to a page of ptes
3697 * and it cannot become pmd_none(), pmd_devmap() or pmd_trans_huge()
3698 * for the duration of the fault. If a racing MADV_DONTNEED runs and
3699 * we zap the ptes pointed to by our vmf->pmd, the vmf->ptl will still
3700 * be valid and we will re-check to make sure the vmf->pte isn't
3701 * pte_none() under vmf->ptl protection when we return to
3702 * alloc_set_pte().
3703 */
Jan Kara82b0f8c2016-12-14 15:06:58 -08003704 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
3705 &vmf->ptl);
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07003706 return 0;
3707}
3708
Matthew Wilcox (Oracle)396bcc52020-04-06 20:04:35 -07003709#ifdef CONFIG_TRANSPARENT_HUGEPAGE
Jan Kara82b0f8c2016-12-14 15:06:58 -08003710static void deposit_prealloc_pte(struct vm_fault *vmf)
Aneesh Kumar K.V953c66c2016-12-12 16:44:32 -08003711{
Jan Kara82b0f8c2016-12-14 15:06:58 -08003712 struct vm_area_struct *vma = vmf->vma;
Aneesh Kumar K.V953c66c2016-12-12 16:44:32 -08003713
Jan Kara82b0f8c2016-12-14 15:06:58 -08003714 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
Aneesh Kumar K.V953c66c2016-12-12 16:44:32 -08003715 /*
3716 * We are going to consume the prealloc table,
3717 * count that as nr_ptes.
3718 */
Kirill A. Shutemovc4812902017-11-15 17:35:37 -08003719 mm_inc_nr_ptes(vma->vm_mm);
Tobin C Harding7f2b6ce2017-02-24 14:58:59 -08003720 vmf->prealloc_pte = NULL;
Aneesh Kumar K.V953c66c2016-12-12 16:44:32 -08003721}
3722
Souptick Joarder2b740302018-08-23 17:01:36 -07003723static vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
Kirill A. Shutemov10102452016-07-26 15:25:29 -07003724{
Jan Kara82b0f8c2016-12-14 15:06:58 -08003725 struct vm_area_struct *vma = vmf->vma;
3726 bool write = vmf->flags & FAULT_FLAG_WRITE;
3727 unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
Kirill A. Shutemov10102452016-07-26 15:25:29 -07003728 pmd_t entry;
Souptick Joarder2b740302018-08-23 17:01:36 -07003729 int i;
Matthew Wilcox (Oracle)d01ac3c2020-10-15 20:05:26 -07003730 vm_fault_t ret = VM_FAULT_FALLBACK;
Kirill A. Shutemov10102452016-07-26 15:25:29 -07003731
3732 if (!transhuge_vma_suitable(vma, haddr))
Matthew Wilcox (Oracle)d01ac3c2020-10-15 20:05:26 -07003733 return ret;
Kirill A. Shutemov10102452016-07-26 15:25:29 -07003734
Kirill A. Shutemov10102452016-07-26 15:25:29 -07003735 page = compound_head(page);
Matthew Wilcox (Oracle)d01ac3c2020-10-15 20:05:26 -07003736 if (compound_order(page) != HPAGE_PMD_ORDER)
3737 return ret;
Kirill A. Shutemov10102452016-07-26 15:25:29 -07003738
Aneesh Kumar K.V953c66c2016-12-12 16:44:32 -08003739 /*
3740	 * Archs like ppc64 need additional space to store information
3741 * related to pte entry. Use the preallocated table for that.
3742 */
Jan Kara82b0f8c2016-12-14 15:06:58 -08003743 if (arch_needs_pgtable_deposit() && !vmf->prealloc_pte) {
Joel Fernandes (Google)4cf58922019-01-03 15:28:34 -08003744 vmf->prealloc_pte = pte_alloc_one(vma->vm_mm);
Jan Kara82b0f8c2016-12-14 15:06:58 -08003745 if (!vmf->prealloc_pte)
Aneesh Kumar K.V953c66c2016-12-12 16:44:32 -08003746 return VM_FAULT_OOM;
3747 smp_wmb(); /* See comment in __pte_alloc() */
3748 }
3749
Jan Kara82b0f8c2016-12-14 15:06:58 -08003750 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
3751 if (unlikely(!pmd_none(*vmf->pmd)))
Kirill A. Shutemov10102452016-07-26 15:25:29 -07003752 goto out;
3753
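	/*
	 * We are about to map the whole compound page with a single huge
	 * pmd entry; flush the instruction cache for each subpage first.
	 */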
3754 for (i = 0; i < HPAGE_PMD_NR; i++)
3755 flush_icache_page(vma, page + i);
3756
3757 entry = mk_huge_pmd(page, vma->vm_page_prot);
3758 if (write)
Linus Torvaldsf55e1012017-11-29 09:01:01 -08003759 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
Kirill A. Shutemov10102452016-07-26 15:25:29 -07003760
Yang Shifadae292018-08-17 15:44:55 -07003761 add_mm_counter(vma->vm_mm, mm_counter_file(page), HPAGE_PMD_NR);
Kirill A. Shutemov10102452016-07-26 15:25:29 -07003762 page_add_file_rmap(page, true);
Aneesh Kumar K.V953c66c2016-12-12 16:44:32 -08003763 /*
3764 * deposit and withdraw with pmd lock held
3765 */
3766 if (arch_needs_pgtable_deposit())
Jan Kara82b0f8c2016-12-14 15:06:58 -08003767 deposit_prealloc_pte(vmf);
Kirill A. Shutemov10102452016-07-26 15:25:29 -07003768
Jan Kara82b0f8c2016-12-14 15:06:58 -08003769 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
Kirill A. Shutemov10102452016-07-26 15:25:29 -07003770
Jan Kara82b0f8c2016-12-14 15:06:58 -08003771 update_mmu_cache_pmd(vma, haddr, vmf->pmd);
Kirill A. Shutemov10102452016-07-26 15:25:29 -07003772
3773 /* fault is handled */
3774 ret = 0;
Kirill A. Shutemov95ecedc2016-07-26 15:25:31 -07003775 count_vm_event(THP_FILE_MAPPED);
Kirill A. Shutemov10102452016-07-26 15:25:29 -07003776out:
Jan Kara82b0f8c2016-12-14 15:06:58 -08003777 spin_unlock(vmf->ptl);
Kirill A. Shutemov10102452016-07-26 15:25:29 -07003778 return ret;
3779}
3780#else
Souptick Joarder2b740302018-08-23 17:01:36 -07003781static vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
Kirill A. Shutemov10102452016-07-26 15:25:29 -07003782{
3783 BUILD_BUG();
3784 return 0;
3785}
3786#endif
3787
Kirill A. Shutemov8c6e50b2014-04-07 15:37:18 -07003788/**
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07003789 * alloc_set_pte - set up a new PTE entry for the given page and add reverse page
Randy Dunlapf1dc1682020-10-13 16:54:01 -07003790 * mapping. If needed, the function allocates a page table or uses the pre-allocated one.
Kirill A. Shutemov8c6e50b2014-04-07 15:37:18 -07003791 *
Jan Kara82b0f8c2016-12-14 15:06:58 -08003792 * @vmf: fault environment
Kirill A. Shutemov8c6e50b2014-04-07 15:37:18 -07003793 * @page: page to map
Kirill A. Shutemov8c6e50b2014-04-07 15:37:18 -07003794 *
Jan Kara82b0f8c2016-12-14 15:06:58 -08003795 * Caller must take care of unlocking vmf->ptl, if vmf->pte is non-NULL on
3796 * return.
Kirill A. Shutemov8c6e50b2014-04-07 15:37:18 -07003797 *
3798 * Target users are page handler itself and implementations of
3799 * vm_ops->map_pages.
Mike Rapoporta862f682019-03-05 15:48:42 -08003800 *
3801 * Return: %0 on success, %VM_FAULT_ code in case of error.
Kirill A. Shutemov8c6e50b2014-04-07 15:37:18 -07003802 */
Johannes Weiner9d82c692020-06-03 16:02:04 -07003803vm_fault_t alloc_set_pte(struct vm_fault *vmf, struct page *page)
Kirill A. Shutemov3bb97792014-04-03 14:48:16 -07003804{
Jan Kara82b0f8c2016-12-14 15:06:58 -08003805 struct vm_area_struct *vma = vmf->vma;
3806 bool write = vmf->flags & FAULT_FLAG_WRITE;
Kirill A. Shutemov3bb97792014-04-03 14:48:16 -07003807 pte_t entry;
Souptick Joarder2b740302018-08-23 17:01:36 -07003808 vm_fault_t ret;
Kirill A. Shutemov10102452016-07-26 15:25:29 -07003809
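	/*
	 * If the page is a PMD-sized compound page and no page table has
	 * been installed yet, try to map it with a huge pmd; do_set_pmd()
	 * returns VM_FAULT_FALLBACK when that is not possible and we fall
	 * back to mapping a single pte below.
	 */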
Matthew Wilcox (Oracle)396bcc52020-04-06 20:04:35 -07003810 if (pmd_none(*vmf->pmd) && PageTransCompound(page)) {
Jan Kara82b0f8c2016-12-14 15:06:58 -08003811 ret = do_set_pmd(vmf, page);
Kirill A. Shutemov10102452016-07-26 15:25:29 -07003812 if (ret != VM_FAULT_FALLBACK)
Hugh Dickinsb0b9b3d2017-01-07 15:37:31 -08003813 return ret;
Kirill A. Shutemov10102452016-07-26 15:25:29 -07003814 }
Kirill A. Shutemov3bb97792014-04-03 14:48:16 -07003815
Jan Kara82b0f8c2016-12-14 15:06:58 -08003816 if (!vmf->pte) {
3817 ret = pte_alloc_one_map(vmf);
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07003818 if (ret)
Hugh Dickinsb0b9b3d2017-01-07 15:37:31 -08003819 return ret;
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07003820 }
3821
3822 /* Re-check under ptl */
Bibo Mao7df67692020-05-27 10:25:18 +08003823 if (unlikely(!pte_none(*vmf->pte))) {
3824 update_mmu_tlb(vma, vmf->address, vmf->pte);
Hugh Dickinsb0b9b3d2017-01-07 15:37:31 -08003825 return VM_FAULT_NOPAGE;
Bibo Mao7df67692020-05-27 10:25:18 +08003826 }
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07003827
Kirill A. Shutemov3bb97792014-04-03 14:48:16 -07003828 flush_icache_page(vma, page);
3829 entry = mk_pte(page, vma->vm_page_prot);
Bibo Mao44bf4312020-05-27 10:25:19 +08003830 entry = pte_sw_mkyoung(entry);
Kirill A. Shutemov3bb97792014-04-03 14:48:16 -07003831 if (write)
3832 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
Kirill A. Shutemovbae473a2016-07-26 15:25:20 -07003833 /* copy-on-write page */
3834 if (write && !(vma->vm_flags & VM_SHARED)) {
Kirill A. Shutemov3bb97792014-04-03 14:48:16 -07003835 inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
Jan Kara82b0f8c2016-12-14 15:06:58 -08003836 page_add_new_anon_rmap(page, vma, vmf->address, false);
Joonsoo Kimb5181542020-08-11 18:30:40 -07003837 lru_cache_add_inactive_or_unevictable(page, vma);
Kirill A. Shutemov3bb97792014-04-03 14:48:16 -07003838 } else {
Jerome Marchandeca56ff2016-01-14 15:19:26 -08003839 inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page));
Kirill A. Shutemovdd78fed2016-07-26 15:25:26 -07003840 page_add_file_rmap(page, false);
Kirill A. Shutemov3bb97792014-04-03 14:48:16 -07003841 }
Jan Kara82b0f8c2016-12-14 15:06:58 -08003842 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
Kirill A. Shutemov3bb97792014-04-03 14:48:16 -07003843
3844 /* no need to invalidate: a not-present page won't be cached */
Jan Kara82b0f8c2016-12-14 15:06:58 -08003845 update_mmu_cache(vma, vmf->address, vmf->pte);
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07003846
Hugh Dickinsb0b9b3d2017-01-07 15:37:31 -08003847 return 0;
Kirill A. Shutemov3bb97792014-04-03 14:48:16 -07003848}
3849
Jan Kara9118c0c2016-12-14 15:07:21 -08003850
3851/**
3852 * finish_fault - finish page fault once we have prepared the page to fault
3853 *
3854 * @vmf: structure describing the fault
3855 *
3856 * This function handles all that is needed to finish a page fault once the
3857 * page to fault in is prepared. It handles locking of PTEs, inserts PTE for
3858 * given page, adds reverse page mapping, handles memcg charges and LRU
Mike Rapoporta862f682019-03-05 15:48:42 -08003859 * addition.
Jan Kara9118c0c2016-12-14 15:07:21 -08003860 *
3861 * The function expects the page to be locked and on success it consumes a
3862 * reference of a page being mapped (for the PTE which maps it).
Mike Rapoporta862f682019-03-05 15:48:42 -08003863 *
3864 * Return: %0 on success, %VM_FAULT_ code in case of error.
Jan Kara9118c0c2016-12-14 15:07:21 -08003865 */
Souptick Joarder2b740302018-08-23 17:01:36 -07003866vm_fault_t finish_fault(struct vm_fault *vmf)
Jan Kara9118c0c2016-12-14 15:07:21 -08003867{
3868 struct page *page;
Souptick Joarder2b740302018-08-23 17:01:36 -07003869 vm_fault_t ret = 0;
Jan Kara9118c0c2016-12-14 15:07:21 -08003870
3871 /* Did we COW the page? */
3872 if ((vmf->flags & FAULT_FLAG_WRITE) &&
3873 !(vmf->vma->vm_flags & VM_SHARED))
3874 page = vmf->cow_page;
3875 else
3876 page = vmf->page;
Michal Hocko6b31d592017-08-18 15:16:15 -07003877
3878 /*
3879 * check even for read faults because we might have lost our CoWed
3880 * page
3881 */
3882 if (!(vmf->vma->vm_flags & VM_SHARED))
3883 ret = check_stable_address_space(vmf->vma->vm_mm);
3884 if (!ret)
Johannes Weiner9d82c692020-06-03 16:02:04 -07003885 ret = alloc_set_pte(vmf, page);
Jan Kara9118c0c2016-12-14 15:07:21 -08003886 if (vmf->pte)
3887 pte_unmap_unlock(vmf->pte, vmf->ptl);
3888 return ret;
3889}
3890
Kirill A. Shutemov3a910532014-08-06 16:08:07 -07003891static unsigned long fault_around_bytes __read_mostly =
3892 rounddown_pow_of_two(65536);
Kirill A. Shutemova9b0f862014-06-04 16:10:54 -07003893
Kirill A. Shutemov1592eef2014-04-07 15:37:22 -07003894#ifdef CONFIG_DEBUG_FS
Kirill A. Shutemova9b0f862014-06-04 16:10:54 -07003895static int fault_around_bytes_get(void *data, u64 *val)
Kirill A. Shutemov1592eef2014-04-07 15:37:22 -07003896{
Kirill A. Shutemova9b0f862014-06-04 16:10:54 -07003897 *val = fault_around_bytes;
Kirill A. Shutemov1592eef2014-04-07 15:37:22 -07003898 return 0;
3899}
3900
Andrey Ryabininb4903d62014-07-30 16:08:35 -07003901/*
William Kucharskida391d62018-01-31 16:21:11 -08003902 * fault_around_bytes must be rounded down to the nearest page order as it's
3903 * what do_fault_around() expects to see.
Andrey Ryabininb4903d62014-07-30 16:08:35 -07003904 */
Kirill A. Shutemova9b0f862014-06-04 16:10:54 -07003905static int fault_around_bytes_set(void *data, u64 val)
Kirill A. Shutemov1592eef2014-04-07 15:37:22 -07003906{
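	/* Refuse values that would span more than a single page table. */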
Kirill A. Shutemova9b0f862014-06-04 16:10:54 -07003907 if (val / PAGE_SIZE > PTRS_PER_PTE)
Kirill A. Shutemov1592eef2014-04-07 15:37:22 -07003908 return -EINVAL;
Andrey Ryabininb4903d62014-07-30 16:08:35 -07003909 if (val > PAGE_SIZE)
3910 fault_around_bytes = rounddown_pow_of_two(val);
3911 else
3912 fault_around_bytes = PAGE_SIZE; /* rounddown_pow_of_two(0) is undefined */
Kirill A. Shutemov1592eef2014-04-07 15:37:22 -07003913 return 0;
3914}
Yevgen Pronenko0a1345f2017-07-10 15:47:17 -07003915DEFINE_DEBUGFS_ATTRIBUTE(fault_around_bytes_fops,
Kirill A. Shutemova9b0f862014-06-04 16:10:54 -07003916 fault_around_bytes_get, fault_around_bytes_set, "%llu\n");
Kirill A. Shutemov1592eef2014-04-07 15:37:22 -07003917
3918static int __init fault_around_debugfs(void)
3919{
Greg Kroah-Hartmand9f79792019-03-05 15:46:09 -08003920 debugfs_create_file_unsafe("fault_around_bytes", 0644, NULL, NULL,
3921 &fault_around_bytes_fops);
Kirill A. Shutemov1592eef2014-04-07 15:37:22 -07003922 return 0;
3923}
3924late_initcall(fault_around_debugfs);
Kirill A. Shutemov1592eef2014-04-07 15:37:22 -07003925#endif
Kirill A. Shutemov8c6e50b2014-04-07 15:37:18 -07003926
Kirill A. Shutemov1fdb4122014-06-04 16:10:55 -07003927/*
3928 * do_fault_around() tries to map a few pages around the fault address. The hope
3929 * is that the pages will be needed soon and this will lower the number of
3930 * faults to handle.
3931 *
3932 * It uses vm_ops->map_pages() to map the pages, which skips the page if it's
3933 * not ready to be mapped: not up-to-date, locked, etc.
3934 *
3935 * This function is called with the page table lock taken. In the split ptlock
3936 * case the page table lock protects only those entries which belong to
3937 * the page table corresponding to the fault address.
3938 *
3939 * This function doesn't cross the VMA boundaries, in order to call map_pages()
3940 * only once.
3941 *
William Kucharskida391d62018-01-31 16:21:11 -08003942 * fault_around_bytes defines how many bytes we'll try to map.
3943 * do_fault_around() expects it to be set to a power of two less than or equal
3944 * to PTRS_PER_PTE.
Kirill A. Shutemov1fdb4122014-06-04 16:10:55 -07003945 *
William Kucharskida391d62018-01-31 16:21:11 -08003946 * The virtual address of the area that we map is naturally aligned to
3947 * fault_around_bytes rounded down to the machine page size
3948 * (and therefore to page order). This way it's easier to guarantee
3949 * that we don't cross page table boundaries.
Kirill A. Shutemov1fdb4122014-06-04 16:10:55 -07003950 */
Souptick Joarder2b740302018-08-23 17:01:36 -07003951static vm_fault_t do_fault_around(struct vm_fault *vmf)
Kirill A. Shutemov8c6e50b2014-04-07 15:37:18 -07003952{
Jan Kara82b0f8c2016-12-14 15:06:58 -08003953 unsigned long address = vmf->address, nr_pages, mask;
Jan Kara0721ec82016-12-14 15:07:04 -08003954 pgoff_t start_pgoff = vmf->pgoff;
Kirill A. Shutemovbae473a2016-07-26 15:25:20 -07003955 pgoff_t end_pgoff;
Souptick Joarder2b740302018-08-23 17:01:36 -07003956 int off;
3957 vm_fault_t ret = 0;
Kirill A. Shutemov8c6e50b2014-04-07 15:37:18 -07003958
Jason Low4db0c3c2015-04-15 16:14:08 -07003959 nr_pages = READ_ONCE(fault_around_bytes) >> PAGE_SHIFT;
Kirill A. Shutemovaecd6f42014-08-06 16:08:05 -07003960 mask = ~(nr_pages * PAGE_SIZE - 1) & PAGE_MASK;
3961
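	/*
	 * Example (assuming 4KB pages and the default 64KB
	 * fault_around_bytes): nr_pages is 16 and the faulting address is
	 * rounded down to a 64KB boundary, clamped to the start of the VMA,
	 * so up to 16 ptes of the same page table can be populated at once.
	 */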
Jan Kara82b0f8c2016-12-14 15:06:58 -08003962 vmf->address = max(address & mask, vmf->vma->vm_start);
3963 off = ((address - vmf->address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
Kirill A. Shutemovbae473a2016-07-26 15:25:20 -07003964 start_pgoff -= off;
Kirill A. Shutemov8c6e50b2014-04-07 15:37:18 -07003965
3966 /*
William Kucharskida391d62018-01-31 16:21:11 -08003967 * end_pgoff is either the end of the page table, the end of
3968	 * the vma or nr_pages from start_pgoff, depending on what is nearest.
Kirill A. Shutemov8c6e50b2014-04-07 15:37:18 -07003969 */
Kirill A. Shutemovbae473a2016-07-26 15:25:20 -07003970 end_pgoff = start_pgoff -
Jan Kara82b0f8c2016-12-14 15:06:58 -08003971 ((vmf->address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) +
Kirill A. Shutemov8c6e50b2014-04-07 15:37:18 -07003972 PTRS_PER_PTE - 1;
Jan Kara82b0f8c2016-12-14 15:06:58 -08003973 end_pgoff = min3(end_pgoff, vma_pages(vmf->vma) + vmf->vma->vm_pgoff - 1,
Kirill A. Shutemovbae473a2016-07-26 15:25:20 -07003974 start_pgoff + nr_pages - 1);
Kirill A. Shutemov8c6e50b2014-04-07 15:37:18 -07003975
Jan Kara82b0f8c2016-12-14 15:06:58 -08003976 if (pmd_none(*vmf->pmd)) {
Joel Fernandes (Google)4cf58922019-01-03 15:28:34 -08003977 vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm);
Jan Kara82b0f8c2016-12-14 15:06:58 -08003978 if (!vmf->prealloc_pte)
Vegard Nossumc5f88bd2016-08-02 14:02:22 -07003979 goto out;
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07003980 smp_wmb(); /* See comment in __pte_alloc() */
Kirill A. Shutemov8c6e50b2014-04-07 15:37:18 -07003981 }
3982
Jan Kara82b0f8c2016-12-14 15:06:58 -08003983 vmf->vma->vm_ops->map_pages(vmf, start_pgoff, end_pgoff);
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07003984
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07003985 /* Huge page is mapped? Page fault is solved */
Jan Kara82b0f8c2016-12-14 15:06:58 -08003986 if (pmd_trans_huge(*vmf->pmd)) {
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07003987 ret = VM_FAULT_NOPAGE;
3988 goto out;
3989 }
3990
3991	 /* ->map_pages() hasn't done anything useful. Cold page cache? */
Jan Kara82b0f8c2016-12-14 15:06:58 -08003992 if (!vmf->pte)
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07003993 goto out;
3994
3995 /* check if the page fault is solved */
Jan Kara82b0f8c2016-12-14 15:06:58 -08003996 vmf->pte -= (vmf->address >> PAGE_SHIFT) - (address >> PAGE_SHIFT);
3997 if (!pte_none(*vmf->pte))
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07003998 ret = VM_FAULT_NOPAGE;
Jan Kara82b0f8c2016-12-14 15:06:58 -08003999 pte_unmap_unlock(vmf->pte, vmf->ptl);
Kirill A. Shutemovbae473a2016-07-26 15:25:20 -07004000out:
Jan Kara82b0f8c2016-12-14 15:06:58 -08004001 vmf->address = address;
4002 vmf->pte = NULL;
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07004003 return ret;
Kirill A. Shutemov8c6e50b2014-04-07 15:37:18 -07004004}
4005
Souptick Joarder2b740302018-08-23 17:01:36 -07004006static vm_fault_t do_read_fault(struct vm_fault *vmf)
Kirill A. Shutemove655fb22014-04-03 14:48:11 -07004007{
Jan Kara82b0f8c2016-12-14 15:06:58 -08004008 struct vm_area_struct *vma = vmf->vma;
Souptick Joarder2b740302018-08-23 17:01:36 -07004009 vm_fault_t ret = 0;
Kirill A. Shutemov8c6e50b2014-04-07 15:37:18 -07004010
4011 /*
4012 * Let's call ->map_pages() first and use ->fault() as fallback
4013 * if page by the offset is not ready to be mapped (cold cache or
4014 * something).
4015 */
Kirill A. Shutemov9b4bdd22015-02-10 14:09:51 -08004016 if (vma->vm_ops->map_pages && fault_around_bytes >> PAGE_SHIFT > 1) {
Jan Kara0721ec82016-12-14 15:07:04 -08004017 ret = do_fault_around(vmf);
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07004018 if (ret)
4019 return ret;
Kirill A. Shutemov8c6e50b2014-04-07 15:37:18 -07004020 }
Kirill A. Shutemove655fb22014-04-03 14:48:11 -07004021
Jan Kara936ca802016-12-14 15:07:10 -08004022 ret = __do_fault(vmf);
Kirill A. Shutemove655fb22014-04-03 14:48:11 -07004023 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
4024 return ret;
4025
Jan Kara9118c0c2016-12-14 15:07:21 -08004026 ret |= finish_fault(vmf);
Jan Kara936ca802016-12-14 15:07:10 -08004027 unlock_page(vmf->page);
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07004028 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
Jan Kara936ca802016-12-14 15:07:10 -08004029 put_page(vmf->page);
Kirill A. Shutemove655fb22014-04-03 14:48:11 -07004030 return ret;
4031}
4032
Souptick Joarder2b740302018-08-23 17:01:36 -07004033static vm_fault_t do_cow_fault(struct vm_fault *vmf)
Kirill A. Shutemovec47c3b2014-04-03 14:48:12 -07004034{
Jan Kara82b0f8c2016-12-14 15:06:58 -08004035 struct vm_area_struct *vma = vmf->vma;
Souptick Joarder2b740302018-08-23 17:01:36 -07004036 vm_fault_t ret;
Kirill A. Shutemovec47c3b2014-04-03 14:48:12 -07004037
4038 if (unlikely(anon_vma_prepare(vma)))
4039 return VM_FAULT_OOM;
4040
Jan Kara936ca802016-12-14 15:07:10 -08004041 vmf->cow_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vmf->address);
4042 if (!vmf->cow_page)
Kirill A. Shutemovec47c3b2014-04-03 14:48:12 -07004043 return VM_FAULT_OOM;
4044
Johannes Weinerd9eb1ea2020-06-03 16:02:24 -07004045 if (mem_cgroup_charge(vmf->cow_page, vma->vm_mm, GFP_KERNEL)) {
Jan Kara936ca802016-12-14 15:07:10 -08004046 put_page(vmf->cow_page);
Kirill A. Shutemovec47c3b2014-04-03 14:48:12 -07004047 return VM_FAULT_OOM;
4048 }
Johannes Weiner9d82c692020-06-03 16:02:04 -07004049 cgroup_throttle_swaprate(vmf->cow_page, GFP_KERNEL);
Kirill A. Shutemovec47c3b2014-04-03 14:48:12 -07004050
Jan Kara936ca802016-12-14 15:07:10 -08004051 ret = __do_fault(vmf);
Kirill A. Shutemovec47c3b2014-04-03 14:48:12 -07004052 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
4053 goto uncharge_out;
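	/*
	 * VM_FAULT_DONE_COW means the ->fault() handler (e.g. DAX) has
	 * already copied the data into vmf->cow_page and installed the pte
	 * itself, so there is nothing left for us to do here.
	 */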
Jan Kara39170482016-12-14 15:07:18 -08004054 if (ret & VM_FAULT_DONE_COW)
4055 return ret;
Kirill A. Shutemovec47c3b2014-04-03 14:48:12 -07004056
Jan Karab1aa8122016-12-14 15:07:24 -08004057 copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma);
Jan Kara936ca802016-12-14 15:07:10 -08004058 __SetPageUptodate(vmf->cow_page);
Kirill A. Shutemovec47c3b2014-04-03 14:48:12 -07004059
Jan Kara9118c0c2016-12-14 15:07:21 -08004060 ret |= finish_fault(vmf);
Jan Karab1aa8122016-12-14 15:07:24 -08004061 unlock_page(vmf->page);
4062 put_page(vmf->page);
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07004063 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
4064 goto uncharge_out;
Kirill A. Shutemovec47c3b2014-04-03 14:48:12 -07004065 return ret;
4066uncharge_out:
Jan Kara936ca802016-12-14 15:07:10 -08004067 put_page(vmf->cow_page);
Kirill A. Shutemovec47c3b2014-04-03 14:48:12 -07004068 return ret;
4069}
4070
Souptick Joarder2b740302018-08-23 17:01:36 -07004071static vm_fault_t do_shared_fault(struct vm_fault *vmf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004072{
Jan Kara82b0f8c2016-12-14 15:06:58 -08004073 struct vm_area_struct *vma = vmf->vma;
Souptick Joarder2b740302018-08-23 17:01:36 -07004074 vm_fault_t ret, tmp;
KAMEZAWA Hiroyuki1d65f862011-07-25 17:12:27 -07004075
Jan Kara936ca802016-12-14 15:07:10 -08004076 ret = __do_fault(vmf);
Kirill A. Shutemov7eae74a2014-04-03 14:48:10 -07004077 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
Kirill A. Shutemovf0c6d4d2014-04-03 14:48:13 -07004078 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004079
4080 /*
Kirill A. Shutemovf0c6d4d2014-04-03 14:48:13 -07004081 * Check if the backing address space wants to know that the page is
4082 * about to become writable
Linus Torvalds1da177e2005-04-16 15:20:36 -07004083 */
Kirill A. Shutemovfb09a462014-04-03 14:48:15 -07004084 if (vma->vm_ops->page_mkwrite) {
Jan Kara936ca802016-12-14 15:07:10 -08004085 unlock_page(vmf->page);
Jan Kara38b8cb72016-12-14 15:07:30 -08004086 tmp = do_page_mkwrite(vmf);
Kirill A. Shutemovfb09a462014-04-03 14:48:15 -07004087 if (unlikely(!tmp ||
4088 (tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
Jan Kara936ca802016-12-14 15:07:10 -08004089 put_page(vmf->page);
Kirill A. Shutemovfb09a462014-04-03 14:48:15 -07004090 return tmp;
4091 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004092 }
4093
Jan Kara9118c0c2016-12-14 15:07:21 -08004094 ret |= finish_fault(vmf);
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07004095 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE |
4096 VM_FAULT_RETRY))) {
Jan Kara936ca802016-12-14 15:07:10 -08004097 unlock_page(vmf->page);
4098 put_page(vmf->page);
Kirill A. Shutemovf0c6d4d2014-04-03 14:48:13 -07004099 return ret;
Peter Zijlstrad08b3852006-09-25 23:30:57 -07004100 }
Kirill A. Shutemovf0c6d4d2014-04-03 14:48:13 -07004101
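	/*
	 * Dirty the page and, if needed, throttle the task against the
	 * dirty page limits for the shared mapping.
	 */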
Johannes Weiner89b15332019-11-30 17:50:22 -08004102 ret |= fault_dirty_shared_page(vmf);
KAMEZAWA Hiroyuki1d65f862011-07-25 17:12:27 -07004103 return ret;
Nick Piggin54cb8822007-07-19 01:46:59 -07004104}
Nick Piggind00806b2007-07-19 01:46:57 -07004105
Paul Cassella9a95f3c2014-08-06 16:07:24 -07004106/*
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07004107 * We enter with non-exclusive mmap_lock (to exclude vma changes,
Paul Cassella9a95f3c2014-08-06 16:07:24 -07004108 * but allow concurrent faults).
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07004109 * The mmap_lock may have been released depending on flags and our
Paul Cassella9a95f3c2014-08-06 16:07:24 -07004110 * return value. See filemap_fault() and __lock_page_or_retry().
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07004111 * If mmap_lock is released, vma may become invalid (for example
Jan Stancekfc8efd22019-03-05 15:50:08 -08004112 * by other thread calling munmap()).
Paul Cassella9a95f3c2014-08-06 16:07:24 -07004113 */
Souptick Joarder2b740302018-08-23 17:01:36 -07004114static vm_fault_t do_fault(struct vm_fault *vmf)
Nick Piggin54cb8822007-07-19 01:46:59 -07004115{
Jan Kara82b0f8c2016-12-14 15:06:58 -08004116 struct vm_area_struct *vma = vmf->vma;
Jan Stancekfc8efd22019-03-05 15:50:08 -08004117 struct mm_struct *vm_mm = vma->vm_mm;
Souptick Joarder2b740302018-08-23 17:01:36 -07004118 vm_fault_t ret;
Nick Piggin54cb8822007-07-19 01:46:59 -07004119
Aneesh Kumar K.Vff09d7e2018-10-26 15:09:01 -07004120 /*
4121	 * The VMA was not fully populated on mmap(), or is missing VM_DONTEXPAND
4122 */
4123 if (!vma->vm_ops->fault) {
4124 /*
4125 * If we find a migration pmd entry or a none pmd entry, which
4126 * should never happen, return SIGBUS
4127 */
4128 if (unlikely(!pmd_present(*vmf->pmd)))
4129 ret = VM_FAULT_SIGBUS;
4130 else {
4131 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm,
4132 vmf->pmd,
4133 vmf->address,
4134 &vmf->ptl);
4135 /*
4136 * Make sure this is not a temporary clearing of pte
4137 * by holding ptl and checking again. A R/M/W update
4138			 * of the pte involves: taking the ptl, clearing the pte so that
4139			 * we don't have concurrent modification by hardware,
4140			 * followed by an update.
4141 */
4142 if (unlikely(pte_none(*vmf->pte)))
4143 ret = VM_FAULT_SIGBUS;
4144 else
4145 ret = VM_FAULT_NOPAGE;
4146
4147 pte_unmap_unlock(vmf->pte, vmf->ptl);
4148 }
4149 } else if (!(vmf->flags & FAULT_FLAG_WRITE))
Hugh Dickinsb0b9b3d2017-01-07 15:37:31 -08004150 ret = do_read_fault(vmf);
4151 else if (!(vma->vm_flags & VM_SHARED))
4152 ret = do_cow_fault(vmf);
4153 else
4154 ret = do_shared_fault(vmf);
4155
4156 /* preallocated pagetable is unused: free it */
4157 if (vmf->prealloc_pte) {
Jan Stancekfc8efd22019-03-05 15:50:08 -08004158 pte_free(vm_mm, vmf->prealloc_pte);
Tobin C Harding7f2b6ce2017-02-24 14:58:59 -08004159 vmf->prealloc_pte = NULL;
Hugh Dickinsb0b9b3d2017-01-07 15:37:31 -08004160 }
4161 return ret;
Nick Piggin54cb8822007-07-19 01:46:59 -07004162}
4163
Rashika Kheriab19a9932014-04-03 14:48:02 -07004164static int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
Rik van Riel04bb2f92013-10-07 11:29:36 +01004165 unsigned long addr, int page_nid,
4166 int *flags)
Mel Gorman9532fec2012-11-15 01:24:32 +00004167{
4168 get_page(page);
4169
4170 count_vm_numa_event(NUMA_HINT_FAULTS);
Rik van Riel04bb2f92013-10-07 11:29:36 +01004171 if (page_nid == numa_node_id()) {
Mel Gorman9532fec2012-11-15 01:24:32 +00004172 count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
Rik van Riel04bb2f92013-10-07 11:29:36 +01004173 *flags |= TNF_FAULT_LOCAL;
4174 }
Mel Gorman9532fec2012-11-15 01:24:32 +00004175
4176 return mpol_misplaced(page, vma, addr);
4177}
4178
Souptick Joarder2b740302018-08-23 17:01:36 -07004179static vm_fault_t do_numa_page(struct vm_fault *vmf)
Mel Gormand10e63f2012-10-25 14:16:31 +02004180{
Jan Kara82b0f8c2016-12-14 15:06:58 -08004181 struct vm_area_struct *vma = vmf->vma;
Mel Gorman4daae3b2012-11-02 11:33:45 +00004182 struct page *page = NULL;
Anshuman Khandual98fa15f2019-03-05 15:42:58 -08004183 int page_nid = NUMA_NO_NODE;
Peter Zijlstra90572892013-10-07 11:29:20 +01004184 int last_cpupid;
Peter Zijlstracbee9f82012-10-25 14:16:43 +02004185 int target_nid;
Mel Gormanb8593bf2012-11-21 01:18:23 +00004186 bool migrated = false;
Aneesh Kumar K.V04a86452019-03-05 15:46:29 -08004187 pte_t pte, old_pte;
Aneesh Kumar K.V288bc542017-02-24 14:59:16 -08004188 bool was_writable = pte_savedwrite(vmf->orig_pte);
Peter Zijlstra6688cc02013-10-07 11:29:24 +01004189 int flags = 0;
Mel Gormand10e63f2012-10-25 14:16:31 +02004190
4191 /*
Tobin C Harding166f61b2017-02-24 14:59:01 -08004192 * The "pte" at this point cannot be used safely without
4193 * validation through pte_unmap_same(). It's of NUMA type but
4194	 * the pfn may be screwed if the read is non-atomic.
Tobin C Harding166f61b2017-02-24 14:59:01 -08004195 */
Jan Kara82b0f8c2016-12-14 15:06:58 -08004196 vmf->ptl = pte_lockptr(vma->vm_mm, vmf->pmd);
4197 spin_lock(vmf->ptl);
Aneesh Kumar K.Vcee216a2017-02-24 14:59:13 -08004198 if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) {
Jan Kara82b0f8c2016-12-14 15:06:58 -08004199 pte_unmap_unlock(vmf->pte, vmf->ptl);
Mel Gorman4daae3b2012-11-02 11:33:45 +00004200 goto out;
4201 }
4202
Aneesh Kumar K.Vcee216a2017-02-24 14:59:13 -08004203 /*
4204	 * Make it present again. Depending on how the arch implements
4205	 * non-accessible ptes, some can allow access by kernel mode.
4206 */
Aneesh Kumar K.V04a86452019-03-05 15:46:29 -08004207 old_pte = ptep_modify_prot_start(vma, vmf->address, vmf->pte);
4208 pte = pte_modify(old_pte, vma->vm_page_prot);
Mel Gorman4d942462015-02-12 14:58:28 -08004209 pte = pte_mkyoung(pte);
Mel Gormanb191f9b2015-03-25 15:55:40 -07004210 if (was_writable)
4211 pte = pte_mkwrite(pte);
Aneesh Kumar K.V04a86452019-03-05 15:46:29 -08004212 ptep_modify_prot_commit(vma, vmf->address, vmf->pte, old_pte, pte);
Jan Kara82b0f8c2016-12-14 15:06:58 -08004213 update_mmu_cache(vma, vmf->address, vmf->pte);
Mel Gormand10e63f2012-10-25 14:16:31 +02004214
Jan Kara82b0f8c2016-12-14 15:06:58 -08004215 page = vm_normal_page(vma, vmf->address, pte);
Mel Gormand10e63f2012-10-25 14:16:31 +02004216 if (!page) {
Jan Kara82b0f8c2016-12-14 15:06:58 -08004217 pte_unmap_unlock(vmf->pte, vmf->ptl);
Mel Gormand10e63f2012-10-25 14:16:31 +02004218 return 0;
4219 }
4220
Kirill A. Shutemove81c4802016-01-15 16:53:49 -08004221 /* TODO: handle PTE-mapped THP */
4222 if (PageCompound(page)) {
Jan Kara82b0f8c2016-12-14 15:06:58 -08004223 pte_unmap_unlock(vmf->pte, vmf->ptl);
Kirill A. Shutemove81c4802016-01-15 16:53:49 -08004224 return 0;
4225 }
4226
Peter Zijlstra6688cc02013-10-07 11:29:24 +01004227 /*
Mel Gormanbea66fb2015-03-25 15:55:37 -07004228 * Avoid grouping on RO pages in general. RO pages shouldn't hurt as
4229 * much anyway since they can be in shared cache state. This misses
4230 * the case where a mapping is writable but the process never writes
4231 * to it but pte_write gets cleared during protection updates and
4232 * pte_dirty has unpredictable behaviour between PTE scan updates,
4233 * background writeback, dirty balancing and application behaviour.
Peter Zijlstra6688cc02013-10-07 11:29:24 +01004234 */
Rik van Rield59dc7b2016-09-08 21:30:53 -04004235 if (!pte_write(pte))
Peter Zijlstra6688cc02013-10-07 11:29:24 +01004236 flags |= TNF_NO_GROUP;
4237
Rik van Rieldabe1d92013-10-07 11:29:34 +01004238 /*
4239 * Flag if the page is shared between multiple address spaces. This
4240 * is later used when determining whether to group tasks together
4241 */
4242 if (page_mapcount(page) > 1 && (vma->vm_flags & VM_SHARED))
4243 flags |= TNF_SHARED;
4244
Peter Zijlstra90572892013-10-07 11:29:20 +01004245 last_cpupid = page_cpupid_last(page);
Mel Gorman8191acb2013-10-07 11:28:45 +01004246 page_nid = page_to_nid(page);
Jan Kara82b0f8c2016-12-14 15:06:58 -08004247 target_nid = numa_migrate_prep(page, vma, vmf->address, page_nid,
Kirill A. Shutemovbae473a2016-07-26 15:25:20 -07004248 &flags);
Jan Kara82b0f8c2016-12-14 15:06:58 -08004249 pte_unmap_unlock(vmf->pte, vmf->ptl);
Anshuman Khandual98fa15f2019-03-05 15:42:58 -08004250 if (target_nid == NUMA_NO_NODE) {
Mel Gorman4daae3b2012-11-02 11:33:45 +00004251 put_page(page);
4252 goto out;
4253 }
4254
4255 /* Migrate to the requested node */
Mel Gorman1bc115d2013-10-07 11:29:05 +01004256 migrated = migrate_misplaced_page(page, vma, target_nid);
Peter Zijlstra6688cc02013-10-07 11:29:24 +01004257 if (migrated) {
Mel Gorman8191acb2013-10-07 11:28:45 +01004258 page_nid = target_nid;
Peter Zijlstra6688cc02013-10-07 11:29:24 +01004259 flags |= TNF_MIGRATED;
Mel Gorman074c2382015-03-25 15:55:42 -07004260 } else
4261 flags |= TNF_MIGRATE_FAIL;
Mel Gorman4daae3b2012-11-02 11:33:45 +00004262
4263out:
Anshuman Khandual98fa15f2019-03-05 15:42:58 -08004264 if (page_nid != NUMA_NO_NODE)
Peter Zijlstra6688cc02013-10-07 11:29:24 +01004265 task_numa_fault(last_cpupid, page_nid, 1, flags);
Mel Gormand10e63f2012-10-25 14:16:31 +02004266 return 0;
4267}
4268
Souptick Joarder2b740302018-08-23 17:01:36 -07004269static inline vm_fault_t create_huge_pmd(struct vm_fault *vmf)
Matthew Wilcoxb96375f2015-09-08 14:58:48 -07004270{
Dave Jiangf4200392017-02-22 15:40:06 -08004271 if (vma_is_anonymous(vmf->vma))
Jan Kara82b0f8c2016-12-14 15:06:58 -08004272 return do_huge_pmd_anonymous_page(vmf);
Dave Jianga2d58162017-02-24 14:56:59 -08004273 if (vmf->vma->vm_ops->huge_fault)
Dave Jiangc791ace2017-02-24 14:57:08 -08004274 return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD);
Matthew Wilcoxb96375f2015-09-08 14:58:48 -07004275 return VM_FAULT_FALLBACK;
4276}
4277
Geert Uytterhoeven183f24a2017-12-14 15:32:52 -08004278/* `inline' is required to avoid gcc 4.1.2 build error */
Souptick Joarder2b740302018-08-23 17:01:36 -07004279static inline vm_fault_t wp_huge_pmd(struct vm_fault *vmf, pmd_t orig_pmd)
Matthew Wilcoxb96375f2015-09-08 14:58:48 -07004280{
Andrea Arcangeli529b9302020-04-06 20:05:29 -07004281 if (vma_is_anonymous(vmf->vma)) {
Peter Xu292924b2020-04-06 20:05:49 -07004282 if (userfaultfd_huge_pmd_wp(vmf->vma, orig_pmd))
Andrea Arcangeli529b9302020-04-06 20:05:29 -07004283 return handle_userfault(vmf, VM_UFFD_WP);
Jan Kara82b0f8c2016-12-14 15:06:58 -08004284 return do_huge_pmd_wp_page(vmf, orig_pmd);
Andrea Arcangeli529b9302020-04-06 20:05:29 -07004285 }
Thomas Hellstrom (VMware)327e9fd2020-03-24 18:47:47 +01004286 if (vmf->vma->vm_ops->huge_fault) {
4287 vm_fault_t ret = vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD);
Kirill A. Shutemovaf9e4d52016-07-26 15:25:40 -07004288
Thomas Hellstrom (VMware)327e9fd2020-03-24 18:47:47 +01004289 if (!(ret & VM_FAULT_FALLBACK))
4290 return ret;
4291 }
4292
4293 /* COW or write-notify handled on pte level: split pmd. */
Jan Kara82b0f8c2016-12-14 15:06:58 -08004294 __split_huge_pmd(vmf->vma, vmf->pmd, vmf->address, false, NULL);
Kirill A. Shutemovaf9e4d52016-07-26 15:25:40 -07004295
Matthew Wilcoxb96375f2015-09-08 14:58:48 -07004296 return VM_FAULT_FALLBACK;
4297}
4298
Souptick Joarder2b740302018-08-23 17:01:36 -07004299static vm_fault_t create_huge_pud(struct vm_fault *vmf)
Matthew Wilcoxa00cc7d2017-02-24 14:57:02 -08004300{
Thomas Hellstrom (VMware)327e9fd2020-03-24 18:47:47 +01004301#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
4302 defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
Matthew Wilcoxa00cc7d2017-02-24 14:57:02 -08004303 /* No support for anonymous transparent PUD pages yet */
4304 if (vma_is_anonymous(vmf->vma))
Thomas Hellstrom (VMware)327e9fd2020-03-24 18:47:47 +01004305 goto split;
4306 if (vmf->vma->vm_ops->huge_fault) {
4307 vm_fault_t ret = vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD);
4308
4309 if (!(ret & VM_FAULT_FALLBACK))
4310 return ret;
4311 }
4312split:
4313 /* COW or write-notify not handled on PUD level: split pud.*/
4314 __split_huge_pud(vmf->vma, vmf->pud, vmf->address);
Matthew Wilcoxa00cc7d2017-02-24 14:57:02 -08004315#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
4316 return VM_FAULT_FALLBACK;
4317}
4318
Souptick Joarder2b740302018-08-23 17:01:36 -07004319static vm_fault_t wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud)
Matthew Wilcoxa00cc7d2017-02-24 14:57:02 -08004320{
4321#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4322 /* No support for anonymous transparent PUD pages yet */
4323 if (vma_is_anonymous(vmf->vma))
4324 return VM_FAULT_FALLBACK;
4325 if (vmf->vma->vm_ops->huge_fault)
Dave Jiangc791ace2017-02-24 14:57:08 -08004326 return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD);
Matthew Wilcoxa00cc7d2017-02-24 14:57:02 -08004327#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
4328 return VM_FAULT_FALLBACK;
4329}
4330
Linus Torvalds1da177e2005-04-16 15:20:36 -07004331/*
4332 * These routines also need to handle stuff like marking pages dirty
4333 * and/or accessed for architectures that don't do it in hardware (most
4334 * RISC architectures). The early dirtying is also good on the i386.
4335 *
4336 * There is also a hook called "update_mmu_cache()" that architectures
4337 * with external mmu caches can use to update those (ie the Sparc or
4338 * PowerPC hashed page tables that act as extended TLBs).
4339 *
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07004340 * We enter with non-exclusive mmap_lock (to exclude vma changes, but allow
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07004341 * concurrent faults).
Paul Cassella9a95f3c2014-08-06 16:07:24 -07004342 *
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07004343 * The mmap_lock may have been released depending on flags and our return value.
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07004344 * See filemap_fault() and __lock_page_or_retry().
Linus Torvalds1da177e2005-04-16 15:20:36 -07004345 */
Souptick Joarder2b740302018-08-23 17:01:36 -07004346static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004347{
4348 pte_t entry;
4349
Jan Kara82b0f8c2016-12-14 15:06:58 -08004350 if (unlikely(pmd_none(*vmf->pmd))) {
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07004351 /*
4352 * Leave __pte_alloc() until later: because vm_ops->fault may
4353		 * want to allocate a huge page, and if we expose the page table
4354 * for an instant, it will be difficult to retract from
4355 * concurrent faults and from rmap lookups.
4356 */
Jan Kara82b0f8c2016-12-14 15:06:58 -08004357 vmf->pte = NULL;
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07004358 } else {
4359 /* See comment in pte_alloc_one_map() */
Ross Zwislerd0f0931d2017-06-02 14:46:34 -07004360 if (pmd_devmap_trans_unstable(vmf->pmd))
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07004361 return 0;
4362 /*
4363 * A regular pmd is established and it can't morph into a huge
4364 * pmd from under us anymore at this point because we hold the
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07004365 * mmap_lock read mode and khugepaged takes it in write mode.
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07004366 * So now it's safe to run pte_offset_map().
4367 */
Jan Kara82b0f8c2016-12-14 15:06:58 -08004368 vmf->pte = pte_offset_map(vmf->pmd, vmf->address);
Jan Kara29943022016-12-14 15:07:16 -08004369 vmf->orig_pte = *vmf->pte;
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07004370
4371 /*
4372		 * some architectures can have ptes larger than the word size,
4373		 * e.g. ppc44x-defconfig has CONFIG_PTE_64BIT=y and
Paul E. McKenneyb03a0fe2017-10-23 14:07:25 -07004374 * CONFIG_32BIT=y, so READ_ONCE cannot guarantee atomic
4375 * accesses. The code below just needs a consistent view
4376 * for the ifs and we later double check anyway with the
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07004377 * ptl lock held. So here a barrier will do.
4378 */
4379 barrier();
Jan Kara29943022016-12-14 15:07:16 -08004380 if (pte_none(vmf->orig_pte)) {
Jan Kara82b0f8c2016-12-14 15:06:58 -08004381 pte_unmap(vmf->pte);
4382 vmf->pte = NULL;
Hugh Dickins65500d22005-10-29 18:15:59 -07004383 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004384 }
4385
Jan Kara82b0f8c2016-12-14 15:06:58 -08004386 if (!vmf->pte) {
4387 if (vma_is_anonymous(vmf->vma))
4388 return do_anonymous_page(vmf);
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07004389 else
Jan Kara82b0f8c2016-12-14 15:06:58 -08004390 return do_fault(vmf);
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07004391 }
4392
Jan Kara29943022016-12-14 15:07:16 -08004393 if (!pte_present(vmf->orig_pte))
4394 return do_swap_page(vmf);
Kirill A. Shutemov7267ec002016-07-26 15:25:23 -07004395
Jan Kara29943022016-12-14 15:07:16 -08004396 if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma))
4397 return do_numa_page(vmf);
Mel Gormand10e63f2012-10-25 14:16:31 +02004398
Jan Kara82b0f8c2016-12-14 15:06:58 -08004399 vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
4400 spin_lock(vmf->ptl);
Jan Kara29943022016-12-14 15:07:16 -08004401 entry = vmf->orig_pte;
Bibo Mao7df67692020-05-27 10:25:18 +08004402 if (unlikely(!pte_same(*vmf->pte, entry))) {
4403 update_mmu_tlb(vmf->vma, vmf->address, vmf->pte);
Hugh Dickins8f4e2102005-10-29 18:16:26 -07004404 goto unlock;
Bibo Mao7df67692020-05-27 10:25:18 +08004405 }
Jan Kara82b0f8c2016-12-14 15:06:58 -08004406 if (vmf->flags & FAULT_FLAG_WRITE) {
Linus Torvaldsf6f37322017-12-15 18:53:22 -08004407 if (!pte_write(entry))
Jan Kara29943022016-12-14 15:07:16 -08004408 return do_wp_page(vmf);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004409 entry = pte_mkdirty(entry);
4410 }
4411 entry = pte_mkyoung(entry);
Jan Kara82b0f8c2016-12-14 15:06:58 -08004412 if (ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte, entry,
4413 vmf->flags & FAULT_FLAG_WRITE)) {
4414 update_mmu_cache(vmf->vma, vmf->address, vmf->pte);
Andrea Arcangeli1a44e142005-10-29 18:16:48 -07004415 } else {
Yang Shib7333b52020-08-14 21:30:41 -07004416 /* Skip spurious TLB flush for retried page fault */
4417 if (vmf->flags & FAULT_FLAG_TRIED)
4418 goto unlock;
Andrea Arcangeli1a44e142005-10-29 18:16:48 -07004419 /*
4420 * This is needed only for protection faults but the arch code
4421 * is not yet telling us if this is a protection fault or not.
4422 * This still avoids useless tlb flushes for .text page faults
4423 * with threads.
4424 */
Jan Kara82b0f8c2016-12-14 15:06:58 -08004425 if (vmf->flags & FAULT_FLAG_WRITE)
4426 flush_tlb_fix_spurious_fault(vmf->vma, vmf->address);
Andrea Arcangeli1a44e142005-10-29 18:16:48 -07004427 }
Hugh Dickins8f4e2102005-10-29 18:16:26 -07004428unlock:
Jan Kara82b0f8c2016-12-14 15:06:58 -08004429 pte_unmap_unlock(vmf->pte, vmf->ptl);
Nick Piggin83c54072007-07-19 01:47:05 -07004430 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004431}
4432
4433/*
4434 * By the time we get here, we already hold the mm semaphore
Paul Cassella9a95f3c2014-08-06 16:07:24 -07004435 *
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07004436 * The mmap_lock may have been released depending on flags and our
Paul Cassella9a95f3c2014-08-06 16:07:24 -07004437 * return value. See filemap_fault() and __lock_page_or_retry().
Linus Torvalds1da177e2005-04-16 15:20:36 -07004438 */
Souptick Joarder2b740302018-08-23 17:01:36 -07004439static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
4440 unsigned long address, unsigned int flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004441{
Jan Kara82b0f8c2016-12-14 15:06:58 -08004442 struct vm_fault vmf = {
Kirill A. Shutemovbae473a2016-07-26 15:25:20 -07004443 .vma = vma,
Jan Kara1a29d852016-12-14 15:07:01 -08004444 .address = address & PAGE_MASK,
Kirill A. Shutemovbae473a2016-07-26 15:25:20 -07004445 .flags = flags,
Jan Kara0721ec82016-12-14 15:07:04 -08004446 .pgoff = linear_page_index(vma, address),
Jan Kara667240e2016-12-14 15:07:07 -08004447 .gfp_mask = __get_fault_gfp_mask(vma),
Kirill A. Shutemovbae473a2016-07-26 15:25:20 -07004448 };
Anshuman Khandualfde26be2017-09-08 16:12:45 -07004449 unsigned int dirty = flags & FAULT_FLAG_WRITE;
Kirill A. Shutemovdcddffd2016-07-26 15:25:18 -07004450 struct mm_struct *mm = vma->vm_mm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004451 pgd_t *pgd;
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03004452 p4d_t *p4d;
Souptick Joarder2b740302018-08-23 17:01:36 -07004453 vm_fault_t ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004454
Linus Torvalds1da177e2005-04-16 15:20:36 -07004455 pgd = pgd_offset(mm, address);
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03004456 p4d = p4d_alloc(mm, pgd, address);
4457 if (!p4d)
4458 return VM_FAULT_OOM;
Matthew Wilcoxa00cc7d2017-02-24 14:57:02 -08004459
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03004460 vmf.pud = pud_alloc(mm, p4d, address);
Matthew Wilcoxa00cc7d2017-02-24 14:57:02 -08004461 if (!vmf.pud)
Hugh Dickinsc74df322005-10-29 18:16:23 -07004462 return VM_FAULT_OOM;
Thomas Hellstrom625110b2019-11-30 17:51:32 -08004463retry_pud:
Michal Hocko7635d9c2018-12-28 00:38:21 -08004464 if (pud_none(*vmf.pud) && __transparent_hugepage_enabled(vma)) {
Matthew Wilcoxa00cc7d2017-02-24 14:57:02 -08004465 ret = create_huge_pud(&vmf);
4466 if (!(ret & VM_FAULT_FALLBACK))
4467 return ret;
4468 } else {
4469 pud_t orig_pud = *vmf.pud;
4470
4471 barrier();
4472 if (pud_trans_huge(orig_pud) || pud_devmap(orig_pud)) {
Matthew Wilcoxa00cc7d2017-02-24 14:57:02 -08004473
Matthew Wilcoxa00cc7d2017-02-24 14:57:02 -08004474 /* NUMA case for anonymous PUDs would go here */
4475
Linus Torvaldsf6f37322017-12-15 18:53:22 -08004476 if (dirty && !pud_write(orig_pud)) {
Matthew Wilcoxa00cc7d2017-02-24 14:57:02 -08004477 ret = wp_huge_pud(&vmf, orig_pud);
4478 if (!(ret & VM_FAULT_FALLBACK))
4479 return ret;
4480 } else {
4481 huge_pud_set_accessed(&vmf, orig_pud);
4482 return 0;
4483 }
4484 }
4485 }
4486
4487 vmf.pmd = pmd_alloc(mm, vmf.pud, address);
Jan Kara82b0f8c2016-12-14 15:06:58 -08004488 if (!vmf.pmd)
Hugh Dickinsc74df322005-10-29 18:16:23 -07004489 return VM_FAULT_OOM;
Thomas Hellstrom625110b2019-11-30 17:51:32 -08004490
4491 /* Huge pud page fault raced with pmd_alloc? */
4492 if (pud_trans_unstable(vmf.pud))
4493 goto retry_pud;
4494
Michal Hocko7635d9c2018-12-28 00:38:21 -08004495 if (pmd_none(*vmf.pmd) && __transparent_hugepage_enabled(vma)) {
Dave Jianga2d58162017-02-24 14:56:59 -08004496 ret = create_huge_pmd(&vmf);
Kirill A. Shutemovc0292552013-09-12 15:14:05 -07004497 if (!(ret & VM_FAULT_FALLBACK))
4498 return ret;
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08004499 } else {
Jan Kara82b0f8c2016-12-14 15:06:58 -08004500 pmd_t orig_pmd = *vmf.pmd;
David Rientjes1f1d06c2012-05-29 15:06:23 -07004501
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08004502 barrier();
Zi Yan84c3fc42017-09-08 16:11:01 -07004503 if (unlikely(is_swap_pmd(orig_pmd))) {
4504 VM_BUG_ON(thp_migration_supported() &&
4505 !is_pmd_migration_entry(orig_pmd));
4506 if (is_pmd_migration_entry(orig_pmd))
4507 pmd_migration_entry_wait(mm, vmf.pmd);
4508 return 0;
4509 }
Dan Williams5c7fb562016-01-15 16:56:52 -08004510 if (pmd_trans_huge(orig_pmd) || pmd_devmap(orig_pmd)) {
Lorenzo Stoakes38e08852016-09-11 23:54:25 +01004511 if (pmd_protnone(orig_pmd) && vma_is_accessible(vma))
Jan Kara82b0f8c2016-12-14 15:06:58 -08004512 return do_huge_pmd_numa_page(&vmf, orig_pmd);
Mel Gormand10e63f2012-10-25 14:16:31 +02004513
Linus Torvaldsf6f37322017-12-15 18:53:22 -08004514 if (dirty && !pmd_write(orig_pmd)) {
Jan Kara82b0f8c2016-12-14 15:06:58 -08004515 ret = wp_huge_pmd(&vmf, orig_pmd);
Kirill A. Shutemov9845cbb2014-02-25 15:01:42 -08004516 if (!(ret & VM_FAULT_FALLBACK))
4517 return ret;
Will Deacona1dd4502012-12-11 16:01:27 -08004518 } else {
Jan Kara82b0f8c2016-12-14 15:06:58 -08004519 huge_pmd_set_accessed(&vmf, orig_pmd);
Kirill A. Shutemov9845cbb2014-02-25 15:01:42 -08004520 return 0;
David Rientjes1f1d06c2012-05-29 15:06:23 -07004521 }
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08004522 }
4523 }
4524
Jan Kara82b0f8c2016-12-14 15:06:58 -08004525 return handle_pte_fault(&vmf);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004526}
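
/*
 * Worked example of the fallback chain in __handle_mm_fault() (for
 * orientation only): a write fault on an anonymous, THP-eligible address
 * with no PMD entry first goes to create_huge_pmd().  If the huge page
 * cannot be set up, that handler returns VM_FAULT_FALLBACK and we drop
 * through to handle_pte_fault(), which maps a single small page instead.
 * The PUD branch behaves the same way one level up.
 */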
4527
Peter Xubce617e2020-08-11 18:37:44 -07004528/**
 * mm_account_fault - Do page fault accounting
4530 *
 * @regs: the pt_regs struct pointer. When set to NULL, we skip accounting
 * of perf event counters, but we still do the per-task accounting for
 * the task that triggered this page fault.
4534 * @address: the faulted address.
4535 * @flags: the fault flags.
4536 * @ret: the fault retcode.
4537 *
 * This will take care of most of the page fault accounting. It also issues
 * the PERF_COUNT_SW_PAGE_FAULTS_[MAJ|MIN] perf counter updates. Note,
 * however, that the handling of PERF_COUNT_SW_PAGE_FAULTS should still be
 * done in the per-arch page fault handlers, at the entry of the page fault.
4542 */
4543static inline void mm_account_fault(struct pt_regs *regs,
4544 unsigned long address, unsigned int flags,
4545 vm_fault_t ret)
4546{
4547 bool major;
4548
4549 /*
4550 * We don't do accounting for some specific faults:
4551 *
4552 * - Unsuccessful faults (e.g. when the address wasn't valid). That
4553 * includes arch_vma_access_permitted() failing before reaching here.
4554 * So this is not a "this many hardware page faults" counter. We
4555 * should use the hw profiling for that.
4556 *
4557 * - Incomplete faults (VM_FAULT_RETRY). They will only be counted
4558 * once they're completed.
4559 */
4560 if (ret & (VM_FAULT_ERROR | VM_FAULT_RETRY))
4561 return;
4562
4563 /*
4564 * We define the fault as a major fault when the final successful fault
4565 * is VM_FAULT_MAJOR, or if it retried (which implies that we couldn't
4566 * handle it immediately previously).
4567 */
4568 major = (ret & VM_FAULT_MAJOR) || (flags & FAULT_FLAG_TRIED);
4569
Peter Xua2beb5f2020-08-11 18:38:57 -07004570 if (major)
4571 current->maj_flt++;
4572 else
4573 current->min_flt++;
4574
Peter Xubce617e2020-08-11 18:37:44 -07004575 /*
	 * If the fault is done for GUP, regs will be NULL. We only do the
	 * per-thread fault accounting for the task that triggered the
	 * fault, and we skip the perf event updates.
Peter Xubce617e2020-08-11 18:37:44 -07004579 */
4580 if (!regs)
4581 return;
4582
Peter Xua2beb5f2020-08-11 18:38:57 -07004583 if (major)
Peter Xubce617e2020-08-11 18:37:44 -07004584 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
Peter Xua2beb5f2020-08-11 18:38:57 -07004585 else
Peter Xubce617e2020-08-11 18:37:44 -07004586 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
Peter Xubce617e2020-08-11 18:37:44 -07004587}
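
/*
 * Illustrative fragment (variable names assumed from the caller): the GUP
 * path passes a NULL regs, so only the task counters above are bumped and
 * no perf software events are emitted.
 */
#if 0
	/* e.g. mm/gup.c faultin_page() style: NULL regs, no perf accounting */
	fault = handle_mm_fault(vma, address, fault_flags, NULL);
#endif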
4588
Paul Cassella9a95f3c2014-08-06 16:07:24 -07004589/*
4590 * By the time we get here, we already hold the mm semaphore
4591 *
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07004592 * The mmap_lock may have been released depending on flags and our
Paul Cassella9a95f3c2014-08-06 16:07:24 -07004593 * return value. See filemap_fault() and __lock_page_or_retry().
4594 */
Souptick Joarder2b740302018-08-23 17:01:36 -07004595vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
Peter Xubce617e2020-08-11 18:37:44 -07004596 unsigned int flags, struct pt_regs *regs)
Johannes Weiner519e5242013-09-12 15:13:42 -07004597{
Souptick Joarder2b740302018-08-23 17:01:36 -07004598 vm_fault_t ret;
Johannes Weiner519e5242013-09-12 15:13:42 -07004599
4600 __set_current_state(TASK_RUNNING);
4601
4602 count_vm_event(PGFAULT);
Roman Gushchin22621852017-07-06 15:40:25 -07004603 count_memcg_event_mm(vma->vm_mm, PGFAULT);
Johannes Weiner519e5242013-09-12 15:13:42 -07004604
	/* Do counter updates before entering the really critical section. */
4606 check_sync_rss_stat(current);
4607
Laurent Dufourde0c7992017-09-08 16:13:12 -07004608 if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE,
4609 flags & FAULT_FLAG_INSTRUCTION,
4610 flags & FAULT_FLAG_REMOTE))
4611 return VM_FAULT_SIGSEGV;
4612
Johannes Weiner519e5242013-09-12 15:13:42 -07004613 /*
4614 * Enable the memcg OOM handling for faults triggered in user
4615 * space. Kernel faults are handled more gracefully.
4616 */
4617 if (flags & FAULT_FLAG_USER)
Michal Hocko29ef6802018-08-17 15:47:11 -07004618 mem_cgroup_enter_user_fault();
Johannes Weiner519e5242013-09-12 15:13:42 -07004619
Kirill A. Shutemovbae473a2016-07-26 15:25:20 -07004620 if (unlikely(is_vm_hugetlb_page(vma)))
4621 ret = hugetlb_fault(vma->vm_mm, vma, address, flags);
4622 else
4623 ret = __handle_mm_fault(vma, address, flags);
Johannes Weiner519e5242013-09-12 15:13:42 -07004624
Johannes Weiner49426422013-10-16 13:46:59 -07004625 if (flags & FAULT_FLAG_USER) {
Michal Hocko29ef6802018-08-17 15:47:11 -07004626 mem_cgroup_exit_user_fault();
Tobin C Harding166f61b2017-02-24 14:59:01 -08004627 /*
4628 * The task may have entered a memcg OOM situation but
4629 * if the allocation error was handled gracefully (no
4630 * VM_FAULT_OOM), there is no need to kill anything.
4631 * Just clean up the OOM state peacefully.
4632 */
4633 if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM))
4634 mem_cgroup_oom_synchronize(false);
Johannes Weiner49426422013-10-16 13:46:59 -07004635 }
Johannes Weiner3812c8c2013-09-12 15:13:44 -07004636
Peter Xubce617e2020-08-11 18:37:44 -07004637 mm_account_fault(regs, address, flags, ret);
4638
Johannes Weiner519e5242013-09-12 15:13:42 -07004639 return ret;
4640}
Jesse Barnese1d6d012014-12-12 16:55:27 -08004641EXPORT_SYMBOL_GPL(handle_mm_fault);
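
/*
 * Minimal sketch of how an architecture fault handler typically drives
 * handle_mm_fault().  The function name is illustrative and not taken from
 * any particular arch; signal delivery, fatal-signal checks and
 * VM_FAULT_ERROR handling are trimmed.
 */
#if 0
static void example_do_page_fault(struct pt_regs *regs, unsigned long address)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned int flags = FAULT_FLAG_DEFAULT;
	vm_fault_t fault;

retry:
	mmap_read_lock(mm);
	vma = find_vma(mm, address);
	if (!vma || vma->vm_start > address) {
		mmap_read_unlock(mm);
		return;			/* a real handler would SIGSEGV here */
	}

	fault = handle_mm_fault(vma, address, flags, regs);
	if (fault & VM_FAULT_RETRY) {
		/* The fault path already dropped the mmap_lock for us. */
		flags |= FAULT_FLAG_TRIED;
		goto retry;
	}

	mmap_read_unlock(mm);
}
#endif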
Johannes Weiner519e5242013-09-12 15:13:42 -07004642
Kirill A. Shutemov90eceff2017-03-09 17:24:08 +03004643#ifndef __PAGETABLE_P4D_FOLDED
4644/*
4645 * Allocate p4d page table.
4646 * We've already handled the fast-path in-line.
4647 */
4648int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
4649{
4650 p4d_t *new = p4d_alloc_one(mm, address);
4651 if (!new)
4652 return -ENOMEM;
4653
4654 smp_wmb(); /* See comment in __pte_alloc */
4655
4656 spin_lock(&mm->page_table_lock);
4657 if (pgd_present(*pgd)) /* Another has populated it */
4658 p4d_free(mm, new);
4659 else
4660 pgd_populate(mm, pgd, new);
4661 spin_unlock(&mm->page_table_lock);
4662 return 0;
4663}
4664#endif /* __PAGETABLE_P4D_FOLDED */
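
/*
 * For context, a sketch of the inline fast path that pairs with
 * __p4d_alloc(); the real definition lives in the mm headers and is shown
 * here only to illustrate what "fast-path in-line" refers to.
 */
#if 0
static inline p4d_t *p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
			       unsigned long address)
{
	return (unlikely(pgd_none(*pgd)) && __p4d_alloc(mm, pgd, address)) ?
		NULL : p4d_offset(pgd, address);
}
#endif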
4665
Linus Torvalds1da177e2005-04-16 15:20:36 -07004666#ifndef __PAGETABLE_PUD_FOLDED
4667/*
4668 * Allocate page upper directory.
Hugh Dickins872fec12005-10-29 18:16:21 -07004669 * We've already handled the fast-path in-line.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004670 */
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03004671int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004672{
Hugh Dickinsc74df322005-10-29 18:16:23 -07004673 pud_t *new = pud_alloc_one(mm, address);
4674 if (!new)
Hugh Dickins1bb36302005-10-29 18:16:22 -07004675 return -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004676
Nick Piggin362a61a2008-05-14 06:37:36 +02004677 smp_wmb(); /* See comment in __pte_alloc */
4678
Hugh Dickins872fec12005-10-29 18:16:21 -07004679 spin_lock(&mm->page_table_lock);
Kirill A. Shutemovb4e98d92017-11-15 17:35:33 -08004680 if (!p4d_present(*p4d)) {
4681 mm_inc_nr_puds(mm);
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03004682 p4d_populate(mm, p4d, new);
Kirill A. Shutemovb4e98d92017-11-15 17:35:33 -08004683 } else /* Another has populated it */
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03004684 pud_free(mm, new);
Hugh Dickinsc74df322005-10-29 18:16:23 -07004685 spin_unlock(&mm->page_table_lock);
Hugh Dickins1bb36302005-10-29 18:16:22 -07004686 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004687}
4688#endif /* __PAGETABLE_PUD_FOLDED */
4689
4690#ifndef __PAGETABLE_PMD_FOLDED
4691/*
4692 * Allocate page middle directory.
Hugh Dickins872fec12005-10-29 18:16:21 -07004693 * We've already handled the fast-path in-line.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004694 */
Hugh Dickins1bb36302005-10-29 18:16:22 -07004695int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004696{
Matthew Wilcoxa00cc7d2017-02-24 14:57:02 -08004697 spinlock_t *ptl;
Hugh Dickinsc74df322005-10-29 18:16:23 -07004698 pmd_t *new = pmd_alloc_one(mm, address);
4699 if (!new)
Hugh Dickins1bb36302005-10-29 18:16:22 -07004700 return -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004701
Nick Piggin362a61a2008-05-14 06:37:36 +02004702 smp_wmb(); /* See comment in __pte_alloc */
4703
Matthew Wilcoxa00cc7d2017-02-24 14:57:02 -08004704 ptl = pud_lock(mm, pud);
Kirill A. Shutemovdc6c9a32015-02-11 15:26:50 -08004705 if (!pud_present(*pud)) {
4706 mm_inc_nr_pmds(mm);
Hugh Dickins1bb36302005-10-29 18:16:22 -07004707 pud_populate(mm, pud, new);
Kirill A. Shutemovdc6c9a32015-02-11 15:26:50 -08004708 } else /* Another has populated it */
Benjamin Herrenschmidt5e541972008-02-04 22:29:14 -08004709 pmd_free(mm, new);
Matthew Wilcoxa00cc7d2017-02-24 14:57:02 -08004710 spin_unlock(ptl);
Hugh Dickins1bb36302005-10-29 18:16:22 -07004711 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004712}
4713#endif /* __PAGETABLE_PMD_FOLDED */
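
/*
 * Likewise, a sketch of the inline fast path that pairs with __pud_alloc()
 * and __pmd_alloc() (again, the real definitions live in the mm headers):
 * only the slow paths above ever take page_table_lock or the PUD lock.
 */
#if 0
static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud,
			       unsigned long address)
{
	return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address)) ?
		NULL : pmd_offset(pud, address);
}
#endif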
4714
Ross Zwisler09796392017-01-10 16:57:21 -08004715static int __follow_pte_pmd(struct mm_struct *mm, unsigned long address,
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08004716 struct mmu_notifier_range *range,
Jérôme Glissea4d1a882017-08-31 17:17:26 -04004717 pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp)
Johannes Weinerf8ad0f492009-06-16 15:32:33 -07004718{
4719 pgd_t *pgd;
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03004720 p4d_t *p4d;
Johannes Weinerf8ad0f492009-06-16 15:32:33 -07004721 pud_t *pud;
4722 pmd_t *pmd;
4723 pte_t *ptep;
4724
4725 pgd = pgd_offset(mm, address);
4726 if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
4727 goto out;
4728
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03004729 p4d = p4d_offset(pgd, address);
4730 if (p4d_none(*p4d) || unlikely(p4d_bad(*p4d)))
4731 goto out;
4732
4733 pud = pud_offset(p4d, address);
Johannes Weinerf8ad0f492009-06-16 15:32:33 -07004734 if (pud_none(*pud) || unlikely(pud_bad(*pud)))
4735 goto out;
4736
4737 pmd = pmd_offset(pud, address);
Andrea Arcangelif66055ab2011-01-13 15:46:54 -08004738 VM_BUG_ON(pmd_trans_huge(*pmd));
Johannes Weinerf8ad0f492009-06-16 15:32:33 -07004739
Ross Zwisler09796392017-01-10 16:57:21 -08004740 if (pmd_huge(*pmd)) {
4741 if (!pmdpp)
4742 goto out;
4743
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08004744 if (range) {
Jérôme Glisse7269f992019-05-13 17:20:53 -07004745 mmu_notifier_range_init(range, MMU_NOTIFY_CLEAR, 0,
Jérôme Glisse6f4f13e2019-05-13 17:20:49 -07004746 NULL, mm, address & PMD_MASK,
4747 (address & PMD_MASK) + PMD_SIZE);
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08004748 mmu_notifier_invalidate_range_start(range);
Jérôme Glissea4d1a882017-08-31 17:17:26 -04004749 }
Ross Zwisler09796392017-01-10 16:57:21 -08004750 *ptlp = pmd_lock(mm, pmd);
4751 if (pmd_huge(*pmd)) {
4752 *pmdpp = pmd;
4753 return 0;
4754 }
4755 spin_unlock(*ptlp);
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08004756 if (range)
4757 mmu_notifier_invalidate_range_end(range);
Ross Zwisler09796392017-01-10 16:57:21 -08004758 }
4759
4760 if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
Johannes Weinerf8ad0f492009-06-16 15:32:33 -07004761 goto out;
4762
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08004763 if (range) {
Jérôme Glisse7269f992019-05-13 17:20:53 -07004764 mmu_notifier_range_init(range, MMU_NOTIFY_CLEAR, 0, NULL, mm,
Jérôme Glisse6f4f13e2019-05-13 17:20:49 -07004765 address & PAGE_MASK,
4766 (address & PAGE_MASK) + PAGE_SIZE);
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08004767 mmu_notifier_invalidate_range_start(range);
Jérôme Glissea4d1a882017-08-31 17:17:26 -04004768 }
Johannes Weinerf8ad0f492009-06-16 15:32:33 -07004769 ptep = pte_offset_map_lock(mm, pmd, address, ptlp);
Johannes Weinerf8ad0f492009-06-16 15:32:33 -07004770 if (!pte_present(*ptep))
4771 goto unlock;
4772 *ptepp = ptep;
4773 return 0;
4774unlock:
4775 pte_unmap_unlock(ptep, *ptlp);
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08004776 if (range)
4777 mmu_notifier_invalidate_range_end(range);
Johannes Weinerf8ad0f492009-06-16 15:32:33 -07004778out:
4779 return -EINVAL;
4780}
4781
Ross Zwislerf729c8c2017-01-10 16:57:24 -08004782static inline int follow_pte(struct mm_struct *mm, unsigned long address,
4783 pte_t **ptepp, spinlock_t **ptlp)
Namhyung Kim1b36ba82010-10-26 14:22:00 -07004784{
4785 int res;
4786
4787 /* (void) is needed to make gcc happy */
4788 (void) __cond_lock(*ptlp,
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08004789 !(res = __follow_pte_pmd(mm, address, NULL,
Jérôme Glissea4d1a882017-08-31 17:17:26 -04004790 ptepp, NULL, ptlp)));
Namhyung Kim1b36ba82010-10-26 14:22:00 -07004791 return res;
4792}
4793
Ross Zwisler09796392017-01-10 16:57:21 -08004794int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08004795 struct mmu_notifier_range *range,
4796 pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp)
Ross Zwisler09796392017-01-10 16:57:21 -08004797{
4798 int res;
4799
4800 /* (void) is needed to make gcc happy */
4801 (void) __cond_lock(*ptlp,
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08004802 !(res = __follow_pte_pmd(mm, address, range,
Jérôme Glissea4d1a882017-08-31 17:17:26 -04004803 ptepp, pmdpp, ptlp)));
Ross Zwisler09796392017-01-10 16:57:21 -08004804 return res;
4805}
4806EXPORT_SYMBOL(follow_pte_pmd);
4807
Johannes Weiner3b6748e2009-06-16 15:32:35 -07004808/**
4809 * follow_pfn - look up PFN at a user virtual address
4810 * @vma: memory mapping
4811 * @address: user virtual address
4812 * @pfn: location to store found PFN
4813 *
4814 * Only IO mappings and raw PFN mappings are allowed.
4815 *
 * Return: zero and the pfn at @pfn on success, a negative error code otherwise.
Johannes Weiner3b6748e2009-06-16 15:32:35 -07004817 */
4818int follow_pfn(struct vm_area_struct *vma, unsigned long address,
4819 unsigned long *pfn)
4820{
4821 int ret = -EINVAL;
4822 spinlock_t *ptl;
4823 pte_t *ptep;
4824
4825 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
4826 return ret;
4827
4828 ret = follow_pte(vma->vm_mm, address, &ptep, &ptl);
4829 if (ret)
4830 return ret;
4831 *pfn = pte_pfn(*ptep);
4832 pte_unmap_unlock(ptep, ptl);
4833 return 0;
4834}
4835EXPORT_SYMBOL(follow_pfn);
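
/*
 * Hedged usage sketch for follow_pfn(): a driver that needs the PFN behind a
 * VM_IO/VM_PFNMAP user address would do something like the helper below
 * while holding the mmap_lock for read.  The "example_" name is hypothetical.
 */
#if 0
static int example_lookup_pfn(struct mm_struct *mm, unsigned long addr,
			      unsigned long *pfn)
{
	struct vm_area_struct *vma;
	int ret = -EINVAL;

	mmap_read_lock(mm);
	vma = find_vma(mm, addr);
	if (vma && vma->vm_start <= addr)
		ret = follow_pfn(vma, addr, pfn);
	mmap_read_unlock(mm);

	return ret;
}
#endif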
4836
Rik van Riel28b2ee22008-07-23 21:27:05 -07004837#ifdef CONFIG_HAVE_IOREMAP_PROT
venkatesh.pallipadi@intel.comd87fe662008-12-19 13:47:27 -08004838int follow_phys(struct vm_area_struct *vma,
4839 unsigned long address, unsigned int flags,
4840 unsigned long *prot, resource_size_t *phys)
Rik van Riel28b2ee22008-07-23 21:27:05 -07004841{
Johannes Weiner03668a42009-06-16 15:32:34 -07004842 int ret = -EINVAL;
Rik van Riel28b2ee22008-07-23 21:27:05 -07004843 pte_t *ptep, pte;
4844 spinlock_t *ptl;
Rik van Riel28b2ee22008-07-23 21:27:05 -07004845
venkatesh.pallipadi@intel.comd87fe662008-12-19 13:47:27 -08004846 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
4847 goto out;
Rik van Riel28b2ee22008-07-23 21:27:05 -07004848
Johannes Weiner03668a42009-06-16 15:32:34 -07004849 if (follow_pte(vma->vm_mm, address, &ptep, &ptl))
venkatesh.pallipadi@intel.comd87fe662008-12-19 13:47:27 -08004850 goto out;
Rik van Riel28b2ee22008-07-23 21:27:05 -07004851 pte = *ptep;
Johannes Weiner03668a42009-06-16 15:32:34 -07004852
Linus Torvaldsf6f37322017-12-15 18:53:22 -08004853 if ((flags & FOLL_WRITE) && !pte_write(pte))
Rik van Riel28b2ee22008-07-23 21:27:05 -07004854 goto unlock;
Rik van Riel28b2ee22008-07-23 21:27:05 -07004855
4856 *prot = pgprot_val(pte_pgprot(pte));
Johannes Weiner03668a42009-06-16 15:32:34 -07004857 *phys = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT;
Rik van Riel28b2ee22008-07-23 21:27:05 -07004858
Johannes Weiner03668a42009-06-16 15:32:34 -07004859 ret = 0;
Rik van Riel28b2ee22008-07-23 21:27:05 -07004860unlock:
4861 pte_unmap_unlock(ptep, ptl);
4862out:
venkatesh.pallipadi@intel.comd87fe662008-12-19 13:47:27 -08004863 return ret;
Rik van Riel28b2ee22008-07-23 21:27:05 -07004864}
4865
4866int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
4867 void *buf, int len, int write)
4868{
4869 resource_size_t phys_addr;
4870 unsigned long prot = 0;
KOSAKI Motohiro2bc72732009-01-06 14:39:43 -08004871 void __iomem *maddr;
Rik van Riel28b2ee22008-07-23 21:27:05 -07004872 int offset = addr & (PAGE_SIZE-1);
4873
venkatesh.pallipadi@intel.comd87fe662008-12-19 13:47:27 -08004874 if (follow_phys(vma, addr, write, &prot, &phys_addr))
Rik van Riel28b2ee22008-07-23 21:27:05 -07004875 return -EINVAL;
4876
Grazvydas Ignotas9cb12d72015-02-12 15:00:19 -08004877 maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot);
jie@chenjie6@huwei.com24eee1e2018-08-10 17:23:06 -07004878 if (!maddr)
4879 return -ENOMEM;
4880
Rik van Riel28b2ee22008-07-23 21:27:05 -07004881 if (write)
4882 memcpy_toio(maddr + offset, buf, len);
4883 else
4884 memcpy_fromio(buf, maddr + offset, len);
4885 iounmap(maddr);
4886
4887 return len;
4888}
Uwe Kleine-König5a736332013-08-07 13:02:52 +02004889EXPORT_SYMBOL_GPL(generic_access_phys);
Rik van Riel28b2ee22008-07-23 21:27:05 -07004890#endif
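
/*
 * Sketch of how generic_access_phys() is meant to be wired up: a driver that
 * mmaps MMIO or raw physical memory points its vm_operations_struct ->access
 * hook at it so that access_process_vm()/ptrace can peek into the mapping.
 * The structure name below is illustrative.
 */
#if 0
static const struct vm_operations_struct example_phys_vm_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
	.access = generic_access_phys,
#endif
};
#endif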
4891
David Howells0ec76a12006-09-27 01:50:15 -07004892/*
Stephen Wilson206cb632011-03-13 15:49:19 -04004893 * Access another process' address space as given in mm. If non-NULL, use the
4894 * given task for page fault accounting.
David Howells0ec76a12006-09-27 01:50:15 -07004895 */
Eric W. Biederman84d77d32016-11-22 12:06:50 -06004896int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
Lorenzo Stoakes442486e2016-10-13 01:20:18 +01004897 unsigned long addr, void *buf, int len, unsigned int gup_flags)
David Howells0ec76a12006-09-27 01:50:15 -07004898{
David Howells0ec76a12006-09-27 01:50:15 -07004899 struct vm_area_struct *vma;
David Howells0ec76a12006-09-27 01:50:15 -07004900 void *old_buf = buf;
Lorenzo Stoakes442486e2016-10-13 01:20:18 +01004901 int write = gup_flags & FOLL_WRITE;
David Howells0ec76a12006-09-27 01:50:15 -07004902
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07004903 if (mmap_read_lock_killable(mm))
Konstantin Khlebnikov1e426fe2019-07-11 21:00:07 -07004904 return 0;
4905
Simon Arlott183ff222007-10-20 01:27:18 +02004906 /* ignore errors, just check how much was successfully transferred */
David Howells0ec76a12006-09-27 01:50:15 -07004907 while (len) {
4908 int bytes, ret, offset;
4909 void *maddr;
Rik van Riel28b2ee22008-07-23 21:27:05 -07004910 struct page *page = NULL;
David Howells0ec76a12006-09-27 01:50:15 -07004911
Peter Xu64019a22020-08-11 18:39:01 -07004912 ret = get_user_pages_remote(mm, addr, 1,
Lorenzo Stoakes5b56d492016-12-14 15:06:52 -08004913 gup_flags, &page, &vma, NULL);
Rik van Riel28b2ee22008-07-23 21:27:05 -07004914 if (ret <= 0) {
Rik van Rieldbffcd02014-08-06 16:08:12 -07004915#ifndef CONFIG_HAVE_IOREMAP_PROT
4916 break;
4917#else
Rik van Riel28b2ee22008-07-23 21:27:05 -07004918 /*
4919 * Check if this is a VM_IO | VM_PFNMAP VMA, which
4920 * we can access using slightly different code.
4921 */
Rik van Riel28b2ee22008-07-23 21:27:05 -07004922 vma = find_vma(mm, addr);
Michael Ellermanfe936df2011-04-14 15:22:10 -07004923 if (!vma || vma->vm_start > addr)
Rik van Riel28b2ee22008-07-23 21:27:05 -07004924 break;
4925 if (vma->vm_ops && vma->vm_ops->access)
4926 ret = vma->vm_ops->access(vma, addr, buf,
4927 len, write);
4928 if (ret <= 0)
Rik van Riel28b2ee22008-07-23 21:27:05 -07004929 break;
4930 bytes = ret;
Rik van Rieldbffcd02014-08-06 16:08:12 -07004931#endif
David Howells0ec76a12006-09-27 01:50:15 -07004932 } else {
Rik van Riel28b2ee22008-07-23 21:27:05 -07004933 bytes = len;
4934 offset = addr & (PAGE_SIZE-1);
4935 if (bytes > PAGE_SIZE-offset)
4936 bytes = PAGE_SIZE-offset;
4937
4938 maddr = kmap(page);
4939 if (write) {
4940 copy_to_user_page(vma, page, addr,
4941 maddr + offset, buf, bytes);
4942 set_page_dirty_lock(page);
4943 } else {
4944 copy_from_user_page(vma, page, addr,
4945 buf, maddr + offset, bytes);
4946 }
4947 kunmap(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004948 put_page(page);
David Howells0ec76a12006-09-27 01:50:15 -07004949 }
David Howells0ec76a12006-09-27 01:50:15 -07004950 len -= bytes;
4951 buf += bytes;
4952 addr += bytes;
4953 }
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07004954 mmap_read_unlock(mm);
David Howells0ec76a12006-09-27 01:50:15 -07004955
4956 return buf - old_buf;
4957}
Andi Kleen03252912008-01-30 13:33:18 +01004958
Stephen Wilson5ddd36b2011-03-13 15:49:20 -04004959/**
Randy Dunlapae91dbf2011-03-26 13:27:01 -07004960 * access_remote_vm - access another process' address space
Stephen Wilson5ddd36b2011-03-13 15:49:20 -04004961 * @mm: the mm_struct of the target address space
4962 * @addr: start address to access
4963 * @buf: source or destination buffer
4964 * @len: number of bytes to transfer
Lorenzo Stoakes6347e8d2016-10-13 01:20:19 +01004965 * @gup_flags: flags modifying lookup behaviour
Stephen Wilson5ddd36b2011-03-13 15:49:20 -04004966 *
4967 * The caller must hold a reference on @mm.
Mike Rapoporta862f682019-03-05 15:48:42 -08004968 *
4969 * Return: number of bytes copied from source to destination.
Stephen Wilson5ddd36b2011-03-13 15:49:20 -04004970 */
4971int access_remote_vm(struct mm_struct *mm, unsigned long addr,
Lorenzo Stoakes6347e8d2016-10-13 01:20:19 +01004972 void *buf, int len, unsigned int gup_flags)
Stephen Wilson5ddd36b2011-03-13 15:49:20 -04004973{
Lorenzo Stoakes6347e8d2016-10-13 01:20:19 +01004974 return __access_remote_vm(NULL, mm, addr, buf, len, gup_flags);
Stephen Wilson5ddd36b2011-03-13 15:49:20 -04004975}
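
/*
 * Usage sketch for access_remote_vm() (hypothetical helper, error handling
 * trimmed): read a few bytes of another process' memory given an mm the
 * caller already holds a reference on.
 */
#if 0
static int example_peek_remote(struct mm_struct *mm, unsigned long addr)
{
	char buf[64];
	int got;

	got = access_remote_vm(mm, addr, buf, sizeof(buf), FOLL_FORCE);

	return got > 0 ? got : -EFAULT;
}
#endif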
4976
Andi Kleen03252912008-01-30 13:33:18 +01004977/*
 * Access another process' address space.
 * Source/target buffer must be in kernel space.
 * Do not walk the page tables directly; use get_user_pages() instead.
4981 */
4982int access_process_vm(struct task_struct *tsk, unsigned long addr,
Lorenzo Stoakesf307ab62016-10-13 01:20:20 +01004983 void *buf, int len, unsigned int gup_flags)
Stephen Wilson206cb632011-03-13 15:49:19 -04004984{
4985 struct mm_struct *mm;
4986 int ret;
4987
4988 mm = get_task_mm(tsk);
4989 if (!mm)
4990 return 0;
4991
Lorenzo Stoakesf307ab62016-10-13 01:20:20 +01004992 ret = __access_remote_vm(tsk, mm, addr, buf, len, gup_flags);
Lorenzo Stoakes442486e2016-10-13 01:20:18 +01004993
Stephen Wilson206cb632011-03-13 15:49:19 -04004994 mmput(mm);
4995
4996 return ret;
4997}
Catalin Marinasfcd35852016-11-01 14:43:25 -07004998EXPORT_SYMBOL_GPL(access_process_vm);
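
/*
 * Usage sketch for access_process_vm(), modelled on the ptrace poke path;
 * the helper name is hypothetical.
 */
#if 0
static int example_poke_word(struct task_struct *child, unsigned long addr,
			     unsigned long val)
{
	int copied;

	copied = access_process_vm(child, addr, &val, sizeof(val),
				   FOLL_FORCE | FOLL_WRITE);

	return copied == sizeof(val) ? 0 : -EIO;
}
#endif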
Stephen Wilson206cb632011-03-13 15:49:19 -04004999
Andi Kleen03252912008-01-30 13:33:18 +01005000/*
5001 * Print the name of a VMA.
5002 */
5003void print_vma_addr(char *prefix, unsigned long ip)
5004{
5005 struct mm_struct *mm = current->mm;
5006 struct vm_area_struct *vma;
5007
Ingo Molnare8bff742008-02-13 20:21:06 +01005008 /*
	 * We might be running from an atomic context, so we cannot sleep.
Ingo Molnare8bff742008-02-13 20:21:06 +01005010 */
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07005011 if (!mmap_read_trylock(mm))
Ingo Molnare8bff742008-02-13 20:21:06 +01005012 return;
5013
Andi Kleen03252912008-01-30 13:33:18 +01005014 vma = find_vma(mm, ip);
5015 if (vma && vma->vm_file) {
5016 struct file *f = vma->vm_file;
Michal Hocko0a7f6822017-11-15 17:38:59 -08005017 char *buf = (char *)__get_free_page(GFP_NOWAIT);
Andi Kleen03252912008-01-30 13:33:18 +01005018 if (buf) {
Andy Shevchenko2fbc57c2012-12-17 16:01:23 -08005019 char *p;
Andi Kleen03252912008-01-30 13:33:18 +01005020
Miklos Szeredi9bf39ab2015-06-19 10:29:13 +02005021 p = file_path(f, buf, PAGE_SIZE);
Andi Kleen03252912008-01-30 13:33:18 +01005022 if (IS_ERR(p))
5023 p = "?";
Andy Shevchenko2fbc57c2012-12-17 16:01:23 -08005024 printk("%s%s[%lx+%lx]", prefix, kbasename(p),
Andi Kleen03252912008-01-30 13:33:18 +01005025 vma->vm_start,
5026 vma->vm_end - vma->vm_start);
5027 free_page((unsigned long)buf);
5028 }
5029 }
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07005030 mmap_read_unlock(mm);
Andi Kleen03252912008-01-30 13:33:18 +01005031}
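
/*
 * Usage sketch: architectures typically call print_vma_addr() from their
 * "show signal" helpers to name the mapping an instruction pointer falls in.
 * The helper below is illustrative, not lifted from any particular arch.
 */
#if 0
static void example_show_signal_msg(struct pt_regs *regs,
				    struct task_struct *tsk)
{
	printk(KERN_CONT "%s[%d]", tsk->comm, task_pid_nr(tsk));
	print_vma_addr(KERN_CONT " in ", instruction_pointer(regs));
	printk(KERN_CONT "\n");
}
#endif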
Nick Piggin3ee1afa2008-09-10 13:37:17 +02005032
Michael S. Tsirkin662bbcb2013-05-26 17:32:23 +03005033#if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP)
David Hildenbrand9ec23532015-05-11 17:52:07 +02005034void __might_fault(const char *file, int line)
Nick Piggin3ee1afa2008-09-10 13:37:17 +02005035{
Peter Zijlstra95156f02009-01-12 13:02:11 +01005036 /*
	 * Some code (nfs/sunrpc) uses socket ops on kernel memory while
	 * holding the mmap_lock. This is safe because kernel memory doesn't
	 * get paged out, so we'll never actually fault, and the
	 * annotations below would otherwise generate false positives.
5041 */
Al Virodb68ce12017-03-20 21:08:07 -04005042 if (uaccess_kernel())
Peter Zijlstra95156f02009-01-12 13:02:11 +01005043 return;
David Hildenbrand9ec23532015-05-11 17:52:07 +02005044 if (pagefault_disabled())
Michael S. Tsirkin662bbcb2013-05-26 17:32:23 +03005045 return;
David Hildenbrand9ec23532015-05-11 17:52:07 +02005046 __might_sleep(file, line, 0);
5047#if defined(CONFIG_DEBUG_ATOMIC_SLEEP)
Michael S. Tsirkin662bbcb2013-05-26 17:32:23 +03005048 if (current->mm)
Michel Lespinasseda1c55f2020-06-08 21:33:47 -07005049 might_lock_read(&current->mm->mmap_lock);
David Hildenbrand9ec23532015-05-11 17:52:07 +02005050#endif
Nick Piggin3ee1afa2008-09-10 13:37:17 +02005051}
David Hildenbrand9ec23532015-05-11 17:52:07 +02005052EXPORT_SYMBOL(__might_fault);
Nick Piggin3ee1afa2008-09-10 13:37:17 +02005053#endif
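
/*
 * For reference, a sketch of the wrapper callers actually use (the real
 * definition lives in the core headers): uaccess primitives call
 * might_fault() before touching user memory so that sleeping-while-atomic
 * bugs are caught even when the access happens to hit resident pages.
 */
#if 0
#if defined(CONFIG_MMU) && \
	(defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP))
#define might_fault() __might_fault(__FILE__, __LINE__)
#else
#define might_fault() do { } while (0)
#endif
#endif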
Andrea Arcangeli47ad8472011-01-13 15:46:47 -08005054
5055#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
Huang Yingc6ddfb62018-08-17 15:45:46 -07005056/*
5057 * Process all subpages of the specified huge page with the specified
5058 * operation. The target subpage will be processed last to keep its
5059 * cache lines hot.
5060 */
5061static inline void process_huge_page(
5062 unsigned long addr_hint, unsigned int pages_per_huge_page,
5063 void (*process_subpage)(unsigned long addr, int idx, void *arg),
5064 void *arg)
5065{
5066 int i, n, base, l;
5067 unsigned long addr = addr_hint &
5068 ~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
5069
5070 /* Process target subpage last to keep its cache lines hot */
5071 might_sleep();
5072 n = (addr_hint - addr) / PAGE_SIZE;
5073 if (2 * n <= pages_per_huge_page) {
5074 /* If target subpage in first half of huge page */
5075 base = 0;
5076 l = n;
5077 /* Process subpages at the end of huge page */
5078 for (i = pages_per_huge_page - 1; i >= 2 * n; i--) {
5079 cond_resched();
5080 process_subpage(addr + i * PAGE_SIZE, i, arg);
5081 }
5082 } else {
5083 /* If target subpage in second half of huge page */
5084 base = pages_per_huge_page - 2 * (pages_per_huge_page - n);
5085 l = pages_per_huge_page - n;
		/* Process subpages at the beginning of the huge page */
5087 for (i = 0; i < base; i++) {
5088 cond_resched();
5089 process_subpage(addr + i * PAGE_SIZE, i, arg);
5090 }
5091 }
5092 /*
	 * Process the remaining subpages in a left-right-left-right pattern
	 * towards the target subpage
5095 */
5096 for (i = 0; i < l; i++) {
5097 int left_idx = base + i;
5098 int right_idx = base + 2 * l - 1 - i;
5099
5100 cond_resched();
5101 process_subpage(addr + left_idx * PAGE_SIZE, left_idx, arg);
5102 cond_resched();
5103 process_subpage(addr + right_idx * PAGE_SIZE, right_idx, arg);
5104 }
5105}
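
/*
 * Worked example of the ordering above: with pages_per_huge_page == 8 and
 * the faulting address in subpage 2, n == 2, so the first loop touches
 * subpages 7, 6, 5, 4 and the final loop alternates 0, 3, 1, 2.  The target
 * subpage (2) is processed last, keeping its cache lines hot for the return
 * to userspace.
 */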
5106
Andrea Arcangeli47ad8472011-01-13 15:46:47 -08005107static void clear_gigantic_page(struct page *page,
5108 unsigned long addr,
5109 unsigned int pages_per_huge_page)
5110{
5111 int i;
5112 struct page *p = page;
5113
5114 might_sleep();
5115 for (i = 0; i < pages_per_huge_page;
5116 i++, p = mem_map_next(p, page, i)) {
5117 cond_resched();
5118 clear_user_highpage(p, addr + i * PAGE_SIZE);
5119 }
5120}
Huang Yingc6ddfb62018-08-17 15:45:46 -07005121
5122static void clear_subpage(unsigned long addr, int idx, void *arg)
5123{
5124 struct page *page = arg;
5125
5126 clear_user_highpage(page + idx, addr);
5127}
5128
Andrea Arcangeli47ad8472011-01-13 15:46:47 -08005129void clear_huge_page(struct page *page,
Huang Yingc79b57e2017-09-06 16:25:04 -07005130 unsigned long addr_hint, unsigned int pages_per_huge_page)
Andrea Arcangeli47ad8472011-01-13 15:46:47 -08005131{
Huang Yingc79b57e2017-09-06 16:25:04 -07005132 unsigned long addr = addr_hint &
5133 ~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
Andrea Arcangeli47ad8472011-01-13 15:46:47 -08005134
5135 if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
5136 clear_gigantic_page(page, addr, pages_per_huge_page);
5137 return;
5138 }
5139
Huang Yingc6ddfb62018-08-17 15:45:46 -07005140 process_huge_page(addr_hint, pages_per_huge_page, clear_subpage, page);
Andrea Arcangeli47ad8472011-01-13 15:46:47 -08005141}
5142
5143static void copy_user_gigantic_page(struct page *dst, struct page *src,
5144 unsigned long addr,
5145 struct vm_area_struct *vma,
5146 unsigned int pages_per_huge_page)
5147{
5148 int i;
5149 struct page *dst_base = dst;
5150 struct page *src_base = src;
5151
5152 for (i = 0; i < pages_per_huge_page; ) {
5153 cond_resched();
5154 copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma);
5155
5156 i++;
5157 dst = mem_map_next(dst, dst_base, i);
5158 src = mem_map_next(src, src_base, i);
5159 }
5160}
5161
Huang Yingc9f4cd72018-08-17 15:45:49 -07005162struct copy_subpage_arg {
5163 struct page *dst;
5164 struct page *src;
5165 struct vm_area_struct *vma;
5166};
5167
5168static void copy_subpage(unsigned long addr, int idx, void *arg)
5169{
5170 struct copy_subpage_arg *copy_arg = arg;
5171
5172 copy_user_highpage(copy_arg->dst + idx, copy_arg->src + idx,
5173 addr, copy_arg->vma);
5174}
5175
Andrea Arcangeli47ad8472011-01-13 15:46:47 -08005176void copy_user_huge_page(struct page *dst, struct page *src,
Huang Yingc9f4cd72018-08-17 15:45:49 -07005177 unsigned long addr_hint, struct vm_area_struct *vma,
Andrea Arcangeli47ad8472011-01-13 15:46:47 -08005178 unsigned int pages_per_huge_page)
5179{
Huang Yingc9f4cd72018-08-17 15:45:49 -07005180 unsigned long addr = addr_hint &
5181 ~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
5182 struct copy_subpage_arg arg = {
5183 .dst = dst,
5184 .src = src,
5185 .vma = vma,
5186 };
Andrea Arcangeli47ad8472011-01-13 15:46:47 -08005187
5188 if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
5189 copy_user_gigantic_page(dst, src, addr, vma,
5190 pages_per_huge_page);
5191 return;
5192 }
5193
Huang Yingc9f4cd72018-08-17 15:45:49 -07005194 process_huge_page(addr_hint, pages_per_huge_page, copy_subpage, &arg);
Andrea Arcangeli47ad8472011-01-13 15:46:47 -08005195}
Mike Kravetzfa4d75c2017-02-22 15:42:49 -08005196
5197long copy_huge_page_from_user(struct page *dst_page,
5198 const void __user *usr_src,
Mike Kravetz810a56b2017-02-22 15:42:58 -08005199 unsigned int pages_per_huge_page,
5200 bool allow_pagefault)
Mike Kravetzfa4d75c2017-02-22 15:42:49 -08005201{
5202 void *src = (void *)usr_src;
5203 void *page_kaddr;
5204 unsigned long i, rc = 0;
5205 unsigned long ret_val = pages_per_huge_page * PAGE_SIZE;
5206
5207 for (i = 0; i < pages_per_huge_page; i++) {
Mike Kravetz810a56b2017-02-22 15:42:58 -08005208 if (allow_pagefault)
5209 page_kaddr = kmap(dst_page + i);
5210 else
5211 page_kaddr = kmap_atomic(dst_page + i);
Mike Kravetzfa4d75c2017-02-22 15:42:49 -08005212 rc = copy_from_user(page_kaddr,
5213 (const void __user *)(src + i * PAGE_SIZE),
5214 PAGE_SIZE);
Mike Kravetz810a56b2017-02-22 15:42:58 -08005215 if (allow_pagefault)
5216 kunmap(dst_page + i);
5217 else
5218 kunmap_atomic(page_kaddr);
Mike Kravetzfa4d75c2017-02-22 15:42:49 -08005219
5220 ret_val -= (PAGE_SIZE - rc);
5221 if (rc)
5222 break;
5223
5224 cond_resched();
5225 }
5226 return ret_val;
5227}
Andrea Arcangeli47ad8472011-01-13 15:46:47 -08005228#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
Kirill A. Shutemov49076ec2013-11-14 14:31:51 -08005229
Olof Johansson40b64ac2013-12-20 14:28:05 -08005230#if USE_SPLIT_PTE_PTLOCKS && ALLOC_SPLIT_PTLOCKS
Kirill A. Shutemovb35f1812014-01-21 15:49:07 -08005231
5232static struct kmem_cache *page_ptl_cachep;
5233
5234void __init ptlock_cache_init(void)
5235{
5236 page_ptl_cachep = kmem_cache_create("page->ptl", sizeof(spinlock_t), 0,
5237 SLAB_PANIC, NULL);
5238}
5239
Peter Zijlstra539edb52013-11-14 14:31:52 -08005240bool ptlock_alloc(struct page *page)
Kirill A. Shutemov49076ec2013-11-14 14:31:51 -08005241{
5242 spinlock_t *ptl;
5243
Kirill A. Shutemovb35f1812014-01-21 15:49:07 -08005244 ptl = kmem_cache_alloc(page_ptl_cachep, GFP_KERNEL);
Kirill A. Shutemov49076ec2013-11-14 14:31:51 -08005245 if (!ptl)
5246 return false;
Peter Zijlstra539edb52013-11-14 14:31:52 -08005247 page->ptl = ptl;
Kirill A. Shutemov49076ec2013-11-14 14:31:51 -08005248 return true;
5249}
5250
Peter Zijlstra539edb52013-11-14 14:31:52 -08005251void ptlock_free(struct page *page)
Kirill A. Shutemov49076ec2013-11-14 14:31:51 -08005252{
Kirill A. Shutemovb35f1812014-01-21 15:49:07 -08005253 kmem_cache_free(page_ptl_cachep, page->ptl);
Kirill A. Shutemov49076ec2013-11-14 14:31:51 -08005254}
5255#endif
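
/*
 * For context, a simplified sketch of the inline helper (defined in the mm
 * headers) that drives ptlock_alloc()/ptlock_free() when the split PTE lock
 * does not fit into struct page and must be allocated separately.
 */
#if 0
static inline bool ptlock_init(struct page *page)
{
	if (!ptlock_alloc(page))
		return false;
	spin_lock_init(ptlock_ptr(page));
	return true;
}
#endif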