/*
 *	linux/mm/madvise.c
 *
 * Copyright (C) 1999  Linus Torvalds
 * Copyright (C) 2002  Christoph Hellwig
 */

#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/mempolicy.h>
#include <linux/page-isolation.h>
#include <linux/userfaultfd_k.h>
#include <linux/hugetlb.h>
#include <linux/falloc.h>
#include <linux/sched.h>
#include <linux/ksm.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>

#include <asm/tlb.h>

#include "internal.h"

/*
 * Any behaviour which results in changes to the vma->vm_flags needs to
 * take mmap_sem for writing. Others, which simply traverse vmas, need
 * to only take it for reading.
 */
static int madvise_need_mmap_write(int behavior)
{
	switch (behavior) {
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
	case MADV_FREE:
		return 0;
	default:
		/* be safe, default to 1. list exceptions explicitly */
		return 1;
	}
}

/*
 * We can potentially split a vm area into separate
 * areas, each area with its own behavior.
 */
static long madvise_behavior(struct vm_area_struct *vma,
		     struct vm_area_struct **prev,
		     unsigned long start, unsigned long end, int behavior)
{
	struct mm_struct *mm = vma->vm_mm;
	int error = 0;
	pgoff_t pgoff;
	unsigned long new_flags = vma->vm_flags;

	switch (behavior) {
	case MADV_NORMAL:
		new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
		break;
	case MADV_SEQUENTIAL:
		new_flags = (new_flags & ~VM_RAND_READ) | VM_SEQ_READ;
		break;
	case MADV_RANDOM:
		new_flags = (new_flags & ~VM_SEQ_READ) | VM_RAND_READ;
		break;
	case MADV_DONTFORK:
		new_flags |= VM_DONTCOPY;
		break;
	case MADV_DOFORK:
		if (vma->vm_flags & VM_IO) {
			error = -EINVAL;
			goto out;
		}
		new_flags &= ~VM_DONTCOPY;
		break;
	case MADV_DONTDUMP:
		new_flags |= VM_DONTDUMP;
		break;
	case MADV_DODUMP:
		if (new_flags & VM_SPECIAL) {
			error = -EINVAL;
			goto out;
		}
		new_flags &= ~VM_DONTDUMP;
		break;
	case MADV_MERGEABLE:
	case MADV_UNMERGEABLE:
		error = ksm_madvise(vma, start, end, behavior, &new_flags);
		if (error) {
			/*
			 * madvise() returns EAGAIN if kernel resources, such as
			 * slab, are temporarily unavailable.
			 */
			if (error == -ENOMEM)
				error = -EAGAIN;
			goto out;
		}
		break;
	case MADV_HUGEPAGE:
	case MADV_NOHUGEPAGE:
		error = hugepage_madvise(vma, &new_flags, behavior);
		if (error) {
			/*
			 * madvise() returns EAGAIN if kernel resources, such as
			 * slab, are temporarily unavailable.
			 */
			if (error == -ENOMEM)
				error = -EAGAIN;
			goto out;
		}
		break;
	}

	if (new_flags == vma->vm_flags) {
		*prev = vma;
		goto out;
	}

	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma,
			  vma->vm_file, pgoff, vma_policy(vma),
			  vma->vm_userfaultfd_ctx);
	if (*prev) {
		vma = *prev;
		goto success;
	}

	*prev = vma;

	if (start != vma->vm_start) {
		if (unlikely(mm->map_count >= sysctl_max_map_count)) {
			error = -ENOMEM;
			goto out;
		}
		error = __split_vma(mm, vma, start, 1);
		if (error) {
			/*
			 * madvise() returns EAGAIN if kernel resources, such as
			 * slab, are temporarily unavailable.
			 */
			if (error == -ENOMEM)
				error = -EAGAIN;
			goto out;
		}
	}

	if (end != vma->vm_end) {
		if (unlikely(mm->map_count >= sysctl_max_map_count)) {
			error = -ENOMEM;
			goto out;
		}
		error = __split_vma(mm, vma, end, 0);
		if (error) {
			/*
			 * madvise() returns EAGAIN if kernel resources, such as
			 * slab, are temporarily unavailable.
			 */
			if (error == -ENOMEM)
				error = -EAGAIN;
			goto out;
		}
	}

success:
	/*
	 * vm_flags is protected by the mmap_sem held in write mode.
	 */
	vma->vm_flags = new_flags;
out:
	return error;
}
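
/*
 * Userspace sketch (illustrative, not kernel code): MADV_DONTFORK sets
 * VM_DONTCOPY above, which keeps the range out of a child's address
 * space across fork(), typically so a COW fault in the child cannot
 * move pages pinned by get_user_pages(). Names are hypothetical and
 * error handling is omitted:
 *
 *	buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	madvise(buf, len, MADV_DONTFORK);
 *	if (fork() == 0)
 *		use(buf);
 *
 * The child's access faults with SIGSEGV, since the vma was not copied.
 */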

#ifdef CONFIG_SWAP
static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
	unsigned long end, struct mm_walk *walk)
{
	pte_t *orig_pte;
	struct vm_area_struct *vma = walk->private;
	unsigned long index;

	if (pmd_none_or_trans_huge_or_clear_bad(pmd))
		return 0;

	for (index = start; index != end; index += PAGE_SIZE) {
		pte_t pte;
		swp_entry_t entry;
		struct page *page;
		spinlock_t *ptl;

		orig_pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl);
		pte = *(orig_pte + ((index - start) / PAGE_SIZE));
		pte_unmap_unlock(orig_pte, ptl);

		if (pte_present(pte) || pte_none(pte))
			continue;
		entry = pte_to_swp_entry(pte);
		if (unlikely(non_swap_entry(entry)))
			continue;

		page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
							     vma, index);
		if (page)
			put_page(page);
	}

	return 0;
}

static void force_swapin_readahead(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	struct mm_walk walk = {
		.mm = vma->vm_mm,
		.pmd_entry = swapin_walk_pmd_entry,
		.private = vma,
	};

	walk_page_range(start, end, &walk);

	lru_add_drain();	/* Push any new pages onto the LRU now */
}

static void force_shm_swapin_readahead(struct vm_area_struct *vma,
		unsigned long start, unsigned long end,
		struct address_space *mapping)
{
	pgoff_t index;
	struct page *page;
	swp_entry_t swap;

	for (; start < end; start += PAGE_SIZE) {
		index = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;

		page = find_get_entry(mapping, index);
		if (!radix_tree_exceptional_entry(page)) {
			if (page)
				put_page(page);
			continue;
		}
		swap = radix_to_swp_entry(page);
		page = read_swap_cache_async(swap, GFP_HIGHUSER_MOVABLE,
							     NULL, 0);
		if (page)
			put_page(page);
	}

	lru_add_drain();	/* Push any new pages onto the LRU now */
}
#endif		/* CONFIG_SWAP */

/*
 * Schedule all required I/O operations.  Do not wait for completion.
 */
static long madvise_willneed(struct vm_area_struct *vma,
			     struct vm_area_struct **prev,
			     unsigned long start, unsigned long end)
{
	struct file *file = vma->vm_file;

#ifdef CONFIG_SWAP
	if (!file) {
		*prev = vma;
		force_swapin_readahead(vma, start, end);
		return 0;
	}

	if (shmem_mapping(file->f_mapping)) {
		*prev = vma;
		force_shm_swapin_readahead(vma, start, end,
					file->f_mapping);
		return 0;
	}
#else
	if (!file)
		return -EBADF;
#endif

	if (IS_DAX(file_inode(file))) {
		/* no bad return value, but ignore advice */
		return 0;
	}

	*prev = vma;
	start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	if (end > vma->vm_end)
		end = vma->vm_end;
	end = ((end - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;

	force_page_cache_readahead(file->f_mapping, file, start, end - start);
	return 0;
}
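
/*
 * Userspace sketch (illustrative, not kernel code): MADV_WILLNEED lets
 * an application overlap readahead with other work. The call queues the
 * I/O through the path above and returns without waiting; helper names
 * are hypothetical:
 *
 *	void *map = mmap(NULL, len, PROT_READ, MAP_PRIVATE, fd, 0);
 *	madvise(map, len, MADV_WILLNEED);
 *	do_other_work();
 *	scan(map, len);
 *
 * By the time scan() runs, the pages are more likely to be resident.
 */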

static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
{
	struct mmu_gather *tlb = walk->private;
	struct mm_struct *mm = tlb->mm;
	struct vm_area_struct *vma = walk->vma;
	spinlock_t *ptl;
	pte_t *orig_pte, *pte, ptent;
	struct page *page;
	int nr_swap = 0;
	unsigned long next;

	next = pmd_addr_end(addr, end);
	if (pmd_trans_huge(*pmd))
		if (madvise_free_huge_pmd(tlb, vma, pmd, addr, next))
			goto next;

	if (pmd_trans_unstable(pmd))
		return 0;

	tlb_remove_check_page_size_change(tlb, PAGE_SIZE);
	orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	arch_enter_lazy_mmu_mode();
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;

		if (pte_none(ptent))
			continue;
		/*
		 * If the pte holds a swap entry, just clear the page table
		 * entry: swapping the page back in is more expensive than
		 * (page allocation + zeroing).
		 */
		if (!pte_present(ptent)) {
			swp_entry_t entry;

			entry = pte_to_swp_entry(ptent);
			if (non_swap_entry(entry))
				continue;
			nr_swap--;
			free_swap_and_cache(entry);
			pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
			continue;
		}

		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		/*
		 * If the pmd isn't transhuge but the page is THP and
		 * is owned by only this process, split it and
		 * deactivate all pages.
		 */
		if (PageTransCompound(page)) {
			if (page_mapcount(page) != 1)
				goto out;
			get_page(page);
			if (!trylock_page(page)) {
				put_page(page);
				goto out;
			}
			pte_unmap_unlock(orig_pte, ptl);
			if (split_huge_page(page)) {
				unlock_page(page);
				put_page(page);
				pte_offset_map_lock(mm, pmd, addr, &ptl);
				goto out;
			}
			put_page(page);
			unlock_page(page);
			pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
			pte--;
			addr -= PAGE_SIZE;
			continue;
		}

		VM_BUG_ON_PAGE(PageTransCompound(page), page);

		if (PageSwapCache(page) || PageDirty(page)) {
			if (!trylock_page(page))
				continue;
			/*
			 * If the page is shared with others, we cannot clear
			 * its PG_dirty flag.
			 */
			if (page_mapcount(page) != 1) {
				unlock_page(page);
				continue;
			}

			if (PageSwapCache(page) && !try_to_free_swap(page)) {
				unlock_page(page);
				continue;
			}

			ClearPageDirty(page);
			unlock_page(page);
		}

		if (pte_young(ptent) || pte_dirty(ptent)) {
			/*
			 * Some architectures (e.g. PPC) don't update the TLB
			 * with set_pte_at() and tlb_remove_tlb_entry(), so for
			 * portability, remap the pte as old and clean after
			 * clearing it.
			 */
			ptent = ptep_get_and_clear_full(mm, addr, pte,
							tlb->fullmm);

			ptent = pte_mkold(ptent);
			ptent = pte_mkclean(ptent);
			set_pte_at(mm, addr, pte, ptent);
			if (PageActive(page))
				deactivate_page(page);
			tlb_remove_tlb_entry(tlb, pte, addr);
		}
	}
out:
	if (nr_swap) {
		if (current->mm == mm)
			sync_mm_rss(mm);

		add_mm_counter(mm, MM_SWAPENTS, nr_swap);
	}
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(orig_pte, ptl);
	cond_resched();
next:
	return 0;
}

static void madvise_free_page_range(struct mmu_gather *tlb,
			     struct vm_area_struct *vma,
			     unsigned long addr, unsigned long end)
{
	struct mm_walk free_walk = {
		.pmd_entry = madvise_free_pte_range,
		.mm = vma->vm_mm,
		.private = tlb,
	};

	tlb_start_vma(tlb, vma);
	walk_page_range(addr, end, &free_walk);
	tlb_end_vma(tlb, vma);
}

static int madvise_free_single_vma(struct vm_area_struct *vma,
			unsigned long start_addr, unsigned long end_addr)
{
	unsigned long start, end;
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_gather tlb;

	if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
		return -EINVAL;

	/* MADV_FREE works only for anonymous vmas at the moment */
	if (!vma_is_anonymous(vma))
		return -EINVAL;

	start = max(vma->vm_start, start_addr);
	if (start >= vma->vm_end)
		return -EINVAL;
	end = min(vma->vm_end, end_addr);
	if (end <= vma->vm_start)
		return -EINVAL;

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm, start, end);
	update_hiwater_rss(mm);

	mmu_notifier_invalidate_range_start(mm, start, end);
	madvise_free_page_range(&tlb, vma, start, end);
	mmu_notifier_invalidate_range_end(mm, start, end);
	tlb_finish_mmu(&tlb, start, end);

	return 0;
}

static long madvise_free(struct vm_area_struct *vma,
			     struct vm_area_struct **prev,
			     unsigned long start, unsigned long end)
{
	*prev = vma;
	return madvise_free_single_vma(vma, start, end);
}
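
/*
 * Userspace sketch (illustrative, not kernel code): allocators use
 * MADV_FREE to return freed heap pages lazily. The kernel may reclaim
 * them under memory pressure; until then, touching the range again
 * costs no fault, and a write cancels the free:
 *
 *	madvise(chunk, chunk_len, MADV_FREE);
 *
 * A later read returns either the old contents or zeroes, depending on
 * whether reclaim got there first, so the data must be treated as
 * undefined until the range is rewritten.
 */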

/*
 * Application no longer needs these pages.  If the pages are dirty,
 * it's OK to just throw them away.  The app will be more careful about
 * data it wants to keep.  Be sure to free swap resources too.  The
 * zap_page_range call sets things up for shrink_active_list to actually free
 * these pages later if no one else has touched them in the meantime,
 * although we could add these pages to a global reuse list for
 * shrink_active_list to pick up before reclaiming other pages.
 *
 * NB: This interface discards data rather than pushes it out to swap,
 * as some implementations do.  This has performance implications for
 * applications like large transactional databases which want to discard
 * pages in anonymous maps after committing to backing store the data
 * that was kept in them.  There is no reason to write this data out to
 * the swap area if the application is discarding it.
 *
 * An interface that causes the system to free clean pages and flush
 * dirty pages is already available as msync(MS_INVALIDATE).
 */
static long madvise_dontneed(struct vm_area_struct *vma,
			     struct vm_area_struct **prev,
			     unsigned long start, unsigned long end)
{
	*prev = vma;
	if (!can_madv_dontneed_vma(vma))
		return -EINVAL;

	userfaultfd_remove(vma, prev, start, end);
	zap_page_range(vma, start, end - start);
	return 0;
}
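
/*
 * Userspace sketch (illustrative, not kernel code): after MADV_DONTNEED
 * the range stays mapped, but the pages are discarded immediately; for
 * a private anonymous mapping, the next read faults in zero-filled
 * pages:
 *
 *	memset(buf, 0xaa, len);
 *	madvise(buf, len, MADV_DONTNEED);
 *	assert(((unsigned char *)buf)[0] == 0);
 */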

/*
 * Application wants to free up the pages and associated backing store.
 * This is effectively punching a hole into the middle of a file.
 */
static long madvise_remove(struct vm_area_struct *vma,
				struct vm_area_struct **prev,
				unsigned long start, unsigned long end)
{
	loff_t offset;
	int error;
	struct file *f;

	*prev = NULL;	/* tell sys_madvise we drop mmap_sem */

	if (vma->vm_flags & VM_LOCKED)
		return -EINVAL;

	f = vma->vm_file;

	if (!f || !f->f_mapping || !f->f_mapping->host) {
		return -EINVAL;
	}

	if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE))
		return -EACCES;

	offset = (loff_t)(start - vma->vm_start)
			+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);

	/*
	 * Filesystem's fallocate may need to take i_mutex.  We need to
	 * explicitly grab a reference because the vma (and hence the
	 * vma's reference to the file) can go away as soon as we drop
	 * mmap_sem.
	 */
	get_file(f);
	userfaultfd_remove(vma, prev, start, end);
	up_read(&current->mm->mmap_sem);
	error = vfs_fallocate(f,
				FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
				offset, end - start);
	fput(f);
	down_read(&current->mm->mmap_sem);
	return error;
}
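
/*
 * Userspace sketch (illustrative, not kernel code): MADV_REMOVE punches
 * a hole in the backing object of a writable shared mapping, much like
 * calling fallocate(FALLOC_FL_PUNCH_HOLE) on the file itself. off and
 * hole_len are hypothetical, page-aligned values:
 *
 *	char *map = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			 fd, 0);
 *	madvise(map + off, hole_len, MADV_REMOVE);
 *
 * Both the pages and the underlying blocks for the hole are freed, and
 * subsequent reads of that range return zeroes.
 */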

#ifdef CONFIG_MEMORY_FAILURE
/*
 * Error injection support for memory error handling.
 */
static int madvise_hwpoison(int bhv, unsigned long start, unsigned long end)
{
	struct page *p;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	for (; start < end; start += PAGE_SIZE <<
				compound_order(compound_head(p))) {
		int ret;

		ret = get_user_pages_fast(start, 1, 0, &p);
		if (ret != 1)
			return ret;

		if (PageHWPoison(p)) {
			put_page(p);
			continue;
		}
		if (bhv == MADV_SOFT_OFFLINE) {
			pr_info("Soft offlining page %#lx at %#lx\n",
				page_to_pfn(p), start);
			ret = soft_offline_page(p, MF_COUNT_INCREASED);
			if (ret)
				return ret;
			continue;
		}
		pr_info("Injecting memory failure for page %#lx at %#lx\n",
			page_to_pfn(p), start);
		ret = memory_failure(page_to_pfn(p), 0, MF_COUNT_INCREASED);
		if (ret)
			return ret;
	}
	return 0;
}
#endif
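
/*
 * Userspace sketch (illustrative, not kernel code): the injection
 * advices above exercise the memory-failure paths. They require
 * CAP_SYS_ADMIN and a kernel built with CONFIG_MEMORY_FAILURE:
 *
 *	madvise(page, pagesize, MADV_HWPOISON);
 *
 * The page is then handled as if the hardware had reported an
 * uncorrected error there; a later access typically raises SIGBUS.
 */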

static long
madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
		unsigned long start, unsigned long end, int behavior)
{
	switch (behavior) {
	case MADV_REMOVE:
		return madvise_remove(vma, prev, start, end);
	case MADV_WILLNEED:
		return madvise_willneed(vma, prev, start, end);
	case MADV_FREE:
		/*
		 * XXX: In this implementation, MADV_FREE works like
		 * MADV_DONTNEED on a swapless system or when swap is full.
		 */
		if (get_nr_swap_pages() > 0)
			return madvise_free(vma, prev, start, end);
		/* passthrough */
	case MADV_DONTNEED:
		return madvise_dontneed(vma, prev, start, end);
	default:
		return madvise_behavior(vma, prev, start, end, behavior);
	}
}

static bool
madvise_behavior_valid(int behavior)
{
	switch (behavior) {
	case MADV_DOFORK:
	case MADV_DONTFORK:
	case MADV_NORMAL:
	case MADV_SEQUENTIAL:
	case MADV_RANDOM:
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
	case MADV_FREE:
#ifdef CONFIG_KSM
	case MADV_MERGEABLE:
	case MADV_UNMERGEABLE:
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	case MADV_HUGEPAGE:
	case MADV_NOHUGEPAGE:
#endif
	case MADV_DONTDUMP:
	case MADV_DODUMP:
		return true;

	default:
		return false;
	}
}

/*
 * The madvise(2) system call.
 *
 * Applications can use madvise() to advise the kernel how it should
 * handle paging I/O in this VM area.  The idea is to help the kernel
 * use appropriate read-ahead and caching techniques.  The information
 * provided is advisory only, and can be safely disregarded by the
 * kernel without affecting the correct operation of the application.
 *
 * behavior values:
 *  MADV_NORMAL - the default behavior is to read clusters.  This
 *		results in some read-ahead and read-behind.
 *  MADV_RANDOM - the system should read the minimum amount of data
 *		on any access, since it is unlikely that the application
 *		will need more than what it asks for.
 *  MADV_SEQUENTIAL - pages in the given range will probably be accessed
 *		once, so they can be aggressively read ahead, and
 *		can be freed soon after they are accessed.
 *  MADV_WILLNEED - the application is notifying the system to read
 *		some pages ahead.
 *  MADV_DONTNEED - the application is finished with the given range,
 *		so the kernel can free resources associated with it.
 *  MADV_FREE - the application marks pages in the given range as lazy free,
 *		where actual purges are postponed until memory pressure happens.
 *  MADV_REMOVE - the application wants to free up the given range of
 *		pages and associated backing store.
 *  MADV_DONTFORK - omit this area from child's address space when forking:
 *		typically, to avoid COWing pages pinned by get_user_pages().
 *  MADV_DOFORK - cancel MADV_DONTFORK: no longer omit this area when forking.
 *  MADV_HWPOISON - trigger memory error handler as if the given memory range
 *		were corrupted by unrecoverable hardware memory failure.
 *  MADV_SOFT_OFFLINE - try to soft-offline the given range of memory.
 *  MADV_MERGEABLE - the application recommends that KSM try to merge pages in
 *		this area with pages of identical content from other such areas.
 *  MADV_UNMERGEABLE - cancel MADV_MERGEABLE: no longer merge pages with others.
 *  MADV_HUGEPAGE - the application wants to back the given range by transparent
 *		huge pages in the future.  Existing pages might be coalesced and
 *		new pages might be allocated as THP.
 *  MADV_NOHUGEPAGE - mark the given range as not worth being backed by
 *		transparent huge pages so the existing pages will not be
 *		coalesced into THP and new pages will not be allocated as THP.
 *  MADV_DONTDUMP - the application wants to prevent pages in the given range
 *		from being included in its core dump.
 *  MADV_DODUMP - cancel MADV_DONTDUMP: no longer exclude from core dump.
 *
 * return values:
 *  zero    - success
 *  -EINVAL - start + len < 0, start is not page-aligned,
 *		"behavior" is not a valid value, or application
 *		is attempting to release locked or shared pages.
 *  -ENOMEM - addresses in the specified range are not currently
 *		mapped, or are outside the AS of the process.
 *  -EIO    - an I/O error occurred while paging in data.
 *  -EBADF  - map exists, but area maps something that isn't a file.
 *  -EAGAIN - a kernel resource was temporarily unavailable.
 */
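
/*
 * Userspace sketch (illustrative, not kernel code): a typical call,
 * advising sequential access over a whole file mapping. start must be
 * page-aligned, and the advice covers [start, start + len_in):
 *
 *	if (madvise(map, file_len, MADV_SEQUENTIAL) == -1)
 *		perror("madvise");
 */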
SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
{
	unsigned long end, tmp;
	struct vm_area_struct *vma, *prev;
	int unmapped_error = 0;
	int error = -EINVAL;
	int write;
	size_t len;
	struct blk_plug plug;

#ifdef CONFIG_MEMORY_FAILURE
	if (behavior == MADV_HWPOISON || behavior == MADV_SOFT_OFFLINE)
		return madvise_hwpoison(behavior, start, start+len_in);
#endif
	if (!madvise_behavior_valid(behavior))
		return error;

	if (start & ~PAGE_MASK)
		return error;
	len = (len_in + ~PAGE_MASK) & PAGE_MASK;

	/* Check to see whether len was rounded up from small -ve to zero */
	if (len_in && !len)
		return error;

	end = start + len;
	if (end < start)
		return error;

	error = 0;
	if (end == start)
		return error;

	write = madvise_need_mmap_write(behavior);
	if (write) {
		if (down_write_killable(&current->mm->mmap_sem))
			return -EINTR;
	} else {
		down_read(&current->mm->mmap_sem);
	}

	/*
	 * If the interval [start,end) covers some unmapped address
	 * ranges, just ignore them, but return -ENOMEM at the end.
	 * - different from the way of handling in mlock etc.
	 */
	vma = find_vma_prev(current->mm, start, &prev);
	if (vma && start > vma->vm_start)
		prev = vma;

	blk_start_plug(&plug);
	for (;;) {
		/* Still start < end. */
		error = -ENOMEM;
		if (!vma)
			goto out;

		/* Here start < (end|vma->vm_end). */
		if (start < vma->vm_start) {
			unmapped_error = -ENOMEM;
			start = vma->vm_start;
			if (start >= end)
				goto out;
		}

		/* Here vma->vm_start <= start < (end|vma->vm_end) */
		tmp = vma->vm_end;
		if (end < tmp)
			tmp = end;

		/* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */
		error = madvise_vma(vma, &prev, start, tmp, behavior);
		if (error)
			goto out;
		start = tmp;
		if (prev && start < prev->vm_end)
			start = prev->vm_end;
		error = unmapped_error;
		if (start >= end)
			goto out;
		if (prev)
			vma = prev->vm_next;
		else	/* madvise_remove dropped mmap_sem */
			vma = find_vma(current->mm, start);
	}
out:
	blk_finish_plug(&plug);
	if (write)
		up_write(&current->mm->mmap_sem);
	else
		up_read(&current->mm->mmap_sem);

	return error;
}