// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/mlock.c
 *
 * (C) Copyright 1995 Linus Torvalds
 * (C) Copyright 2002 Christoph Hellwig
 */

#include <linux/capability.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/sched/user.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/pgsize_migration.h>
#include <linux/pagevec.h>
#include <linux/mempolicy.h>
#include <linux/syscalls.h>
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/rmap.h>
#include <linux/mmzone.h>
#include <linux/hugetlb.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/secretmem.h>
#ifdef CONFIG_AMLOGIC_PIN_LOCKED_FILE
#include <linux/amlogic/pin_file.h>
#endif

#include "internal.h"

bool can_do_mlock(void)
{
	if (rlimit(RLIMIT_MEMLOCK) != 0)
		return true;
	if (capable(CAP_IPC_LOCK))
		return true;
	return false;
}
EXPORT_SYMBOL(can_do_mlock);
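
/*
 * Illustrative userspace view of the check above (a sketch, not part of
 * the kernel): mlock() may be used when RLIMIT_MEMLOCK is non-zero or
 * when the caller has CAP_IPC_LOCK. A minimal program probing the limit:
 *
 *	#include <stdio.h>
 *	#include <sys/resource.h>
 *
 *	int main(void)
 *	{
 *		struct rlimit rl;
 *
 *		if (getrlimit(RLIMIT_MEMLOCK, &rl) == 0)
 *			printf("RLIMIT_MEMLOCK: cur=%llu max=%llu\n",
 *			       (unsigned long long)rl.rlim_cur,
 *			       (unsigned long long)rl.rlim_max);
 *		return 0;
 *	}
 */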

/*
 * Mlocked pages are marked with the PageMlocked() flag for efficient testing
 * in vmscan and, possibly, the fault path; and to support semi-accurate
 * statistics.
 *
 * An mlocked page [PageMlocked(page)] is unevictable. As such, it will
 * be placed on the LRU "unevictable" list, rather than the [in]active lists.
 * The unevictable list is an LRU sibling list to the [in]active lists.
 * PageUnevictable is set to indicate the unevictable state.
 *
 * When lazy mlocking via vmscan, it is important to ensure that the
 * vma's VM_LOCKED status is not concurrently being modified, otherwise we
 * may have mlocked a page that is being munlocked. So lazy mlock must take
 * the mmap_lock for read, and verify that the vma really is locked
 * (see mm/rmap.c).
 */

/*
 * LRU accounting for clear_page_mlock()
 */
void clear_page_mlock(struct page *page)
{
	int nr_pages;

	if (!TestClearPageMlocked(page))
		return;

	nr_pages = thp_nr_pages(page);
	mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
	count_vm_events(UNEVICTABLE_PGCLEARED, nr_pages);
	/*
	 * The previous TestClearPageMlocked() corresponds to the smp_mb()
	 * in __pagevec_lru_add_fn().
	 *
	 * See __pagevec_lru_add_fn for more explanation.
	 */
	if (!isolate_lru_page(page)) {
		putback_lru_page(page);
	} else {
		/*
		 * We lost the race. The page has already been moved to the
		 * evictable list.
		 */
		if (PageUnevictable(page))
			count_vm_events(UNEVICTABLE_PGSTRANDED, nr_pages);
	}
}

/*
 * Mark the page as mlocked if it is not already. If the page is on the LRU,
 * isolate it and put it back so that it moves to the unevictable list.
 */
void mlock_vma_page(struct page *page)
{
	/* Serialize with page migration */
	BUG_ON(!PageLocked(page));

	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(PageCompound(page) && PageDoubleMap(page), page);

	if (!TestSetPageMlocked(page)) {
		int nr_pages = thp_nr_pages(page);

		mod_zone_page_state(page_zone(page), NR_MLOCK, nr_pages);
		count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);
		if (!isolate_lru_page(page))
			putback_lru_page(page);
	}
}

/*
 * Finish munlock after successful page isolation
 *
 * Page must be locked. This is a wrapper for page_mlock()
 * and putback_lru_page() with munlock accounting.
 */
static void __munlock_isolated_page(struct page *page)
{
	/*
	 * Optimization: if the page was mapped just once, that's our mapping
	 * and we don't need to check all the other vmas.
	 */
	if (page_mapcount(page) > 1)
		page_mlock(page);

	/* Did page_mlock() succeed or punt? */
	if (!PageMlocked(page))
		count_vm_events(UNEVICTABLE_PGMUNLOCKED, thp_nr_pages(page));

	putback_lru_page(page);
}

/*
 * Accounting for page isolation failure during munlock
 *
 * Performs accounting when page isolation fails in munlock. There is nothing
 * else to do because it means some other task has already removed the page
 * from the LRU. putback_lru_page() will take care of removing the page from
 * the unevictable list, if necessary. vmscan [page_referenced()] will move
 * the page back to the unevictable list if some other vma has it mlocked.
 */
static void __munlock_isolation_failed(struct page *page)
{
	int nr_pages = thp_nr_pages(page);

	if (PageUnevictable(page))
		__count_vm_events(UNEVICTABLE_PGSTRANDED, nr_pages);
	else
		__count_vm_events(UNEVICTABLE_PGMUNLOCKED, nr_pages);
}

/**
 * munlock_vma_page - munlock a vma page
 * @page: page to be unlocked, either a normal page or THP page head
 *
 * Returns the size of the page as a page mask (0 for a normal page,
 * HPAGE_PMD_NR - 1 for a THP head page).
 *
 * Called from the munlock()/munmap() path with the page supposedly on the
 * LRU. When we munlock a page, because the vma where we found the page is
 * being munlock()ed or munmap()ed, we want to check whether other vmas hold
 * the page locked so that we can leave it on the unevictable lru list and
 * not bother vmscan with it. However, to walk the page's rmap list in
 * page_mlock() we must isolate the page from the LRU. If some other
 * task has removed the page from the LRU, we won't be able to do that.
 * So we clear the PageMlocked as we might not get another chance. If we
 * can't isolate the page, we leave it for putback_lru_page() and vmscan
 * [page_referenced()/try_to_unmap()] to deal with.
 */
unsigned int munlock_vma_page(struct page *page)
{
	int nr_pages;

	/* For page_mlock() and to serialize with page migration */
	BUG_ON(!PageLocked(page));
	VM_BUG_ON_PAGE(PageTail(page), page);

	if (!TestClearPageMlocked(page)) {
		/* Potentially, PTE-mapped THP: do not skip the remaining PTEs */
		return 0;
	}

	nr_pages = thp_nr_pages(page);
	mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);

	if (!isolate_lru_page(page))
		__munlock_isolated_page(page);
	else
		__munlock_isolation_failed(page);

	return nr_pages - 1;
}

/*
 * convert get_user_pages() return value to posix mlock() error
 */
#ifndef CONFIG_AMLOGIC_PIN_LOCKED_FILE_V2
static int __mlock_posix_error_return(long retval)
{
	if (retval == -EFAULT)
		retval = -ENOMEM;
	else if (retval == -ENOMEM)
		retval = -EAGAIN;
	return retval;
}
#endif
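
/*
 * Illustrative userspace view of the mapping above (a sketch, not part
 * of the kernel): an unmapped address in the locked range surfaces as
 * ENOMEM, per the POSIX-mandated mlock() errors. 0x1000 is assumed to
 * be unmapped here:
 *
 *	#include <errno.h>
 *	#include <stdio.h>
 *	#include <sys/mman.h>
 *
 *	int main(void)
 *	{
 *		void *bogus = (void *)0x1000;
 *
 *		if (mlock(bogus, 4096) != 0 && errno == ENOMEM)
 *			printf("mlock of unmapped range: ENOMEM\n");
 *		return 0;
 *	}
 */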

/*
 * Prepare page for fast batched LRU putback via __putback_lru_fast()
 *
 * The fast path is available only for evictable pages with a single mapping.
 * Then we can bypass the per-cpu pvec and get better performance.
 * When mapcount > 1 we need page_mlock() which can fail.
 * When !page_evictable(), we need the full redo logic of putback_lru_page to
 * avoid leaving an evictable page on the unevictable list.
 *
 * In case of success, @page is added to @pvec and @pgrescued is incremented
 * in case that the page was previously unevictable. @page is also unlocked.
 */
static bool __putback_lru_fast_prepare(struct page *page, struct pagevec *pvec,
		int *pgrescued)
{
	VM_BUG_ON_PAGE(PageLRU(page), page);
	VM_BUG_ON_PAGE(!PageLocked(page), page);

	if (page_mapcount(page) <= 1 && page_evictable(page)) {
		pagevec_add(pvec, page);
		if (TestClearPageUnevictable(page))
			(*pgrescued)++;
		unlock_page(page);
		return true;
	}

	return false;
}

/*
 * Putback multiple evictable pages to the LRU
 *
 * Batched putback of evictable pages that bypasses the per-cpu pvec. Some of
 * the pages might have meanwhile become unevictable but that is OK.
 */
static void __putback_lru_fast(struct pagevec *pvec, int pgrescued)
{
	count_vm_events(UNEVICTABLE_PGMUNLOCKED, pagevec_count(pvec));
	/*
	 * __pagevec_lru_add() calls release_pages() so we don't call
	 * put_page() explicitly
	 */
	__pagevec_lru_add(pvec);
	count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
}

/*
 * Munlock a batch of pages from the same zone
 *
 * The work is split into two main phases. The first phase clears the Mlocked
 * flag and attempts to isolate the pages, all under a single zone lru lock.
 * The second phase finishes the munlock only for pages where isolation
 * succeeded.
 *
 * Note that the pagevec may be modified during the process.
 */
static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
{
	int i;
	int nr = pagevec_count(pvec);
	int delta_munlocked = -nr;
	struct pagevec pvec_putback;
	struct lruvec *lruvec = NULL;
	int pgrescued = 0;

	pagevec_init(&pvec_putback);

	/* Phase 1: page isolation */
	for (i = 0; i < nr; i++) {
		struct page *page = pvec->pages[i];

		if (TestClearPageMlocked(page)) {
			/*
			 * We already have a pin from follow_page_mask()
			 * so we can spare the get_page() here.
			 */
			if (TestClearPageLRU(page)) {
				lruvec = relock_page_lruvec_irq(page, lruvec);
				del_page_from_lru_list(page, lruvec);
				continue;
			} else
				__munlock_isolation_failed(page);
		} else {
			delta_munlocked++;
		}

		/*
		 * We won't be munlocking this page in the next phase
		 * but we still need to release the follow_page_mask()
		 * pin. We cannot do it under lru_lock however. If it's
		 * the last pin, __page_cache_release() would deadlock.
		 */
		pagevec_add(&pvec_putback, pvec->pages[i]);
		pvec->pages[i] = NULL;
	}
	if (lruvec) {
		__mod_zone_page_state(zone, NR_MLOCK, delta_munlocked);
		unlock_page_lruvec_irq(lruvec);
	} else if (delta_munlocked) {
		mod_zone_page_state(zone, NR_MLOCK, delta_munlocked);
	}

	/* Now we can release pins of pages that we are not munlocking */
	pagevec_release(&pvec_putback);

	/* Phase 2: page munlock */
	for (i = 0; i < nr; i++) {
		struct page *page = pvec->pages[i];

		if (page) {
			lock_page(page);
			if (!__putback_lru_fast_prepare(page, &pvec_putback,
					&pgrescued)) {
				/*
				 * Slow path. We don't want to lose the last
				 * pin before unlock_page()
				 */
				get_page(page); /* for putback_lru_page() */
				__munlock_isolated_page(page);
				unlock_page(page);
				put_page(page); /* from follow_page_mask() */
			}
		}
	}

	/*
	 * Phase 3: page putback for pages that qualified for the fast path
	 * This will also call put_page() to return pin from follow_page_mask()
	 */
	if (pagevec_count(&pvec_putback))
		__putback_lru_fast(&pvec_putback, pgrescued);
}

/*
 * Fill up pagevec for __munlock_pagevec using pte walk
 *
 * The function expects that the struct page corresponding to @start address is
 * a non-THP page already pinned and in the @pvec, and that it belongs to @zone.
 *
 * The rest of @pvec is filled by subsequent pages within the same pmd and same
 * zone, as long as the ptes are present and vm_normal_page() succeeds. These
 * pages also get pinned.
 *
 * Returns the address of the next page that should be scanned. This equals
 * @start + PAGE_SIZE when no page could be added by the pte walk.
 */
static unsigned long __munlock_pagevec_fill(struct pagevec *pvec,
		struct vm_area_struct *vma, struct zone *zone,
		unsigned long start, unsigned long end)
{
	pte_t *pte;
	spinlock_t *ptl;

	/*
	 * Initialize pte walk starting at the already pinned page where we
	 * are sure that there is a pte, as it was pinned under the same
	 * mmap_lock write op.
	 */
	pte = get_locked_pte(vma->vm_mm, start, &ptl);
	/* Make sure we do not cross the page table boundary */
	end = pgd_addr_end(start, end);
	end = p4d_addr_end(start, end);
	end = pud_addr_end(start, end);
	end = pmd_addr_end(start, end);

	/* The page next to the pinned page is the first we will try to get */
	start += PAGE_SIZE;
	while (start < end) {
		struct page *page = NULL;
		pte++;
		if (pte_present(*pte))
			page = vm_normal_page(vma, start, *pte);
		/*
		 * Break if the page could not be obtained or the page's
		 * node+zone does not match
		 */
		if (!page || page_zone(page) != zone)
			break;

		/*
		 * Do not use pagevec for PTE-mapped THP,
		 * munlock_vma_pages_range() will handle them.
		 */
		if (PageTransCompound(page))
			break;

		get_page(page);
		/*
		 * Increase the address that will be returned *before* the
		 * eventual break due to pvec becoming full by adding the page
		 */
		start += PAGE_SIZE;
		if (pagevec_add(pvec, page) == 0)
			break;
	}
	pte_unmap_unlock(pte, ptl);
	return start;
}

/*
 * munlock_vma_pages_range() - munlock all pages in the vma range.
 * @vma - vma containing range to be munlock()ed.
 * @start - start address in @vma of the range
 * @end - end of range in @vma.
 *
 * For mremap(), munmap() and exit().
 *
 * Called with @vma VM_LOCKED.
 *
 * Returns with VM_LOCKED cleared. Callers must be prepared to
 * deal with this.
 *
 * We don't save and restore VM_LOCKED here because pages are
 * still on lru. In unmap path, pages might be scanned by reclaim
 * and re-mlocked by page_mlock/try_to_unmap before we unmap and
 * free them. This will result in freeing mlocked pages.
 */
void munlock_vma_pages_range(struct vm_area_struct *vma,
			     unsigned long start, unsigned long end)
{
	vma->vm_flags &= VM_LOCKED_CLEAR_MASK;

	while (start < end) {
		struct page *page;
		unsigned int page_mask = 0;
		unsigned long page_increm;
		struct pagevec pvec;
		struct zone *zone;

		pagevec_init(&pvec);
		/*
		 * Although FOLL_DUMP is intended for get_dump_page(),
		 * it just so happens that its special treatment of the
		 * ZERO_PAGE (returning an error instead of doing get_page)
		 * suits munlock very well (and if somehow an abnormal page
		 * has sneaked into the range, we won't oops here: great).
		 */
		page = follow_page(vma, start, FOLL_GET | FOLL_DUMP);

		if (page && !IS_ERR(page)) {
			if (PageTransTail(page)) {
				VM_BUG_ON_PAGE(PageMlocked(page), page);
				put_page(page); /* follow_page_mask() */
			} else if (PageTransHuge(page)) {
				lock_page(page);
				/*
				 * Any THP page found by follow_page_mask() may
				 * have gotten split before reaching
				 * munlock_vma_page(), so we need to compute
				 * the page_mask here instead.
				 */
				page_mask = munlock_vma_page(page);
				unlock_page(page);
				put_page(page); /* follow_page_mask() */
			} else {
				/*
				 * Non-huge pages are handled in batches via
				 * pagevec. The pin from follow_page_mask()
				 * prevents them from being collapsed into
				 * a THP.
				 */
				pagevec_add(&pvec, page);
				zone = page_zone(page);

				/*
				 * Try to fill the rest of pagevec using fast
				 * pte walk. This will also update start to
				 * the next page to process. Then munlock the
				 * pagevec.
				 */
				start = __munlock_pagevec_fill(&pvec, vma,
						zone, start, end);
				__munlock_pagevec(&pvec, zone);
				goto next;
			}
		}
		page_increm = 1 + page_mask;
		start += page_increm * PAGE_SIZE;
next:
		cond_resched();
	}
}

/*
 * mlock_fixup - handle mlock[all]/munlock[all] requests.
 *
 * Filters out "special" vmas -- VM_LOCKED never gets set for these, and
 * munlock is a no-op. However, for some special vmas, we go ahead and
 * populate the ptes.
 *
 * For vmas that pass the filters, merge/split as appropriate.
 */
static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
	unsigned long start, unsigned long end, vm_flags_t newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	pgoff_t pgoff;
	int nr_pages;
	int ret = 0;
	int lock = !!(newflags & VM_LOCKED);
	vm_flags_t old_flags = vma->vm_flags;

	if (newflags == vma->vm_flags || (vma->vm_flags & VM_SPECIAL) ||
	    is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm) ||
	    vma_is_dax(vma) || vma_is_secretmem(vma))
		/* don't set VM_LOCKED or VM_LOCKONFAULT and don't count */
		goto out;

	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma,
			  vma->vm_file, pgoff, vma_policy(vma),
			  vma->vm_userfaultfd_ctx, anon_vma_name(vma));
	if (*prev) {
		vma = *prev;
		goto success;
	}

	if (start != vma->vm_start) {
		ret = split_vma(mm, vma, start, 1);
		if (ret)
			goto out;
	}

	if (end != vma->vm_end) {
		ret = split_vma(mm, vma, end, 0);
		if (ret)
			goto out;
	}

success:
	/*
	 * Keep track of amount of locked VM.
	 */
	nr_pages = (end - start) >> PAGE_SHIFT;
	if (!lock)
		nr_pages = -nr_pages;
	else if (old_flags & VM_LOCKED)
		nr_pages = 0;
	mm->locked_vm += nr_pages;

	/*
	 * vm_flags is protected by the mmap_lock held in write mode.
	 * It's okay if try_to_unmap_one unmaps a page just after we
	 * set VM_LOCKED, populate_vma_page_range will bring it back.
	 */

	if (lock)
		vma->vm_flags = vma_pad_fixup_flags(vma, newflags);
	else
		munlock_vma_pages_range(vma, start, end);

out:
	*prev = vma;
	return ret;
}

static int apply_vma_lock_flags(unsigned long start, size_t len,
				vm_flags_t flags)
{
	unsigned long nstart, end, tmp;
	struct vm_area_struct *vma, *prev;
	int error;

	VM_BUG_ON(offset_in_page(start));
	VM_BUG_ON(len != PAGE_ALIGN(len));
	end = start + len;
	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;
	vma = find_vma(current->mm, start);
	if (!vma || vma->vm_start > start)
		return -ENOMEM;

	prev = vma->vm_prev;
	if (start > vma->vm_start)
		prev = vma;

	for (nstart = start ; ; ) {
		vm_flags_t newflags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;

		newflags |= flags;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */
		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
#ifdef CONFIG_AMLOGIC_PIN_LOCKED_FILE
		reset_page_vma_flags(vma, flags);
#endif
		error = mlock_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			break;
		nstart = tmp;
		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			break;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			break;
		}
	}
	return error;
}

/*
 * Go through the vmas and, as the return value, sum the size of the pages
 * that are already mlocked within [start, start + len). Note that the
 * deferred locking case (mlock2() with MLOCK_ONFAULT) is also counted.
 * Return value: count of previously mlocked pages
 */
static unsigned long count_mm_mlocked_page_nr(struct mm_struct *mm,
		unsigned long start, size_t len)
{
	struct vm_area_struct *vma;
	unsigned long count = 0;

	if (mm == NULL)
		mm = current->mm;

	vma = find_vma(mm, start);
	if (vma == NULL)
		return 0;

	for (; vma ; vma = vma->vm_next) {
		if (start >= vma->vm_end)
			continue;
		if (start + len <= vma->vm_start)
			break;
		if (vma->vm_flags & VM_LOCKED) {
			if (start > vma->vm_start)
				count -= (start - vma->vm_start);
			if (start + len < vma->vm_end) {
				count += start + len - vma->vm_start;
				break;
			}
			count += vma->vm_end - vma->vm_start;
		}
	}

	return count >> PAGE_SHIFT;
}
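
/*
 * Worked example (illustrative): a single VM_LOCKED vma spans
 * [0x1000, 0x5000) and the request is start = 0x2000, len = 0x2000.
 * count is first decremented by start - vm_start = 0x1000 for the part
 * of the vma before start; then, since start + len (0x4000) < vm_end
 * (0x5000), count += 0x4000 - 0x1000 = 0x3000 and the loop breaks.
 * Net count is 0x2000 bytes, i.e. 2 pages (with 4K pages) already
 * mlocked within the requested range.
 */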

static __must_check int do_mlock(unsigned long start, size_t len, vm_flags_t flags)
{
	unsigned long locked;
	unsigned long lock_limit;
	int error = -ENOMEM;

	start = untagged_addr(start);

	if (!can_do_mlock())
		return -EPERM;

	len = PAGE_ALIGN(len + (offset_in_page(start)));
	start &= PAGE_MASK;

	lock_limit = rlimit(RLIMIT_MEMLOCK);
	lock_limit >>= PAGE_SHIFT;
	locked = len >> PAGE_SHIFT;

	if (mmap_write_lock_killable(current->mm))
		return -EINTR;

	locked += current->mm->locked_vm;
	if ((locked > lock_limit) && (!capable(CAP_IPC_LOCK))) {
		/*
		 * It is possible that the requested region intersects with
		 * previously mlocked areas; that part is already accounted
		 * in "mm->locked_vm" and must not be counted again in the
		 * new mlock increment. So check and adjust the locked count
		 * if necessary.
		 */
		locked -= count_mm_mlocked_page_nr(current->mm,
				start, len);
	}

	/* check against resource limits */
	if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
		error = apply_vma_lock_flags(start, len, flags);

	mmap_write_unlock(current->mm);
	if (error)
		return error;

#ifndef CONFIG_AMLOGIC_PIN_LOCKED_FILE_V2
	error = __mm_populate(start, len, 0);
	if (error)
		return __mlock_posix_error_return(error);
#endif
	return 0;
}

SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
{
	return do_mlock(start, len, VM_LOCKED);
}

SYSCALL_DEFINE3(mlock2, unsigned long, start, size_t, len, int, flags)
{
	vm_flags_t vm_flags = VM_LOCKED;

	if (flags & ~MLOCK_ONFAULT)
		return -EINVAL;

	if (flags & MLOCK_ONFAULT)
		vm_flags |= VM_LOCKONFAULT;

	return do_mlock(start, len, vm_flags);
}
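
/*
 * Illustrative userspace usage (a sketch, not part of the kernel; glibc
 * has provided an mlock2() wrapper since 2.27, older systems need
 * syscall(2)). MLOCK_ONFAULT locks pages as they are faulted in rather
 * than populating the whole range up front:
 *
 *	#define _GNU_SOURCE
 *	#include <stdlib.h>
 *	#include <sys/mman.h>
 *
 *	int main(void)
 *	{
 *		size_t len = 4096;
 *		void *buf;
 *
 *		if (posix_memalign(&buf, 4096, len))
 *			return 1;
 *		if (mlock2(buf, len, MLOCK_ONFAULT) != 0)
 *			return 1;
 *		munlock(buf, len);
 *		free(buf);
 *		return 0;
 *	}
 */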

SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
{
	int ret;

	start = untagged_addr(start);

	len = PAGE_ALIGN(len + (offset_in_page(start)));
	start &= PAGE_MASK;

	if (mmap_write_lock_killable(current->mm))
		return -EINTR;
	ret = apply_vma_lock_flags(start, len, 0);
	mmap_write_unlock(current->mm);

	return ret;
}

/*
 * Take the MCL_* flags passed into mlockall (or 0 if called from munlockall)
 * and translate into the appropriate modifications to mm->def_flags and/or the
 * flags for all current VMAs.
 *
 * There are a couple of subtleties with this. If mlockall() is called multiple
 * times with different flags, the values do not necessarily stack. If
 * mlockall() is called once including the MCL_FUTURE flag and then a second
 * time without it, VM_LOCKED and VM_LOCKONFAULT will be cleared from
 * mm->def_flags.
 */
static int apply_mlockall_flags(int flags)
{
	struct vm_area_struct *vma, *prev = NULL;
	vm_flags_t to_add = 0;

	current->mm->def_flags &= VM_LOCKED_CLEAR_MASK;
	if (flags & MCL_FUTURE) {
		current->mm->def_flags |= VM_LOCKED;

		if (flags & MCL_ONFAULT)
			current->mm->def_flags |= VM_LOCKONFAULT;

		if (!(flags & MCL_CURRENT))
			goto out;
	}

	if (flags & MCL_CURRENT) {
		to_add |= VM_LOCKED;
		if (flags & MCL_ONFAULT)
			to_add |= VM_LOCKONFAULT;
	}

	for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
		vm_flags_t newflags;

		newflags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
		newflags |= to_add;

		/* Ignore errors */
		mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags);
		cond_resched();
	}
out:
	return 0;
}

SYSCALL_DEFINE1(mlockall, int, flags)
{
	unsigned long lock_limit;
	int ret;

	if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE | MCL_ONFAULT)) ||
	    flags == MCL_ONFAULT)
		return -EINVAL;

	if (!can_do_mlock())
		return -EPERM;

	lock_limit = rlimit(RLIMIT_MEMLOCK);
	lock_limit >>= PAGE_SHIFT;

	if (mmap_write_lock_killable(current->mm))
		return -EINTR;

	ret = -ENOMEM;
	if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
	    capable(CAP_IPC_LOCK))
		ret = apply_mlockall_flags(flags);
	mmap_write_unlock(current->mm);
	if (!ret && (flags & MCL_CURRENT))
		mm_populate(0, TASK_SIZE);

	return ret;
}
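
/*
 * Illustrative userspace usage (a sketch, not part of the kernel):
 * latency-sensitive programs commonly lock both current and future
 * mappings up front so that no page fault can stall them later:
 *
 *	#include <sys/mman.h>
 *
 *	int main(void)
 *	{
 *		if (mlockall(MCL_CURRENT | MCL_FUTURE) != 0)
 *			return 1;
 *		munlockall();
 *		return 0;
 *	}
 */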

SYSCALL_DEFINE0(munlockall)
{
	int ret;

	if (mmap_write_lock_killable(current->mm))
		return -EINTR;
	ret = apply_mlockall_flags(0);
	mmap_write_unlock(current->mm);
	return ret;
}

/*
 * Objects with different lifetime than processes (SHM_LOCK and SHM_HUGETLB
 * shm segments) get accounted against the user's ucounts instead.
 */
static DEFINE_SPINLOCK(shmlock_user_lock);

int user_shm_lock(size_t size, struct ucounts *ucounts)
{
	unsigned long lock_limit, locked;
	long memlock;
	int allowed = 0;

	locked = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	lock_limit = rlimit(RLIMIT_MEMLOCK);
	if (lock_limit != RLIM_INFINITY)
		lock_limit >>= PAGE_SHIFT;
	spin_lock(&shmlock_user_lock);
	memlock = inc_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, locked);

	if ((memlock == LONG_MAX || memlock > lock_limit) && !capable(CAP_IPC_LOCK)) {
		dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, locked);
		goto out;
	}
	if (!get_ucounts(ucounts)) {
		dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, locked);
		allowed = 0;
		goto out;
	}
	allowed = 1;
out:
	spin_unlock(&shmlock_user_lock);
	return allowed;
}

void user_shm_unlock(size_t size, struct ucounts *ucounts)
{
	spin_lock(&shmlock_user_lock);
	dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
	spin_unlock(&shmlock_user_lock);
	put_ucounts(ucounts);
}
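
/*
 * Illustrative userspace path into user_shm_lock()/user_shm_unlock()
 * (a sketch, not part of the kernel): SysV shm segments are locked with
 * shmctl(SHM_LOCK) and, as noted above, accounted against the user's
 * ucounts rather than mm->locked_vm:
 *
 *	#include <sys/ipc.h>
 *	#include <sys/shm.h>
 *
 *	int main(void)
 *	{
 *		int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *
 *		if (id < 0)
 *			return 1;
 *		if (shmctl(id, SHM_LOCK, NULL) != 0)
 *			return 1;
 *		shmctl(id, SHM_UNLOCK, NULL);
 *		shmctl(id, IPC_RMID, NULL);
 *		return 0;
 *	}
 */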