/* SPDX-License-Identifier: GPL-2.0-or-later */
/* internal.h: mm/ internal definitions
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
#ifndef __MM_INTERNAL_H
#define __MM_INTERNAL_H

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/tracepoint-defs.h>

/*
 * The set of flags that only affect watermark checking and reclaim
 * behaviour. This is used by the MM to obey the caller constraints
 * about IO, FS and watermark checking while ignoring placement
 * hints such as HIGHMEM usage.
 */
#define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
			__GFP_NOWARN|__GFP_RETRY_MAYFAIL|__GFP_NOFAIL|\
			__GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC|\
			__GFP_ATOMIC)

/* The GFP flags allowed during early boot */
#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))

/* Control allocation cpuset and node placement constraints */
#define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)

/* Do not use these with a slab allocator */
#define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)

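/*
 * Illustrative sketch (not part of the original header): a nested
 * allocation typically propagates only the caller's reclaim constraints
 * via GFP_RECLAIM_MASK and supplies its own placement flags, e.g.:
 *
 *	gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
 */
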
void page_writeback_init(void);

vm_fault_t do_swap_page(struct vm_fault *vmf);

void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
		unsigned long floor, unsigned long ceiling);

static inline bool can_madv_lru_vma(struct vm_area_struct *vma)
{
	return !(vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP));
}

void unmap_page_range(struct mmu_gather *tlb,
			     struct vm_area_struct *vma,
			     unsigned long addr, unsigned long end,
			     struct zap_details *details);

void do_page_cache_ra(struct readahead_control *, unsigned long nr_to_read,
		unsigned long lookahead_size);
void force_page_cache_ra(struct readahead_control *, unsigned long nr);
static inline void force_page_cache_readahead(struct address_space *mapping,
		struct file *file, pgoff_t index, unsigned long nr_to_read)
{
	DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, index);
	force_page_cache_ra(&ractl, nr_to_read);
}

unsigned find_lock_entries(struct address_space *mapping, pgoff_t start,
		pgoff_t end, struct pagevec *pvec, pgoff_t *indices);

/**
 * page_evictable - test whether a page is evictable
 * @page: the page to test
 *
 * Test whether page is evictable--i.e., should be placed on active/inactive
 * lists vs unevictable list.
 *
 * Reasons page might not be evictable:
 * (1) page's mapping marked unevictable
 * (2) page is part of an mlocked VMA
 *
 */
static inline bool page_evictable(struct page *page)
{
	bool ret;

	/* Prevent address_space of inode and swap cache from being freed */
	rcu_read_lock();
	ret = !mapping_unevictable(page_mapping(page)) && !PageMlocked(page);
	rcu_read_unlock();
	return ret;
}

/*
 * Turn a non-refcounted page (->_refcount == 0) into a refcounted page
 * with a count of one.
 */
static inline void set_page_refcounted(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(page_ref_count(page), page);
	set_page_count(page, 1);
}

/*
 * When the kernel touches a user page, that page may have been marked
 * hwpoisoned while still mapped in user space. If the kernel can
 * guarantee data integrity and a successful operation without the page,
 * it is better to check the poison status first and avoid touching the
 * page rather than panicking; dumping core for a process fatal signal
 * is one case matching this scenario. Conversely, if the kernel cannot
 * guarantee data integrity, it is better not to call this function and
 * instead let the kernel touch the poisoned page and panic.
 */
static inline bool is_page_poisoned(struct page *page)
{
	if (PageHWPoison(page))
		return true;
	else if (PageHuge(page) && PageHWPoison(compound_head(page)))
		return true;

	return false;
}

extern unsigned long highest_memmap_pfn;

/*
 * Maximum number of reclaim retries without progress before the OOM
 * killer is considered the only way forward.
 */
#define MAX_RECLAIM_RETRIES 16

/*
 * in mm/vmscan.c:
 */
extern int isolate_lru_page(struct page *page);
extern void putback_lru_page(struct page *page);

/*
 * in mm/rmap.c:
 */
extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);

/*
 * in mm/page_alloc.c
 */

/*
 * Structure for holding the mostly immutable allocation parameters passed
 * between functions involved in allocations, including the alloc_pages*
 * family of functions.
 *
 * nodemask, migratetype and highest_zoneidx are initialized only once in
 * __alloc_pages_nodemask() and then never change.
 *
 * zonelist, preferred_zone and highest_zoneidx are set first in
 * __alloc_pages_nodemask() for the fast path, and might be later changed
 * in __alloc_pages_slowpath(). All other functions pass the whole structure
 * by a const pointer.
 */
struct alloc_context {
	struct zonelist *zonelist;
	nodemask_t *nodemask;
	struct zoneref *preferred_zoneref;
	int migratetype;

	/*
	 * highest_zoneidx represents highest usable zone index of
	 * the allocation request. Due to the nature of the zone,
	 * memory on lower zone than the highest_zoneidx will be
	 * protected by lowmem_reserve[highest_zoneidx].
	 *
	 * highest_zoneidx is also used by reclaim/compaction to limit
	 * the target zone since higher zone than this index cannot be
	 * usable for this allocation request.
	 */
	enum zone_type highest_zoneidx;
	bool spread_dirty_pages;
};

/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined order O+1 page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *     B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (buddy1) is #8 its order
 * 1 buddy is #10:
 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *     P = B & ~(1 << O)
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
 */
static inline unsigned long
__find_buddy_pfn(unsigned long page_pfn, unsigned int order)
{
	return page_pfn ^ (1 << order);
}

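/*
 * Illustrative sketch (not part of the original header): rule 2 above has
 * no corresponding helper in this file. The order O+1 parent of a buddy
 * pfn would be computed as below; the helper name is hypothetical.
 */
static inline unsigned long
__find_buddy_parent_pfn(unsigned long page_pfn, unsigned int order)
{
	/* Clear the bit that distinguishes the two order O buddies. */
	return page_pfn & ~(1UL << order);
}
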
extern struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
				unsigned long end_pfn, struct zone *zone);

static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn,
				unsigned long end_pfn, struct zone *zone)
{
	if (zone->contiguous)
		return pfn_to_page(start_pfn);

	return __pageblock_pfn_to_page(start_pfn, end_pfn, zone);
}

extern int __isolate_free_page(struct page *page, unsigned int order);
extern void __putback_isolated_page(struct page *page, unsigned int order,
				    int mt);
extern void memblock_free_pages(struct page *page, unsigned long pfn,
					unsigned int order);
extern void __free_pages_core(struct page *page, unsigned int order);
extern void prep_compound_page(struct page *page, unsigned int order);
extern void post_alloc_hook(struct page *page, unsigned int order,
					gfp_t gfp_flags);
extern int user_min_free_kbytes;

extern void free_unref_page(struct page *page);
extern void free_unref_page_list(struct list_head *list);

extern void zone_pcp_update(struct zone *zone);
extern void zone_pcp_reset(struct zone *zone);
extern void zone_pcp_disable(struct zone *zone);
extern void zone_pcp_enable(struct zone *zone);

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

/*
 * in mm/compaction.c
 */
/*
 * compact_control is used to track pages being migrated and the free pages
 * they are being migrated to during memory compaction. The free_pfn starts
 * at the end of a zone and migrate_pfn begins at the start. Movable pages
 * are moved to the end of a zone during a compaction run and the run
 * completes when free_pfn <= migrate_pfn
 */
struct compact_control {
	struct list_head freepages;	/* List of free pages to migrate to */
	struct list_head migratepages;	/* List of pages being migrated */
	unsigned int nr_freepages;	/* Number of isolated free pages */
	unsigned int nr_migratepages;	/* Number of pages to migrate */
	unsigned long free_pfn;		/* isolate_freepages search base */
	unsigned long migrate_pfn;	/* isolate_migratepages search base */
	unsigned long fast_start_pfn;	/* a pfn to start linear scan from */
	struct zone *zone;
	unsigned long total_migrate_scanned;
	unsigned long total_free_scanned;
	unsigned short fast_search_fail;/* failures to use free list searches */
	short search_order;		/* order to start a fast search at */
	const gfp_t gfp_mask;		/* gfp mask of a direct compactor */
	int order;			/* order a direct compactor needs */
	int migratetype;		/* migratetype of direct compactor */
	const unsigned int alloc_flags;	/* alloc flags of a direct compactor */
	const int highest_zoneidx;	/* zone index of a direct compactor */
	enum migrate_mode mode;		/* Async or sync migration mode */
	bool ignore_skip_hint;		/* Scan blocks even if marked skip */
	bool no_set_skip_hint;		/* Don't mark blocks for skipping */
	bool ignore_block_suitable;	/* Scan blocks considered unsuitable */
	bool direct_compaction;		/* False from kcompactd or /proc/... */
	bool proactive_compaction;	/* kcompactd proactive compaction */
	bool whole_zone;		/* Whole zone should/has been scanned */
	bool contended;			/* Signal lock or sched contention */
	bool rescan;			/* Rescanning the same pageblock */
	bool alloc_contig;		/* alloc_contig_range allocation */
};

/*
 * Used in direct compaction when a page should be taken from the freelists
 * immediately when one is created during the free path.
 */
struct capture_control {
	struct compact_control *cc;
	struct page *page;
};

unsigned long
isolate_freepages_range(struct compact_control *cc,
			unsigned long start_pfn, unsigned long end_pfn);
unsigned long
isolate_migratepages_range(struct compact_control *cc,
			   unsigned long low_pfn, unsigned long end_pfn);
int find_suitable_fallback(struct free_area *area, unsigned int order,
			int migratetype, bool only_stealable, bool *can_steal);

#endif
Nick Piggin | 0f8053a | 2006-03-22 00:08:33 -0800 | [diff] [blame] | 290 | |
Mel Gorman | 48f13bf | 2007-10-16 01:26:10 -0700 | [diff] [blame] | 291 | /* |
Mel Gorman | 6c14466 | 2014-01-23 15:53:38 -0800 | [diff] [blame] | 292 | * This function returns the order of a free page in the buddy system. In |
| 293 | * general, page_zone(page)->lock must be held by the caller to prevent the |
| 294 | * page from being allocated in parallel and returning garbage as the order. |
| 295 | * If a caller does not hold page_zone(page)->lock, it must guarantee that the |
Vlastimil Babka | 99c0fd5 | 2014-10-09 15:27:23 -0700 | [diff] [blame] | 296 | * page cannot be allocated or merged in parallel. Alternatively, it must |
Matthew Wilcox (Oracle) | ab130f91 | 2020-10-15 20:10:15 -0700 | [diff] [blame] | 297 | * handle invalid values gracefully, and use buddy_order_unsafe() below. |
Mel Gorman | 48f13bf | 2007-10-16 01:26:10 -0700 | [diff] [blame] | 298 | */ |
Matthew Wilcox (Oracle) | ab130f91 | 2020-10-15 20:10:15 -0700 | [diff] [blame] | 299 | static inline unsigned int buddy_order(struct page *page) |
Mel Gorman | 48f13bf | 2007-10-16 01:26:10 -0700 | [diff] [blame] | 300 | { |
KAMEZAWA Hiroyuki | 572438f | 2010-10-26 14:22:08 -0700 | [diff] [blame] | 301 | /* PageBuddy() must be checked by the caller */ |
Mel Gorman | 48f13bf | 2007-10-16 01:26:10 -0700 | [diff] [blame] | 302 | return page_private(page); |
| 303 | } |
Alexander van Heukelum | b5a0e01 | 2008-02-23 15:24:06 -0800 | [diff] [blame] | 304 | |
/*
 * Like buddy_order(), but for callers who cannot afford to hold the zone lock.
 * PageBuddy() should be checked first by the caller to minimize race window,
 * and invalid values must be handled gracefully.
 *
 * READ_ONCE is used so that if the caller assigns the result into a local
 * variable and e.g. tests it for valid range before using, the compiler cannot
 * decide to remove the variable and inline the page_private(page) multiple
 * times, potentially observing different values in the tests and the actual
 * use of the result.
 */
#define buddy_order_unsafe(page)	READ_ONCE(page_private(page))

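/*
 * Illustrative usage sketch (not part of the original header): read the
 * order once into a local variable and range-check it before use, so the
 * test and the use observe the same snapshot, e.g. to skip a free block:
 *
 *	unsigned int order = buddy_order_unsafe(page);
 *
 *	if (order < MAX_ORDER)
 *		pfn += 1UL << order;
 */
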
/*
 * These three helpers classify VMAs for virtual memory accounting.
 */

/*
 * Executable code area - executable, not writable, not stack
 */
static inline bool is_exec_mapping(vm_flags_t flags)
{
	return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC;
}

/*
 * Stack area - automatically grows in one direction
 *
 * VM_GROWSUP / VM_GROWSDOWN VMAs are always private anonymous:
 * do_mmap() forbids all other combinations.
 */
static inline bool is_stack_mapping(vm_flags_t flags)
{
	return (flags & VM_STACK) == VM_STACK;
}

/*
 * Data area - private, writable, not stack
 */
static inline bool is_data_mapping(vm_flags_t flags)
{
	return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE;
}

/* mm/util.c */
void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev);
void __vma_unlink_list(struct mm_struct *mm, struct vm_area_struct *vma);

#ifdef CONFIG_MMU
extern long populate_vma_page_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end, int *nonblocking);
extern void munlock_vma_pages_range(struct vm_area_struct *vma,
			unsigned long start, unsigned long end);
static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
{
	munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
}

/*
 * must be called with vma's mmap_lock held for read or write, and page locked.
 */
extern void mlock_vma_page(struct page *page);
extern unsigned int munlock_vma_page(struct page *page);

/*
 * Clear the page's PageMlocked(). This can be useful in a situation where
 * we want to unconditionally remove a page from the pagecache -- e.g.,
 * on truncation or freeing.
 *
 * It is legal to call this function for any page, mlocked or not.
 * If called for a page that is still mapped by mlocked vmas, all we do
 * is revert to lazy LRU behaviour -- semantics are not broken.
 */
extern void clear_page_mlock(struct page *page);

/*
 * mlock_migrate_page - called only from migrate_misplaced_transhuge_page()
 * (because that does not go through the full procedure of migration ptes):
 * to migrate the Mlocked page flag; update statistics.
 */
static inline void mlock_migrate_page(struct page *newpage, struct page *page)
{
	if (TestClearPageMlocked(page)) {
		int nr_pages = thp_nr_pages(page);

		/* Holding pmd lock, no change in irq context: __mod is safe */
		__mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
		SetPageMlocked(newpage);
		__mod_zone_page_state(page_zone(newpage), NR_MLOCK, nr_pages);
	}
}

extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);

/*
 * At what user virtual address is page expected in @vma?
 */
static inline unsigned long
__vma_address(struct page *page, struct vm_area_struct *vma)
{
	pgoff_t pgoff = page_to_pgoff(page);
	return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
}

static inline unsigned long
vma_address(struct page *page, struct vm_area_struct *vma)
{
	unsigned long start, end;

	start = __vma_address(page, vma);
	end = start + thp_size(page) - PAGE_SIZE;

	/* page should be within @vma mapping range */
	VM_BUG_ON_VMA(end < vma->vm_start || start >= vma->vm_end, vma);

	return max(start, vma->vm_start);
}

static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
						    struct file *fpin)
{
	int flags = vmf->flags;

	if (fpin)
		return fpin;

	/*
	 * FAULT_FLAG_RETRY_NOWAIT means we don't want to wait on page locks or
	 * anything, so we only pin the file and drop the mmap_lock if only
	 * FAULT_FLAG_ALLOW_RETRY is set, while this is the first attempt.
	 */
	if (fault_flag_allow_retry_first(flags) &&
	    !(flags & FAULT_FLAG_RETRY_NOWAIT)) {
		fpin = get_file(vmf->vma->vm_file);
		mmap_read_unlock(vmf->vma->vm_mm);
	}
	return fpin;
}

#else /* !CONFIG_MMU */
static inline void clear_page_mlock(struct page *page) { }
static inline void mlock_vma_page(struct page *page) { }
static inline void mlock_migrate_page(struct page *new, struct page *old) { }

#endif /* !CONFIG_MMU */

/*
 * Return the mem_map entry representing the 'offset' subpage within
 * the maximally aligned gigantic page 'base'. Handle any discontiguity
 * in the mem_map at MAX_ORDER_NR_PAGES boundaries.
 */
static inline struct page *mem_map_offset(struct page *base, int offset)
{
	if (unlikely(offset >= MAX_ORDER_NR_PAGES))
		return nth_page(base, offset);
	return base + offset;
}

/*
 * Iterator over all subpages within the maximally aligned gigantic
 * page 'base'. Handle any discontiguity in the mem_map.
 */
static inline struct page *mem_map_next(struct page *iter,
						struct page *base, int offset)
{
	if (unlikely((offset & (MAX_ORDER_NR_PAGES - 1)) == 0)) {
		unsigned long pfn = page_to_pfn(base) + offset;
		if (!pfn_valid(pfn))
			return NULL;
		return pfn_to_page(pfn);
	}
	return iter + 1;
}

/* Memory initialisation debug and verification */
enum mminit_level {
	MMINIT_WARNING,
	MMINIT_VERIFY,
	MMINIT_TRACE
};

#ifdef CONFIG_DEBUG_MEMORY_INIT

extern int mminit_loglevel;

#define mminit_dprintk(level, prefix, fmt, arg...) \
do { \
	if (level < mminit_loglevel) { \
		if (level <= MMINIT_WARNING) \
			pr_warn("mminit::" prefix " " fmt, ##arg);	\
		else \
			printk(KERN_DEBUG "mminit::" prefix " " fmt, ##arg); \
	} \
} while (0)

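/*
 * Illustrative usage sketch (not part of the original header); the prefix
 * and message are hypothetical:
 *
 *	mminit_dprintk(MMINIT_VERIFY, "zonelist",
 *		       "node %d zonelist verified\n", nid);
 */
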
extern void mminit_verify_pageflags_layout(void);
extern void mminit_verify_zonelist(void);
#else

static inline void mminit_dprintk(enum mminit_level level,
				const char *prefix, const char *fmt, ...)
{
}

static inline void mminit_verify_pageflags_layout(void)
{
}

static inline void mminit_verify_zonelist(void)
{
}
#endif /* CONFIG_DEBUG_MEMORY_INIT */

/* mminit_validate_memmodel_limits is independent of CONFIG_DEBUG_MEMORY_INIT */
#if defined(CONFIG_SPARSEMEM)
extern void mminit_validate_memmodel_limits(unsigned long *start_pfn,
					    unsigned long *end_pfn);
#else
static inline void mminit_validate_memmodel_limits(unsigned long *start_pfn,
						   unsigned long *end_pfn)
{
}
#endif /* CONFIG_SPARSEMEM */

#define NODE_RECLAIM_NOSCAN	-2
#define NODE_RECLAIM_FULL	-1
#define NODE_RECLAIM_SOME	0
#define NODE_RECLAIM_SUCCESS	1

#ifdef CONFIG_NUMA
extern int node_reclaim(struct pglist_data *, gfp_t, unsigned int);
#else
static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,
				unsigned int order)
{
	return NODE_RECLAIM_NOSCAN;
}
#endif

extern int hwpoison_filter(struct page *p);

extern u32 hwpoison_filter_dev_major;
extern u32 hwpoison_filter_dev_minor;
extern u64 hwpoison_filter_flags_mask;
extern u64 hwpoison_filter_flags_value;
extern u64 hwpoison_filter_memcg;
extern u32 hwpoison_filter_enable;

extern unsigned long __must_check vm_mmap_pgoff(struct file *, unsigned long,
        unsigned long, unsigned long,
        unsigned long, unsigned long);

extern void set_pageblock_order(void);
unsigned int reclaim_clean_pages_from_list(struct zone *zone,
					    struct list_head *page_list);
/* The ALLOC_WMARK bits are used as an index to zone->watermark */
#define ALLOC_WMARK_MIN		WMARK_MIN
#define ALLOC_WMARK_LOW		WMARK_LOW
#define ALLOC_WMARK_HIGH	WMARK_HIGH
#define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */

/* Mask to get the watermark bits */
#define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)
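
/*
 * Illustrative sketch (not part of the original header): the low bits of
 * alloc_flags index the zone watermarks, so the mark to test is typically
 * derived as:
 *
 *	unsigned long mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
 */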

/*
 * Only MMU archs have async oom victim reclaim - aka oom_reaper so we
 * cannot assume a reduced access to memory reserves is sufficient for
 * !MMU
 */
#ifdef CONFIG_MMU
#define ALLOC_OOM		0x08
#else
#define ALLOC_OOM		ALLOC_NO_WATERMARKS
#endif

#define ALLOC_HARDER		 0x10 /* try to alloc harder */
#define ALLOC_HIGH		 0x20 /* __GFP_HIGH set */
#define ALLOC_CPUSET		 0x40 /* check for correct cpuset */
#define ALLOC_CMA		 0x80 /* allow allocations from CMA areas */
#ifdef CONFIG_ZONE_DMA32
#define ALLOC_NOFRAGMENT	0x100 /* avoid mixing pageblock types */
#else
#define ALLOC_NOFRAGMENT	  0x0
#endif
#define ALLOC_KSWAPD		0x800 /* allow waking of kswapd, __GFP_KSWAPD_RECLAIM set */

enum ttu_flags;
struct tlbflush_unmap_batch;

/*
 * only for MM internal work items which do not depend on
 * any allocations or locks which might depend on allocations
 */
extern struct workqueue_struct *mm_percpu_wq;

#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
void try_to_unmap_flush(void);
void try_to_unmap_flush_dirty(void);
void flush_tlb_batched_pending(struct mm_struct *mm);
#else
static inline void try_to_unmap_flush(void)
{
}
static inline void try_to_unmap_flush_dirty(void)
{
}
static inline void flush_tlb_batched_pending(struct mm_struct *mm)
{
}
#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */

extern const struct trace_print_flags pageflag_names[];
extern const struct trace_print_flags vmaflag_names[];
extern const struct trace_print_flags gfpflag_names[];

static inline bool is_migrate_highatomic(enum migratetype migratetype)
{
	return migratetype == MIGRATE_HIGHATOMIC;
}

static inline bool is_migrate_highatomic_page(struct page *page)
{
	return get_pageblock_migratetype(page) == MIGRATE_HIGHATOMIC;
}

void setup_zone_pageset(struct zone *zone);

struct migration_target_control {
	int nid;		/* preferred node id */
	nodemask_t *nmask;
	gfp_t gfp_mask;
};

/*
 * mm/vmalloc.c
 */
int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift);

#endif	/* __MM_INTERNAL_H */