/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Macros for manipulating and testing page->flags
 */

#ifndef PAGE_FLAGS_H
#define PAGE_FLAGS_H

#include <linux/types.h>
#include <linux/bug.h>
#include <linux/mmdebug.h>
#ifndef __GENERATING_BOUNDS_H
#include <linux/mm_types.h>
#include <generated/bounds.h>
#endif /* !__GENERATING_BOUNDS_H */

/*
 * Various page->flags bits:
 *
 * PG_reserved is set for special pages. The "struct page" of such a page
 * should in general not be touched (e.g. set dirty) except by its owner.
 * Pages marked as PG_reserved include:
 * - Pages part of the kernel image (including vDSO) and similar (e.g. BIOS,
 *   initrd, HW tables)
 * - Pages reserved or allocated early during boot (before the page allocator
 *   was initialized). This includes (depending on the architecture) the
 *   initial vmemmap, initial page tables, crashkernel, elfcorehdr, and much
 *   more. Once (if ever) freed, PG_reserved is cleared and they will be
 *   given to the page allocator.
 * - Pages falling into physical memory gaps - not IORESOURCE_SYSRAM. Trying
 *   to read/write these pages might end badly. Don't touch!
 * - The zero page(s)
 * - Pages not added to the page allocator when onlining a section because
 *   they were excluded via the online_page_callback() or because they are
 *   PG_hwpoison.
 * - Pages allocated in the context of kexec/kdump (loaded kernel image,
 *   control pages, vmcoreinfo)
 * - MMIO/DMA pages. Some architectures don't allow ioremapping pages that
 *   are not marked PG_reserved (as they might be in use by somebody else who
 *   does not respect the caching strategy).
 * - Pages part of an offline section (struct pages of offline sections should
 *   not be trusted as they will be initialized when first onlined).
 * - MCA pages on ia64
 * - Pages holding CPU notes for POWER Firmware Assisted Dump
 * - Device memory (e.g. PMEM, DAX, HMM)
 * Some PG_reserved pages will be excluded from the hibernation image.
 * PG_reserved does in general not hinder anybody from dumping or swapping
 * and is no longer required for remap_pfn_range(). ioremap might require it.
 * Consequently, PG_reserved for a page mapped into user space can indicate
 * the zero page, the vDSO, MMIO pages or device memory.
 *
 * The PG_private bitflag is set on pagecache pages if they contain filesystem
 * specific data (which is normally at page->private). It can be used by
 * private allocations for its own usage.
 *
 * During initiation of disk I/O, PG_locked is set. This bit is set before I/O
 * and cleared when writeback _starts_ or when read _completes_. PG_writeback
 * is set before writeback starts and cleared when it finishes.
 *
 * PG_locked also pins a page in pagecache, and blocks truncation of the file
 * while it is held.
 *
 * page_waitqueue(page) is a wait queue of all tasks waiting for the page
 * to become unlocked.
 *
 * PG_uptodate tells whether the page's contents are valid. When a read
 * completes, the page becomes uptodate, unless a disk I/O error happened.
 *
 * PG_referenced and PG_reclaim are used for page reclaim for anonymous and
 * file-backed pagecache (see mm/vmscan.c).
 *
 * PG_error is set to indicate that an I/O error occurred on this page.
 *
 * PG_arch_1 is an architecture specific page state bit. The generic code
 * guarantees that this bit is cleared for a page when it first is entered into
 * the page cache.
 *
 * PG_hwpoison indicates that a page got corrupted in hardware and contains
 * data with incorrect ECC bits that triggered a machine check. Accessing is
 * not safe since it may cause another machine check. Don't touch!
 */

/*
 * Don't use the *_dontuse flags. Use the macros. Otherwise you'll break
 * locked- and dirty-page accounting.
 *
 * The page flags field is split into two parts, the main flags area
 * which extends from the low bits upwards, and the fields area which
 * extends from the high bits downwards.
 *
 *  | FIELD | ... | FLAGS |
 *  N-1           ^       0
 *               (NR_PAGEFLAGS)
 *
 * The fields area is reserved for fields mapping zone, node (for NUMA) and
 * SPARSEMEM section (for variants of SPARSEMEM that require section ids like
 * SPARSEMEM_EXTREME with !SPARSEMEM_VMEMMAP).
 */
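
/*
 * Illustration only (simplified; the real accessors live in <linux/mm.h>):
 * helpers for the fields area shift the high bits down, along the lines of
 *
 *	page_zonenum(page) == (page->flags >> ZONES_PGSHIFT) & ZONES_MASK
 *
 * while the Page*() helpers defined below test and modify the low
 * NR_PAGEFLAGS bits.
 */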
enum pageflags {
	PG_locked,		/* Page is locked. Don't touch. */
	PG_referenced,
	PG_uptodate,
	PG_dirty,
	PG_lru,
	PG_active,
	PG_workingset,
	PG_waiters,		/* Page has waiters, check its waitqueue.
				 * Must be bit #7 and in the same byte as
				 * "PG_locked" */
	PG_error,
	PG_slab,
	PG_owner_priv_1,	/* Owner use. If pagecache, fs may use */
	PG_arch_1,
	PG_reserved,
	PG_private,		/* If pagecache, has fs-private data */
	PG_private_2,		/* If pagecache, has fs aux data */
	PG_writeback,		/* Page is under writeback */
	PG_head,		/* A head page */
	PG_mappedtodisk,	/* Has blocks allocated on-disk */
	PG_reclaim,		/* To be reclaimed asap */
	PG_swapbacked,		/* Page is backed by RAM/swap */
	PG_unevictable,		/* Page is "unevictable" */
#ifdef CONFIG_MMU
	PG_mlocked,		/* Page is vma mlocked */
#endif
#ifdef CONFIG_ARCH_USES_PG_UNCACHED
	PG_uncached,		/* Page has been mapped as uncached */
#endif
#ifdef CONFIG_MEMORY_FAILURE
	PG_hwpoison,		/* hardware poisoned page. Don't touch */
#endif
#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
	PG_young,
	PG_idle,
#endif
	__NR_PAGEFLAGS,

	/* Filesystems */
	PG_checked = PG_owner_priv_1,

	/* SwapBacked */
	PG_swapcache = PG_owner_priv_1,	/* Swap page: swp_entry_t in private */

	/* Two page bits are conscripted by FS-Cache to maintain local caching
	 * state.  These bits are set on pages belonging to the netfs's inodes
	 * when those inodes are being locally cached.
	 */
	PG_fscache = PG_private_2,	/* page backed by cache */

	/* XEN */
	/* Pinned in Xen as a read-only pagetable page. */
	PG_pinned = PG_owner_priv_1,
	/* Pinned as part of domain save (see xen_mm_pin_all()). */
	PG_savepinned = PG_dirty,
	/* Has a grant mapping of another (foreign) domain's page. */
	PG_foreign = PG_owner_priv_1,

	/* SLOB */
	PG_slob_free = PG_private,

	/* Compound pages. Stored in first tail page's flags */
	PG_double_map = PG_private_2,

	/* non-lru isolated movable page */
	PG_isolated = PG_reclaim,
};

#ifndef __GENERATING_BOUNDS_H

struct page;	/* forward declaration */

static inline struct page *compound_head(struct page *page)
{
	unsigned long head = READ_ONCE(page->compound_head);

	if (unlikely(head & 1))
		return (struct page *) (head - 1);
	return page;
}
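
/*
 * Example (illustrative only): for a tail page, page->compound_head stores
 * the address of the head page with bit 0 set, so
 *
 *	set_compound_head(tail, head);	// stores (unsigned long)head + 1
 *	compound_head(tail);		// bit 0 set: returns head
 *	compound_head(head);		// bit 0 clear: returns head unchanged
 *
 * set_compound_head() and clear_compound_head() are defined later in this
 * file.
 */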

static __always_inline int PageTail(struct page *page)
{
	return READ_ONCE(page->compound_head) & 1;
}

static __always_inline int PageCompound(struct page *page)
{
	return test_bit(PG_head, &page->flags) || PageTail(page);
}

#define	PAGE_POISON_PATTERN	-1l
static inline int PagePoisoned(const struct page *page)
{
	return page->flags == PAGE_POISON_PATTERN;
}

#ifdef CONFIG_DEBUG_VM
void page_init_poison(struct page *page, size_t size);
#else
static inline void page_init_poison(struct page *page, size_t size)
{
}
#endif

/*
 * Page flags policies wrt compound pages
 *
 * PF_POISONED_CHECK
 *     check if this struct page is poisoned/uninitialized
 *
 * PF_ANY:
 *     the page flag is relevant for small, head and tail pages.
 *
 * PF_HEAD:
 *     for compound pages, all operations related to the page flag are
 *     applied to the head page.
 *
 * PF_ONLY_HEAD:
 *     for compound pages, callers only ever operate on the head page.
 *
 * PF_NO_TAIL:
 *     modifications of the page flag must be done on small or head pages,
 *     checks can be done on tail pages too.
 *
 * PF_NO_COMPOUND:
 *     the page flag is not relevant for compound pages.
 */
#define PF_POISONED_CHECK(page) ({					\
		VM_BUG_ON_PGFLAGS(PagePoisoned(page), page);		\
		page; })
#define PF_ANY(page, enforce)	PF_POISONED_CHECK(page)
#define PF_HEAD(page, enforce)	PF_POISONED_CHECK(compound_head(page))
#define PF_ONLY_HEAD(page, enforce) ({					\
		VM_BUG_ON_PGFLAGS(PageTail(page), page);		\
		PF_POISONED_CHECK(page); })
#define PF_NO_TAIL(page, enforce) ({					\
		VM_BUG_ON_PGFLAGS(enforce && PageTail(page), page);	\
		PF_POISONED_CHECK(compound_head(page)); })
#define PF_NO_COMPOUND(page, enforce) ({				\
		VM_BUG_ON_PGFLAGS(enforce && PageCompound(page), page);	\
		PF_POISONED_CHECK(page); })

/*
 * Macros to create function definitions for page flags
 */
#define TESTPAGEFLAG(uname, lname, policy)				\
static __always_inline int Page##uname(struct page *page)		\
	{ return test_bit(PG_##lname, &policy(page, 0)->flags); }

#define SETPAGEFLAG(uname, lname, policy)				\
static __always_inline void SetPage##uname(struct page *page)		\
	{ set_bit(PG_##lname, &policy(page, 1)->flags); }

#define CLEARPAGEFLAG(uname, lname, policy)				\
static __always_inline void ClearPage##uname(struct page *page)	\
	{ clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define __SETPAGEFLAG(uname, lname, policy)				\
static __always_inline void __SetPage##uname(struct page *page)	\
	{ __set_bit(PG_##lname, &policy(page, 1)->flags); }

#define __CLEARPAGEFLAG(uname, lname, policy)				\
static __always_inline void __ClearPage##uname(struct page *page)	\
	{ __clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define TESTSETFLAG(uname, lname, policy)				\
static __always_inline int TestSetPage##uname(struct page *page)	\
	{ return test_and_set_bit(PG_##lname, &policy(page, 1)->flags); }

#define TESTCLEARFLAG(uname, lname, policy)				\
static __always_inline int TestClearPage##uname(struct page *page)	\
	{ return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define PAGEFLAG(uname, lname, policy)					\
	TESTPAGEFLAG(uname, lname, policy)				\
	SETPAGEFLAG(uname, lname, policy)				\
	CLEARPAGEFLAG(uname, lname, policy)

#define __PAGEFLAG(uname, lname, policy)				\
	TESTPAGEFLAG(uname, lname, policy)				\
	__SETPAGEFLAG(uname, lname, policy)				\
	__CLEARPAGEFLAG(uname, lname, policy)

#define TESTSCFLAG(uname, lname, policy)				\
	TESTSETFLAG(uname, lname, policy)				\
	TESTCLEARFLAG(uname, lname, policy)

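/*
 * For illustration only: a declaration such as
 * PAGEFLAG(Dirty, dirty, PF_HEAD) expands to three helpers along these
 * lines, with the compound-page policy hidden inside PF_HEAD():
 *
 *	static __always_inline int PageDirty(struct page *page)
 *	{ return test_bit(PG_dirty, &PF_HEAD(page, 0)->flags); }
 *	static __always_inline void SetPageDirty(struct page *page)
 *	{ set_bit(PG_dirty, &PF_HEAD(page, 1)->flags); }
 *	static __always_inline void ClearPageDirty(struct page *page)
 *	{ clear_bit(PG_dirty, &PF_HEAD(page, 1)->flags); }
 */
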
#define TESTPAGEFLAG_FALSE(uname)					\
static inline int Page##uname(const struct page *page) { return 0; }

#define SETPAGEFLAG_NOOP(uname)						\
static inline void SetPage##uname(struct page *page) { }

#define CLEARPAGEFLAG_NOOP(uname)					\
static inline void ClearPage##uname(struct page *page) { }

#define __CLEARPAGEFLAG_NOOP(uname)					\
static inline void __ClearPage##uname(struct page *page) { }

#define TESTSETFLAG_FALSE(uname)					\
static inline int TestSetPage##uname(struct page *page) { return 0; }

#define TESTCLEARFLAG_FALSE(uname)					\
static inline int TestClearPage##uname(struct page *page) { return 0; }

#define PAGEFLAG_FALSE(uname) TESTPAGEFLAG_FALSE(uname)			\
	SETPAGEFLAG_NOOP(uname) CLEARPAGEFLAG_NOOP(uname)

#define TESTSCFLAG_FALSE(uname)						\
	TESTSETFLAG_FALSE(uname) TESTCLEARFLAG_FALSE(uname)

__PAGEFLAG(Locked, locked, PF_NO_TAIL)
PAGEFLAG(Waiters, waiters, PF_ONLY_HEAD) __CLEARPAGEFLAG(Waiters, waiters, PF_ONLY_HEAD)
PAGEFLAG(Error, error, PF_NO_COMPOUND) TESTCLEARFLAG(Error, error, PF_NO_COMPOUND)
PAGEFLAG(Referenced, referenced, PF_HEAD)
	TESTCLEARFLAG(Referenced, referenced, PF_HEAD)
	__SETPAGEFLAG(Referenced, referenced, PF_HEAD)
PAGEFLAG(Dirty, dirty, PF_HEAD) TESTSCFLAG(Dirty, dirty, PF_HEAD)
	__CLEARPAGEFLAG(Dirty, dirty, PF_HEAD)
PAGEFLAG(LRU, lru, PF_HEAD) __CLEARPAGEFLAG(LRU, lru, PF_HEAD)
PAGEFLAG(Active, active, PF_HEAD) __CLEARPAGEFLAG(Active, active, PF_HEAD)
	TESTCLEARFLAG(Active, active, PF_HEAD)
PAGEFLAG(Workingset, workingset, PF_HEAD)
	TESTCLEARFLAG(Workingset, workingset, PF_HEAD)
__PAGEFLAG(Slab, slab, PF_NO_TAIL)
__PAGEFLAG(SlobFree, slob_free, PF_NO_TAIL)
PAGEFLAG(Checked, checked, PF_NO_COMPOUND)	/* Used by some filesystems */

/* Xen */
PAGEFLAG(Pinned, pinned, PF_NO_COMPOUND)
	TESTSCFLAG(Pinned, pinned, PF_NO_COMPOUND)
PAGEFLAG(SavePinned, savepinned, PF_NO_COMPOUND);
PAGEFLAG(Foreign, foreign, PF_NO_COMPOUND);

PAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
	__CLEARPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
	__SETPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
PAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
	__CLEARPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
	__SETPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)

/*
 * Private page markings that may be used by the filesystem that owns the page
 * for its own purposes.
 * - PG_private and PG_private_2 cause releasepage() and co to be invoked
 */
PAGEFLAG(Private, private, PF_ANY) __SETPAGEFLAG(Private, private, PF_ANY)
	__CLEARPAGEFLAG(Private, private, PF_ANY)
PAGEFLAG(Private2, private_2, PF_ANY) TESTSCFLAG(Private2, private_2, PF_ANY)
PAGEFLAG(OwnerPriv1, owner_priv_1, PF_ANY)
	TESTCLEARFLAG(OwnerPriv1, owner_priv_1, PF_ANY)

/*
 * Only test-and-set operations exist for PG_writeback.  The unconditional
 * operators are risky: they bypass page accounting.
 */
TESTPAGEFLAG(Writeback, writeback, PF_NO_TAIL)
	TESTSCFLAG(Writeback, writeback, PF_NO_TAIL)
PAGEFLAG(MappedToDisk, mappedtodisk, PF_NO_TAIL)

/* PG_readahead is only used for reads; PG_reclaim is only for writes */
PAGEFLAG(Reclaim, reclaim, PF_NO_TAIL)
	TESTCLEARFLAG(Reclaim, reclaim, PF_NO_TAIL)
PAGEFLAG(Readahead, reclaim, PF_NO_COMPOUND)
	TESTCLEARFLAG(Readahead, reclaim, PF_NO_COMPOUND)

#ifdef CONFIG_HIGHMEM
/*
 * Must use a macro here due to header dependency issues. page_zone() is not
 * available at this point.
 */
#define PageHighMem(__p) is_highmem_idx(page_zonenum(__p))
#else
PAGEFLAG_FALSE(HighMem)
#endif

#ifdef CONFIG_SWAP
static __always_inline int PageSwapCache(struct page *page)
{
#ifdef CONFIG_THP_SWAP
	page = compound_head(page);
#endif
	return PageSwapBacked(page) && test_bit(PG_swapcache, &page->flags);
}
SETPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
CLEARPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
#else
PAGEFLAG_FALSE(SwapCache)
#endif

PAGEFLAG(Unevictable, unevictable, PF_HEAD)
	__CLEARPAGEFLAG(Unevictable, unevictable, PF_HEAD)
	TESTCLEARFLAG(Unevictable, unevictable, PF_HEAD)

#ifdef CONFIG_MMU
PAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
	__CLEARPAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
	TESTSCFLAG(Mlocked, mlocked, PF_NO_TAIL)
#else
PAGEFLAG_FALSE(Mlocked) __CLEARPAGEFLAG_NOOP(Mlocked)
	TESTSCFLAG_FALSE(Mlocked)
#endif

#ifdef CONFIG_ARCH_USES_PG_UNCACHED
PAGEFLAG(Uncached, uncached, PF_NO_COMPOUND)
#else
PAGEFLAG_FALSE(Uncached)
#endif

#ifdef CONFIG_MEMORY_FAILURE
PAGEFLAG(HWPoison, hwpoison, PF_ANY)
TESTSCFLAG(HWPoison, hwpoison, PF_ANY)
#define __PG_HWPOISON (1UL << PG_hwpoison)
extern bool set_hwpoison_free_buddy_page(struct page *page);
#else
PAGEFLAG_FALSE(HWPoison)
static inline bool set_hwpoison_free_buddy_page(struct page *page)
{
	return false;
}
#define __PG_HWPOISON 0
#endif

#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
TESTPAGEFLAG(Young, young, PF_ANY)
SETPAGEFLAG(Young, young, PF_ANY)
TESTCLEARFLAG(Young, young, PF_ANY)
PAGEFLAG(Idle, idle, PF_ANY)
#endif

/*
 * On an anonymous page mapped into a user virtual memory area,
 * page->mapping points to its anon_vma, not to a struct address_space;
 * with the PAGE_MAPPING_ANON bit set to distinguish it.  See rmap.h.
 *
 * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
 * the PAGE_MAPPING_MOVABLE bit may be set along with the PAGE_MAPPING_ANON
 * bit; and then page->mapping points, not to an anon_vma, but to a private
 * structure which KSM associates with that merged page.  See ksm.h.
 *
 * PAGE_MAPPING_MOVABLE without PAGE_MAPPING_ANON is used for non-lru movable
 * pages, and then page->mapping points to a struct address_space.
 *
 * Please note that, confusingly, "page_mapping" refers to the inode
 * address_space which maps the page from disk; whereas "page_mapped"
 * refers to user virtual address space into which the page is mapped.
 */
#define PAGE_MAPPING_ANON	0x1
#define PAGE_MAPPING_MOVABLE	0x2
#define PAGE_MAPPING_KSM	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
#define PAGE_MAPPING_FLAGS	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
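
/*
 * Illustration only: with the definitions above, the low two bits of
 * page->mapping decode as
 *
 *	0x0: plain struct address_space *	(page cache page)
 *	0x1: anon_vma				(PageAnon())
 *	0x2: movable address_space		(__PageMovable())
 *	0x3: KSM private stable-tree node	(PageKsm())
 */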

static __always_inline int PageMappingFlags(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != 0;
}

static __always_inline int PageAnon(struct page *page)
{
	page = compound_head(page);
	return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
}

static __always_inline int __PageMovable(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
				PAGE_MAPPING_MOVABLE;
}

#ifdef CONFIG_KSM
/*
 * A KSM page is one of those write-protected "shared pages" or "merged pages"
 * which KSM maps into multiple mms, wherever identical anonymous page content
 * is found in VM_MERGEABLE vmas.  It's a PageAnon page, pointing not to any
 * anon_vma, but to that page's node of the stable tree.
 */
static __always_inline int PageKsm(struct page *page)
{
	page = compound_head(page);
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
				PAGE_MAPPING_KSM;
}
#else
TESTPAGEFLAG_FALSE(Ksm)
#endif

u64 stable_page_flags(struct page *page);

static inline int PageUptodate(struct page *page)
{
	int ret;
	page = compound_head(page);
	ret = test_bit(PG_uptodate, &(page)->flags);
	/*
	 * Must ensure that the data we read out of the page is loaded
	 * _after_ we've loaded page->flags to check for PageUptodate.
	 * We can skip the barrier if the page is not uptodate, because
	 * we wouldn't be reading anything from it.
	 *
	 * See SetPageUptodate() for the other side of the story.
	 */
	if (ret)
		smp_rmb();

	return ret;
}

static __always_inline void __SetPageUptodate(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	smp_wmb();
	__set_bit(PG_uptodate, &page->flags);
}

static __always_inline void SetPageUptodate(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	/*
	 * Memory barrier must be issued before setting the PG_uptodate bit,
	 * so that all previous stores issued in order to bring the page
	 * uptodate are actually visible before PageUptodate becomes true.
	 */
	smp_wmb();
	set_bit(PG_uptodate, &page->flags);
}

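/*
 * Example of the barrier pairing (illustrative only): a reader doing
 *
 *	if (PageUptodate(page))
 *		first_word = *(unsigned long *)page_address(page);
 *
 * is guaranteed to see the data stored before the writer's
 * SetPageUptodate(), because the reader's smp_rmb() in PageUptodate()
 * pairs with the writer's smp_wmb().
 */
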
CLEARPAGEFLAG(Uptodate, uptodate, PF_NO_TAIL)

int test_clear_page_writeback(struct page *page);
int __test_set_page_writeback(struct page *page, bool keep_write);

#define test_set_page_writeback(page)			\
	__test_set_page_writeback(page, false)
#define test_set_page_writeback_keepwrite(page)	\
	__test_set_page_writeback(page, true)

static inline void set_page_writeback(struct page *page)
{
	test_set_page_writeback(page);
}

static inline void set_page_writeback_keepwrite(struct page *page)
{
	test_set_page_writeback_keepwrite(page);
}

__PAGEFLAG(Head, head, PF_ANY) CLEARPAGEFLAG(Head, head, PF_ANY)

static __always_inline void set_compound_head(struct page *page, struct page *head)
{
	WRITE_ONCE(page->compound_head, (unsigned long)head + 1);
}

static __always_inline void clear_compound_head(struct page *page)
{
	WRITE_ONCE(page->compound_head, 0);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void ClearPageCompound(struct page *page)
{
	BUG_ON(!PageHead(page));
	ClearPageHead(page);
}
#endif

#define PG_head_mask ((1UL << PG_head))

#ifdef CONFIG_HUGETLB_PAGE
int PageHuge(struct page *page);
int PageHeadHuge(struct page *page);
bool page_huge_active(struct page *page);
#else
TESTPAGEFLAG_FALSE(Huge)
TESTPAGEFLAG_FALSE(HeadHuge)

static inline bool page_huge_active(struct page *page)
{
	return false;
}
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * PageHuge() only returns true for hugetlbfs pages, but not for
 * normal or transparent huge pages.
 *
 * PageTransHuge() returns true for both transparent huge and
 * hugetlbfs pages, but not normal pages. PageTransHuge() can only be
 * called in the core VM paths where hugetlbfs pages can't exist.
 */
static inline int PageTransHuge(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	return PageHead(page);
}

/*
 * PageTransCompound returns true for both transparent huge pages
 * and hugetlbfs pages, so it should only be called when it's known
 * that hugetlbfs pages aren't involved.
 */
static inline int PageTransCompound(struct page *page)
{
	return PageCompound(page);
}

/*
 * PageTransCompoundMap is the same as PageTransCompound, but it also
 * guarantees the primary MMU has the entire compound page mapped
 * through pmd_trans_huge, which in turn guarantees the secondary MMUs
 * can also map the entire compound page. This allows the secondary
 * MMUs to call get_user_pages() only once for each compound page and
 * to immediately map the entire compound page with a single secondary
 * MMU fault. If there will be a pmd split later, the secondary MMUs
 * will get an update through the MMU notifier invalidation through
 * split_huge_pmd().
 *
 * Unlike PageTransCompound, this is safe to be called only while
 * split_huge_pmd() cannot run from under us, like if protected by the
 * MMU notifier, otherwise it may result in page->_mapcount < 0 false
 * positives.
 */
static inline int PageTransCompoundMap(struct page *page)
{
	return PageTransCompound(page) && atomic_read(&page->_mapcount) < 0;
}

/*
 * PageTransTail returns true for both transparent huge pages
 * and hugetlbfs pages, so it should only be called when it's known
 * that hugetlbfs pages aren't involved.
 */
static inline int PageTransTail(struct page *page)
{
	return PageTail(page);
}

/*
 * PageDoubleMap indicates that the compound page is mapped with PTEs as well
 * as PMDs.
 *
 * This is required for optimization of rmap operations for THP: we can
 * postpone per small page mapcount accounting (and its overhead from atomic
 * operations) until the first PMD split.
 *
 * PageDoubleMap means that ->_mapcount in all sub-pages is offset up by one.
 * This reference will go away with the last compound_mapcount.
 *
 * See also __split_huge_pmd_locked() and page_remove_anon_compound_rmap().
 */
static inline int PageDoubleMap(struct page *page)
{
	return PageHead(page) && test_bit(PG_double_map, &page[1].flags);
}

static inline void SetPageDoubleMap(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHead(page), page);
	set_bit(PG_double_map, &page[1].flags);
}

static inline void ClearPageDoubleMap(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHead(page), page);
	clear_bit(PG_double_map, &page[1].flags);
}

static inline int TestSetPageDoubleMap(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHead(page), page);
	return test_and_set_bit(PG_double_map, &page[1].flags);
}

static inline int TestClearPageDoubleMap(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHead(page), page);
	return test_and_clear_bit(PG_double_map, &page[1].flags);
}

#else
TESTPAGEFLAG_FALSE(TransHuge)
TESTPAGEFLAG_FALSE(TransCompound)
TESTPAGEFLAG_FALSE(TransCompoundMap)
TESTPAGEFLAG_FALSE(TransTail)
PAGEFLAG_FALSE(DoubleMap)
	TESTSETFLAG_FALSE(DoubleMap)
	TESTCLEARFLAG_FALSE(DoubleMap)
#endif

/*
 * For pages that are never mapped to userspace (and aren't PageSlab),
 * page_type may be used.  Because it is initialised to -1, we invert the
 * sense of the bit, so __SetPageFoo *clears* the bit used for PageFoo, and
 * __ClearPageFoo *sets* the bit used for PageFoo.  We reserve a few high and
 * low bits so that an underflow or overflow of page_mapcount() won't be
 * mistaken for a page type value.
 */

#define PAGE_TYPE_BASE	0xf0000000
/* Reserve		0x0000007f to catch underflows of page_mapcount */
#define PAGE_MAPCOUNT_RESERVE	-128
#define PG_buddy	0x00000080
#define PG_offline	0x00000100
#define PG_kmemcg	0x00000200
#define PG_table	0x00000400

#define PageType(page, flag)						\
	((page->page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE)

static inline int page_has_type(struct page *page)
{
	return (int)page->page_type < PAGE_MAPCOUNT_RESERVE;
}

#define PAGE_TYPE_OPS(uname, lname)					\
static __always_inline int Page##uname(struct page *page)		\
{									\
	return PageType(page, PG_##lname);				\
}									\
static __always_inline void __SetPage##uname(struct page *page)	\
{									\
	VM_BUG_ON_PAGE(!PageType(page, 0), page);			\
	page->page_type &= ~PG_##lname;					\
}									\
static __always_inline void __ClearPage##uname(struct page *page)	\
{									\
	VM_BUG_ON_PAGE(!Page##uname(page), page);			\
	page->page_type |= PG_##lname;					\
}

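/*
 * Illustration only: because page_type starts at -1 (all bits set), the
 * helpers generated by PAGE_TYPE_OPS(Buddy, buddy) have inverted sense:
 *
 *	__SetPageBuddy(page);	// page->page_type &= ~PG_buddy
 *	PageBuddy(page);	// (page->page_type & (PAGE_TYPE_BASE |
 *				//	PG_buddy)) == PAGE_TYPE_BASE
 *	__ClearPageBuddy(page);	// page->page_type |= PG_buddy
 */
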
/*
 * PageBuddy() indicates that the page is free and in the buddy system
 * (see mm/page_alloc.c).
 */
PAGE_TYPE_OPS(Buddy, buddy)

/*
 * PageOffline() indicates that the page is logically offline although the
 * containing section is online (e.g. inflated in a balloon driver or
 * not onlined when onlining the section).
 * The content of these pages is effectively stale. Such pages should not
 * be touched (read/write/dump/save) except by their owner.
 */
PAGE_TYPE_OPS(Offline, offline)

/*
 * If kmemcg is enabled, the buddy allocator will set PageKmemcg() on
 * pages allocated with __GFP_ACCOUNT. It gets cleared on page free.
 */
PAGE_TYPE_OPS(Kmemcg, kmemcg)

/*
 * Marks pages in use as page tables.
 */
PAGE_TYPE_OPS(Table, table)

extern bool is_free_buddy_page(struct page *page);

__PAGEFLAG(Isolated, isolated, PF_ANY);

/*
 * If network-based swap is enabled, sl*b must keep track of whether pages
 * were allocated from pfmemalloc reserves.
 */
static inline int PageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON_PAGE(!PageSlab(page), page);
	return PageActive(page);
}

static inline void SetPageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON_PAGE(!PageSlab(page), page);
	SetPageActive(page);
}

static inline void __ClearPageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON_PAGE(!PageSlab(page), page);
	__ClearPageActive(page);
}

static inline void ClearPageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON_PAGE(!PageSlab(page), page);
	ClearPageActive(page);
}

#ifdef CONFIG_MMU
#define __PG_MLOCKED		(1UL << PG_mlocked)
#else
#define __PG_MLOCKED		0
#endif

/*
 * Flags checked when a page is freed.  Pages being freed should not have
 * these flags set.  If they are, there is a problem.
 */
#define PAGE_FLAGS_CHECK_AT_FREE				\
	(1UL << PG_lru		| 1UL << PG_locked	|	\
	 1UL << PG_private	| 1UL << PG_private_2	|	\
	 1UL << PG_writeback	| 1UL << PG_reserved	|	\
	 1UL << PG_slab		| 1UL << PG_active	|	\
	 1UL << PG_unevictable	| __PG_MLOCKED)

/*
 * Flags checked when a page is prepped for return by the page allocator.
 * Pages being prepped should not have these flags set.  If they are set,
 * there has been a kernel bug or struct page corruption.
 *
 * __PG_HWPOISON is exceptional because it needs to be kept beyond a page's
 * alloc-free cycle to prevent the page from being reused.
 */
#define PAGE_FLAGS_CHECK_AT_PREP	\
	(((1UL << NR_PAGEFLAGS) - 1) & ~__PG_HWPOISON)

#define PAGE_FLAGS_PRIVATE				\
	(1UL << PG_private | 1UL << PG_private_2)

/**
 * page_has_private - Determine if page has private stuff
 * @page: The page to be checked
 *
 * Determine if a page has private stuff, indicating that release routines
 * should be invoked upon it.
 */
static inline int page_has_private(struct page *page)
{
	return !!(page->flags & PAGE_FLAGS_PRIVATE);
}

#undef PF_ANY
#undef PF_HEAD
#undef PF_ONLY_HEAD
#undef PF_NO_TAIL
#undef PF_NO_COMPOUND
#endif /* !__GENERATING_BOUNDS_H */

#endif /* PAGE_FLAGS_H */