// SPDX-License-Identifier: GPL-2.0
/*
 * mm/debug.c
 *
 * mm/ specific debug routines.
 *
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/trace_events.h>
#include <linux/memcontrol.h>
#include <trace/events/mmflags.h>
#include <linux/migrate.h>
#include <linux/page_owner.h>
#include <linux/page_pinner.h>
#include <linux/ctype.h>

#include "internal.h"

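/*
 * Human-readable names for the MR_* migrate reasons; indexed by
 * enum migrate_reason, so the two must be kept in sync.
 */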
const char *migrate_reason_names[MR_TYPES] = {
	"compaction",
	"memory_failure",
	"memory_hotplug",
	"syscall_or_cpuset",
	"mempolicy_mbind",
	"numa_misplaced",
	"cma",
};

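/*
 * Flag-name tables consumed by the printk %pGp (page flags), %pGg
 * (gfp flags) and %pGv (vma flags) format specifiers; each table must
 * remain terminated by a {0, NULL} entry.
 */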
const struct trace_print_flags pageflag_names[] = {
	__def_pageflag_names,
	{0, NULL}
};

const struct trace_print_flags gfpflag_names[] = {
	__def_gfpflag_names,
	{0, NULL}
};

const struct trace_print_flags vmaflag_names[] = {
	__def_vmaflag_names,
	{0, NULL}
};

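/*
 * __dump_page() prints the state of a (possibly corrupted) struct page.
 * Anything reached through a pointer that may itself be garbage (the
 * mapping, its host inode, the inode's first dentry) is read with
 * get_kernel_nofault() instead of being dereferenced directly.
 */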
void __dump_page(struct page *page, const char *reason)
{
	struct page *head = compound_head(page);
	struct address_space *mapping;
	bool page_poisoned = PagePoisoned(page);
	bool compound = PageCompound(page);
	/*
	 * Accessing the pageblock without the zone lock. It could change to
	 * "isolate" again in the meantime, but since we are just dumping the
	 * state for debugging, it should be fine to accept a bit of
	 * inaccuracy here due to racing.
	 */
	bool page_cma = is_migrate_cma_page(page);
	int mapcount;
	char *type = "";

	/*
	 * If struct page is poisoned don't access Page*() functions as that
	 * leads to recursive loop. Page*() check for poisoned pages, and calls
	 * dump_page() when detected.
	 */
	if (page_poisoned) {
		pr_warn("page:%px is uninitialized and poisoned", page);
		goto hex_only;
	}

	if (page < head || (page >= head + MAX_ORDER_NR_PAGES)) {
		/*
		 * Corrupt page, so we cannot call page_mapping. Instead, do a
		 * safe subset of the steps that page_mapping() does. Caution:
		 * this will be misleading for tail pages, PageSwapCache pages,
		 * and potentially other situations. (See the page_mapping()
		 * implementation for what's missing here.)
		 */
		unsigned long tmp = (unsigned long)page->mapping;

		if (tmp & PAGE_MAPPING_ANON)
			mapping = NULL;
		else
			mapping = (void *)(tmp & ~PAGE_MAPPING_FLAGS);
		head = page;
		compound = false;
	} else {
		mapping = page_mapping(page);
	}

	/*
	 * Avoid VM_BUG_ON() in page_mapcount().
	 * page->_mapcount space in struct page is used by sl[aou]b pages to
	 * encode own info.
	 */
	mapcount = PageSlab(head) ? 0 : page_mapcount(page);

	pr_warn("page:%p refcount:%d mapcount:%d mapping:%p index:%#lx pfn:%#lx\n",
			page, page_ref_count(head), mapcount, mapping,
			page_to_pgoff(page), page_to_pfn(page));
	if (compound) {
		if (hpage_pincount_available(page)) {
			pr_warn("head:%p order:%u compound_mapcount:%d compound_pincount:%d\n",
					head, compound_order(head),
					head_compound_mapcount(head),
					head_compound_pincount(head));
		} else {
			pr_warn("head:%p order:%u compound_mapcount:%d\n",
					head, compound_order(head),
					head_compound_mapcount(head));
		}
	}
	if (PageKsm(page))
		type = "ksm ";
	else if (PageAnon(page))
		type = "anon ";
	else if (mapping) {
		struct inode *host;
		const struct address_space_operations *a_ops;
		struct hlist_node *dentry_first;
		struct dentry *dentry_ptr;
		struct dentry dentry;
		unsigned long ino;

		/*
		 * mapping can be invalid pointer and we don't want to crash
		 * accessing it, so probe everything depending on it carefully
		 */
		if (get_kernel_nofault(host, &mapping->host) ||
		    get_kernel_nofault(a_ops, &mapping->a_ops)) {
			pr_warn("failed to read mapping contents, not a valid kernel address?\n");
			goto out_mapping;
		}

		if (!host) {
			pr_warn("aops:%ps\n", a_ops);
			goto out_mapping;
		}

		if (get_kernel_nofault(dentry_first, &host->i_dentry.first) ||
		    get_kernel_nofault(ino, &host->i_ino)) {
			pr_warn("aops:%ps with invalid host inode %px\n",
					a_ops, host);
			goto out_mapping;
		}

		if (!dentry_first) {
			pr_warn("aops:%ps ino:%lx\n", a_ops, ino);
			goto out_mapping;
		}

		dentry_ptr = container_of(dentry_first, struct dentry, d_u.d_alias);
		if (get_kernel_nofault(dentry, dentry_ptr)) {
			pr_warn("aops:%ps ino:%lx with invalid dentry %px\n",
					a_ops, ino, dentry_ptr);
		} else {
			/*
			 * if dentry is corrupted, the %pd handler may still
			 * crash, but it's unlikely that we reach here with a
			 * corrupted struct page
			 */
			pr_warn("aops:%ps ino:%lx dentry name:\"%pd\"\n",
					a_ops, ino, &dentry);
		}
	}
out_mapping:
	BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS + 1);

	pr_warn("%sflags: %#lx(%pGp)%s\n", type, head->flags, &head->flags,
		page_cma ? " CMA" : "");

hex_only:
	print_hex_dump(KERN_WARNING, "raw: ", DUMP_PREFIX_NONE, 32,
			sizeof(unsigned long), page,
			sizeof(struct page), false);
	if (head != page)
		print_hex_dump(KERN_WARNING, "head: ", DUMP_PREFIX_NONE, 32,
				sizeof(unsigned long), head,
				sizeof(struct page), false);

	if (reason)
		pr_warn("page dumped because: %s\n", reason);

#ifdef CONFIG_MEMCG
	if (!page_poisoned && page->mem_cgroup)
		pr_warn("page->mem_cgroup:%px\n", page->mem_cgroup);
#endif
}

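/*
 * dump_page() is the exported entry point: it prints the core page state,
 * then any page_owner and page_pinner records. An illustrative call site
 * (not taken from this file) would be:
 *
 *	if (WARN_ON(!PageLocked(page)))
 *		dump_page(page, "page not locked");
 */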
void dump_page(struct page *page, const char *reason)
{
	__dump_page(page, reason);
	dump_page_owner(page);
	dump_page_pinner(page);
}
EXPORT_SYMBOL(dump_page);

#ifdef CONFIG_DEBUG_VM

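/* Dump the fields of a vm_area_struct; vm_flags are decoded via %pGv. */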
void dump_vma(const struct vm_area_struct *vma)
{
	pr_emerg("vma %px start %px end %px\n"
		"next %px prev %px mm %px\n"
		"prot %lx anon_vma %px vm_ops %px\n"
		"pgoff %lx file %px private_data %px\n"
		"flags: %#lx(%pGv)\n",
		vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_next,
		vma->vm_prev, vma->vm_mm,
		(unsigned long)pgprot_val(vma->vm_page_prot),
		vma->anon_vma, vma->vm_ops, vma->vm_pgoff,
		vma->vm_file, vma->vm_private_data,
		vma->vm_flags, &vma->vm_flags);
}
EXPORT_SYMBOL(dump_vma);

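/*
 * Dump the fields of an mm_struct. The format string and the argument
 * list below must stay in step: every #ifdef'd line in one has a
 * matching #ifdef'd argument in the other.
 */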
void dump_mm(const struct mm_struct *mm)
{
	pr_emerg("mm %px mmap %px seqnum %llu task_size %lu\n"
#ifdef CONFIG_MMU
		"get_unmapped_area %px\n"
#endif
		"mmap_base %lu mmap_legacy_base %lu highest_vm_end %lu\n"
		"pgd %px mm_users %d mm_count %d pgtables_bytes %lu map_count %d\n"
		"hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n"
		"pinned_vm %llx data_vm %lx exec_vm %lx stack_vm %lx\n"
		"start_code %lx end_code %lx start_data %lx end_data %lx\n"
		"start_brk %lx brk %lx start_stack %lx\n"
		"arg_start %lx arg_end %lx env_start %lx env_end %lx\n"
		"binfmt %px flags %lx core_state %px\n"
#ifdef CONFIG_AIO
		"ioctx_table %px\n"
#endif
#ifdef CONFIG_MEMCG
		"owner %px "
#endif
		"exe_file %px\n"
#ifdef CONFIG_MMU_NOTIFIER
		"notifier_subscriptions %px\n"
#endif
#ifdef CONFIG_NUMA_BALANCING
		"numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n"
#endif
		"tlb_flush_pending %d\n"
		"def_flags: %#lx(%pGv)\n",

		mm, mm->mmap, (long long) mm->vmacache_seqnum, mm->task_size,
#ifdef CONFIG_MMU
		mm->get_unmapped_area,
#endif
		mm->mmap_base, mm->mmap_legacy_base, mm->highest_vm_end,
		mm->pgd, atomic_read(&mm->mm_users),
		atomic_read(&mm->mm_count),
		mm_pgtables_bytes(mm),
		mm->map_count,
		mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm,
		(u64)atomic64_read(&mm->pinned_vm),
		mm->data_vm, mm->exec_vm, mm->stack_vm,
		mm->start_code, mm->end_code, mm->start_data, mm->end_data,
		mm->start_brk, mm->brk, mm->start_stack,
		mm->arg_start, mm->arg_end, mm->env_start, mm->env_end,
		mm->binfmt, mm->flags, mm->core_state,
#ifdef CONFIG_AIO
		mm->ioctx_table,
#endif
#ifdef CONFIG_MEMCG
		mm->owner,
#endif
		mm->exe_file,
#ifdef CONFIG_MMU_NOTIFIER
		mm->notifier_subscriptions,
#endif
#ifdef CONFIG_NUMA_BALANCING
		mm->numa_next_scan, mm->numa_scan_offset, mm->numa_scan_seq,
#endif
		atomic_read(&mm->tlb_flush_pending),
		mm->def_flags, &mm->def_flags
	);
}

static bool page_init_poisoning __read_mostly = true;

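/*
 * Parse the "vm_debug" kernel command line option. As implemented below:
 *
 *	vm_debug	enable all debug options (currently just 'p')
 *	vm_debug=-	disable all debug options
 *	vm_debug=p	enable struct page init poisoning
 */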
static int __init setup_vm_debug(char *str)
{
	bool __page_init_poisoning = true;

	/*
	 * Calling vm_debug with no arguments is equivalent to requesting
	 * to enable all debugging options we can control.
	 */
	if (*str++ != '=' || !*str)
		goto out;

	__page_init_poisoning = false;
	if (*str == '-')
		goto out;

	while (*str) {
		switch (tolower(*str)) {
		case 'p':
			__page_init_poisoning = true;
			break;
		default:
			pr_err("vm_debug option '%c' unknown. skipped\n",
			       *str);
		}

		str++;
	}
out:
	if (page_init_poisoning && !__page_init_poisoning)
		pr_warn("Page struct poisoning disabled by kernel command line option 'vm_debug'\n");

	page_init_poisoning = __page_init_poisoning;

	return 1;
}
__setup("vm_debug", setup_vm_debug);

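/*
 * Fill @size bytes starting at @page with PAGE_POISON_PATTERN so that any
 * premature use of an uninitialized struct page is caught by the
 * PagePoisoned() checks.
 */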
void page_init_poison(struct page *page, size_t size)
{
	if (page_init_poisoning)
		memset(page, PAGE_POISON_PATTERN, size);
}
EXPORT_SYMBOL_GPL(page_init_poison);
#endif /* CONFIG_DEBUG_VM */