// SPDX-License-Identifier: GPL-2.0
/*
 * mm/debug.c
 *
 * mm/ specific debug routines.
 *
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/trace_events.h>
#include <linux/memcontrol.h>
#include <trace/events/mmflags.h>
#include <linux/migrate.h>
#include <linux/page_owner.h>
#include <linux/ctype.h>

#include "internal.h"

const char *migrate_reason_names[MR_TYPES] = {
	"compaction",
	"memory_failure",
	"memory_hotplug",
	"syscall_or_cpuset",
	"mempolicy_mbind",
	"numa_misplaced",
	"cma",
};

const struct trace_print_flags pageflag_names[] = {
	__def_pageflag_names,
	{0, NULL}
};

const struct trace_print_flags gfpflag_names[] = {
	__def_gfpflag_names,
	{0, NULL}
};

const struct trace_print_flags vmaflag_names[] = {
	__def_vmaflag_names,
	{0, NULL}
};

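/*
 * A sketch of how these tables are consumed (they are not used directly
 * in this file's logic): they back printk's %pGp/%pGg/%pGv flag-decoding
 * formats, so callers can print symbolic flag names, e.g.:
 *
 *	pr_warn("flags: %#lx(%pGp)\n", page->flags, &page->flags);
 */
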
void __dump_page(struct page *page, const char *reason)
{
	struct page *head = compound_head(page);
	struct address_space *mapping;
	bool page_poisoned = PagePoisoned(page);
	bool compound = PageCompound(page);
	/*
	 * Accessing the pageblock without the zone lock. It could change to
	 * "isolate" again in the meantime, but since we are just dumping the
	 * state for debugging, it should be fine to accept a bit of
	 * inaccuracy here due to racing.
	 */
	bool page_cma = is_migrate_cma_page(page);
	int mapcount;
	char *type = "";

	/*
	 * If the struct page is poisoned, don't access the Page*() functions
	 * as that leads to a recursive loop. The Page*() functions check for
	 * poisoned pages and call dump_page() when one is detected.
	 */
	if (page_poisoned) {
		pr_warn("page:%px is uninitialized and poisoned", page);
		goto hex_only;
	}

	if (page < head || (page >= head + MAX_ORDER_NR_PAGES)) {
		/* Corrupt page, cannot call page_mapping */
		mapping = page->mapping;
		head = page;
		compound = false;
	} else {
		mapping = page_mapping(page);
	}

	/*
	 * Avoid VM_BUG_ON() in page_mapcount().
	 * The page->_mapcount space in struct page is used by sl[aou]b pages
	 * to encode their own info.
	 */
	mapcount = PageSlab(head) ? 0 : page_mapcount(page);

	if (compound)
		pr_warn("page:%px refcount:%d mapcount:%d mapping:%p "
			"index:%#lx head:%px order:%u compound_mapcount:%d\n",
			page, page_ref_count(head), mapcount,
			mapping, page_to_pgoff(page), head,
			compound_order(head), compound_mapcount(page));
	else
		pr_warn("page:%px refcount:%d mapcount:%d mapping:%p index:%#lx\n",
			page, page_ref_count(page), mapcount,
			mapping, page_to_pgoff(page));
	if (PageKsm(page))
		type = "ksm ";
	else if (PageAnon(page))
		type = "anon ";
	else if (mapping) {
		if (mapping->host && mapping->host->i_dentry.first) {
			struct dentry *dentry;

			dentry = container_of(mapping->host->i_dentry.first,
					      struct dentry, d_u.d_alias);
			pr_warn("%ps name:\"%pd\"\n", mapping->a_ops, dentry);
		} else
			pr_warn("%ps\n", mapping->a_ops);
	}
	BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS + 1);

	pr_warn("%sflags: %#lx(%pGp)%s\n", type, page->flags, &page->flags,
		page_cma ? " CMA" : "");

hex_only:
	print_hex_dump(KERN_WARNING, "raw: ", DUMP_PREFIX_NONE, 32,
			sizeof(unsigned long), page,
			sizeof(struct page), false);
	if (head != page)
		print_hex_dump(KERN_WARNING, "head: ", DUMP_PREFIX_NONE, 32,
				sizeof(unsigned long), head,
				sizeof(struct page), false);

	if (reason)
		pr_warn("page dumped because: %s\n", reason);

#ifdef CONFIG_MEMCG
	if (!page_poisoned && page->mem_cgroup)
		pr_warn("page->mem_cgroup:%px\n", page->mem_cgroup);
#endif
}

void dump_page(struct page *page, const char *reason)
{
	__dump_page(page, reason);
	dump_page_owner(page);
}
EXPORT_SYMBOL(dump_page);

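/*
 * A sketch of typical use (the validation helper below is hypothetical):
 * a caller that finds a page in an unexpected state can dump it before
 * bailing out, with "reason" echoed in the report:
 *
 *	if (!my_page_looks_sane(page))
 *		dump_page(page, "unexpected page state");
 */
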
#ifdef CONFIG_DEBUG_VM

void dump_vma(const struct vm_area_struct *vma)
{
	pr_emerg("vma %px start %px end %px\n"
		"next %px prev %px mm %px\n"
		"prot %lx anon_vma %px vm_ops %px\n"
		"pgoff %lx file %px private_data %px\n"
		"flags: %#lx(%pGv)\n",
		vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_next,
		vma->vm_prev, vma->vm_mm,
		(unsigned long)pgprot_val(vma->vm_page_prot),
		vma->anon_vma, vma->vm_ops, vma->vm_pgoff,
		vma->vm_file, vma->vm_private_data,
		vma->vm_flags, &vma->vm_flags);
}
EXPORT_SYMBOL(dump_vma);

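/*
 * A sketch of how this is typically reached: the VM_BUG_ON_VMA() macro
 * in <linux/mmdebug.h> dumps the VMA before triggering the BUG when its
 * condition holds; the condition below is illustrative only:
 *
 *	VM_BUG_ON_VMA(vma->vm_start >= vma->vm_end, vma);
 */
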
void dump_mm(const struct mm_struct *mm)
{
	pr_emerg("mm %px mmap %px seqnum %llu task_size %lu\n"
#ifdef CONFIG_MMU
		"get_unmapped_area %px\n"
#endif
		"mmap_base %lu mmap_legacy_base %lu highest_vm_end %lu\n"
		"pgd %px mm_users %d mm_count %d pgtables_bytes %lu map_count %d\n"
		"hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n"
		"pinned_vm %llx data_vm %lx exec_vm %lx stack_vm %lx\n"
		"start_code %lx end_code %lx start_data %lx end_data %lx\n"
		"start_brk %lx brk %lx start_stack %lx\n"
		"arg_start %lx arg_end %lx env_start %lx env_end %lx\n"
		"binfmt %px flags %lx core_state %px\n"
#ifdef CONFIG_AIO
		"ioctx_table %px\n"
#endif
#ifdef CONFIG_MEMCG
		"owner %px "
#endif
		"exe_file %px\n"
#ifdef CONFIG_MMU_NOTIFIER
		"notifier_subscriptions %px\n"
#endif
#ifdef CONFIG_NUMA_BALANCING
		"numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n"
#endif
		"tlb_flush_pending %d\n"
		"def_flags: %#lx(%pGv)\n",

		mm, mm->mmap, (long long) mm->vmacache_seqnum, mm->task_size,
#ifdef CONFIG_MMU
		mm->get_unmapped_area,
#endif
		mm->mmap_base, mm->mmap_legacy_base, mm->highest_vm_end,
		mm->pgd, atomic_read(&mm->mm_users),
		atomic_read(&mm->mm_count),
		mm_pgtables_bytes(mm),
		mm->map_count,
		mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm,
		(u64)atomic64_read(&mm->pinned_vm),
		mm->data_vm, mm->exec_vm, mm->stack_vm,
		mm->start_code, mm->end_code, mm->start_data, mm->end_data,
		mm->start_brk, mm->brk, mm->start_stack,
		mm->arg_start, mm->arg_end, mm->env_start, mm->env_end,
		mm->binfmt, mm->flags, mm->core_state,
#ifdef CONFIG_AIO
		mm->ioctx_table,
#endif
#ifdef CONFIG_MEMCG
		mm->owner,
#endif
		mm->exe_file,
#ifdef CONFIG_MMU_NOTIFIER
		mm->notifier_subscriptions,
#endif
#ifdef CONFIG_NUMA_BALANCING
		mm->numa_next_scan, mm->numa_scan_offset, mm->numa_scan_seq,
#endif
		atomic_read(&mm->tlb_flush_pending),
		mm->def_flags, &mm->def_flags
	);
}

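/*
 * A sketch of the matching check macro: VM_BUG_ON_MM() in
 * <linux/mmdebug.h> calls dump_mm() before BUG() when its condition
 * holds; the condition below is illustrative only:
 *
 *	VM_BUG_ON_MM(mm->map_count < 0, mm);
 */
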
static bool page_init_poisoning __read_mostly = true;

static int __init setup_vm_debug(char *str)
{
	bool __page_init_poisoning = true;

	/*
	 * Calling vm_debug with no arguments is equivalent to requesting
	 * that all the debugging options we can control be enabled.
	 */
	if (*str++ != '=' || !*str)
		goto out;

	__page_init_poisoning = false;
	if (*str == '-')
		goto out;

	while (*str) {
		switch (tolower(*str)) {
		case 'p':
			__page_init_poisoning = true;
			break;
		default:
			pr_err("vm_debug option '%c' unknown. skipped\n",
			       *str);
		}

		str++;
	}
out:
	if (page_init_poisoning && !__page_init_poisoning)
		pr_warn("Page struct poisoning disabled by kernel command line option 'vm_debug'\n");

	page_init_poisoning = __page_init_poisoning;

	return 1;
}
__setup("vm_debug", setup_vm_debug);
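
/*
 * A sketch of the resulting boot-time interface ('p' is the only option
 * letter wired up so far):
 *
 *	vm_debug	enable all debugging options (same as no arguments)
 *	vm_debug=-	disable all debugging options
 *	vm_debug=p	enable page struct init time poisoning only
 */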

void page_init_poison(struct page *page, size_t size)
{
	if (page_init_poisoning)
		memset(page, PAGE_POISON_PATTERN, size);
}
EXPORT_SYMBOL_GPL(page_init_poison);
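
/*
 * A sketch of typical use (the names below are illustrative): callers
 * poison freshly allocated ranges of struct pages so that any access
 * before proper initialization trips the PagePoisoned() checks, e.g.
 * when populating a new memory section:
 *
 *	page_init_poison(memmap, sizeof(struct page) * PAGES_PER_SECTION);
 */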
#endif /* CONFIG_DEBUG_VM */