// SPDX-License-Identifier: GPL-2.0
/*
 * sparse memory mappings.
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/mmzone.h>
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include "internal.h"
#include <asm/dma.h>

/*
 * Permanent SPARSEMEM data:
 *
 * 1) mem_section	- memory sections, mem_map's for valid memory
 */
#ifdef CONFIG_SPARSEMEM_EXTREME
struct mem_section **mem_section;
#else
struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
	____cacheline_internodealigned_in_smp;
#endif
EXPORT_SYMBOL(mem_section);

#ifdef NODE_NOT_IN_PAGE_FLAGS
/*
 * If we did not store the node number in the page then we have to
 * do a lookup in the section_to_node_table in order to find which
 * node the page belongs to.
 */
#if MAX_NUMNODES <= 256
static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#else
static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#endif

int page_to_nid(const struct page *page)
{
	return section_to_node_table[page_to_section(page)];
}
EXPORT_SYMBOL(page_to_nid);

static void set_section_nid(unsigned long section_nr, int nid)
{
	section_to_node_table[section_nr] = nid;
}
#else /* !NODE_NOT_IN_PAGE_FLAGS */
static inline void set_section_nid(unsigned long section_nr, int nid)
{
}
#endif

#ifdef CONFIG_SPARSEMEM_EXTREME
static noinline struct mem_section __ref *sparse_index_alloc(int nid)
{
	struct mem_section *section = NULL;
	unsigned long array_size = SECTIONS_PER_ROOT *
				   sizeof(struct mem_section);

	if (slab_is_available()) {
		section = kzalloc_node(array_size, GFP_KERNEL, nid);
	} else {
		section = memblock_alloc_node(array_size, SMP_CACHE_BYTES,
					      nid);
		if (!section)
			panic("%s: Failed to allocate %lu bytes nid=%d\n",
			      __func__, array_size, nid);
	}

	return section;
}

static int __meminit sparse_index_init(unsigned long section_nr, int nid)
{
	unsigned long root = SECTION_NR_TO_ROOT(section_nr);
	struct mem_section *section;

	/*
	 * An existing section is possible in the sub-section hotplug
	 * case. First hot-add instantiates, follow-on hot-add reuses
	 * the existing section.
	 *
	 * The mem_hotplug_lock resolves the apparent race below.
	 */
	if (mem_section[root])
		return 0;

	section = sparse_index_alloc(nid);
	if (!section)
		return -ENOMEM;

	mem_section[root] = section;

	return 0;
}
#else /* !SPARSEMEM_EXTREME */
static inline int sparse_index_init(unsigned long section_nr, int nid)
{
	return 0;
}
#endif

#ifdef CONFIG_SPARSEMEM_EXTREME
unsigned long __section_nr(struct mem_section *ms)
{
	unsigned long root_nr;
	struct mem_section *root = NULL;

	for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
		root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);
		if (!root)
			continue;

		if ((ms >= root) && (ms < (root + SECTIONS_PER_ROOT)))
			break;
	}

	VM_BUG_ON(!root);

	return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
}
#else
unsigned long __section_nr(struct mem_section *ms)
{
	return (unsigned long)(ms - mem_section[0]);
}
#endif

/*
 * During early boot, before section_mem_map is used for an actual
 * mem_map, we use section_mem_map to store the section's NUMA
 * node. This keeps us from having to use another data structure. The
 * node information is cleared just before we store the real mem_map.
 */
static inline unsigned long sparse_encode_early_nid(int nid)
{
	return (nid << SECTION_NID_SHIFT);
}

static inline int sparse_early_nid(struct mem_section *section)
{
	return (section->section_mem_map >> SECTION_NID_SHIFT);
}
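
/*
 * Illustration of the encoding above (the shift value is an assumption
 * made purely for the example, not taken from this file): if
 * SECTION_NID_SHIFT were 3, sparse_encode_early_nid(2) would store
 * 2 << 3 = 0x10 in section_mem_map, and sparse_early_nid() would
 * recover 0x10 >> 3 = 2.  The low bits stay available for the
 * SECTION_* flag bits until the real mem_map pointer replaces this
 * temporary encoding.
 */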

/* Validate the physical addressing limitations of the model */
void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn,
						unsigned long *end_pfn)
{
	unsigned long max_sparsemem_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT);

	/*
	 * Sanity checks - do not allow an architecture to pass
	 * in larger pfns than the maximum scope of sparsemem:
	 */
	if (*start_pfn > max_sparsemem_pfn) {
		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
			"Start of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		WARN_ON_ONCE(1);
		*start_pfn = max_sparsemem_pfn;
		*end_pfn = max_sparsemem_pfn;
	} else if (*end_pfn > max_sparsemem_pfn) {
		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
			"End of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		WARN_ON_ONCE(1);
		*end_pfn = max_sparsemem_pfn;
	}
}

/*
 * There are a number of times that we loop over NR_MEM_SECTIONS,
 * looking for section_present() on each. But, when we have very
 * large physical address spaces, NR_MEM_SECTIONS can also be
 * very large which makes the loops quite long.
 *
 * Keeping track of this gives us an easy way to break out of
 * those loops early.
 */
unsigned long __highest_present_section_nr;
static void section_mark_present(struct mem_section *ms)
{
	unsigned long section_nr = __section_nr(ms);

	if (section_nr > __highest_present_section_nr)
		__highest_present_section_nr = section_nr;

	ms->section_mem_map |= SECTION_MARKED_PRESENT;
}

#define for_each_present_section_nr(start, section_nr)		\
	for (section_nr = next_present_section_nr(start-1);	\
	     ((section_nr != -1) &&				\
	      (section_nr <= __highest_present_section_nr));	\
	     section_nr = next_present_section_nr(section_nr))

static inline unsigned long first_present_section_nr(void)
{
	return next_present_section_nr(-1);
}
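
/*
 * Usage sketch for the iterator above (illustration only, mirroring how
 * sparse_init() and sparse_init_nid() below walk sections):
 *
 *	unsigned long pnum;
 *
 *	for_each_present_section_nr(0, pnum) {
 *		struct mem_section *ms = __nr_to_section(pnum);
 *		...	each ms has SECTION_MARKED_PRESENT set
 *	}
 *
 * The loop terminates once pnum passes __highest_present_section_nr,
 * so it never has to scan all NR_MEM_SECTIONS entries.
 */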

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static void subsection_mask_set(unsigned long *map, unsigned long pfn,
		unsigned long nr_pages)
{
	int idx = subsection_map_index(pfn);
	int end = subsection_map_index(pfn + nr_pages - 1);

	bitmap_set(map, idx, end - idx + 1);
}

void __init subsection_map_init(unsigned long pfn, unsigned long nr_pages)
{
	int end_sec = pfn_to_section_nr(pfn + nr_pages - 1);
	unsigned long nr, start_sec = pfn_to_section_nr(pfn);

	if (!nr_pages)
		return;

	for (nr = start_sec; nr <= end_sec; nr++) {
		struct mem_section *ms;
		unsigned long pfns;

		pfns = min(nr_pages, PAGES_PER_SECTION
				- (pfn & ~PAGE_SECTION_MASK));
		ms = __nr_to_section(nr);
		subsection_mask_set(ms->usage->subsection_map, pfn, pfns);

		pr_debug("%s: sec: %lu pfns: %lu set(%d, %d)\n", __func__, nr,
				pfns, subsection_map_index(pfn),
				subsection_map_index(pfn + pfns - 1));

		pfn += pfns;
		nr_pages -= pfns;
	}
}
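
/*
 * Worked example for the helpers above (the numbers assume x86_64
 * defaults of 128MB sections split into 2MB subsections, i.e.
 * SUBSECTIONS_PER_SECTION == 64 and PAGES_PER_SUBSECTION == 512; other
 * configurations differ): a 4MB range starting on a 2MB boundary spans
 * two subsections, so subsection_mask_set() sets two consecutive bits
 * in ms->usage->subsection_map for that section.
 */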
#else
void __init subsection_map_init(unsigned long pfn, unsigned long nr_pages)
{
}
#endif

/* Record a memory area against a node. */
void __init memory_present(int nid, unsigned long start, unsigned long end)
{
	unsigned long pfn;

#ifdef CONFIG_SPARSEMEM_EXTREME
	if (unlikely(!mem_section)) {
		unsigned long size, align;

		size = sizeof(struct mem_section*) * NR_SECTION_ROOTS;
		align = 1 << (INTERNODE_CACHE_SHIFT);
		mem_section = memblock_alloc(size, align);
		if (!mem_section)
			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
			      __func__, size, align);
	}
#endif

	start &= PAGE_SECTION_MASK;
	mminit_validate_memmodel_limits(&start, &end);
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
		unsigned long section = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		sparse_index_init(section, nid);
		set_section_nid(section, nid);

		ms = __nr_to_section(section);
		if (!ms->section_mem_map) {
			ms->section_mem_map = sparse_encode_early_nid(nid) |
							SECTION_IS_ONLINE;
			section_mark_present(ms);
		}
	}
}

/*
 * Mark all memblocks as present using memory_present(). This is a
 * convenience function that is useful for a number of arches
 * to mark all of the system's memory as present during initialization.
 */
void __init memblocks_present(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		memory_present(memblock_get_region_node(reg),
			       memblock_region_memory_base_pfn(reg),
			       memblock_region_memory_end_pfn(reg));
	}
}

/*
 * Subtle, we encode the real pfn into the mem_map such that
 * the identity pfn - section_mem_map will return the actual
 * physical page frame number.
 */
static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
{
	unsigned long coded_mem_map =
		(unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
	BUILD_BUG_ON(SECTION_MAP_LAST_BIT > (1UL<<PFN_SECTION_SHIFT));
	BUG_ON(coded_mem_map & ~SECTION_MAP_MASK);
	return coded_mem_map;
}

/*
 * Decode mem_map from the coded memmap
 */
struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
{
	/* mask off the extra low bits of information */
	coded_mem_map &= SECTION_MAP_MASK;
	return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
}
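
/*
 * Round-trip illustration (no extra functionality implied): because the
 * encoded value is "mem_map - section_nr_to_pfn(pnum)", decoding simply
 * adds section_nr_to_pfn(pnum) back, so for any section:
 *
 *	map == sparse_decode_mem_map(sparse_encode_mem_map(map, pnum), pnum)
 *
 * Once stored in section_mem_map, "coded_mem_map + pfn" yields the
 * struct page for any pfn in that section, which is what pfn_to_page()
 * relies on for classic (non-vmemmap) SPARSEMEM.
 */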

static void __meminit sparse_init_one_section(struct mem_section *ms,
		unsigned long pnum, struct page *mem_map,
		struct mem_section_usage *usage, unsigned long flags)
{
	ms->section_mem_map &= ~SECTION_MAP_MASK;
	ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum)
		| SECTION_HAS_MEM_MAP | flags;
	ms->usage = usage;
}

static unsigned long usemap_size(void)
{
	return BITS_TO_LONGS(SECTION_BLOCKFLAGS_BITS) * sizeof(unsigned long);
}

size_t mem_section_usage_size(void)
{
	return sizeof(struct mem_section_usage) + usemap_size();
}
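
/*
 * Size illustration (the figures assume x86_64 defaults of 128MB
 * sections, 2MB pageblocks and NR_PAGEBLOCK_BITS == 4): that gives
 * 64 pageblocks * 4 bits == 256 bits of block flags, so usemap_size()
 * is 4 longs (32 bytes), and mem_section_usage_size() adds the
 * struct mem_section_usage header (including subsection_map when
 * CONFIG_SPARSEMEM_VMEMMAP is enabled) on top of that.
 */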

#ifdef CONFIG_MEMORY_HOTREMOVE
static struct mem_section_usage * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
					 unsigned long size)
{
	struct mem_section_usage *usage;
	unsigned long goal, limit;
	int nid;
	/*
	 * A page may contain usemaps for other sections preventing the
	 * page being freed and making a section unremovable while
	 * other sections referencing the usemap remain active. Similarly,
	 * a pgdat can prevent a section being removed. If section A
	 * contains a pgdat and section B contains the usemap, both
	 * sections become inter-dependent. This allocates usemaps
	 * from the same section as the pgdat where possible to avoid
	 * this problem.
	 */
	goal = __pa(pgdat) & (PAGE_SECTION_MASK << PAGE_SHIFT);
	limit = goal + (1UL << PA_SECTION_SHIFT);
	nid = early_pfn_to_nid(goal >> PAGE_SHIFT);
again:
	usage = memblock_alloc_try_nid(size, SMP_CACHE_BYTES, goal, limit, nid);
	if (!usage && limit) {
		limit = 0;
		goto again;
	}
	return usage;
}

static void __init check_usemap_section_nr(int nid,
		struct mem_section_usage *usage)
{
	unsigned long usemap_snr, pgdat_snr;
	static unsigned long old_usemap_snr;
	static unsigned long old_pgdat_snr;
	struct pglist_data *pgdat = NODE_DATA(nid);
	int usemap_nid;

	/* First call */
	if (!old_usemap_snr) {
		old_usemap_snr = NR_MEM_SECTIONS;
		old_pgdat_snr = NR_MEM_SECTIONS;
	}

	usemap_snr = pfn_to_section_nr(__pa(usage) >> PAGE_SHIFT);
	pgdat_snr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
	if (usemap_snr == pgdat_snr)
		return;

	if (old_usemap_snr == usemap_snr && old_pgdat_snr == pgdat_snr)
		/* skip redundant message */
		return;

	old_usemap_snr = usemap_snr;
	old_pgdat_snr = pgdat_snr;

	usemap_nid = sparse_early_nid(__nr_to_section(usemap_snr));
	if (usemap_nid != nid) {
		pr_info("node %d must be removed before remove section %ld\n",
			nid, usemap_snr);
		return;
	}
	/*
	 * There is a circular dependency.
	 * Some platforms allow un-removable sections because they will just
	 * gather other removable sections for dynamic partitioning.
	 * Just notify the un-removable section's number here.
	 */
	pr_info("Section %ld and %ld (node %d) have a circular dependency on usemap and pgdat allocations\n",
		usemap_snr, pgdat_snr, nid);
}
#else
static struct mem_section_usage * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
					 unsigned long size)
{
	return memblock_alloc_node(size, SMP_CACHE_BYTES, pgdat->node_id);
}

static void __init check_usemap_section_nr(int nid,
		struct mem_section_usage *usage)
{
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static unsigned long __init section_map_size(void)
{
	return ALIGN(sizeof(struct page) * PAGES_PER_SECTION, PMD_SIZE);
}
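
/*
 * Example (assuming a 64-byte struct page and 32768 pages per 128MB
 * section, as on x86_64): the raw memmap is 32768 * 64 == 2MB, which is
 * already PMD_SIZE aligned and therefore eligible for a huge-page
 * vmemmap mapping; other configurations round up to the next PMD
 * boundary.
 */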

#else
static unsigned long __init section_map_size(void)
{
	return PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION);
}

struct page __init *__populate_section_memmap(unsigned long pfn,
		unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
{
	unsigned long size = section_map_size();
	struct page *map = sparse_buffer_alloc(size);
	phys_addr_t addr = __pa(MAX_DMA_ADDRESS);

	if (map)
		return map;

	map = memblock_alloc_try_nid_raw(size, size, addr,
					  MEMBLOCK_ALLOC_ACCESSIBLE, nid);
	if (!map)
		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%pa\n",
		      __func__, size, PAGE_SIZE, nid, &addr);

	return map;
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

static void *sparsemap_buf __meminitdata;
static void *sparsemap_buf_end __meminitdata;

static inline void __meminit sparse_buffer_free(unsigned long size)
{
	WARN_ON(!sparsemap_buf || size == 0);
	memblock_free_early(__pa(sparsemap_buf), size);
}

static void __init sparse_buffer_init(unsigned long size, int nid)
{
	phys_addr_t addr = __pa(MAX_DMA_ADDRESS);
	WARN_ON(sparsemap_buf);	/* forgot to call sparse_buffer_fini()? */
	/*
	 * Pre-allocated buffer is mainly used by __populate_section_memmap
	 * and we want it to be properly aligned to the section size - this is
	 * especially the case for VMEMMAP which maps memmap to PMDs
	 */
	sparsemap_buf = memblock_alloc_exact_nid_raw(size, section_map_size(),
					addr, MEMBLOCK_ALLOC_ACCESSIBLE, nid);
	sparsemap_buf_end = sparsemap_buf + size;
}

static void __init sparse_buffer_fini(void)
{
	unsigned long size = sparsemap_buf_end - sparsemap_buf;

	if (sparsemap_buf && size > 0)
		sparse_buffer_free(size);
	sparsemap_buf = NULL;
}

void * __meminit sparse_buffer_alloc(unsigned long size)
{
	void *ptr = NULL;

	if (sparsemap_buf) {
		ptr = (void *) roundup((unsigned long)sparsemap_buf, size);
		if (ptr + size > sparsemap_buf_end)
			ptr = NULL;
		else {
			/* Free redundant aligned space */
			if ((unsigned long)(ptr - sparsemap_buf) > 0)
				sparse_buffer_free((unsigned long)(ptr - sparsemap_buf));
			sparsemap_buf = ptr + size;
		}
	}
	return ptr;
}
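
/*
 * Intended call pattern for the bootstrap buffer above (sketch only,
 * mirroring sparse_init_nid() below):
 *
 *	sparse_buffer_init(map_count * section_map_size(), nid);
 *	for each present section on the node:
 *		map = sparse_buffer_alloc(section_map_size());
 *	sparse_buffer_fini();
 *
 * A failed sparse_buffer_alloc() is not fatal: callers such as
 * __populate_section_memmap() fall back to a fresh memblock allocation.
 */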

void __weak __meminit vmemmap_populate_print_last(void)
{
}

/*
 * Initialize sparse on a specific node. The node spans [pnum_begin, pnum_end)
 * and the number of present sections in this node is map_count.
 */
static void __init sparse_init_nid(int nid, unsigned long pnum_begin,
				   unsigned long pnum_end,
				   unsigned long map_count)
{
	struct mem_section_usage *usage;
	unsigned long pnum;
	struct page *map;

	usage = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nid),
			mem_section_usage_size() * map_count);
	if (!usage) {
		pr_err("%s: node[%d] usemap allocation failed", __func__, nid);
		goto failed;
	}
	sparse_buffer_init(map_count * section_map_size(), nid);
	for_each_present_section_nr(pnum_begin, pnum) {
		unsigned long pfn = section_nr_to_pfn(pnum);

		if (pnum >= pnum_end)
			break;

		map = __populate_section_memmap(pfn, PAGES_PER_SECTION,
				nid, NULL);
		if (!map) {
			pr_err("%s: node[%d] memory map backing failed. Some memory will not be available.",
			       __func__, nid);
			pnum_begin = pnum;
			goto failed;
		}
		check_usemap_section_nr(nid, usage);
		sparse_init_one_section(__nr_to_section(pnum), pnum, map, usage,
				SECTION_IS_EARLY);
		usage = (void *) usage + mem_section_usage_size();
	}
	sparse_buffer_fini();
	return;
failed:
	/* We failed to allocate, mark all the following pnums as not present */
	for_each_present_section_nr(pnum_begin, pnum) {
		struct mem_section *ms;

		if (pnum >= pnum_end)
			break;
		ms = __nr_to_section(pnum);
		ms->section_mem_map = 0;
	}
}

/*
 * Allocate the accumulated non-linear sections, allocate a mem_map
 * for each and record the physical to section mapping.
 */
void __init sparse_init(void)
{
	unsigned long pnum_begin = first_present_section_nr();
	int nid_begin = sparse_early_nid(__nr_to_section(pnum_begin));
	unsigned long pnum_end, map_count = 1;

	/* Setup pageblock_order for HUGETLB_PAGE_SIZE_VARIABLE */
	set_pageblock_order();

	for_each_present_section_nr(pnum_begin + 1, pnum_end) {
		int nid = sparse_early_nid(__nr_to_section(pnum_end));

		if (nid == nid_begin) {
			map_count++;
			continue;
		}
		/* Init node with sections in range [pnum_begin, pnum_end) */
		sparse_init_nid(nid_begin, pnum_begin, pnum_end, map_count);
		nid_begin = nid;
		pnum_begin = pnum_end;
		map_count = 1;
	}
	/* cover the last node */
	sparse_init_nid(nid_begin, pnum_begin, pnum_end, map_count);
	vmemmap_populate_print_last();
}

#ifdef CONFIG_MEMORY_HOTPLUG

/* Mark all memory sections within the pfn range as online */
void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		unsigned long section_nr = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		/* onlining code should never touch invalid ranges */
		if (WARN_ON(!valid_section_nr(section_nr)))
			continue;

		ms = __nr_to_section(section_nr);
		ms->section_mem_map |= SECTION_IS_ONLINE;
	}
}

#ifdef CONFIG_MEMORY_HOTREMOVE
/* Mark all memory sections within the pfn range as offline */
void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		unsigned long section_nr = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		/*
		 * TODO this needs some double checking. Offlining code makes
		 * sure to check pfn_valid but those checks might be just bogus
		 */
		if (WARN_ON(!valid_section_nr(section_nr)))
			continue;

		ms = __nr_to_section(section_nr);
		ms->section_mem_map &= ~SECTION_IS_ONLINE;
	}
}
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static struct page * __meminit populate_section_memmap(unsigned long pfn,
		unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
{
	return __populate_section_memmap(pfn, nr_pages, nid, altmap);
}

static void depopulate_section_memmap(unsigned long pfn, unsigned long nr_pages,
		struct vmem_altmap *altmap)
{
	unsigned long start = (unsigned long) pfn_to_page(pfn);
	unsigned long end = start + nr_pages * sizeof(struct page);

	vmemmap_free(start, end, altmap);
}
static void free_map_bootmem(struct page *memmap)
{
	unsigned long start = (unsigned long)memmap;
	unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);

	vmemmap_free(start, end, NULL);
}

static int clear_subsection_map(unsigned long pfn, unsigned long nr_pages)
{
	DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
	DECLARE_BITMAP(tmp, SUBSECTIONS_PER_SECTION) = { 0 };
	struct mem_section *ms = __pfn_to_section(pfn);
	unsigned long *subsection_map = ms->usage
		? &ms->usage->subsection_map[0] : NULL;

	subsection_mask_set(map, pfn, nr_pages);
	if (subsection_map)
		bitmap_and(tmp, map, subsection_map, SUBSECTIONS_PER_SECTION);

	if (WARN(!subsection_map || !bitmap_equal(tmp, map, SUBSECTIONS_PER_SECTION),
				"section already deactivated (%#lx + %ld)\n",
				pfn, nr_pages))
		return -EINVAL;

	bitmap_xor(subsection_map, map, subsection_map, SUBSECTIONS_PER_SECTION);
	return 0;
}

static bool is_subsection_map_empty(struct mem_section *ms)
{
	return bitmap_empty(&ms->usage->subsection_map[0],
			    SUBSECTIONS_PER_SECTION);
}

static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
{
	struct mem_section *ms = __pfn_to_section(pfn);
	DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
	unsigned long *subsection_map;
	int rc = 0;

	subsection_mask_set(map, pfn, nr_pages);

	subsection_map = &ms->usage->subsection_map[0];

	if (bitmap_empty(map, SUBSECTIONS_PER_SECTION))
		rc = -EINVAL;
	else if (bitmap_intersects(map, subsection_map, SUBSECTIONS_PER_SECTION))
		rc = -EEXIST;
	else
		bitmap_or(subsection_map, map, subsection_map,
				SUBSECTIONS_PER_SECTION);

	return rc;
}
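
/*
 * Behaviour sketch for the two helpers above (illustrative only):
 * filling pfns whose bits are already set in subsection_map returns
 * -EEXIST (double hot-add of the same sub-range), while clearing a
 * range whose bits are not all set trips the WARN in
 * clear_subsection_map() and returns -EINVAL.  A successful fill
 * followed by a matching clear leaves the bitmap empty again, which is
 * what section_deactivate() checks via is_subsection_map_empty().
 */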
#else
struct page * __meminit populate_section_memmap(unsigned long pfn,
		unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
{
	return kvmalloc_node(array_size(sizeof(struct page),
					PAGES_PER_SECTION), GFP_KERNEL, nid);
}

static void depopulate_section_memmap(unsigned long pfn, unsigned long nr_pages,
		struct vmem_altmap *altmap)
{
	kvfree(pfn_to_page(pfn));
}

static void free_map_bootmem(struct page *memmap)
{
	unsigned long maps_section_nr, removing_section_nr, i;
	unsigned long magic, nr_pages;
	struct page *page = virt_to_page(memmap);

	nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
		>> PAGE_SHIFT;

	for (i = 0; i < nr_pages; i++, page++) {
		magic = (unsigned long) page->freelist;

		BUG_ON(magic == NODE_INFO);

		maps_section_nr = pfn_to_section_nr(page_to_pfn(page));
		removing_section_nr = page_private(page);

		/*
		 * When this function is called, the section being removed is
		 * in a logically offlined state, i.e. all of its pages have
		 * been isolated from the page allocator. If the memmap of the
		 * section being removed is placed on that same section, it
		 * must not be freed: if it were freed, the page allocator
		 * could hand it out again even though the memory is about to
		 * be removed physically.
		 */
		if (maps_section_nr != removing_section_nr)
			put_page_bootmem(page);
	}
}

static int clear_subsection_map(unsigned long pfn, unsigned long nr_pages)
{
	return 0;
}

static bool is_subsection_map_empty(struct mem_section *ms)
{
	return true;
}

static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
{
	return 0;
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

/*
 * To deactivate a memory region, there are 3 cases to handle across
 * two configurations (SPARSEMEM_VMEMMAP={y,n}):
 *
 * 1. deactivation of a partial hot-added section (only possible in
 *    the SPARSEMEM_VMEMMAP=y case).
 *      a) section was present at memory init.
 *      b) section was hot-added post memory init.
 * 2. deactivation of a complete hot-added section.
 * 3. deactivation of a complete section from memory init.
 *
 * For case 1, when the subsection_map is not empty we will not be
 * freeing the usage map, but we still need to free the vmemmap range.
 *
 * For cases 2 and 3, the SPARSEMEM_VMEMMAP={y,n} cases are unified.
 */
static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
		struct vmem_altmap *altmap)
{
	struct mem_section *ms = __pfn_to_section(pfn);
	bool section_is_early = early_section(ms);
	struct page *memmap = NULL;
	bool empty;

	if (clear_subsection_map(pfn, nr_pages))
		return;

	empty = is_subsection_map_empty(ms);
	if (empty) {
		unsigned long section_nr = pfn_to_section_nr(pfn);

		/*
		 * When removing an early section, the usage map is kept (as the
		 * usage maps of other sections fall into the same page). It
		 * will be re-used when re-adding the section - which is then no
		 * longer an early section. If the usage map is PageReserved, it
		 * was allocated during boot.
		 */
		if (!PageReserved(virt_to_page(ms->usage))) {
			kfree(ms->usage);
			ms->usage = NULL;
		}
		memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);
		/*
		 * Mark the section invalid so that valid_section()
		 * returns false. This prevents code from dereferencing
		 * the ms->usage array.
		 */
		ms->section_mem_map &= ~SECTION_HAS_MEM_MAP;
	}

	/*
	 * The memmap of early sections is always fully populated. See
	 * section_activate() and pfn_valid().
	 */
	if (!section_is_early)
		depopulate_section_memmap(pfn, nr_pages, altmap);
	else if (memmap)
		free_map_bootmem(memmap);

	if (empty)
		ms->section_mem_map = (unsigned long)NULL;
}

static struct page * __meminit section_activate(int nid, unsigned long pfn,
		unsigned long nr_pages, struct vmem_altmap *altmap)
{
	struct mem_section *ms = __pfn_to_section(pfn);
	struct mem_section_usage *usage = NULL;
	struct page *memmap;
	int rc = 0;

	if (!ms->usage) {
		usage = kzalloc(mem_section_usage_size(), GFP_KERNEL);
		if (!usage)
			return ERR_PTR(-ENOMEM);
		ms->usage = usage;
	}

	rc = fill_subsection_map(pfn, nr_pages);
	if (rc) {
		if (usage)
			ms->usage = NULL;
		kfree(usage);
		return ERR_PTR(rc);
	}

	/*
	 * The early init code does not consider partially populated
	 * initial sections, it simply assumes that memory will never be
	 * referenced.  If we hot-add memory into such a section then we
	 * do not need to populate the memmap and can simply reuse what
	 * is already there.
	 */
	if (nr_pages < PAGES_PER_SECTION && early_section(ms))
		return pfn_to_page(pfn);

	memmap = populate_section_memmap(pfn, nr_pages, nid, altmap);
	if (!memmap) {
		section_deactivate(pfn, nr_pages, altmap);
		return ERR_PTR(-ENOMEM);
	}

	return memmap;
}

/**
 * sparse_add_section - add a memory section, or populate an existing one
 * @nid: The node to add section on
 * @start_pfn: start pfn of the memory range
 * @nr_pages: number of pfns to add in the section
 * @altmap: device page map
 *
 * This is only intended for hotplug.
 *
 * Note that only VMEMMAP supports sub-section aligned hotplug;
 * the proper alignment and size are gated by check_pfn_span().
 *
 * Return:
 * * 0		- On success.
 * * -EEXIST	- Section has been present.
 * * -ENOMEM	- Out of memory.
 */
int __meminit sparse_add_section(int nid, unsigned long start_pfn,
		unsigned long nr_pages, struct vmem_altmap *altmap)
{
	unsigned long section_nr = pfn_to_section_nr(start_pfn);
	struct mem_section *ms;
	struct page *memmap;
	int ret;

	ret = sparse_index_init(section_nr, nid);
	if (ret < 0)
		return ret;

	memmap = section_activate(nid, start_pfn, nr_pages, altmap);
	if (IS_ERR(memmap))
		return PTR_ERR(memmap);

	/*
	 * Poison uninitialized struct pages in order to catch invalid flags
	 * combinations.
	 */
	page_init_poison(memmap, sizeof(struct page) * nr_pages);

	ms = __nr_to_section(section_nr);
	set_section_nid(section_nr, nid);
	section_mark_present(ms);

	/* Align memmap to section boundary in the subsection case */
	if (section_nr_to_pfn(section_nr) != start_pfn)
		memmap = pfn_to_page(section_nr_to_pfn(section_nr));
	sparse_init_one_section(ms, section_nr, memmap, ms->usage, 0);

	return 0;
}

#ifdef CONFIG_MEMORY_FAILURE
static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
{
	int i;

	/*
	 * A further optimization is to have per section refcounted
	 * num_poisoned_pages.  But that would need more space per memmap, so
	 * for now just do a quick global check to speed up this routine in the
	 * absence of bad pages.
	 */
	if (atomic_long_read(&num_poisoned_pages) == 0)
		return;

	for (i = 0; i < nr_pages; i++) {
		if (PageHWPoison(&memmap[i])) {
			num_poisoned_pages_dec();
			ClearPageHWPoison(&memmap[i]);
		}
	}
}
#else
static inline void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
{
}
#endif

void sparse_remove_section(struct mem_section *ms, unsigned long pfn,
		unsigned long nr_pages, unsigned long map_offset,
		struct vmem_altmap *altmap)
{
	clear_hwpoisoned_pages(pfn_to_page(pfn) + map_offset,
			nr_pages - map_offset);
	section_deactivate(pfn, nr_pages, altmap);
}
#endif /* CONFIG_MEMORY_HOTPLUG */