/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  arch/arm/include/asm/pgalloc.h
 *
 *  Copyright (C) 2000-2001 Russell King
 */
#ifndef _ASMARM_PGALLOC_H
#define _ASMARM_PGALLOC_H

#include <linux/pagemap.h>

#include <asm/domain.h>
#include <asm/pgtable-hwdef.h>
#include <asm/processor.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#define check_pgt_cache()		do { } while (0)

#ifdef CONFIG_MMU

#define _PAGE_USER_TABLE	(PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_USER))
#define _PAGE_KERNEL_TABLE	(PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_KERNEL))

#ifdef CONFIG_ARM_LPAE

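/*
 * With LPAE the pmd level is a real table in its own right: 512
 * 64-bit entries, i.e. exactly one 4K page, so allocating one is just
 * a matter of grabbing a zeroed page.
 */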
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return (pmd_t *)get_zeroed_page(GFP_KERNEL);
}

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
	free_page((unsigned long)pmd);
}

static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
}

#else	/* !CONFIG_ARM_LPAE */

/*
 * Since we have only two-level page tables, these are trivial
 */
#define pmd_alloc_one(mm,addr)		({ BUG(); ((pmd_t *)2); })
#define pmd_free(mm, pmd)		do { } while (0)
#define pud_populate(mm,pmd,pte)	BUG()
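/*
 * Note: with the classic two-level layout the pmd is folded away, so
 * the generic mm code never allocates a separate pmd page and
 * pmd_alloc_one() should be unreachable; ((pmd_t *)2) is just a
 * deliberately invalid, non-NULL value that will fault if it is ever
 * dereferenced.
 */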

#endif	/* CONFIG_ARM_LPAE */

extern pgd_t *pgd_alloc(struct mm_struct *mm);
extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);

#define PGALLOC_GFP	(GFP_KERNEL | __GFP_ZERO)

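/*
 * Only the hardware half of a PTE table is read directly by the MMU
 * table walker, and on CPUs whose walker does not snoop the data cache
 * those entries must be cleaned out to memory after being written.
 * clean_pte_table() does that for a freshly allocated (zeroed) table:
 * skip past the Linux entries and clean the hardware portion only.
 */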
static inline void clean_pte_table(pte_t *pte)
{
	clean_dcache_area(pte + PTE_HWTABLE_PTRS, PTE_HWTABLE_SIZE);
}

/*
 * Allocate one PTE table.
 *
 * This actually allocates two hardware PTE tables, but we wrap this up
 * into one table thus:
 *
 *  +------------+
 *  | Linux pt 0 |
 *  +------------+
 *  | Linux pt 1 |
 *  +------------+
 *  |  h/w pt 0  |
 *  +------------+
 *  |  h/w pt 1  |
 *  +------------+
 */
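/*
 * For illustration, with the classic 2-level layout the numbers work
 * out as follows: each hardware table is 256 entries of 4 bytes (1K,
 * mapping 1MB), the two Linux tables together are 512 entries (2K)
 * covering the same 2MB, so the whole arrangement is exactly one 4K
 * page, with the hardware tables starting PTE_HWTABLE_OFF (2K) into it.
 */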
static inline pte_t *
pte_alloc_one_kernel(struct mm_struct *mm)
{
	pte_t *pte;

	pte = (pte_t *)__get_free_page(PGALLOC_GFP);
	if (pte)
		clean_pte_table(pte);

	return pte;
}

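/*
 * pte_alloc_one() is the user-side counterpart: it returns a struct
 * page rather than a kernel pointer, may place the table in highmem
 * when CONFIG_HIGHPTE is enabled, and runs pgtable_page_ctor() so the
 * generic mm code gets the usual struct page bookkeeping (such as the
 * split page-table lock) before the page is used as a page table.
 */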
static inline pgtable_t
pte_alloc_one(struct mm_struct *mm)
{
	struct page *pte;

#ifdef CONFIG_HIGHPTE
	pte = alloc_pages(PGALLOC_GFP | __GFP_HIGHMEM, 0);
#else
	pte = alloc_pages(PGALLOC_GFP, 0);
#endif
	if (!pte)
		return NULL;
	if (!PageHighMem(pte))
		clean_pte_table(page_address(pte));
	if (!pgtable_page_ctor(pte)) {
		__free_page(pte);
		return NULL;
	}
	return pte;
}

/*
 * Free one PTE table.
 */
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	if (pte)
		free_page((unsigned long)pte);
}

static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
{
	pgtable_page_dtor(pte);
	__free_page(pte);
}

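/*
 * Point both hardware first-level entries at the two 1K hardware
 * tables inside the PTE page: pmdp[0] gets the table at
 * pte + PTE_HWTABLE_OFF and, on !LPAE, pmdp[1] gets the one a further
 * 1K (256 * sizeof(pte_t)) on.  flush_pmd_entry() then pushes the new
 * descriptors out to memory for the table walker.
 */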
static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
				  pmdval_t prot)
{
	pmdval_t pmdval = (pte + PTE_HWTABLE_OFF) | prot;
	pmdp[0] = __pmd(pmdval);
#ifndef CONFIG_ARM_LPAE
	pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t));
#endif
	flush_pmd_entry(pmdp);
}

/*
 * Populate the pmdp entry with a pointer to the pte.  This pmd is part
 * of the mm address space.
 *
 * Ensure that we always set both PMD entries.
 */
static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
{
	/*
	 * The pmd must be loaded with the physical address of the PTE table
	 */
	__pmd_populate(pmdp, __pa(ptep), _PAGE_KERNEL_TABLE);
}

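/*
 * User page tables take their protection bits from user_pmd_table on
 * ARMv6+ non-LPAE builds; that value is chosen at boot (for example to
 * add the PXN table bit on CPUs that support it), while older CPUs and
 * LPAE use the compile-time _PAGE_USER_TABLE value.
 */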
static inline void
pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
{
	extern pmdval_t user_pmd_table;
	pmdval_t prot;

	if (__LINUX_ARM_ARCH__ >= 6 && !IS_ENABLED(CONFIG_ARM_LPAE))
		prot = user_pmd_table;
	else
		prot = _PAGE_USER_TABLE;

	__pmd_populate(pmdp, page_to_phys(ptep), prot);
}
#define pmd_pgtable(pmd) pmd_page(pmd)
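/*
 * Rough usage sketch (simplified from the generic mm code, for
 * illustration only):
 *
 *	pgtable_t new = pte_alloc_one(mm);
 *	...
 *	take the pmd lock
 *	if (pmd_none(*pmd))
 *		pmd_populate(mm, pmd, new);	// pmd now owns the table
 *	else
 *		pte_free(mm, new);		// lost the race, give it back
 *	drop the pmd lock
 */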

#endif /* CONFIG_MMU */

#endif