/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#ifndef __ARM_KVM_MMU_H__
#define __ARM_KVM_MMU_H__

#include <asm/memory.h>
#include <asm/page.h>

/*
 * We directly use the kernel VA for the HYP, as we can directly share
 * the mapping (HTTBR "covers" TTBR1).
 */
#define HYP_PAGE_OFFSET_MASK	UL(~0)
#define HYP_PAGE_OFFSET		PAGE_OFFSET
#define KERN_TO_HYP(kva)	(kva)

/*
 * Our virtual mapping for the boot-time MMU-enable code. Must be
 * shared across all the page-tables. Conveniently, we use the vectors
 * page, where no kernel data will ever be shared with HYP.
 */
#define TRAMPOLINE_VA		UL(CONFIG_VECTORS_BASE)

/*
 * KVM_MMU_CACHE_MIN_PAGES is the number of stage2 page table
 * translation levels, i.e. the worst-case number of table pages
 * (a pmd page plus a pte page) that mapping a single guest page
 * may have to allocate below the pre-allocated stage2 pgd.
 */
#define KVM_MMU_CACHE_MIN_PAGES	2

#ifndef __ASSEMBLY__

#include <linux/highmem.h>
#include <asm/cacheflush.h>
#include <asm/pgalloc.h>
#include <asm/stage2_pgtable.h>

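/*
 * HYP mapping helpers: create_hyp_mappings() maps a kernel VA range
 * into the HYP tables at the same (KERN_TO_HYP) address, while
 * create_hyp_io_mappings() maps a physical I/O range into HYP as
 * device memory.
 */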
int create_hyp_mappings(void *from, void *to);
int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
void free_boot_hyp_pgd(void);
void free_hyp_pgds(void);

void stage2_unmap_vm(struct kvm *kvm);
int kvm_alloc_stage2_pgd(struct kvm *kvm);
void kvm_free_stage2_pgd(struct kvm *kvm);
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size, bool writable);

int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);

phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_mmu_get_boot_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
int kvm_mmu_init(void);
void kvm_clear_hyp_idmap(void);

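/*
 * Stage-2 table entry setters: write the new value, then clean the
 * entry to the PoC so that a table walker which does not snoop the
 * D-cache still observes the update.
 */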
static inline void kvm_set_pmd(pmd_t *pmd, pmd_t new_pmd)
{
	*pmd = new_pmd;
	flush_pmd_entry(pmd);
}

static inline void kvm_set_pte(pte_t *pte, pte_t new_pte)
{
	*pte = new_pte;
	/*
	 * flush_pmd_entry just takes a void pointer and cleans the necessary
	 * cache entries, so we can reuse the function for ptes.
	 */
	flush_pmd_entry(pte);
}

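/*
 * The kvm_clean_*() helpers clean freshly allocated tables (or single
 * entries) to the PoC before the hardware walker is let loose on them.
 */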
static inline void kvm_clean_pgd(pgd_t *pgd)
{
	clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t));
}

static inline void kvm_clean_pmd(pmd_t *pmd)
{
	clean_dcache_area(pmd, PTRS_PER_PMD * sizeof(pmd_t));
}

static inline void kvm_clean_pmd_entry(pmd_t *pmd)
{
	clean_pmd_entry(pmd);
}

static inline void kvm_clean_pte(pte_t *pte)
{
	clean_pte_table(pte);
}

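/*
 * Stage-2 permission helpers. L_PTE_S2_RDWR and L_PMD_S2_RDWR name
 * the full two-bit HAP[2:1] access-permission field, so masking with
 * them and comparing against the RDONLY encoding tests the read-only
 * state exactly.
 */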
static inline void kvm_set_s2pte_writable(pte_t *pte)
{
	pte_val(*pte) |= L_PTE_S2_RDWR;
}

static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
{
	pmd_val(*pmd) |= L_PMD_S2_RDWR;
}

static inline void kvm_set_s2pte_readonly(pte_t *pte)
{
	pte_val(*pte) = (pte_val(*pte) & ~L_PTE_S2_RDWR) | L_PTE_S2_RDONLY;
}

static inline bool kvm_s2pte_readonly(pte_t *pte)
{
	return (pte_val(*pte) & L_PTE_S2_RDWR) == L_PTE_S2_RDONLY;
}

static inline void kvm_set_s2pmd_readonly(pmd_t *pmd)
{
	pmd_val(*pmd) = (pmd_val(*pmd) & ~L_PMD_S2_RDWR) | L_PMD_S2_RDONLY;
}

static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
{
	return (pmd_val(*pmd) & L_PMD_S2_RDWR) == L_PMD_S2_RDONLY;
}

/*
 * Open coded p*d_addr_end that can deal with 64bit addresses: the
 * generic versions use unsigned long, which is only 32 bits here,
 * while a stage2 IPA is a 64-bit phys_addr_t. The (__boundary - 1 <
 * (end) - 1) comparison stays correct even when __boundary wraps
 * around to zero at the top of the address space.
 */
#define kvm_pgd_addr_end(addr, end)					\
({	u64 __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK;		\
	(__boundary - 1 < (end) - 1) ? __boundary : (end);		\
})

#define kvm_pud_addr_end(addr, end)	(end)

#define kvm_pmd_addr_end(addr, end)					\
({	u64 __boundary = ((addr) + PMD_SIZE) & PMD_MASK;		\
	(__boundary - 1 < (end) - 1) ? __boundary : (end);		\
})

#define kvm_pgd_index(addr)		pgd_index(addr)

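/*
 * KVM takes a page reference for each entry installed in a table
 * page, so a count of 1 (the allocation reference alone) means the
 * table no longer contains any valid entry and can be freed.
 */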
static inline bool kvm_page_empty(void *ptr)
{
	struct page *ptr_page = virt_to_page(ptr);
	return page_count(ptr_page) == 1;
}

#define kvm_pte_table_empty(kvm, ptep) kvm_page_empty(ptep)
#define kvm_pmd_table_empty(kvm, pmdp) kvm_page_empty(pmdp)
#define kvm_pud_table_empty(kvm, pudp) (0)

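/*
 * On 32-bit ARM the pgd handed to the hardware is the very pgd that
 * KVM allocated, so the "fake pgd" hooks shared with arm64 reduce to
 * identity operations here.
 */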
static inline void *kvm_get_hwpgd(struct kvm *kvm)
{
	return kvm->arch.pgd;
}

static inline unsigned int kvm_get_hwpgd_size(void)
{
	return PTRS_PER_S2_PGD * sizeof(pgd_t);
}

static inline pgd_t *kvm_setup_fake_pgd(pgd_t *hwpgd)
{
	return hwpgd;
}

static inline void kvm_free_fake_pgd(pgd_t *pgd)
{
}

struct kvm;

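/*
 * Clean and invalidate a range to the Point of Coherency, so that
 * agents which do not snoop the caches (a guest running with its
 * caches off, for instance) observe the data.
 */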
#define kvm_flush_dcache_to_poc(a,l)	__cpuc_flush_dcache_area((a), (l))

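/*
 * The guest runs with caches effectively on only when both SCTLR.M
 * (bit 0, MMU enable) and SCTLR.C (bit 2, D-cache enable) are set,
 * hence the 0b101 mask.
 */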
static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
{
	return (vcpu_cp15(vcpu, c1_SCTLR) & 0b101) == 0b101;
}

static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu,
					       kvm_pfn_t pfn,
					       unsigned long size,
					       bool ipa_uncached)
{
	/*
	 * If we are going to insert an instruction page and the icache is
	 * either VIPT or PIPT, there is a potential problem where the host
	 * (or another VM) may have used the same page as this guest, and we
	 * read incorrect data from the icache.  If we're using a PIPT cache,
	 * we can invalidate just that page, but if we are using a VIPT cache
	 * we need to invalidate the entire icache - damn shame - as written
	 * in the ARM ARM (DDI 0406C.b - Page B3-1393).
	 *
	 * VIVT caches are tagged using both the ASID and the VMID and don't
	 * need any kind of flushing (DDI 0406C.b - Page B3-1392).
	 *
	 * We need to do this through a kernel mapping (using the
	 * user-space mapping has proved to be the wrong
	 * solution). For that, we need to kmap one page at a time,
	 * and iterate over the range.
	 */

	bool need_flush = !vcpu_has_cache_enabled(vcpu) || ipa_uncached;

	VM_BUG_ON(size & ~PAGE_MASK);

	if (!need_flush && !icache_is_pipt())
		goto vipt_cache;

	while (size) {
		void *va = kmap_atomic_pfn(pfn);

		if (need_flush)
			kvm_flush_dcache_to_poc(va, PAGE_SIZE);

		if (icache_is_pipt())
			__cpuc_coherent_user_range((unsigned long)va,
						   (unsigned long)va + PAGE_SIZE);

		size -= PAGE_SIZE;
		pfn++;

		kunmap_atomic(va);
	}

vipt_cache:
	if (!icache_is_pipt() && !icache_is_vivt_asid_tagged()) {
		/* any kind of VIPT cache */
		__flush_icache_all();
	}
}

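/*
 * Flush helpers used when tearing down or flushing stage-2 ranges:
 * each one cleans the memory covered by a single table entry to the
 * PoC, going through kmap so that highmem pages can be reached one
 * page at a time.
 */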
static inline void __kvm_flush_dcache_pte(pte_t pte)
{
	void *va = kmap_atomic(pte_page(pte));

	kvm_flush_dcache_to_poc(va, PAGE_SIZE);

	kunmap_atomic(va);
}

static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
{
	unsigned long size = PMD_SIZE;
	kvm_pfn_t pfn = pmd_pfn(pmd);

	while (size) {
		void *va = kmap_atomic_pfn(pfn);

		kvm_flush_dcache_to_poc(va, PAGE_SIZE);

		pfn++;
		size -= PAGE_SIZE;

		kunmap_atomic(va);
	}
}

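/*
 * The pud level is folded on 32-bit ARM's 3-level stage-2 tables, so
 * there is never a pud mapping to flush.
 */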
static inline void __kvm_flush_dcache_pud(pud_t pud)
{
}

#define kvm_virt_to_phys(x)		virt_to_idmap((unsigned long)(x))

void kvm_set_way_flush(struct kvm_vcpu *vcpu);
void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);

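/*
 * HYP covers the whole 32-bit VA space (HYP_PAGE_OFFSET_MASK is ~0),
 * so the idmap is always reachable and the extended-idmap machinery
 * needed on arm64 is stubbed out here to keep the interface common.
 */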
static inline bool __kvm_cpu_uses_extended_idmap(void)
{
	return false;
}

static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
					pgd_t *hyp_pgd,
					pgd_t *merged_hyp_pgd,
					unsigned long hyp_idmap_start) { }

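/* ARMv7 provides an 8-bit VMID field in VTTBR. */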
static inline unsigned int kvm_get_vmid_bits(void)
{
	return 8;
}

#endif	/* !__ASSEMBLY__ */

#endif	/* __ARM_KVM_MMU_H__ */