/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#ifndef __ARM_KVM_MMU_H__
#define __ARM_KVM_MMU_H__

#include <asm/memory.h>
#include <asm/page.h>

/*
 * We use the kernel VA directly for HYP, as we can share the
 * mapping (HTTBR "covers" TTBR1).
 */
#define kern_hyp_va(kva)	(kva)
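/* Consequently, a kernel pointer can be passed to HYP unchanged: kern_hyp_va(va) == va. */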

/*
 * KVM_MMU_CACHE_MIN_PAGES is the number of stage2 page table translation levels.
 */
#define KVM_MMU_CACHE_MIN_PAGES	2
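/*
 * With LPAE stage-2 tables the pgd is pre-allocated when the VM is created,
 * so handling a translation fault may require allocating at most a pmd and a
 * pte table page, hence a minimum cache of 2 pages.
 */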

#ifndef __ASSEMBLY__

#include <linux/highmem.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/kvm_hyp.h>
#include <asm/pgalloc.h>
#include <asm/stage2_pgtable.h>

int create_hyp_mappings(void *from, void *to, pgprot_t prot);
int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
void free_hyp_pgds(void);

void stage2_unmap_vm(struct kvm *kvm);
int kvm_alloc_stage2_pgd(struct kvm *kvm);
void kvm_free_stage2_pgd(struct kvm *kvm);
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
                          phys_addr_t pa, unsigned long size, bool writable);

int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);

phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
int kvm_mmu_init(void);
void kvm_clear_hyp_idmap(void);

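/*
 * Page table descriptor updates are published with dsb(ishst) so the table
 * walkers observe them before the caller proceeds.
 */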
static inline void kvm_set_pmd(pmd_t *pmd, pmd_t new_pmd)
{
        *pmd = new_pmd;
        dsb(ishst);
}

static inline void kvm_set_pte(pte_t *pte, pte_t new_pte)
{
        *pte = new_pte;
        dsb(ishst);
}

static inline pte_t kvm_s2pte_mkwrite(pte_t pte)
{
        pte_val(pte) |= L_PTE_S2_RDWR;
        return pte;
}

static inline pmd_t kvm_s2pmd_mkwrite(pmd_t pmd)
{
        pmd_val(pmd) |= L_PMD_S2_RDWR;
        return pmd;
}

static inline pte_t kvm_s2pte_mkexec(pte_t pte)
{
        pte_val(pte) &= ~L_PTE_XN;
        return pte;
}

static inline pmd_t kvm_s2pmd_mkexec(pmd_t pmd)
{
        pmd_val(pmd) &= ~PMD_SECT_XN;
        return pmd;
}

static inline void kvm_set_s2pte_readonly(pte_t *pte)
{
        pte_val(*pte) = (pte_val(*pte) & ~L_PTE_S2_RDWR) | L_PTE_S2_RDONLY;
}

static inline bool kvm_s2pte_readonly(pte_t *pte)
{
        return (pte_val(*pte) & L_PTE_S2_RDWR) == L_PTE_S2_RDONLY;
}

static inline bool kvm_s2pte_exec(pte_t *pte)
{
        return !(pte_val(*pte) & L_PTE_XN);
}

static inline void kvm_set_s2pmd_readonly(pmd_t *pmd)
{
        pmd_val(*pmd) = (pmd_val(*pmd) & ~L_PMD_S2_RDWR) | L_PMD_S2_RDONLY;
}

static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
{
        return (pmd_val(*pmd) & L_PMD_S2_RDWR) == L_PMD_S2_RDONLY;
}

static inline bool kvm_s2pmd_exec(pmd_t *pmd)
{
        return !(pmd_val(*pmd) & PMD_SECT_XN);
}

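/*
 * KVM takes a reference on a table page for each entry installed in it, so a
 * page_count() of 1 (the allocation reference alone) means the table is empty.
 */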
static inline bool kvm_page_empty(void *ptr)
{
        struct page *ptr_page = virt_to_page(ptr);
        return page_count(ptr_page) == 1;
}

#define kvm_pte_table_empty(kvm, ptep) kvm_page_empty(ptep)
#define kvm_pmd_table_empty(kvm, pmdp) kvm_page_empty(pmdp)
#define kvm_pud_table_empty(kvm, pudp) false

#define hyp_pte_table_empty(ptep) kvm_page_empty(ptep)
#define hyp_pmd_table_empty(pmdp) kvm_page_empty(pmdp)
#define hyp_pud_table_empty(pudp) false

struct kvm;

#define kvm_flush_dcache_to_poc(a,l)	__cpuc_flush_dcache_area((a), (l))

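/* SCTLR.M (bit 0) and SCTLR.C (bit 2) must both be set, hence the 0b101 mask. */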
static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
{
        return (vcpu_cp15(vcpu, c1_SCTLR) & 0b101) == 0b101;
}

static inline void __clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
{
        /*
         * Clean the dcache to the Point of Coherency.
         *
         * We need to do this through a kernel mapping (using the
         * user-space mapping has proved to be the wrong
         * solution). For that, we need to kmap one page at a time,
         * and iterate over the range.
         */

        VM_BUG_ON(size & ~PAGE_MASK);

        while (size) {
                void *va = kmap_atomic_pfn(pfn);

                kvm_flush_dcache_to_poc(va, PAGE_SIZE);

                size -= PAGE_SIZE;
                pfn++;

                kunmap_atomic(va);
        }
}

static inline void __invalidate_icache_guest_page(kvm_pfn_t pfn,
                                                  unsigned long size)
{
        u32 iclsz;

        /*
         * If we are going to insert an instruction page and the icache is
         * either VIPT or PIPT, there is a potential problem where the host
         * (or another VM) may have used the same page as this guest, and we
         * read incorrect data from the icache.  If we're using a PIPT cache,
         * we can invalidate just that page, but if we are using a VIPT cache
         * we need to invalidate the entire icache - damn shame - as written
         * in the ARM ARM (DDI 0406C.b - Page B3-1393).
         *
         * VIVT caches are tagged using both the ASID and the VMID and don't
         * need any kind of flushing (DDI 0406C.b - Page B3-1392).
         */

        VM_BUG_ON(size & ~PAGE_MASK);

        if (icache_is_vivt_asid_tagged())
                return;

        if (!icache_is_pipt()) {
                /* any kind of VIPT cache */
                __flush_icache_all();
                return;
        }

        /*
         * CTR IminLine contains Log2 of the number of words in the
         * cache line, so we can get the number of words as
         * 2 << (IminLine - 1).  To get the number of bytes, we
         * multiply by 4 (the number of bytes in a 32-bit word), and
         * get 4 << (IminLine).
         */
        iclsz = 4 << (read_cpuid(CPUID_CACHETYPE) & 0xf);
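        /* e.g. IminLine == 4 encodes 2 << 3 = 16 words, i.e. a 64-byte stride. */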

        while (size) {
                void *va = kmap_atomic_pfn(pfn);
                void *end = va + PAGE_SIZE;
                void *addr = va;

                do {
                        write_sysreg(addr, ICIMVAU);
                        addr += iclsz;
                } while (addr < end);

                dsb(ishst);
                isb();

                size -= PAGE_SIZE;
                pfn++;

                kunmap_atomic(va);
        }

        /* Check if we need to invalidate the BTB */
        if ((read_cpuid_ext(CPUID_EXT_MMFR1) >> 28) != 4) {
                write_sysreg(0, BPIALLIS);
                dsb(ishst);
                isb();
        }
}

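/* Clean a single stage-2 mapped page to the PoC through a kmap'd kernel mapping. */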
static inline void __kvm_flush_dcache_pte(pte_t pte)
{
        void *va = kmap_atomic(pte_page(pte));

        kvm_flush_dcache_to_poc(va, PAGE_SIZE);

        kunmap_atomic(va);
}

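/* Same as above, but covering a PMD-sized block one kernel page at a time. */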
static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
{
        unsigned long size = PMD_SIZE;
        kvm_pfn_t pfn = pmd_pfn(pmd);

        while (size) {
                void *va = kmap_atomic_pfn(pfn);

                kvm_flush_dcache_to_poc(va, PAGE_SIZE);

                pfn++;
                size -= PAGE_SIZE;

                kunmap_atomic(va);
        }
}

static inline void __kvm_flush_dcache_pud(pud_t pud)
{
}

#define kvm_virt_to_phys(x)		virt_to_idmap((unsigned long)(x))

void kvm_set_way_flush(struct kvm_vcpu *vcpu);
void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);

static inline bool __kvm_cpu_uses_extended_idmap(void)
{
        return false;
}

static inline unsigned long __kvm_idmap_ptrs_per_pgd(void)
{
        return PTRS_PER_PGD;
}

static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
                                       pgd_t *hyp_pgd,
                                       pgd_t *merged_hyp_pgd,
                                       unsigned long hyp_idmap_start) { }

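/* The ARMv7 VTTBR provides an 8-bit VMID. */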
static inline unsigned int kvm_get_vmid_bits(void)
{
        return 8;
}

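/*
 * With CONFIG_HARDEN_BRANCH_PREDICTOR, Cortex-A12/A17 get a BTB-invalidating
 * HYP vector and Brahma-B15/Cortex-A15 an icache-invalidating one; all other
 * CPUs use the plain vector.
 */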
static inline void *kvm_get_hyp_vector(void)
{
        switch (read_cpuid_part()) {
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
        case ARM_CPU_PART_CORTEX_A12:
        case ARM_CPU_PART_CORTEX_A17:
        {
                extern char __kvm_hyp_vector_bp_inv[];
                return kvm_ksym_ref(__kvm_hyp_vector_bp_inv);
        }

        case ARM_CPU_PART_BRAHMA_B15:
        case ARM_CPU_PART_CORTEX_A15:
        {
                extern char __kvm_hyp_vector_ic_inv[];
                return kvm_ksym_ref(__kvm_hyp_vector_ic_inv);
        }
#endif
        default:
        {
                extern char __kvm_hyp_vector[];
                return kvm_ksym_ref(__kvm_hyp_vector);
        }
        }
}

static inline int kvm_map_vectors(void)
{
        return 0;
}

#define kvm_phys_to_vttbr(addr)		(addr)

#endif	/* !__ASSEMBLY__ */

#endif /* __ARM_KVM_MMU_H__ */