/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#ifndef __ARM_KVM_MMU_H__
#define __ARM_KVM_MMU_H__

#include <asm/memory.h>
#include <asm/page.h>

/*
 * We directly use the kernel VA for the HYP, as we can directly share
 * the mapping (HTTBR "covers" TTBR1).
 */
#define kern_hyp_va(kva)	(kva)

/* Contrary to arm64, there is no need to generate a PC-relative address */
#define hyp_symbol_addr(s)						\
	({								\
		typeof(s) *addr = &(s);					\
		addr;							\
	})
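
/*
 * For instance (sketch only; "some_hyp_var" is a placeholder name):
 *
 *	static u64 some_hyp_var;
 *	u64 *p = hyp_symbol_addr(some_hyp_var);
 *
 * simply evaluates to &some_hyp_var here, whereas arm64 must build the
 * address with a PC-relative adrp/add sequence.
 */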

#ifndef __ASSEMBLY__

#include <linux/highmem.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_hyp.h>
#include <asm/pgalloc.h>
#include <asm/stage2_pgtable.h>

/* Ensure compatibility with arm64 */
#define VA_BITS			32

#define kvm_phys_shift(kvm)		KVM_PHYS_SHIFT
#define kvm_phys_size(kvm)		(1ULL << kvm_phys_shift(kvm))
#define kvm_phys_mask(kvm)		(kvm_phys_size(kvm) - 1ULL)
#define kvm_vttbr_baddr_mask(kvm)	VTTBR_BADDR_MASK
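
/*
 * Worked example, assuming KVM_PHYS_SHIFT == 40 (its value in
 * <asm/kvm_arm.h>): each guest gets a 1 TiB IPA space, so
 * kvm_phys_size(kvm) == 1ULL << 40 and
 * kvm_phys_mask(kvm) == 0xffffffffffULL.
 */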

#define stage2_pgd_size(kvm)		(PTRS_PER_S2_PGD * sizeof(pgd_t))

int create_hyp_mappings(void *from, void *to, pgprot_t prot);
int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size,
			   void __iomem **kaddr,
			   void __iomem **haddr);
int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
			     void **haddr);
void free_hyp_pgds(void);

void stage2_unmap_vm(struct kvm *kvm);
int kvm_alloc_stage2_pgd(struct kvm *kvm);
void kvm_free_stage2_pgd(struct kvm *kvm);
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size, bool writable);

int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);

phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
int kvm_mmu_init(void);
void kvm_clear_hyp_idmap(void);

#define kvm_mk_pmd(ptep)	__pmd(__pa(ptep) | PMD_TYPE_TABLE)
#define kvm_mk_pud(pmdp)	__pud(__pa(pmdp) | PMD_TYPE_TABLE)
#define kvm_mk_pgd(pudp)	({ BUILD_BUG(); 0; })

#define kvm_pfn_pte(pfn, prot)	pfn_pte(pfn, prot)
#define kvm_pfn_pmd(pfn, prot)	pfn_pmd(pfn, prot)
#define kvm_pfn_pud(pfn, prot)	(__pud(0))

#define kvm_pud_pfn(pud)	({ WARN_ON(1); 0; })

#define kvm_pmd_mkhuge(pmd)	pmd_mkhuge(pmd)
/* No support for pud hugepages */
#define kvm_pud_mkhuge(pud)	({ WARN_ON(1); pud; })

/*
 * The following kvm_*pud*() functions are provided strictly to allow
 * sharing code with arm64. They should never be called in practice.
 */
static inline void kvm_set_s2pud_readonly(pud_t *pud)
{
	WARN_ON(1);
}

static inline bool kvm_s2pud_readonly(pud_t *pud)
{
	WARN_ON(1);
	return false;
}

static inline void kvm_set_pud(pud_t *pud, pud_t new_pud)
{
	WARN_ON(1);
}

static inline pud_t kvm_s2pud_mkwrite(pud_t pud)
{
	WARN_ON(1);
	return pud;
}

static inline pud_t kvm_s2pud_mkexec(pud_t pud)
{
	WARN_ON(1);
	return pud;
}

static inline bool kvm_s2pud_exec(pud_t *pud)
{
	WARN_ON(1);
	return false;
}

static inline pud_t kvm_s2pud_mkyoung(pud_t pud)
{
	BUG();
	return pud;
}

static inline bool kvm_s2pud_young(pud_t pud)
{
	WARN_ON(1);
	return false;
}

static inline pte_t kvm_s2pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= L_PTE_S2_RDWR;
	return pte;
}

static inline pmd_t kvm_s2pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= L_PMD_S2_RDWR;
	return pmd;
}

static inline pte_t kvm_s2pte_mkexec(pte_t pte)
{
	pte_val(pte) &= ~L_PTE_XN;
	return pte;
}

static inline pmd_t kvm_s2pmd_mkexec(pmd_t pmd)
{
	pmd_val(pmd) &= ~PMD_SECT_XN;
	return pmd;
}

static inline void kvm_set_s2pte_readonly(pte_t *pte)
{
	pte_val(*pte) = (pte_val(*pte) & ~L_PTE_S2_RDWR) | L_PTE_S2_RDONLY;
}

static inline bool kvm_s2pte_readonly(pte_t *pte)
{
	return (pte_val(*pte) & L_PTE_S2_RDWR) == L_PTE_S2_RDONLY;
}

static inline bool kvm_s2pte_exec(pte_t *pte)
{
	return !(pte_val(*pte) & L_PTE_XN);
}

static inline void kvm_set_s2pmd_readonly(pmd_t *pmd)
{
	pmd_val(*pmd) = (pmd_val(*pmd) & ~L_PMD_S2_RDWR) | L_PMD_S2_RDONLY;
}

static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
{
	return (pmd_val(*pmd) & L_PMD_S2_RDWR) == L_PMD_S2_RDONLY;
}

static inline bool kvm_s2pmd_exec(pmd_t *pmd)
{
	return !(pmd_val(*pmd) & PMD_SECT_XN);
}
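
/*
 * A page-table page is considered empty once its refcount is back down
 * to 1: the stage-2 code takes a page reference for every entry it
 * installs, so the only remaining reference is the allocation itself.
 */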
static inline bool kvm_page_empty(void *ptr)
{
	struct page *ptr_page = virt_to_page(ptr);
	return page_count(ptr_page) == 1;
}

#define kvm_pte_table_empty(kvm, ptep) kvm_page_empty(ptep)
#define kvm_pmd_table_empty(kvm, pmdp) kvm_page_empty(pmdp)
#define kvm_pud_table_empty(kvm, pudp) false

#define hyp_pte_table_empty(ptep) kvm_page_empty(ptep)
#define hyp_pmd_table_empty(pmdp) kvm_page_empty(pmdp)
#define hyp_pud_table_empty(pudp) false

struct kvm;

#define kvm_flush_dcache_to_poc(a,l)	__cpuc_flush_dcache_area((a), (l))
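
/*
 * The guest's caches only count as enabled when both the MMU (SCTLR.M,
 * bit 0) and the data cache (SCTLR.C, bit 2) are on, hence the 0b101
 * mask below.
 */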
static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
{
	return (vcpu_cp15(vcpu, c1_SCTLR) & 0b101) == 0b101;
}

static inline void __clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
{
	/*
	 * Clean the dcache to the Point of Coherency.
	 *
	 * We need to do this through a kernel mapping (using the
	 * user-space mapping has proved to be the wrong
	 * solution). For that, we need to kmap one page at a time,
	 * and iterate over the range.
	 */

	VM_BUG_ON(size & ~PAGE_MASK);

	while (size) {
		void *va = kmap_atomic_pfn(pfn);

		kvm_flush_dcache_to_poc(va, PAGE_SIZE);

		size -= PAGE_SIZE;
		pfn++;

		kunmap_atomic(va);
	}
}

static inline void __invalidate_icache_guest_page(kvm_pfn_t pfn,
						  unsigned long size)
{
	u32 iclsz;

	/*
	 * If we are going to insert an instruction page and the icache is
	 * either VIPT or PIPT, there is a potential problem where the host
	 * (or another VM) may have used the same page as this guest, and we
	 * read incorrect data from the icache.  If we're using a PIPT cache,
	 * we can invalidate just that page, but if we are using a VIPT cache
	 * we need to invalidate the entire icache - damn shame - as written
	 * in the ARM ARM (DDI 0406C.b - Page B3-1393).
	 *
	 * VIVT caches are tagged using both the ASID and the VMID and don't
	 * need any kind of flushing (DDI 0406C.b - Page B3-1392).
	 */

	VM_BUG_ON(size & ~PAGE_MASK);

	if (icache_is_vivt_asid_tagged())
		return;

	if (!icache_is_pipt()) {
		/* any kind of VIPT cache */
		__flush_icache_all();
		return;
	}

	/*
	 * CTR.IminLine contains Log2 of the number of words in the
	 * smallest cache line, so we can get the number of words as
	 * 2 << (IminLine - 1).  To get the number of bytes, we
	 * multiply by 4 (the number of bytes in a 32-bit word), and
	 * get 4 << (IminLine).
	 */
	iclsz = 4 << (read_cpuid(CPUID_CACHETYPE) & 0xf);

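	/*
	 * For example, with 32-byte icache lines, IminLine == 3
	 * (2 << 2 == 8 words per line), so iclsz == 4 << 3 == 32 and
	 * the loop below issues ICIMVAU every 32 bytes of each page.
	 */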
	while (size) {
		void *va = kmap_atomic_pfn(pfn);
		void *end = va + PAGE_SIZE;
		void *addr = va;

		do {
			write_sysreg(addr, ICIMVAU);
			addr += iclsz;
		} while (addr < end);

		dsb(ishst);
		isb();

		size -= PAGE_SIZE;
		pfn++;

		kunmap_atomic(va);
	}

	/*
	 * Check if we need to invalidate the BTB: ID_MMFR1.BPred == 0b0100
	 * means the branch predictor never requires flushing; any other
	 * value gets an inner-shareable branch predictor invalidate.
	 */
	if ((read_cpuid_ext(CPUID_EXT_MMFR1) >> 28) != 4) {
		write_sysreg(0, BPIALLIS);
		dsb(ishst);
		isb();
	}
}

static inline void __kvm_flush_dcache_pte(pte_t pte)
{
	void *va = kmap_atomic(pte_page(pte));

	kvm_flush_dcache_to_poc(va, PAGE_SIZE);

	kunmap_atomic(va);
}
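
/*
 * Flush a stage-2 huge mapping through a kernel mapping, one page at a
 * time. With the LPAE format that KVM requires, PMD_SIZE is 2 MiB, so
 * the loop below makes 512 passes.
 */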
static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
{
	unsigned long size = PMD_SIZE;
	kvm_pfn_t pfn = pmd_pfn(pmd);

	while (size) {
		void *va = kmap_atomic_pfn(pfn);

		kvm_flush_dcache_to_poc(va, PAGE_SIZE);

		pfn++;
		size -= PAGE_SIZE;

		kunmap_atomic(va);
	}
}

static inline void __kvm_flush_dcache_pud(pud_t pud)
{
}

#define kvm_virt_to_phys(x)		virt_to_idmap((unsigned long)(x))

void kvm_set_way_flush(struct kvm_vcpu *vcpu);
void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);

static inline bool __kvm_cpu_uses_extended_idmap(void)
{
	return false;
}

static inline unsigned long __kvm_idmap_ptrs_per_pgd(void)
{
	return PTRS_PER_PGD;
}

static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
				       pgd_t *hyp_pgd,
				       pgd_t *merged_hyp_pgd,
				       unsigned long hyp_idmap_start) { }

static inline unsigned int kvm_get_vmid_bits(void)
{
	return 8;
}

/*
 * We are not in the kvm->srcu critical section most of the time, so we take
 * the SRCU read lock here.  Since we copy the data from the user page, we
 * can immediately drop the lock again.
 */
static inline int kvm_read_guest_lock(struct kvm *kvm,
				      gpa_t gpa, void *data, unsigned long len)
{
	int srcu_idx = srcu_read_lock(&kvm->srcu);
	int ret = kvm_read_guest(kvm, gpa, data, len);

	srcu_read_unlock(&kvm->srcu, srcu_idx);

	return ret;
}

static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
				       const void *data, unsigned long len)
{
	int srcu_idx = srcu_read_lock(&kvm->srcu);
	int ret = kvm_write_guest(kvm, gpa, data, len);

	srcu_read_unlock(&kvm->srcu, srcu_idx);

	return ret;
}
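
/*
 * Pick the HYP exception vector: with CONFIG_HARDEN_BRANCH_PREDICTOR,
 * cores affected by branch-target injection (Spectre-v2) use a vector
 * that scrubs the branch predictor on guest exit, either directly
 * (BPIALL on A12/A17) or via an icache invalidate on parts where that
 * is what flushes the predictor (A15, Brahma-B15).
 */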
static inline void *kvm_get_hyp_vector(void)
{
	switch (read_cpuid_part()) {
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
	case ARM_CPU_PART_CORTEX_A12:
	case ARM_CPU_PART_CORTEX_A17:
	{
		extern char __kvm_hyp_vector_bp_inv[];
		return kvm_ksym_ref(__kvm_hyp_vector_bp_inv);
	}

	case ARM_CPU_PART_BRAHMA_B15:
	case ARM_CPU_PART_CORTEX_A15:
	{
		extern char __kvm_hyp_vector_ic_inv[];
		return kvm_ksym_ref(__kvm_hyp_vector_ic_inv);
	}
#endif
	default:
	{
		extern char __kvm_hyp_vector[];
		return kvm_ksym_ref(__kvm_hyp_vector);
	}
	}
}

static inline int kvm_map_vectors(void)
{
	return 0;
}

static inline int hyp_map_aux_data(void)
{
	return 0;
}

#define kvm_phys_to_vttbr(addr)		(addr)

static inline void kvm_set_ipa_limit(void) {}
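
/*
 * Compose the VTTBR for this VM: the stage-2 pgd physical address in
 * the low bits, and the 8-bit VMID at VTTBR_VMID_SHIFT (bit 48).  As a
 * made-up example, pgd_phys == 0x80000000 with vmid == 5 gives a VTTBR
 * of 0x0005000080000000.
 */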
static __always_inline u64 kvm_get_vttbr(struct kvm *kvm)
{
	struct kvm_vmid *vmid = &kvm->arch.vmid;
	u64 vmid_field, baddr;

	baddr = kvm->arch.pgd_phys;
	vmid_field = (u64)vmid->vmid << VTTBR_VMID_SHIFT;
	return kvm_phys_to_vttbr(baddr) | vmid_field;
}

#endif	/* !__ASSEMBLY__ */

#endif /* __ARM_KVM_MMU_H__ */