// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/flush.c
 *
 *  Copyright (C) 1995-2002 Russell King
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>

#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/highmem.h>
#include <asm/smp_plat.h>
#include <asm/tlbflush.h>

#include "mm.h"

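/*
 * arm_heavy_mb() backs the "heavy" memory barrier used by mb() when
 * CONFIG_ARM_HEAVY_MB is set (see asm/barrier.h): a DSB alone is not
 * enough on these platforms, so writes are also pushed past the outer
 * cache via outer_cache.sync() and through an optional SoC-specific
 * hook (soc_mb).
 */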
#ifdef CONFIG_ARM_HEAVY_MB
void (*soc_mb)(void);

void arm_heavy_mb(void)
{
#ifdef CONFIG_OUTER_CACHE_SYNC
	if (outer_cache.sync)
		outer_cache.sync();
#endif
	if (soc_mb)
		soc_mb();
}
EXPORT_SYMBOL(arm_heavy_mb);
#endif

#ifdef CONFIG_CPU_CACHE_VIPT

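/*
 * Map the page at a kernel alias address with the same cache colour as
 * the given user address, then clean+invalidate that alias.  On an
 * aliasing VIPT D-cache this hits exactly the cache lines the user
 * mapping occupies.  The first CP15 op (mcrr p15, 0, end, start, c14)
 * is the ARMv6 "clean and invalidate D-cache range by MVA" operation;
 * the second (mcr p15, 0, 0, c7, c10, 4) is a data synchronisation
 * barrier.
 */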
static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
{
	unsigned long to = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
	const int zero = 0;

	set_top_pte(to, pfn_pte(pfn, PAGE_KERNEL));

	asm(	"mcrr	p15, 0, %1, %0, c14\n"
	"	mcr	p15, 0, %2, c7, c10, 4"
	    :
	    : "r" (to), "r" (to + PAGE_SIZE - 1), "r" (zero)
	    : "cc");
}

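/*
 * Same colour-matched alias trick as flush_pfn_alias(), but for
 * instruction coherency: the range is pushed through
 * flush_icache_range() so both the D-cache and the I-cache see the
 * updated contents.
 */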
static void flush_icache_alias(unsigned long pfn, unsigned long vaddr, unsigned long len)
{
	unsigned long va = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
	unsigned long offset = vaddr & (PAGE_SIZE - 1);
	unsigned long to;

	set_top_pte(va, pfn_pte(pfn, PAGE_KERNEL));
	to = va + offset;
	flush_icache_range(to, to + len);
}

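/*
 * For an aliasing VIPT cache there is no cheap way to tell which
 * colours a whole mm or address range occupies, so the two functions
 * below fall back to cleaning and invalidating the entire D-cache
 * (mcr p15, 0, 0, c7, c14, 0) followed by a data synchronisation
 * barrier.
 */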
void flush_cache_mm(struct mm_struct *mm)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_mm(mm);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
		"	mcr	p15, 0, %0, c7, c10, 4"
		    :
		    : "r" (0)
		    : "cc");
	}
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_range(vma, start, end);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
		"	mcr	p15, 0, %0, c7, c10, 4"
		    :
		    : "r" (0)
		    : "cc");
	}

	if (vma->vm_flags & VM_EXEC)
		__flush_icache_all();
}

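/*
 * Flush a single user page.  VIVT caches flush by user virtual address;
 * an aliasing VIPT D-cache is handled via the colour-matched kernel
 * alias above.  A VIVT ASID-tagged I-cache is flushed entirely for
 * executable mappings, since maintenance on an alias address would not
 * match lines tagged with the process's ASID.
 */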
void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_page(vma, user_addr, pfn);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		flush_pfn_alias(pfn, user_addr);
		__flush_icache_all();
	}

	if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged())
		__flush_icache_all();
}

#else
#define flush_pfn_alias(pfn,vaddr)		do { } while (0)
#define flush_icache_alias(pfn,vaddr,len)	do { } while (0)
#endif

#define FLAG_PA_IS_EXEC 1
#define FLAG_PA_CORE_IN_MM 2

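/*
 * Ptrace access (e.g. planting a breakpoint via copy_to_user_page())
 * writes into a user page through its kernel mapping while the target
 * may run on another CPU.  FLAG_PA_CORE_IN_MM says the current CPU has
 * the target mm active, so VIVT flushing by address is meaningful;
 * FLAG_PA_IS_EXEC says the mapping is executable, so the I-cache must
 * be made coherent as well.
 */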
static void flush_ptrace_access_other(void *args)
{
	__flush_icache_all();
}

static inline
void __flush_ptrace_access(struct page *page, unsigned long uaddr, void *kaddr,
			   unsigned long len, unsigned int flags)
{
	if (cache_is_vivt()) {
		if (flags & FLAG_PA_CORE_IN_MM) {
			unsigned long addr = (unsigned long)kaddr;
			__cpuc_coherent_kern_range(addr, addr + len);
		}
		return;
	}

	if (cache_is_vipt_aliasing()) {
		flush_pfn_alias(page_to_pfn(page), uaddr);
		__flush_icache_all();
		return;
	}

	/* VIPT non-aliasing D-cache */
	if (flags & FLAG_PA_IS_EXEC) {
		unsigned long addr = (unsigned long)kaddr;
		if (icache_is_vipt_aliasing())
			flush_icache_alias(page_to_pfn(page), uaddr, len);
		else
			__cpuc_coherent_kern_range(addr, addr + len);
		if (cache_ops_need_broadcast())
			smp_call_function(flush_ptrace_access_other,
					  NULL, 1);
	}
}

static
void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
			 unsigned long uaddr, void *kaddr, unsigned long len)
{
	unsigned int flags = 0;
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
		flags |= FLAG_PA_CORE_IN_MM;
	if (vma->vm_flags & VM_EXEC)
		flags |= FLAG_PA_IS_EXEC;
	__flush_ptrace_access(page, uaddr, kaddr, len, flags);
}

void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
			     void *kaddr, unsigned long len)
{
	unsigned int flags = FLAG_PA_CORE_IN_MM|FLAG_PA_IS_EXEC;

	__flush_ptrace_access(page, uaddr, kaddr, len, flags);
}

/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 *
 * Note that this code needs to run on the current CPU.
 */
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long uaddr, void *dst, const void *src,
		       unsigned long len)
{
#ifdef CONFIG_SMP
	preempt_disable();
#endif
	memcpy(dst, src, len);
	flush_ptrace_access(vma, page, uaddr, dst, len);
#ifdef CONFIG_SMP
	preempt_enable();
#endif
}
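
/*
 * A sketch of the expected call sequence (the real caller is the
 * generic ptrace/uaccess path, e.g. __access_remote_vm() writing a
 * breakpoint):
 *
 *	kaddr = kmap(page);
 *	copy_to_user_page(vma, page, uaddr, kaddr + offset, buf, len);
 *	kunmap(page);
 *
 * The memcpy() and the flush must happen on the same CPU, hence the
 * preempt_disable()/preempt_enable() pair above on SMP.
 */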

void __flush_dcache_page(struct address_space *mapping, struct page *page)
{
	/*
	 * Writeback any data associated with the kernel mapping of this
	 * page.  This ensures that data in the physical page is mutually
	 * coherent with the kernel's mapping.
	 */
	if (!PageHighMem(page)) {
		size_t page_size = PAGE_SIZE << compound_order(page);
		__cpuc_flush_dcache_area(page_address(page), page_size);
	} else {
		unsigned long i;
		if (cache_is_vipt_nonaliasing()) {
			for (i = 0; i < (1 << compound_order(page)); i++) {
				void *addr = kmap_atomic(page + i);
				__cpuc_flush_dcache_area(addr, PAGE_SIZE);
				kunmap_atomic(addr);
			}
		} else {
			for (i = 0; i < (1 << compound_order(page)); i++) {
				void *addr = kmap_high_get(page + i);
				if (addr) {
					__cpuc_flush_dcache_area(addr, PAGE_SIZE);
					kunmap_high(page + i);
				}
			}
		}
	}

	/*
	 * If this is a page cache page, and we have an aliasing VIPT cache,
	 * we only need to do one flush - which would be at the relevant
	 * userspace colour, which is congruent with page->index.
	 */
	if (mapping && cache_is_vipt_aliasing())
		flush_pfn_alias(page_to_pfn(page),
				page->index << PAGE_SHIFT);
}

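/*
 * Walk every shared user mapping of this page in the current mm via the
 * address_space interval tree and flush each alias through
 * flush_cache_page().
 */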
static void __flush_dcache_aliases(struct address_space *mapping, struct page *page)
{
	struct mm_struct *mm = current->active_mm;
	struct vm_area_struct *mpnt;
	pgoff_t pgoff;

	/*
	 * There are possible user space mappings of this page:
	 * - VIVT cache: we need to also write back and invalidate all user
	 *   data in the current VM view associated with this page.
	 * - aliasing VIPT: we only need to find one mapping of this page.
	 */
	pgoff = page->index;

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
		unsigned long offset;

		/*
		 * If this VMA is not in our MM, we can ignore it.
		 */
		if (mpnt->vm_mm != mm)
			continue;
		if (!(mpnt->vm_flags & VM_MAYSHARE))
			continue;
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		flush_cache_page(mpnt, mpnt->vm_start + offset, page_to_pfn(page));
	}
	flush_dcache_mmap_unlock(mapping);
}

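/*
 * Called on ARMv6+ when a user PTE is installed (see set_pte_at()): if
 * PG_dcache_clean is not yet set for the page, the deferred D-cache
 * flush is performed now, and executable mappings additionally get an
 * I-cache flush so stale instructions cannot be fetched.
 */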
#if __LINUX_ARM_ARCH__ >= 6
void __sync_icache_dcache(pte_t pteval)
{
	unsigned long pfn;
	struct page *page;
	struct address_space *mapping;

	if (cache_is_vipt_nonaliasing() && !pte_exec(pteval))
		/* only flush non-aliasing VIPT caches for exec mappings */
		return;
	pfn = pte_pfn(pteval);
	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	if (cache_is_vipt_aliasing())
		mapping = page_mapping_file(page);
	else
		mapping = NULL;

	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
		__flush_dcache_page(mapping, page);

	if (pte_exec(pteval))
		__flush_icache_all();
}
#endif

/*
 * Ensure cache coherency between kernel mapping and userspace mapping
 * of this page.
 *
 * We have three cases to consider:
 * - VIPT non-aliasing cache: fully coherent so nothing required.
 * - VIVT: fully aliasing, so we need to handle every alias in our
 *   current VM view.
 * - VIPT aliasing: need to handle one alias in our current VM view.
 *
 * If we need to handle aliasing:
 *  If the page only exists in the page cache and there are no user
 *  space mappings, we can be lazy and remember that we may have dirty
 *  kernel cache lines for later.  Otherwise, we assume we have
 *  aliasing mappings.
 *
 * Note that we disable the lazy flush for SMP configurations where
 * the cache maintenance operations are not automatically broadcast.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	if (page == ZERO_PAGE(0))
		return;

	if (!cache_ops_need_broadcast() && cache_is_vipt_nonaliasing()) {
		if (test_bit(PG_dcache_clean, &page->flags))
			clear_bit(PG_dcache_clean, &page->flags);
		return;
	}

	mapping = page_mapping_file(page);

	if (!cache_ops_need_broadcast() &&
	    mapping && !page_mapcount(page))
		clear_bit(PG_dcache_clean, &page->flags);
	else {
		__flush_dcache_page(mapping, page);
		if (mapping && cache_is_vivt())
			__flush_dcache_aliases(mapping, page);
		else if (mapping)
			__flush_icache_all();
		set_bit(PG_dcache_clean, &page->flags);
	}
}
EXPORT_SYMBOL(flush_dcache_page);

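/*
 * A sketch of the deferred-flush handshake implemented above, assuming
 * lazy flushing is allowed (aliasing cache, no broadcast requirement):
 *
 *	write to page via its kernel mapping
 *	flush_dcache_page(page);	// may just clear PG_dcache_clean
 *	...
 *	set_pte_at();			// page is mapped into user space
 *	  __sync_icache_dcache();	// completes the deferred flush
 */
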
/*
 * Ensure cache coherency for the kernel mapping of this page.  We can
 * assume that the page is pinned via kmap.
 *
 * If the page only exists in the page cache and there are no user
 * space mappings, this is a no-op since the page was already marked
 * dirty at creation.  Otherwise, we need to flush the dirty kernel
 * cache lines directly.
 */
void flush_kernel_dcache_page(struct page *page)
{
	if (cache_is_vivt() || cache_is_vipt_aliasing()) {
		struct address_space *mapping;

		mapping = page_mapping_file(page);

		if (!mapping || mapping_mapped(mapping)) {
			void *addr;

			addr = page_address(page);
			/*
			 * kmap_atomic() doesn't set the page virtual
			 * address for highmem pages, and
			 * kunmap_atomic() takes care of cache
			 * flushing already.
			 */
			if (!IS_ENABLED(CONFIG_HIGHMEM) || addr)
				__cpuc_flush_dcache_area(addr, PAGE_SIZE);
		}
	}
}
EXPORT_SYMBOL(flush_kernel_dcache_page);

/*
 * Flush an anonymous page so that users of get_user_pages()
 * can safely access the data.  The expected sequence is:
 *
 *  get_user_pages()
 *    -> flush_anon_page
 *  memcpy() to/from page
 *  if written to page, flush_dcache_page()
 */
void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
	unsigned long pfn;

	/* VIPT non-aliasing caches need do nothing */
	if (cache_is_vipt_nonaliasing())
		return;

	/*
	 * Write back and invalidate userspace mapping.
	 */
	pfn = page_to_pfn(page);
	if (cache_is_vivt()) {
		flush_cache_page(vma, vmaddr, pfn);
	} else {
		/*
		 * For aliasing VIPT, we can flush an alias of the
		 * userspace address only.
		 */
		flush_pfn_alias(pfn, vmaddr);
		__flush_icache_all();
	}

	/*
	 * Invalidate kernel mapping.  No data should be contained
	 * in this mapping of the page.  FIXME: this is overkill
	 * since we actually ask for a write-back and invalidate.
	 */
	__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}