/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  arch/arm/include/asm/cacheflush.h
 *
 *  Copyright (C) 1999-2002 Russell King
 */
#ifndef _ASMARM_CACHEFLUSH_H
#define _ASMARM_CACHEFLUSH_H

#include <linux/mm.h>

#include <asm/glue-cache.h>
#include <asm/shmparam.h>
#include <asm/cachetype.h>
#include <asm/outercache.h>

#define CACHE_COLOUR(vaddr)	((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)

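/*
 * Worked example (assuming SHMLBA = 4 * PAGE_SIZE = 16KiB with 4KiB
 * pages, as asm/shmparam.h defines for aliasing VIPT caches):
 *
 *	CACHE_COLOUR(0x00003000) = (0x3000 & 0x3fff) >> 12 = 3
 *
 * i.e. the page at that virtual address has colour 3 of 4, and two
 * mappings of the same page only share cache lines when their colours
 * match.
 */
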
/*
 * This flag is used to indicate that the page pointed to by a pte is clean
 * and does not require cleaning before returning it to the user.
 */
#define PG_dcache_clean		PG_arch_1

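/*
 * Illustrative sketch (not part of this header): arch code typically
 * defers D-cache maintenance by tracking this bit, along the lines of
 *
 *	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
 *		__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
 *
 * so a page is only cleaned the first time it is exposed to user space.
 */
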
/*
 * MM Cache Management
 * ===================
 *
 * The arch/arm/mm/cache-*.S and arch/arm/mm/proc-*.S files
 * implement these methods.
 *
 * Start addresses are inclusive and end addresses are exclusive;
 * start addresses should be rounded down, end addresses up.
 *
 * See Documentation/core-api/cachetlb.rst for more information.
 * Please note that the implementation of these, and the required
 * effects, are cache-type (VIVT/VIPT/PIPT) specific.
 *
 *	flush_icache_all()
 *
 *		Unconditionally clean and invalidate the entire icache.
 *		Currently only needed for cache-v6.S and cache-v7.S, see
 *		__flush_icache_all for the generic implementation.
 *
 *	flush_kern_all()
 *
 *		Unconditionally clean and invalidate the entire cache.
 *
 *	flush_kern_louis()
 *
 *		Flush data cache levels up to the Level of Unification
 *		Inner Shareable (LoUIS) and invalidate the I-cache.
 *		Only needed from v7 onwards; falls back to flush_cache_all()
 *		for all other processor versions.
 *
 *	flush_user_all()
 *
 *		Clean and invalidate all user space cache entries
 *		before a change of page tables.
 *
 *	flush_user_range(start, end, flags)
 *
 *		Clean and invalidate a range of cache entries in the
 *		specified address space before a change of page tables.
 *		- start - user start address (inclusive, page aligned)
 *		- end   - user end address   (exclusive, page aligned)
 *		- flags - vma->vm_flags field
 *
 *	coherent_kern_range(start, end)
 *
 *		Ensure coherency between the Icache and the Dcache in the
 *		region described by start, end.  If you have non-snooping
 *		Harvard caches, you need to implement this function.
 *		- start - virtual start address
 *		- end   - virtual end address
 *
 *	coherent_user_range(start, end)
 *
 *		Ensure coherency between the Icache and the Dcache in the
 *		region described by start, end.  If you have non-snooping
 *		Harvard caches, you need to implement this function.
 *		- start - virtual start address
 *		- end   - virtual end address
 *
 *	flush_kern_dcache_area(kaddr, size)
 *
 *		Ensure that the data held in page is written back.
 *		- kaddr - page address
 *		- size  - region size
 *
 *	DMA Cache Coherency
 *	===================
 *
 *	dma_flush_range(start, end)
 *
 *		Clean and invalidate the specified virtual address range.
 *		- start - virtual start address
 *		- end   - virtual end address
 */

struct cpu_cache_fns {
	void (*flush_icache_all)(void);
	void (*flush_kern_all)(void);
	void (*flush_kern_louis)(void);
	void (*flush_user_all)(void);
	void (*flush_user_range)(unsigned long, unsigned long, unsigned int);

	void (*coherent_kern_range)(unsigned long, unsigned long);
	int (*coherent_user_range)(unsigned long, unsigned long);
	void (*flush_kern_dcache_area)(void *, size_t);

	void (*dma_map_area)(const void *, size_t, int);
	void (*dma_unmap_area)(const void *, size_t, int);

	void (*dma_flush_range)(const void *, const void *);
} __no_randomize_layout;

/*
 * Select the calling method
 */
#ifdef MULTI_CACHE

extern struct cpu_cache_fns cpu_cache;

#define __cpuc_flush_icache_all		cpu_cache.flush_icache_all
#define __cpuc_flush_kern_all		cpu_cache.flush_kern_all
#define __cpuc_flush_kern_louis		cpu_cache.flush_kern_louis
#define __cpuc_flush_user_all		cpu_cache.flush_user_all
#define __cpuc_flush_user_range		cpu_cache.flush_user_range
#define __cpuc_coherent_kern_range	cpu_cache.coherent_kern_range
#define __cpuc_coherent_user_range	cpu_cache.coherent_user_range
#define __cpuc_flush_dcache_area	cpu_cache.flush_kern_dcache_area

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
#define dmac_flush_range		cpu_cache.dma_flush_range

#else

extern void __cpuc_flush_icache_all(void);
extern void __cpuc_flush_kern_all(void);
extern void __cpuc_flush_kern_louis(void);
extern void __cpuc_flush_user_all(void);
extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
extern int __cpuc_coherent_user_range(unsigned long, unsigned long);
extern void __cpuc_flush_dcache_area(void *, size_t);

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
extern void dmac_flush_range(const void *, const void *);

#endif

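/*
 * Illustrative note: with MULTI_CACHE, a call such as
 *
 *	__cpuc_flush_dcache_area(buf, len);
 *
 * dispatches indirectly through cpu_cache (populated at boot from the
 * matching proc info), whereas single-cache kernels resolve the same
 * name at link time to the one compiled-in implementation via the
 * __glue() macros in asm/glue-cache.h.
 */
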
/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 */
extern void copy_to_user_page(struct vm_area_struct *, struct page *,
	unsigned long, void *, const void *, unsigned long);
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	do {							\
		memcpy(dst, src, len);				\
	} while (0)

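/*
 * Illustrative sketch (assumed caller, not part of this header):
 * generic ptrace/uaccess code writes into another process roughly as
 *
 *	maddr = kmap(page);
 *	copy_to_user_page(vma, page, addr, maddr + offset, buf, len);
 *	set_page_dirty_lock(page);
 *	kunmap(page);
 *
 * so the implementation must make both the D-cache and (for VM_EXEC
 * mappings) the I-cache coherent with the new data.
 */
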
175/*
176 * Convert calls to our calling convention.
177 */
Tony Lindgren81d11952010-09-21 17:16:40 +0100178
179/* Invalidate I-cache */
180#define __flush_icache_all_generic() \
181 asm("mcr p15, 0, %0, c7, c5, 0" \
182 : : "r" (0));
183
184/* Invalidate I-cache inner shareable */
185#define __flush_icache_all_v7_smp() \
186 asm("mcr p15, 0, %0, c7, c1, 0" \
187 : : "r" (0));
188
189/*
190 * Optimized __flush_icache_all for the common cases. Note that UP ARMv7
191 * will fall through to use __flush_icache_all_generic.
192 */
Russell Kinge399b1a2011-01-17 15:08:32 +0000193#if (defined(CONFIG_CPU_V7) && \
194 (defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K))) || \
Tony Lindgren81d11952010-09-21 17:16:40 +0100195 defined(CONFIG_SMP_ON_UP)
196#define __flush_icache_preferred __cpuc_flush_icache_all
197#elif __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
198#define __flush_icache_preferred __flush_icache_all_v7_smp
199#elif __LINUX_ARM_ARCH__ == 6 && defined(CONFIG_ARM_ERRATA_411920)
200#define __flush_icache_preferred __cpuc_flush_icache_all
201#else
202#define __flush_icache_preferred __flush_icache_all_generic
203#endif
204
205static inline void __flush_icache_all(void)
206{
207 __flush_icache_preferred();
Will Deacon95819602014-05-09 18:36:27 +0100208 dsb(ishst);
Tony Lindgren81d11952010-09-21 17:16:40 +0100209}
210
/*
 * Flush caches up to Level of Unification Inner Shareable
 */
#define flush_cache_louis()		__cpuc_flush_kern_louis()

#define flush_cache_all()		__cpuc_flush_kern_all()

static inline void vivt_flush_cache_mm(struct mm_struct *mm)
{
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
		__cpuc_flush_user_all();
}

static inline void
vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
		__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
					vma->vm_flags);
}

static inline void
vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
	struct mm_struct *mm = vma->vm_mm;

	if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {
		unsigned long addr = user_addr & PAGE_MASK;
		__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
	}
}

#ifndef CONFIG_CPU_CACHE_VIPT
#define flush_cache_mm(mm) \
		vivt_flush_cache_mm(mm)
#define flush_cache_range(vma,start,end) \
		vivt_flush_cache_range(vma,start,end)
#define flush_cache_page(vma,addr,pfn) \
		vivt_flush_cache_page(vma,addr,pfn)
#else
extern void flush_cache_mm(struct mm_struct *mm);
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn);
#endif

#define flush_cache_dup_mm(mm) flush_cache_mm(mm)

/*
 * flush_cache_user_range is used when we want to ensure that the
 * Harvard caches are synchronised for the user space address range.
 * This is used for the ARM private sys_cacheflush system call.
 */
#define flush_cache_user_range(s,e)	__cpuc_coherent_user_range(s,e)

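/*
 * Illustrative sketch (user-space side, not part of this header): a JIT
 * that has written instructions into a buffer invokes the private
 * syscall, e.g.
 *
 *	syscall(__ARM_NR_cacheflush, code, code + code_len, 0);
 *
 * which ends up here and makes the I-cache coherent with the freshly
 * written D-cache contents for that range.
 */
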
/*
 * Perform necessary cache operations to ensure that data previously
 * stored within this range of addresses can be executed by the CPU.
 */
#define flush_icache_range(s,e)	__cpuc_coherent_kern_range(s,e)

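/*
 * Illustrative sketch (assumed caller): kernel code patching follows
 * the usual pattern of writing an instruction and then making it
 * visible to the instruction stream, roughly
 *
 *	*(u32 *)addr = new_insn;
 *	flush_icache_range((unsigned long)addr,
 *			   (unsigned long)addr + sizeof(u32));
 */
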
/*
 * Perform necessary cache operations to ensure that the TLB will
 * see data written in the specified area.
 */
#define clean_dcache_area(start,size)	cpu_dcache_clean_area(start, size)

/*
 * flush_dcache_page is used when the kernel has written to the page
 * cache page at virtual address page->virtual.
 *
 * If this page isn't mapped (ie, page_mapping == NULL), or it might
 * have userspace mappings, then we _must_ always clean + invalidate
 * the dcache entries associated with the kernel mapping.
 *
 * Otherwise we can defer the operation, and clean the cache when we are
 * about to change to user space.  This is the same method as used on SPARC64.
 * See update_mmu_cache for the user space part.
 */
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *);

static inline void flush_kernel_vmap_range(void *addr, int size)
{
	if ((cache_is_vivt() || cache_is_vipt_aliasing()))
		__cpuc_flush_dcache_area(addr, (size_t)size);
}
static inline void invalidate_kernel_vmap_range(void *addr, int size)
{
	if ((cache_is_vivt() || cache_is_vipt_aliasing()))
		__cpuc_flush_dcache_area(addr, (size_t)size);
}

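/*
 * Illustrative sketch (assumed caller, per Documentation/core-api/cachetlb.rst):
 * a driver doing I/O through a vmap alias brackets the operation so the
 * aliased cache lines cannot mask the transfer:
 *
 *	flush_kernel_vmap_range(vptr, len);	  // before the device reads
 *	... submit I/O on the underlying pages ...
 *	invalidate_kernel_vmap_range(vptr, len);  // before the CPU reads back
 */
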
#define ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma,
			 struct page *page, unsigned long vmaddr)
{
	extern void __flush_anon_page(struct vm_area_struct *vma,
				struct page *, unsigned long);
	if (PageAnon(page))
		__flush_anon_page(vma, page, vmaddr);
}

#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
extern void flush_kernel_dcache_page(struct page *);

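/*
 * Illustrative sketch (assumed caller): after modifying page cache data
 * through a kernel mapping, e.g.
 *
 *	vaddr = kmap_atomic(page);
 *	memcpy(vaddr + offset, src, len);
 *	flush_kernel_dcache_page(page);
 *	kunmap_atomic(vaddr);
 *
 * the kernel alias is cleaned so user mappings observe the update.
 */
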
#define flush_dcache_mmap_lock(mapping)		xa_lock_irq(&mapping->i_pages)
#define flush_dcache_mmap_unlock(mapping)	xa_unlock_irq(&mapping->i_pages)

#define flush_icache_user_range(vma,page,addr,len) \
	flush_dcache_page(page)

/*
 * We don't appear to need to do anything here.  In fact, if we did, we'd
 * duplicate cache flushing elsewhere performed by flush_dcache_page().
 */
#define flush_icache_page(vma,page)	do { } while (0)

/*
 * flush_cache_vmap() is used when creating mappings (eg, via vmap,
 * vmalloc, ioremap etc) in kernel space for pages.  On non-VIPT
 * caches, since the direct-mappings of these pages may contain cached
 * data, we need to do a full cache flush to ensure that writebacks
 * don't corrupt data placed into these pages via the new mappings.
 */
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
	if (!cache_is_vipt_nonaliasing())
		flush_cache_all();
	else
		/*
		 * set_pte_at() called from vmap_pte_range() does not
		 * have a DSB after cleaning the cache line.
		 */
		dsb(ishst);
}

static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
	if (!cache_is_vipt_nonaliasing())
		flush_cache_all();
}

/*
 * Memory synchronization helpers for mixed cached vs non-cached accesses.
 *
 * Some synchronization algorithms have to set states in memory with the
 * cache enabled or disabled depending on the code path.  It is crucial
 * to always ensure proper cache maintenance to update main memory right
 * away in that case.
 *
 * Any cached write must be followed by a cache clean operation.
 * Any cached read must be preceded by a cache invalidate operation.
 * Yet, in the read case, a cache flush i.e. atomic clean+invalidate
 * operation is needed to avoid discarding possible concurrent writes to the
 * accessed memory.
 *
 * Also, in order to prevent a cached writer from interfering with an
 * adjacent non-cached writer, each state variable must be located in
 * a separate cache line.
 */

/*
 * This needs to be >= the max cache writeback size of all
 * supported platforms included in the current kernel configuration.
 * This is used to align state variables to their own cache lines.
 */
#define __CACHE_WRITEBACK_ORDER		6  /* guessed from existing platforms */
#define __CACHE_WRITEBACK_GRANULE	(1 << __CACHE_WRITEBACK_ORDER)

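/*
 * Illustrative sketch: a state variable shared between cached and
 * non-cached observers gets its own writeback granule, e.g.
 *
 *	struct hs_flag {
 *		int owner;
 *	} __aligned(__CACHE_WRITEBACK_GRANULE);
 *
 * so cleaning the line holding "owner" cannot write back unrelated
 * neighbouring data.  (The struct name here is hypothetical.)
 */
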
/*
 * There is no __cpuc_clean_dcache_area but we use it anyway for
 * code intent clarity, and alias it to __cpuc_flush_dcache_area.
 */
#define __cpuc_clean_dcache_area __cpuc_flush_dcache_area

/*
 * Ensure preceding writes to *p by this CPU are visible to
 * subsequent reads by other CPUs:
 */
static inline void __sync_cache_range_w(volatile void *p, size_t size)
{
	char *_p = (char *)p;

	__cpuc_clean_dcache_area(_p, size);
	outer_clean_range(__pa(_p), __pa(_p + size));
}

/*
 * Ensure preceding writes to *p by other CPUs are visible to
 * subsequent reads by this CPU.  We must be careful not to
 * discard data simultaneously written by another CPU, hence the
 * usage of flush rather than invalidate operations.
 */
static inline void __sync_cache_range_r(volatile void *p, size_t size)
{
	char *_p = (char *)p;

#ifdef CONFIG_OUTER_CACHE
	if (outer_cache.flush_range) {
		/*
		 * Ensure dirty data migrated from other CPUs into our cache
		 * are cleaned out safely before the outer cache is cleaned:
		 */
		__cpuc_clean_dcache_area(_p, size);

		/* Clean and invalidate stale data for *p from outer ... */
		outer_flush_range(__pa(_p), __pa(_p + size));
	}
#endif

	/* ... and inner cache: */
	__cpuc_flush_dcache_area(_p, size);
}

#define sync_cache_w(ptr) __sync_cache_range_w(ptr, sizeof *(ptr))
#define sync_cache_r(ptr) __sync_cache_range_r(ptr, sizeof *(ptr))

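/*
 * Illustrative sketch: a CPU running with its cache on publishes a flag
 * to a CPU reading with caches off (e.g. early in a power-up path):
 *
 *	cpu_up_flag = 1;		// cached write ...
 *	sync_cache_w(&cpu_up_flag);	// ... pushed out to main memory
 *
 * while a cached reader of a flag written by a non-cached CPU does
 *
 *	sync_cache_r(&remote_flag);	// flush stale lines first
 *	if (remote_flag) ...
 *
 * The variable names are hypothetical; see the cache-line alignment
 * note above for how such flags should be laid out.
 */
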
/*
 * Disabling cache access for one CPU in an ARMv7 SMP system is tricky.
 * To do so we must:
 *
 * - Clear the SCTLR.C bit to prevent further cache allocations
 * - Flush the desired level of cache
 * - Clear the ACTLR "SMP" bit to disable local coherency
 *
 * ... and so without any intervening memory access in between those steps,
 * not even to the stack.
 *
 * WARNING -- After this has been called:
 *
 * - No ldrex/strex (and similar) instructions must be used.
 * - The CPU is obviously no longer coherent with the other CPUs.
 * - This is unlikely to work as expected if Linux is running non-secure.
 *
 * Note:
 *
 * - This is known to apply to several ARMv7 processor implementations,
 *   however some exceptions may exist.  Caveat emptor.
 *
 * - The clobber list is dictated by the call to v7_flush_dcache_*.
 *   fp is preserved to the stack explicitly prior to disabling the cache
 *   since adding it to the clobber list is incompatible with having
 *   CONFIG_FRAME_POINTER=y.  ip is saved as well in case r12-clobbering
 *   trampolines are inserted by the linker, and to keep sp 64-bit aligned.
 */
#define v7_exit_coherency_flush(level) \
	asm volatile( \
	".arch	armv7-a \n\t" \
	"stmfd	sp!, {fp, ip} \n\t" \
	"mrc	p15, 0, r0, c1, c0, 0	@ get SCTLR \n\t" \
	"bic	r0, r0, #"__stringify(CR_C)" \n\t" \
	"mcr	p15, 0, r0, c1, c0, 0	@ set SCTLR \n\t" \
	"isb	\n\t" \
	"bl	v7_flush_dcache_"__stringify(level)" \n\t" \
	"mrc	p15, 0, r0, c1, c0, 1	@ get ACTLR \n\t" \
	"bic	r0, r0, #(1 << 6)	@ disable local coherency \n\t" \
	"mcr	p15, 0, r0, c1, c0, 1	@ set ACTLR \n\t" \
	"isb	\n\t" \
	"dsb	\n\t" \
	"ldmfd	sp!, {fp, ip}" \
	: : : "r0","r1","r2","r3","r4","r5","r6","r7", \
	      "r9","r10","lr","memory" )

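/*
 * Illustrative sketch (assumed caller): a platform CPU-kill path in
 * hotplug or suspend code typically ends with
 *
 *	v7_exit_coherency_flush(louis);	// or "all" for the last CPU down
 *	... wait to be powered off ...
 *
 * after which this CPU must not touch coherent data again.
 */
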
void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
			     void *kaddr, unsigned long len);

#endif