/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  arch/arm/include/asm/cacheflush.h
 *
 *  Copyright (C) 1999-2002 Russell King
 */
#ifndef _ASMARM_CACHEFLUSH_H
#define _ASMARM_CACHEFLUSH_H

#include <linux/mm.h>

#include <asm/glue-cache.h>
#include <asm/shmparam.h>
#include <asm/cachetype.h>
#include <asm/outercache.h>

#define CACHE_COLOUR(vaddr)	((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)

/*
 * This flag is used to indicate that the page pointed to by a pte is clean
 * and does not require cleaning before returning it to the user.
 */
#define PG_dcache_clean PG_arch_1
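/*
 * Illustrative sketch (not part of this header), assuming the pattern the
 * arch cache-flushing code uses with this flag: it is tested and set
 * atomically so the expensive D-cache clean runs only once per page:
 *
 *	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
 *		__flush_dcache_page(mapping, page);
 */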

/*
 *	MM Cache Management
 *	===================
 *
 *	The arch/arm/mm/cache-*.S and arch/arm/mm/proc-*.S files
 *	implement these methods.
 *
 *	Start addresses are inclusive and end addresses are exclusive;
 *	start addresses should be rounded down, end addresses up.
 *
 *	See Documentation/core-api/cachetlb.rst for more information.
 *	Please note that the implementations of these, and their required
 *	effects, are cache-type (VIVT/VIPT/PIPT) specific.
 *
 *	flush_icache_all()
 *
 *		Unconditionally clean and invalidate the entire icache.
 *		Currently only needed for cache-v6.S and cache-v7.S, see
 *		__flush_icache_all for the generic implementation.
 *
 *	flush_kern_all()
 *
 *		Unconditionally clean and invalidate the entire cache.
 *
 *	flush_kern_louis()
 *
 *		Flush data cache levels up to the level of unification
 *		inner shareable and invalidate the I-cache.
 *		Only needed from v7 onwards, falls back to flush_cache_all()
 *		for all other processor versions.
 *
 *	flush_user_all()
 *
 *		Clean and invalidate all user space cache entries
 *		before a change of page tables.
 *
 *	flush_user_range(start, end, flags)
 *
 *		Clean and invalidate a range of cache entries in the
 *		specified address space before a change of page tables.
 *		- start - user start address (inclusive, page aligned)
 *		- end   - user end address   (exclusive, page aligned)
 *		- flags - vma->vm_flags field
 *
 *	coherent_kern_range(start, end)
 *
 *		Ensure coherency between the Icache and the Dcache in the
 *		region described by start, end.  If you have non-snooping
 *		Harvard caches, you need to implement this function.
 *		- start - virtual start address
 *		- end   - virtual end address
 *
 *	coherent_user_range(start, end)
 *
 *		Ensure coherency between the Icache and the Dcache in the
 *		region described by start, end.  If you have non-snooping
 *		Harvard caches, you need to implement this function.
 *		- start - virtual start address
 *		- end   - virtual end address
 *
 *	flush_kern_dcache_area(kaddr, size)
 *
 *		Ensure that the data held in the page is written back.
 *		- kaddr - page address
 *		- size  - region size
 *
 *	DMA Cache Coherency
 *	===================
 *
 *	dma_flush_range(start, end)
 *
 *		Clean and invalidate the specified virtual address range.
 *		- start - virtual start address
 *		- end   - virtual end address
 */
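/*
 * Illustrative sketch (not part of this header): per the contract above,
 * callers round the start down and the end up before invoking one of
 * these methods, e.g. to make a freshly written kernel range coherent:
 *
 *	unsigned long s = addr & PAGE_MASK;
 *	unsigned long e = PAGE_ALIGN(addr + len);
 *	__cpuc_coherent_kern_range(s, e);
 */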

struct cpu_cache_fns {
	void (*flush_icache_all)(void);
	void (*flush_kern_all)(void);
	void (*flush_kern_louis)(void);
	void (*flush_user_all)(void);
	void (*flush_user_range)(unsigned long, unsigned long, unsigned int);

	void (*coherent_kern_range)(unsigned long, unsigned long);
	int  (*coherent_user_range)(unsigned long, unsigned long);
	void (*flush_kern_dcache_area)(void *, size_t);

	void (*dma_map_area)(const void *, size_t, int);
	void (*dma_unmap_area)(const void *, size_t, int);

	void (*dma_flush_range)(const void *, const void *);
} __no_randomize_layout;

/*
 * Select the calling method
 */
#ifdef MULTI_CACHE

extern struct cpu_cache_fns cpu_cache;

#define __cpuc_flush_icache_all		cpu_cache.flush_icache_all
#define __cpuc_flush_kern_all		cpu_cache.flush_kern_all
#define __cpuc_flush_kern_louis		cpu_cache.flush_kern_louis
#define __cpuc_flush_user_all		cpu_cache.flush_user_all
#define __cpuc_flush_user_range		cpu_cache.flush_user_range
#define __cpuc_coherent_kern_range	cpu_cache.coherent_kern_range
#define __cpuc_coherent_user_range	cpu_cache.coherent_user_range
#define __cpuc_flush_dcache_area	cpu_cache.flush_kern_dcache_area

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
#define dmac_flush_range		cpu_cache.dma_flush_range
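/*
 * Illustrative sketch (not part of this header): with MULTI_CACHE, each
 * processor's low-level code populates one cpu_cache_fns instance (the
 * names below follow that convention but are shown here as examples):
 *
 *	struct cpu_cache_fns v7_cache_fns = {
 *		.flush_icache_all = v7_flush_icache_all,
 *		.flush_kern_all   = v7_flush_kern_cache_all,
 *		...
 *	};
 *
 * Either configuration then resolves the same call sites, e.g.
 * __cpuc_flush_kern_all().
 */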

#else

extern void __cpuc_flush_icache_all(void);
extern void __cpuc_flush_kern_all(void);
extern void __cpuc_flush_kern_louis(void);
extern void __cpuc_flush_user_all(void);
extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
extern int __cpuc_coherent_user_range(unsigned long, unsigned long);
extern void __cpuc_flush_dcache_area(void *, size_t);

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
extern void dmac_flush_range(const void *, const void *);

#endif

/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 */
extern void copy_to_user_page(struct vm_area_struct *, struct page *,
	unsigned long, void *, const void *, unsigned long);
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	do {					\
		memcpy(dst, src, len);		\
	} while (0)
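/*
 * Illustrative sketch (not part of this header), modelled on the generic
 * remote-VM access path in mm/memory.c: writes into another process's
 * page go through copy_to_user_page() so aliasing caches stay coherent:
 *
 *	maddr = kmap(page);
 *	copy_to_user_page(vma, page, addr, maddr + offset, buf, bytes);
 *	set_page_dirty_lock(page);
 *	kunmap(page);
 */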

/*
 * Convert calls to our calling convention.
 */

/* Invalidate I-cache */
#define __flush_icache_all_generic()				\
	asm("mcr	p15, 0, %0, c7, c5, 0"			\
	    : : "r" (0));

/* Invalidate I-cache inner shareable */
#define __flush_icache_all_v7_smp()				\
	asm("mcr	p15, 0, %0, c7, c1, 0"			\
	    : : "r" (0));

/*
 * Optimized __flush_icache_all for the common cases. Note that UP ARMv7
 * will fall through to use __flush_icache_all_generic.
 */
#if (defined(CONFIG_CPU_V7) &&					\
     (defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K))) ||	\
	defined(CONFIG_SMP_ON_UP)
#define __flush_icache_preferred	__cpuc_flush_icache_all
#elif __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
#define __flush_icache_preferred	__flush_icache_all_v7_smp
#elif __LINUX_ARM_ARCH__ == 6 && defined(CONFIG_ARM_ERRATA_411920)
#define __flush_icache_preferred	__cpuc_flush_icache_all
#else
#define __flush_icache_preferred	__flush_icache_all_generic
#endif

static inline void __flush_icache_all(void)
{
	__flush_icache_preferred();
	dsb(ishst);
}

/*
 * Flush caches up to Level of Unification Inner Shareable
 */
#define flush_cache_louis()		__cpuc_flush_kern_louis()
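/*
 * Illustrative sketch (not part of this header): flushing to the Level of
 * Unification Inner Shareable is the cheaper option when only this CPU's
 * private cache levels need pushing out, e.g. in a CPU power-down path:
 *
 *	flush_cache_louis();
 *
 * rather than a full flush_cache_all() that would also walk cache levels
 * shared with other CPUs.
 */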

#define flush_cache_all()		__cpuc_flush_kern_all()

static inline void vivt_flush_cache_mm(struct mm_struct *mm)
{
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
		__cpuc_flush_user_all();
}

static inline void
vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
		__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
					vma->vm_flags);
}

static inline void
vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
	struct mm_struct *mm = vma->vm_mm;

	if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {
		unsigned long addr = user_addr & PAGE_MASK;
		__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
	}
}

#ifndef CONFIG_CPU_CACHE_VIPT
#define flush_cache_mm(mm) \
		vivt_flush_cache_mm(mm)
#define flush_cache_range(vma, start, end) \
		vivt_flush_cache_range(vma, start, end)
#define flush_cache_page(vma, addr, pfn) \
		vivt_flush_cache_page(vma, addr, pfn)
#else
extern void flush_cache_mm(struct mm_struct *mm);
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn);
#endif

#define flush_cache_dup_mm(mm) flush_cache_mm(mm)

/*
 * flush_cache_user_range is used when we want to ensure that the
 * Harvard caches are synchronised for the user space address range.
 * This is used for the ARM private sys_cacheflush system call.
 */
#define flush_cache_user_range(s, e)	__cpuc_coherent_user_range(s, e)
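/*
 * Illustrative user-space counterpart (a sketch, not kernel code): a JIT
 * that has just written instructions invokes the private syscall so both
 * caches are synchronised before the new code is executed:
 *
 *	#include <unistd.h>
 *	#include <asm/unistd.h>
 *
 *	syscall(__ARM_NR_cacheflush, start, end, 0);
 */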

/*
 * Perform necessary cache operations to ensure that data previously
 * stored within this range of addresses can be executed by the CPU.
 */
#define flush_icache_range(s, e)	__cpuc_coherent_kern_range(s, e)
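/*
 * Illustrative sketch (not part of this header): the usual in-kernel
 * pattern after writing instructions into memory:
 *
 *	memcpy(dst, insns, len);
 *	flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
 */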

/*
 * Perform necessary cache operations to ensure that the TLB will
 * see data written in the specified area.
 */
#define clean_dcache_area(start, size)	cpu_dcache_clean_area(start, size)
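/*
 * Illustrative sketch (not part of this header), modelled on page table
 * allocation: newly written translation tables are cleaned so the
 * hardware table walker observes them:
 *
 *	memcpy(new_pgd, init_pgd, n * sizeof(pgd_t));
 *	clean_dcache_area(new_pgd, n * sizeof(pgd_t));
 */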

/*
 * flush_dcache_page is used when the kernel has written to the page
 * cache page at virtual address page->virtual.
 *
 * If this page isn't mapped (i.e., page_mapping == NULL), or it might
 * have userspace mappings, then we _must_ always clean + invalidate
 * the dcache entries associated with the kernel mapping.
 *
 * Otherwise we can defer the operation, and clean the cache when we are
 * about to change to user space.  This is the same method as used on SPARC64.
 * See update_mmu_cache for the user space part.
 */
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *);
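/*
 * Illustrative sketch (not part of this header): a driver or filesystem
 * that writes into a page cache page through the kernel mapping calls
 * flush_dcache_page() afterwards:
 *
 *	void *addr = kmap(page);
 *	memcpy(addr, data, len);
 *	flush_dcache_page(page);
 *	kunmap(page);
 */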

static inline void flush_kernel_vmap_range(void *addr, int size)
{
	if (cache_is_vivt() || cache_is_vipt_aliasing())
		__cpuc_flush_dcache_area(addr, (size_t)size);
}
static inline void invalidate_kernel_vmap_range(void *addr, int size)
{
	if (cache_is_vivt() || cache_is_vipt_aliasing())
		__cpuc_flush_dcache_area(addr, (size_t)size);
}
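/*
 * Illustrative sketch (not part of this header): a caller doing I/O on a
 * vmap/vmalloc alias brackets the transfer with these helpers:
 *
 *	flush_kernel_vmap_range(buf, len);	@ before the device reads buf
 *	... perform the I/O ...
 *	invalidate_kernel_vmap_range(buf, len);	@ before the CPU reads data
 *						@ the device wrote into buf
 */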

#define ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma,
			 struct page *page, unsigned long vmaddr)
{
	extern void __flush_anon_page(struct vm_area_struct *vma,
				struct page *, unsigned long);
	if (PageAnon(page))
		__flush_anon_page(vma, page, vmaddr);
}

#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
extern void flush_kernel_dcache_page(struct page *);

#define flush_dcache_mmap_lock(mapping)		xa_lock_irq(&mapping->i_pages)
#define flush_dcache_mmap_unlock(mapping)	xa_unlock_irq(&mapping->i_pages)

#define flush_icache_user_range(vma, page, addr, len) \
	flush_dcache_page(page)

/*
 * We don't appear to need to do anything here.  In fact, if we did, we'd
 * duplicate cache flushing elsewhere performed by flush_dcache_page().
 */
#define flush_icache_page(vma, page)	do { } while (0)

/*
 * flush_cache_vmap() is used when creating mappings (e.g., via vmap,
 * vmalloc, ioremap etc) in kernel space for pages.  On non-VIPT
 * caches, since the direct-mappings of these pages may contain cached
 * data, we need to do a full cache flush to ensure that writebacks
 * don't corrupt data placed into these pages via the new mappings.
 */
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
	if (!cache_is_vipt_nonaliasing())
		flush_cache_all();
	else
		/*
		 * set_pte_at() called from vmap_pte_range() does not
		 * have a DSB after cleaning the cache line.
		 */
		dsb(ishst);
}

static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
	if (!cache_is_vipt_nonaliasing())
		flush_cache_all();
}

/*
 * Memory synchronization helpers for mixed cached vs non cached accesses.
 *
 * Some synchronization algorithms have to set states in memory with the
 * cache enabled or disabled depending on the code path.  It is crucial
 * to always ensure proper cache maintenance to update main memory right
 * away in that case.
 *
 * Any cached write must be followed by a cache clean operation.
 * Any cached read must be preceded by a cache invalidate operation.
 * Yet, in the read case, a cache flush, i.e. an atomic clean+invalidate
 * operation, is needed to avoid discarding possible concurrent writes to
 * the accessed memory.
 *
 * Also, in order to prevent a cached writer from interfering with an
 * adjacent non-cached writer, each state variable must be located in
 * a separate cache line.
 */

/*
 * This needs to be >= the max cache writeback size of all
 * supported platforms included in the current kernel configuration.
 * This is used to align state variables to their own cache lines.
 */
#define __CACHE_WRITEBACK_ORDER 6	/* guessed from existing platforms */
#define __CACHE_WRITEBACK_GRANULE (1 << __CACHE_WRITEBACK_ORDER)
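/*
 * Illustrative sketch (not part of this header): giving a shared state
 * variable its own writeback granule so cached and non-cached writers
 * cannot interfere (the variable name is hypothetical):
 *
 *	static int mixed_access_state __aligned(__CACHE_WRITEBACK_GRANULE);
 */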

/*
 * There is no __cpuc_clean_dcache_area, but we use the name anyway for
 * clarity of intent, aliasing it to __cpuc_flush_dcache_area.
 */
#define __cpuc_clean_dcache_area __cpuc_flush_dcache_area

/*
 * Ensure preceding writes to *p by this CPU are visible to
 * subsequent reads by other CPUs:
 */
static inline void __sync_cache_range_w(volatile void *p, size_t size)
{
	char *_p = (char *)p;

	__cpuc_clean_dcache_area(_p, size);
	outer_clean_range(__pa(_p), __pa(_p + size));
}

/*
 * Ensure preceding writes to *p by other CPUs are visible to
 * subsequent reads by this CPU.  We must be careful not to
 * discard data simultaneously written by another CPU, hence the
 * usage of flush rather than invalidate operations.
 */
static inline void __sync_cache_range_r(volatile void *p, size_t size)
{
	char *_p = (char *)p;

#ifdef CONFIG_OUTER_CACHE
	if (outer_cache.flush_range) {
		/*
		 * Ensure dirty data migrated from other CPUs into our cache
		 * are cleaned out safely before the outer cache is cleaned:
		 */
		__cpuc_clean_dcache_area(_p, size);

		/* Clean and invalidate stale data for *p from outer ... */
		outer_flush_range(__pa(_p), __pa(_p + size));
	}
#endif

	/* ... and inner cache: */
	__cpuc_flush_dcache_area(_p, size);
}

#define sync_cache_w(ptr) __sync_cache_range_w(ptr, sizeof *(ptr))
#define sync_cache_r(ptr) __sync_cache_range_r(ptr, sizeof *(ptr))
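/*
 * Illustrative sketch (not part of this header): publishing a value to a
 * CPU that may observe memory with its cache disabled, and reading one
 * back (the variable name is hypothetical):
 *
 *	cpu_entry_state = READY;	@ cached write
 *	sync_cache_w(&cpu_entry_state);	@ push it out to main memory
 *	...
 *	sync_cache_r(&cpu_entry_state);	@ pick up a possibly uncached write
 *	if (cpu_entry_state == READY)
 *		...
 */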

/*
 * Disabling cache access for one CPU in an ARMv7 SMP system is tricky.
 * To do so we must:
 *
 * - Clear the SCTLR.C bit to prevent further cache allocations
 * - Flush the desired level of cache
 * - Clear the ACTLR "SMP" bit to disable local coherency
 *
 * ... and do so without any intervening memory access between those
 * steps, not even to the stack.
 *
 * WARNING -- After this has been called:
 *
 * - No ldrex/strex (and similar) instructions must be used.
 * - The CPU is obviously no longer coherent with the other CPUs.
 * - This is unlikely to work as expected if Linux is running non-secure.
 *
 * Note:
 *
 * - This is known to apply to several ARMv7 processor implementations,
 *   however some exceptions may exist.  Caveat emptor.
 *
 * - The clobber list is dictated by the call to v7_flush_dcache_*.
 *   fp is preserved to the stack explicitly prior to disabling the cache
 *   since adding it to the clobber list is incompatible with having
 *   CONFIG_FRAME_POINTER=y.  ip is saved as well in case r12-clobbering
 *   trampolines are inserted by the linker, and to keep sp 64-bit aligned.
 */
#define v7_exit_coherency_flush(level) \
	asm volatile( \
	".arch	armv7-a \n\t" \
	"stmfd	sp!, {fp, ip} \n\t" \
	"mrc	p15, 0, r0, c1, c0, 0	@ get SCTLR \n\t" \
	"bic	r0, r0, #"__stringify(CR_C)" \n\t" \
	"mcr	p15, 0, r0, c1, c0, 0	@ set SCTLR \n\t" \
	"isb	\n\t" \
	"bl	v7_flush_dcache_"__stringify(level)" \n\t" \
	"mrc	p15, 0, r0, c1, c0, 1	@ get ACTLR \n\t" \
	"bic	r0, r0, #(1 << 6)	@ disable local coherency \n\t" \
	"mcr	p15, 0, r0, c1, c0, 1	@ set ACTLR \n\t" \
	"isb	\n\t" \
	"dsb	\n\t" \
	"ldmfd	sp!, {fp, ip}" \
	: : : "r0","r1","r2","r3","r4","r5","r6","r7", \
	      "r9","r10","lr","memory" )
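/*
 * Illustrative sketch (not part of this header): a platform power-down
 * path takes the dying CPU out of coherency before it is switched off:
 *
 *	v7_exit_coherency_flush(louis);	@ or 'all' for the last CPU
 *					@ in a cluster
 */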

void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
			     void *kaddr, unsigned long len);

#endif