/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * linux/arch/unicore32/include/asm/cacheflush.h
 *
 * Code specific to PKUnity SoC and UniCore ISA
 *
 * Copyright (C) 2001-2010 GUAN Xue-tao
 */
#ifndef __UNICORE_CACHEFLUSH_H__
#define __UNICORE_CACHEFLUSH_H__

#include <linux/mm.h>

#include <asm/shmparam.h>

#define CACHE_COLOUR(vaddr)	(((vaddr) & (SHMLBA - 1)) >> PAGE_SHIFT)
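
/*
 * Illustrative sketch, not part of the original header: in a VIPT
 * cache, two virtual mappings of the same page can alias only when
 * their cache colours differ, so a helper like this (hypothetical
 * name) could gate alias handling.
 */
static inline int example_colours_match(unsigned long va1, unsigned long va2)
{
	/* equal colours mean both mappings index the same cache set */
	return CACHE_COLOUR(va1) == CACHE_COLOUR(va2);
}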

/*
 * This flag is used to indicate that the page pointed to by a pte is clean
 * and does not require cleaning before returning it to the user.
 */
#define PG_dcache_clean		PG_arch_1

/*
 * MM Cache Management
 * ===================
 *
 * The arch/unicore32/mm/cache.S file implements these methods.
 *
 * Start addresses are inclusive and end addresses are exclusive;
 * start addresses should be rounded down, end addresses up.
 *
 * See Documentation/core-api/cachetlb.rst for more information.
 * Please note that the implementation of these, and the required
 * effects, are cache-type (VIVT/VIPT/PIPT) specific.
 *
 *	flush_icache_all()
 *
 *		Unconditionally clean and invalidate the entire icache.
 *		See the __flush_icache_all() helper below for the generic
 *		inline implementation.
 *
 *	flush_kern_all()
 *
 *		Unconditionally clean and invalidate the entire cache.
 *
 *	flush_user_all()
 *
 *		Clean and invalidate all user space cache entries
 *		before a change of page tables.
 *
 *	flush_user_range(start, end, flags)
 *
 *		Clean and invalidate a range of cache entries in the
 *		specified address space before a change of page tables.
 *		- start	- user start address (inclusive, page aligned)
 *		- end	- user end address (exclusive, page aligned)
 *		- flags	- vma->vm_flags field
 *
 *	coherent_kern_range(start, end)
 *
 *		Ensure coherency between the Icache and the Dcache in the
 *		region described by start, end.  If you have non-snooping
 *		Harvard caches, you need to implement this function.
 *		- start	- virtual start address
 *		- end	- virtual end address
 *
 *	coherent_user_range(start, end)
 *
 *		Ensure coherency between the Icache and the Dcache in the
 *		region described by start, end.  If you have non-snooping
 *		Harvard caches, you need to implement this function.
 *		- start	- virtual start address
 *		- end	- virtual end address
 *
 *	flush_kern_dcache_area(kaddr, size)
 *
 *		Ensure that the data held in the page is written back.
 *		- kaddr	- page address
 *		- size	- region size
 *
 * DMA Cache Coherency
 * ===================
 *
 *	dma_flush_range(start, end)
 *
 *		Clean and invalidate the specified virtual address range.
 *		- start	- virtual start address
 *		- end	- virtual end address
 *
 * An illustrative usage sketch follows the prototype declarations below.
 */

extern void __cpuc_flush_icache_all(void);
extern void __cpuc_flush_kern_all(void);
extern void __cpuc_flush_user_all(void);
extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
extern void __cpuc_coherent_user_range(unsigned long, unsigned long);
extern void __cpuc_flush_dcache_area(void *, size_t);
extern void __cpuc_flush_kern_dcache_area(void *addr, size_t size);
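
/*
 * Hedged usage sketch (added for illustration; "buf", "len" and the
 * helper name are hypothetical): honouring the convention documented
 * above that start addresses round down and end addresses round up.
 * Page granularity is used for simplicity; cache-line granularity
 * would also satisfy the convention.
 */
static inline void example_coherent_kernel_range(void *buf, size_t len)
{
	unsigned long start = (unsigned long)buf & PAGE_MASK;	/* round down */
	unsigned long end = PAGE_ALIGN((unsigned long)buf + len); /* round up */

	/* make the I-cache and D-cache coherent over [start, end) */
	__cpuc_coherent_kern_range(start, end);
}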

/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 */
extern void copy_to_user_page(struct vm_area_struct *, struct page *,
	unsigned long, void *, const void *, unsigned long);
#define copy_from_user_page(vma, page, vaddr, dst, src, len)	\
	do {							\
		memcpy(dst, src, len);				\
	} while (0)
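
/*
 * Illustrative sketch (hypothetical caller, not part of this header):
 * a ptrace-style write into another process's page.  "kaddr" is
 * assumed to be the kernel-side mapping of "page" at the same offset
 * as the user address "uaddr".
 */
static inline void example_poke_user_page(struct vm_area_struct *vma,
					  struct page *page,
					  unsigned long uaddr, void *kaddr,
					  const void *src, unsigned long len)
{
	/* copies the bytes and performs the cache maintenance userspace needs */
	copy_to_user_page(vma, page, uaddr, kaddr, src, len);
}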

/*
 * Convert calls to our calling convention.
 */
/* Invalidate I-cache */
static inline void __flush_icache_all(void)
{
	asm("movc	p0.c5, %0, #20;\n"	/* cache op: invalidate the I-cache */
	    "nop; nop; nop; nop; nop; nop; nop; nop\n" /* settle the pipeline after the cache op */
	    :
	    : "r" (0));
}

#define flush_cache_all()		__cpuc_flush_kern_all()

extern void flush_cache_mm(struct mm_struct *mm);
extern void flush_cache_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end);
extern void flush_cache_page(struct vm_area_struct *vma,
		unsigned long user_addr, unsigned long pfn);

#define flush_cache_dup_mm(mm)		flush_cache_mm(mm)

/*
 * flush_cache_user_range is used when we want to ensure that the
 * Harvard caches are synchronised for the user space address range.
 * This is used for the UniCore private sys_cacheflush system call.
 */
#define flush_cache_user_range(vma, start, end) \
	__cpuc_coherent_user_range((start) & PAGE_MASK, PAGE_ALIGN(end))
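
/*
 * Minimal sketch of a caller (an assumed shape, not the real syscall
 * implementation): how sys_cacheflush might hand a user range to the
 * helper above once it has looked up and validated the vma.  No error
 * handling is shown.
 */
static inline void example_cacheflush_vma(struct vm_area_struct *vma,
					  unsigned long start,
					  unsigned long end)
{
	/* rounding to page boundaries happens inside the macro */
	flush_cache_user_range(vma, start, end);
}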

/*
 * Perform necessary cache operations to ensure that data previously
 * stored within this range of addresses can be executed by the CPU.
 */
#define flush_icache_range(s, e)	__cpuc_coherent_kern_range(s, e)
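
/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * API): after the kernel writes instructions into a buffer, make them
 * safe to execute.
 */
static inline void example_sync_for_execute(void *code, size_t len)
{
	unsigned long start = (unsigned long)code;

	/* write back the D-cache and invalidate the I-cache over the new code */
	flush_icache_range(start, start + len);
}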

/*
 * Perform necessary cache operations to ensure that the TLB will
 * see data written in the specified area.
 */
#define clean_dcache_area(start, size)	cpu_dcache_clean_area(start, size)

/*
 * flush_dcache_page is used when the kernel has written to the page
 * cache page at virtual address page->virtual.
 *
 * If this page isn't mapped (ie, page_mapping == NULL), or it might
 * have userspace mappings, then we _must_ always clean + invalidate
 * the dcache entries associated with the kernel mapping.
 *
 * Otherwise we can defer the operation, and clean the cache when we are
 * about to change to user space.  This is the same method as used on SPARC64.
 * See update_mmu_cache for the user space part.
 */
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *);
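
/*
 * Sketch of the deferred-flush idea described above (an assumption
 * about how mm code might use PG_dcache_clean, not a copy of it;
 * assumes test_and_set_bit() and page_address() are in scope and the
 * page lives in lowmem).
 */
static inline void example_sync_kernel_alias(struct page *page)
{
	/* flush the kernel mapping only if the page was not already clean */
	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
		__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}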

#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)

#define flush_icache_user_range(vma, page, addr, len)	\
	flush_dcache_page(page)

/*
 * We don't appear to need to do anything here.  In fact, if we did, we'd
 * duplicate cache flushing elsewhere performed by flush_dcache_page().
 */
#define flush_icache_page(vma, page)	do { } while (0)

/*
 * flush_cache_vmap() is used when creating mappings (e.g., via vmap,
 * vmalloc, ioremap, etc.) in kernel space for pages.  On non-VIPT
 * caches, since the direct-mappings of these pages may contain cached
 * data, we need to do a full cache flush to ensure that writebacks
 * don't corrupt data placed into these pages via the new mappings.
 * The caches here do not require this, so both helpers are empty.
 */
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
}

static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
}

#endif	/* __UNICORE_CACHEFLUSH_H__ */