/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * vineetg: May 2011
 *  -Refactored get_new_mmu_context( ) to only handle live-mm.
 *   retiring-mm handled in other hooks
 *
 * Vineetg: March 25th, 2008: Bug #92690
 *  -Major rewrite of Core ASID allocation routine get_new_mmu_context
 *
 * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
 */

#ifndef _ASM_ARC_MMU_CONTEXT_H
#define _ASM_ARC_MMU_CONTEXT_H

#include <asm/arcregs.h>
#include <asm/tlb.h>
#include <linux/sched/mm.h>

#include <asm-generic/mm_hooks.h>

/* ARC700 ASID Management
 *
 * The ARC MMU provides an 8-bit ASID (0..255) to tag TLB entries, allowing
 * entries with the same vaddr (from different tasks) to co-exist. This
 * provides for a "Fast Context Switch", i.e. no TLB flush on ctxt-switch.
 *
 * Linux assigns each task a unique ASID. A simple round-robin allocation
 * of H/w ASID is done using the software tracker @asid_cpu.
 * When it reaches max 255, the allocation cycle starts afresh by flushing
 * the entire TLB and wrapping ASID back to zero.
 *
 * A new allocation cycle, post rollover, could potentially reassign an ASID
 * to a different task. Thus the rule is to refresh the ASID in a new cycle.
 * The 32 bit @asid_cpu (and mm->asid) carry the 8 bit MMU PID in the lower
 * bits, while the remaining 24 bits serve as a cycle/generation indicator;
 * natural 32 bit unsigned math automagically increments the generation when
 * the lower 8 bits roll over.
 */
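/*
 * A worked example of the arithmetic above (the concrete numbers are purely
 * illustrative, not taken from any particular run): suppose a CPU's
 * @asid_cache holds 0x1ff, i.e. generation 0x100 and h/w PID 0xff. The next
 * allocation increments it to 0x200: the low 8 bits roll over to zero,
 * triggering a full TLB flush, while plain 32 bit unsigned math bumps the
 * generation to 0x200 for free. Any mm still tagged with generation 0x100
 * now fails the cycle check in get_new_mmu_context() and gets a fresh ASID
 * before it is allowed to run.
 */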

#define MM_CTXT_ASID_MASK	0x000000ff /* MMU PID reg :8 bit PID */
#define MM_CTXT_CYCLE_MASK	(~MM_CTXT_ASID_MASK)

#define MM_CTXT_FIRST_CYCLE	(MM_CTXT_ASID_MASK + 1)
#define MM_CTXT_NO_ASID		0UL

#define asid_mm(mm, cpu)	mm->context.asid[cpu]
#define hw_pid(mm, cpu)		(asid_mm(mm, cpu) & MM_CTXT_ASID_MASK)

DECLARE_PER_CPU(unsigned int, asid_cache);
#define asid_cpu(cpu)		per_cpu(asid_cache, cpu)

/*
 * Get a new ASID if the task doesn't have a valid one (unallocated or from a
 * previous cycle). Also sets the MMU PID register to the existing/updated ASID.
 */
static inline void get_new_mmu_context(struct mm_struct *mm)
{
	const unsigned int cpu = smp_processor_id();
	unsigned long flags;

	local_irq_save(flags);

	/*
	 * Move to a new ASID if it was not from the current alloc-cycle/
	 * generation. This is done by ensuring that the generation bits in
	 * both mm->ASID and the cpu's ASID counter are exactly the same.
	 *
	 * Note: Callers needing a new ASID unconditionally, independent of
	 *	 generation, e.g. local_flush_tlb_mm() for a forking parent,
	 *	 first need to destroy the context, setting it to the invalid
	 *	 value.
	 */
	if (!((asid_mm(mm, cpu) ^ asid_cpu(cpu)) & MM_CTXT_CYCLE_MASK))
		goto set_hw;

	/* move to a new ASID and handle rollover */
	if (unlikely(!(++asid_cpu(cpu) & MM_CTXT_ASID_MASK))) {

		local_flush_tlb_all();

		/*
		 * The check above is for rollover of the 8 bit ASID in the
		 * 32 bit container. If the container itself wrapped around,
		 * set it to a non-zero "generation" to distinguish it from
		 * no context.
		 */
		if (!asid_cpu(cpu))
			asid_cpu(cpu) = MM_CTXT_FIRST_CYCLE;
	}

	/* Assign the new ASID to the task */
	asid_mm(mm, cpu) = asid_cpu(cpu);

set_hw:
	write_aux_reg(ARC_REG_PID, hw_pid(mm, cpu) | MMU_ENABLE);

	local_irq_restore(flags);
}
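/*
 * A minimal sketch of the "unconditional new ASID" pattern referred to in
 * the note above (a rough model of what a caller such as
 * local_flush_tlb_mm() is expected to do, not a definition of it):
 * invalidate the stored ASID first so the cycle check cannot short-circuit,
 * then reallocate.
 *
 *	destroy_context(mm);		(mm's ASID on this cpu becomes MM_CTXT_NO_ASID)
 *	get_new_mmu_context(mm);	(now guaranteed to hand out a fresh ASID)
 */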

/*
 * Initialize the context related info for a new mm_struct
 * instance.
 */
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	int i;

	for_each_possible_cpu(i)
		asid_mm(mm, i) = MM_CTXT_NO_ASID;

	return 0;
}

static inline void destroy_context(struct mm_struct *mm)
{
	unsigned long flags;

	/* Needed to elide CONFIG_DEBUG_PREEMPT warning */
	local_irq_save(flags);
	asid_mm(mm, smp_processor_id()) = MM_CTXT_NO_ASID;
	local_irq_restore(flags);
}

/*
 * Prepare the MMU for the task: set up the PID reg with the allocated ASID.
 * If the task doesn't have an ASID (never allocated, or stolen in a previous
 * cycle), get a new one.
 */
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	const int cpu = smp_processor_id();

	/*
	 * Note that the mm_cpumask is "aggregating" only: we don't clear it
	 * for the switched-out task, unlike some other arches.
	 * It is used to enlist cpus for sending TLB flush IPIs; not sending
	 * the IPI to CPUs where the task once ran could cause stale TLB
	 * entry re-use, especially for a multi-threaded task.
	 * e.g. T1 runs on C1, migrates to C3. T2 running on C2 munmaps.
	 *      With a non-aggregating mm_cpumask, the IPI is not sent to C1,
	 *      and if T1 were to re-migrate to C1, it could access the
	 *      unmapped region via any existing stale TLB entries.
	 */
	cpumask_set_cpu(cpu, mm_cpumask(next));

#ifndef CONFIG_SMP
	/* PGD cached in MMU reg to avoid 3 mem lookups: task->mm->pgd */
	write_aux_reg(ARC_REG_SCRATCH_DATA0, next->pgd);
#endif

	get_new_mmu_context(next);
}

/*
 * Called at the time of execve() to get a new ASID.
 * Note the subtlety here: get_new_mmu_context() behaves differently here
 * vs. in switch_mm(). Here it always returns a new ASID, because the mm has
 * the unallocated "initial" value, while in the latter it moves to a new
 * ASID only if the existing one is stale (unallocated or from a previous
 * cycle).
 */
#define activate_mm(prev, next)		switch_mm(prev, next, NULL)
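/*
 * Why execve() always ends up with a fresh ASID (explanatory note; the
 * seeding of @asid_cache is an assumption about the rest of the MMU code,
 * not something enforced in this header): init_new_context() sets every
 * per-cpu slot of the new mm to MM_CTXT_NO_ASID (0), whose generation bits
 * can never match those of @asid_cache, which is expected to start at
 * MM_CTXT_FIRST_CYCLE and only move forward. The cycle check in
 * get_new_mmu_context() therefore always fails for a brand new mm, forcing
 * allocation of a new ASID.
 */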

/*
 * It seemed that deactivate_mm( ) would be a reasonable place to do the
 * book-keeping for the retiring mm. However destroy_context( ) still needs
 * to do it, because between
 *	mm_release( ) => deactivate_mm( )
 * and
 *	mmput( ) => .. => __mmdrop( ) => destroy_context( )
 * there is a good chance that the task gets sched-out/in, making its ASID
 * valid again (this teased me for a whole day).
 */
#define deactivate_mm(tsk, mm)		do { } while (0)

#define enter_lazy_tlb(mm, tsk)

#endif	/* _ASM_ARC_MMU_CONTEXT_H */