/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_MMU_CONTEXT_H
#define _ASM_X86_MMU_CONTEXT_H

#include <asm/desc.h>
#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/pkeys.h>

#include <trace/events/tlb.h>

#include <asm/tlbflush.h>
#include <asm/paravirt.h>
#include <asm/debugreg.h>

extern atomic64_t last_mm_ctx_id;

#ifndef CONFIG_PARAVIRT_XXL
static inline void paravirt_activate_mm(struct mm_struct *prev,
                                        struct mm_struct *next)
{
}
#endif /* !CONFIG_PARAVIRT_XXL */

#ifdef CONFIG_PERF_EVENTS
DECLARE_STATIC_KEY_FALSE(rdpmc_never_available_key);
DECLARE_STATIC_KEY_FALSE(rdpmc_always_available_key);
void cr4_update_pce(void *ignored);
#endif
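
/*
 * Hedged note: these static keys gate user-space RDPMC; cr4_update_pce()
 * is expected to set or clear CR4.PCE on each CPU accordingly, so the
 * perf core can toggle unprivileged counter reads at runtime.
 */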

#ifdef CONFIG_MODIFY_LDT_SYSCALL
/*
 * ldt_structs can be allocated, used, and freed, but they are never
 * modified while live.
 */
struct ldt_struct {
        /*
         * Xen requires page-aligned LDTs with special permissions. This is
         * needed to prevent us from installing evil descriptors such as
         * call gates. On native, we could merge the ldt_struct and LDT
         * allocations, but it's not worth trying to optimize.
         */
        struct desc_struct *entries;
        unsigned int nr_entries;

        /*
         * If PTI is in use, then the entries array is not mapped while we're
         * in user mode. The whole array will be aliased at the address
         * given by ldt_slot_va(slot). We use two slots so that we can allocate
         * and map, and enable a new LDT without invalidating the mapping
         * of an older, still-in-use LDT.
         *
         * slot will be -1 if this LDT doesn't have an alias mapping.
         */
        int slot;
};
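
/*
 * Illustrative sketch (hedged): with PTI, the LDT installer alternates
 * between the two alias slots so the old mapping stays live until the
 * new one is loaded. The helper names below are meant to match
 * arch/x86/kernel/ldt.c but are quoted from memory; treat them as
 * assumptions:
 *
 *      new_ldt->slot = old_ldt ? !old_ldt->slot : 0;
 *      if (!map_ldt_struct(mm, new_ldt, new_ldt->slot))
 *              install_ldt(mm, new_ldt);
 */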

/*
 * Used for LDT copy/destruction.
 */
static inline void init_new_context_ldt(struct mm_struct *mm)
{
        mm->context.ldt = NULL;
        init_rwsem(&mm->context.ldt_usr_sem);
}
int ldt_dup_context(struct mm_struct *oldmm, struct mm_struct *mm);
void destroy_context_ldt(struct mm_struct *mm);
void ldt_arch_exit_mmap(struct mm_struct *mm);
#else /* CONFIG_MODIFY_LDT_SYSCALL */
static inline void init_new_context_ldt(struct mm_struct *mm) { }
static inline int ldt_dup_context(struct mm_struct *oldmm,
                                  struct mm_struct *mm)
{
        return 0;
}
static inline void destroy_context_ldt(struct mm_struct *mm) { }
static inline void ldt_arch_exit_mmap(struct mm_struct *mm) { }
#endif

#ifdef CONFIG_MODIFY_LDT_SYSCALL
extern void load_mm_ldt(struct mm_struct *mm);
extern void switch_ldt(struct mm_struct *prev, struct mm_struct *next);
#else
static inline void load_mm_ldt(struct mm_struct *mm)
{
        clear_LDT();
}
static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next)
{
        DEBUG_LOCKS_WARN_ON(preemptible());
}
#endif

#define enter_lazy_tlb enter_lazy_tlb
extern void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);
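
/*
 * Hedged note: enter_lazy_tlb() is the hook the scheduler uses when a
 * kernel thread runs borrowing the previous task's mm; the CPU keeps the
 * old page tables loaded and defers TLB maintenance until a real mm
 * switch happens.
 */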

/*
 * Init a new mm. Used on mm copies, like at fork(),
 * and on mms that are brand-new, like at execve().
 */
#define init_new_context init_new_context
static inline int init_new_context(struct task_struct *tsk,
                                   struct mm_struct *mm)
{
        mutex_init(&mm->context.lock);

        mm->context.ctx_id = atomic64_inc_return(&last_mm_ctx_id);
        atomic64_set(&mm->context.tlb_gen, 0);

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
        if (cpu_feature_enabled(X86_FEATURE_OSPKE)) {
                /* pkey 0 is the default and allocated implicitly */
                mm->context.pkey_allocation_map = 0x1;
                /* -1 means unallocated or invalid */
                mm->context.execute_only_pkey = -1;
        }
#endif
        init_new_context_ldt(mm);
        return 0;
}
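
/*
 * Usage sketch (hedged): the generic mm setup in kernel/fork.c is
 * expected to call this hook and abort on failure, roughly:
 *
 *      if (init_new_context(p, mm))
 *              goto fail_nocontext;
 *
 * This x86 implementation always succeeds and returns 0.
 */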

#define destroy_context destroy_context
static inline void destroy_context(struct mm_struct *mm)
{
        destroy_context_ldt(mm);
}

extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                      struct task_struct *tsk);

extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
                               struct task_struct *tsk);
#define switch_mm_irqs_off switch_mm_irqs_off

#define activate_mm(prev, next)                 \
do {                                            \
        paravirt_activate_mm((prev), (next));   \
        switch_mm((prev), (next), NULL);        \
} while (0)
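
/*
 * Hedged note: activate_mm() is invoked from the exec() path once the
 * new mm is fully set up; exec_mmap() in fs/exec.c is expected to call
 * activate_mm(active_mm, mm) to make the fresh address space current
 * before the old one is dropped. The stray semicolon after while (0)
 * has also been removed above so the macro composes safely with
 * if/else.
 */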

#ifdef CONFIG_X86_32
#define deactivate_mm(tsk, mm)                  \
do {                                            \
        lazy_load_gs(0);                        \
} while (0)
#else
#define deactivate_mm(tsk, mm)                  \
do {                                            \
        load_gs_index(0);                       \
        loadsegment(fs, 0);                     \
} while (0)
#endif
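
/*
 * Hedged note: deactivate_mm() runs when a task's old mm is retired on
 * exec(); zeroing %gs (and %fs on 64-bit) keeps stale segment selectors,
 * which may reference the old mm's LDT, from leaking into the new
 * program.
 */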

static inline void arch_dup_pkeys(struct mm_struct *oldmm,
                                  struct mm_struct *mm)
{
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
        if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
                return;

        /* Duplicate the oldmm pkey state in mm: */
        mm->context.pkey_allocation_map = oldmm->context.pkey_allocation_map;
        mm->context.execute_only_pkey = oldmm->context.execute_only_pkey;
#endif
}

static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
        arch_dup_pkeys(oldmm, mm);
        paravirt_arch_dup_mmap(oldmm, mm);
        return ldt_dup_context(oldmm, mm);
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
        paravirt_arch_exit_mmap(mm);
        ldt_arch_exit_mmap(mm);
}
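
/*
 * Hedged note: arch_dup_mmap() is called from the fork() path
 * (dup_mmap() in kernel/fork.c) and arch_exit_mmap() from exit_mmap()
 * when the address space is torn down; both simply forward to the
 * paravirt and LDT hooks above.
 */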

#ifdef CONFIG_X86_64
static inline bool is_64bit_mm(struct mm_struct *mm)
{
        return !IS_ENABLED(CONFIG_IA32_EMULATION) ||
               !(mm->context.flags & MM_CONTEXT_UPROBE_IA32);
}
#else
static inline bool is_64bit_mm(struct mm_struct *mm)
{
        return false;
}
#endif
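
/*
 * Hedged note: without CONFIG_IA32_EMULATION a 64-bit kernel can only
 * run 64-bit mms, so the flag check is compiled away; with emulation,
 * MM_CONTEXT_UPROBE_IA32 marks an mm known to be executing 32-bit code,
 * so its absence is taken to mean the mm is 64-bit.
 */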

static inline void arch_unmap(struct mm_struct *mm, unsigned long start,
                              unsigned long end)
{
}

/*
 * We only want to enforce protection keys on the current process
 * because we effectively have no access to PKRU for other
 * processes or any way to tell *which* PKRU in a threaded
 * process we could use.
 *
 * So do not enforce things if the VMA is not from the current
 * mm, or if we are in a kernel thread.
 */
static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
                                             bool write, bool execute, bool foreign)
{
        /* pkeys never affect instruction fetches */
        if (execute)
                return true;
        /* allow access if the VMA is not one from this process */
        if (foreign || vma_is_foreign(vma))
                return true;
        return __pkru_allows_pkey(vma_pkey(vma), write);
}
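
/*
 * Worked example (illustrative): PKRU holds two bits per protection
 * key, access-disable at bit 2*pkey and write-disable at bit
 * 2*pkey + 1. For a VMA tagged with pkey 5 and PKRU == 0x800
 * (write-disable for key 5 set, access-disable clear), reads pass but
 * __pkru_allows_pkey(5, true) fails, so a write is reported as not
 * permitted here.
 */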

unsigned long __get_current_cr3_fast(void);

#include <asm-generic/mmu_context.h>

#endif /* _ASM_X86_MMU_CONTEXT_H */