// SPDX-License-Identifier: GPL-2.0
/*
 *  mm/pgtable-generic.c
 *
 *  Generic pgtable methods declared in linux/pgtable.h
 *
 *  Copyright (C) 2010  Linus Torvalds
 */

#include <linux/pagemap.h>
#include <linux/hugetlb.h>
#include <linux/pgtable.h>
#include <asm/tlb.h>

/*
 * If a p?d_bad entry is found while walking page tables, report
 * the error before resetting the entry to p?d_none.  Usually (but
 * very seldom) called out from the p?d_none_or_clear_bad macros.
 */

void pgd_clear_bad(pgd_t *pgd)
{
        pgd_ERROR(*pgd);
        pgd_clear(pgd);
}

#ifndef __PAGETABLE_P4D_FOLDED
void p4d_clear_bad(p4d_t *p4d)
{
        p4d_ERROR(*p4d);
        p4d_clear(p4d);
}
#endif

#ifndef __PAGETABLE_PUD_FOLDED
void pud_clear_bad(pud_t *pud)
{
        pud_ERROR(*pud);
        pud_clear(pud);
}
#endif
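
/*
 * For reference, a minimal sketch (not the authoritative definition from
 * linux/pgtable.h) of how the p?d_none_or_clear_bad() helpers are expected
 * to use the functions above, shown here for the pmd level:
 *
 *	static inline int pmd_none_or_clear_bad(pmd_t *pmd)
 *	{
 *		if (pmd_none(*pmd))
 *			return 1;
 *		if (unlikely(pmd_bad(*pmd))) {
 *			pmd_clear_bad(pmd);
 *			return 1;
 *		}
 *		return 0;
 *	}
 */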

/*
 * Note that the pmd variant below cannot be stubbed out like the p4d/pud
 * variants above: pmd folding is special, and pmd_* macros typically refer
 * to the upper level even when folded.
 */
void pmd_clear_bad(pmd_t *pmd)
{
        pmd_ERROR(*pmd);
        pmd_clear(pmd);
}

#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
 * Only sets the access flags (dirty, accessed), as well as write
 * permission.  Furthermore, we know it always gets set to a "more
 * permissive" setting, which allows most architectures to optimize
 * this.  We return whether the PTE actually changed, which in turn
 * instructs the caller to do things like update_mmu_cache().  This
 * used to be done in the caller, but sparc needs minor faults to
 * force that call on sun4c, so we changed this macro slightly.
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
                          unsigned long address, pte_t *ptep,
                          pte_t entry, int dirty)
{
        int changed = !pte_same(*ptep, entry);
        if (changed) {
                set_pte_at(vma->vm_mm, address, ptep, entry);
                flush_tlb_fix_spurious_fault(vma, address);
        }
        return changed;
}
#endif
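
/*
 * A hedged sketch of the caller pattern the comment above alludes to
 * (fault-handling code in mm/), not a definition from this file:
 *
 *	if (ptep_set_access_flags(vma, address, ptep, entry, dirty))
 *		update_mmu_cache(vma, address, ptep);
 */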

#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
int ptep_clear_flush_young(struct vm_area_struct *vma,
                           unsigned long address, pte_t *ptep)
{
        int young;
        young = ptep_test_and_clear_young(vma, address, ptep);
        if (young)
                flush_tlb_page(vma, address);
        return young;
}
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
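/*
 * Clear the pte at @address and, if the old pte was accessible (i.e. could
 * have been cached in the TLB), flush that TLB entry.  The old pte value is
 * returned to the caller.
 */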
pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
                       pte_t *ptep)
{
        struct mm_struct *mm = vma->vm_mm;
        pte_t pte;
        pte = ptep_get_and_clear(mm, address, ptep);
        if (pte_accessible(mm, pte))
                flush_tlb_page(vma, address);
        return pte;
}
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
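/*
 * Huge-pmd counterpart of ptep_set_access_flags() above: the whole
 * HPAGE_PMD_SIZE range is flushed, since a single pmd entry maps the
 * entire huge page.
 */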
int pmdp_set_access_flags(struct vm_area_struct *vma,
                          unsigned long address, pmd_t *pmdp,
                          pmd_t entry, int dirty)
{
        int changed = !pmd_same(*pmdp, entry);
        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
        if (changed) {
                set_pmd_at(vma->vm_mm, address, pmdp, entry);
                flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
        }
        return changed;
}
#endif

#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
int pmdp_clear_flush_young(struct vm_area_struct *vma,
                           unsigned long address, pmd_t *pmdp)
{
        int young;
        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
        young = pmdp_test_and_clear_young(vma, address, pmdp);
        if (young)
                flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
        return young;
}
#endif

#ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
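/*
 * Clear a present huge pmd (or pud, below) and flush the corresponding
 * HPAGE_PMD_SIZE/HPAGE_PUD_SIZE TLB range; the old entry is returned to
 * the caller.
 */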
pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
                            pmd_t *pmdp)
{
        pmd_t pmd;
        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
        VM_BUG_ON(!pmd_present(*pmdp));
        /* Below assumes pmd_present() is true */
        VM_BUG_ON(!pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
        pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
        flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
        return pmd;
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
pud_t pudp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
                            pud_t *pudp)
{
        pud_t pud;

        VM_BUG_ON(address & ~HPAGE_PUD_MASK);
        VM_BUG_ON(!pud_trans_huge(*pudp) && !pud_devmap(*pudp));
        pud = pudp_huge_get_and_clear(vma->vm_mm, address, pudp);
        flush_pud_tlb_range(vma, address, address + HPAGE_PUD_SIZE);
        return pud;
}
#endif
#endif

#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
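/*
 * Usage note (added for clarity, summarizing the expected THP usage rather
 * than anything defined in this file): a preallocated pte page table is
 * deposited here when a huge pmd is installed, and withdrawn again (below)
 * when the huge pmd is split or zapped, so that the split path never needs
 * to allocate memory.
 */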
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
                                pgtable_t pgtable)
{
        assert_spin_locked(pmd_lockptr(mm, pmdp));

        /* FIFO */
        if (!pmd_huge_pte(mm, pmdp))
                INIT_LIST_HEAD(&pgtable->lru);
        else
                list_add(&pgtable->lru, &pmd_huge_pte(mm, pmdp)->lru);
        pmd_huge_pte(mm, pmdp) = pgtable;
}
#endif

#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
/* no "address" argument, so destroys page coloring on some architectures */
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
        pgtable_t pgtable;

        assert_spin_locked(pmd_lockptr(mm, pmdp));

        /* FIFO */
        pgtable = pmd_huge_pte(mm, pmdp);
        pmd_huge_pte(mm, pmdp) = list_first_entry_or_null(&pgtable->lru,
                                                          struct page, lru);
        if (pmd_huge_pte(mm, pmdp))
                list_del(&pgtable->lru);
        return pgtable;
}
#endif

#ifndef __HAVE_ARCH_PMDP_INVALIDATE
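/*
 * Replace the huge pmd with an invalid (non-present) version of itself via
 * pmdp_establish()/pmd_mkinvalid(), flush the TLB range, and return the old
 * pmd so the caller can preserve its dirty/young bits.
 */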
pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
                      pmd_t *pmdp)
{
        pmd_t old = pmdp_establish(vma, address, pmdp, pmd_mkinvalid(*pmdp));
        flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
        return old;
}
#endif

#ifndef pmdp_collapse_flush
pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
                          pmd_t *pmdp)
{
        /*
         * The pmd and the hugepage pte have the same format, so we can
         * use the same function.
         */
        pmd_t pmd;

        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
        VM_BUG_ON(pmd_trans_huge(*pmdp));
        pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);

        /* collapse entails shooting down ptes, not the pmd */
        flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
        return pmd;
}
#endif
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */