Line data Source code
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_GENERIC_PGALLOC_H
#define __ASM_GENERIC_PGALLOC_H

#ifdef CONFIG_MMU

/*
 * Page tables are always allocated zero-filled; user page tables are
 * additionally accounted (%__GFP_ACCOUNT) to the allocating cgroup.
 */
#define GFP_PGTABLE_KERNEL	(GFP_KERNEL | __GFP_ZERO)
#define GFP_PGTABLE_USER	(GFP_PGTABLE_KERNEL | __GFP_ACCOUNT)

/**
 * __pte_alloc_one_kernel - allocate a page for PTE-level kernel page table
 * @mm: the mm_struct of the current context
 *
 * Helper for architectures whose pte_alloc_one_kernel() needs to do
 * anything beyond the simple zeroed-page allocation done here.
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
static inline pte_t *__pte_alloc_one_kernel(struct mm_struct *mm)
{
	return (pte_t *)__get_free_page(GFP_PGTABLE_KERNEL);
}

#ifndef __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL
/**
 * pte_alloc_one_kernel - allocate a page for PTE-level kernel page table
 * @mm: the mm_struct of the current context
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
	return __pte_alloc_one_kernel(mm);
}
#endif

/**
 * pte_free_kernel - free PTE-level kernel page table page
 * @mm: the mm_struct of the current context
 * @pte: pointer to the memory containing the page table
 */
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	free_page((unsigned long)pte);
}

/**
 * __pte_alloc_one - allocate a page for PTE-level user page table
 * @mm: the mm_struct of the current context
 * @gfp: GFP flags to use for the allocation
 *
 * Allocates a page and runs the pgtable_pte_page_ctor(); the page is
 * released again if the constructor fails.
 *
 * Helper for architectures whose pte_alloc_one() needs custom GFP flags
 * or anything beyond the simple allocation done here.
 *
 * Return: `struct page` initialized as page table or %NULL on error
 */
static inline pgtable_t __pte_alloc_one(struct mm_struct *mm, gfp_t gfp)
{
	struct page *page = alloc_page(gfp);

	if (!page)
		return NULL;
	if (!pgtable_pte_page_ctor(page)) {
		__free_page(page);
		return NULL;
	}

	return page;
}

#ifndef __HAVE_ARCH_PTE_ALLOC_ONE
/**
 * pte_alloc_one - allocate a page for PTE-level user page table
 * @mm: the mm_struct of the current context
 *
 * Allocates a page and runs the pgtable_pte_page_ctor().
 *
 * Return: `struct page` initialized as page table or %NULL on error
 */
static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
{
	return __pte_alloc_one(mm, GFP_PGTABLE_USER);
}
#endif

/*
 * Should really implement gc for free page table pages. This could be
 * done with a reference count in struct page.
 */

/**
 * pte_free - free PTE-level user page table page
 * @mm: the mm_struct of the current context
 * @pte_page: the `struct page` representing the page table
 */
static inline void pte_free(struct mm_struct *mm, struct page *pte_page)
{
	pgtable_pte_page_dtor(pte_page);
	__free_page(pte_page);
}


#if CONFIG_PGTABLE_LEVELS > 2

#ifndef __HAVE_ARCH_PMD_ALLOC_ONE
/**
 * pmd_alloc_one - allocate a page for PMD-level page table
 * @mm: the mm_struct of the current context
 * @addr: virtual address the table will map (unused here)
 *
 * Allocates a page and runs the pgtable_pmd_page_ctor(); the page is
 * released again if the constructor fails.
 * Allocations use %GFP_PGTABLE_USER in user context and
 * %GFP_PGTABLE_KERNEL in kernel context.
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	gfp_t gfp = (mm == &init_mm) ? GFP_PGTABLE_KERNEL : GFP_PGTABLE_USER;
	struct page *page = alloc_pages(gfp, 0);

	if (!page)
		return NULL;
	if (!pgtable_pmd_page_ctor(page)) {
		__free_pages(page, 0);
		return NULL;
	}
	return (pmd_t *)page_address(page);
}
#endif

#ifndef __HAVE_ARCH_PMD_FREE
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	/* PMD tables are page sized, so the pointer must be page aligned. */
	BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
	pgtable_pmd_page_dtor(virt_to_page(pmd));
	free_page((unsigned long)pmd);
}
#endif

#endif /* CONFIG_PGTABLE_LEVELS > 2 */

#if CONFIG_PGTABLE_LEVELS > 3

#ifndef __HAVE_ARCH_PUD_ALLOC_ONE
/**
 * pud_alloc_one - allocate a page for PUD-level page table
 * @mm: the mm_struct of the current context
 * @addr: virtual address the table will map (unused here)
 *
 * Allocates a page using %GFP_PGTABLE_USER for user context and
 * %GFP_PGTABLE_KERNEL for kernel context.
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	gfp_t gfp = (mm == &init_mm) ? GFP_PGTABLE_KERNEL : GFP_PGTABLE_USER;

	return (pud_t *)get_zeroed_page(gfp);
}
#endif

static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
	/* PUD tables are page sized, so the pointer must be page aligned. */
	BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
	free_page((unsigned long)pud);
}

#endif /* CONFIG_PGTABLE_LEVELS > 3 */

#ifndef __HAVE_ARCH_PGD_FREE
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	free_page((unsigned long)pgd);
}
#endif

#endif /* CONFIG_MMU */

#endif /* __ASM_GENERIC_PGALLOC_H */