// SPDX-License-Identifier: GPL-2.0
#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/syscalls.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/utsname.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/uaccess.h>
#include <linux/elf.h>

#include <asm/elf.h>
#include <asm/ia32.h>

/*
 * Align a virtual address to avoid aliasing in the I$ on AMD F15h.
 */
static unsigned long get_align_mask(void)
{
        /* handle 32- and 64-bit case with a single conditional */
        if (va_align.flags < 0 || !(va_align.flags & (2 - mmap_is_ia32())))
                return 0;

        if (!(current->flags & PF_RANDOMIZE))
                return 0;

        return va_align.mask;
}
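
/*
 * A note on the conditional above, assuming the flag layout from
 * asm/elf.h (ALIGN_VA_32 == bit 0, ALIGN_VA_64 == bit 1): since
 * mmap_is_ia32() returns 1 for a 32-bit task and 0 otherwise,
 * "2 - mmap_is_ia32()" evaluates to ALIGN_VA_32 for 32-bit tasks and
 * to ALIGN_VA_64 for 64-bit ones, so a single AND tests exactly the
 * flag bit that applies to the current task.
 */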

/*
 * To avoid aliasing in the I$ on AMD F15h, the bits defined by the
 * va_align.bits, [12:upper_bit), are set to a random value instead of
 * zeroing them. This random value is computed once per boot. This form
 * of ASLR is known as "per-boot ASLR".
 *
 * To achieve this, the random value is added to the info.align_offset
 * value before calling vm_unmapped_area() or ORed directly into the
 * address.
 */
static unsigned long get_align_bits(void)
{
        return va_align.bits & get_align_mask();
}

unsigned long align_vdso_addr(unsigned long addr)
{
        unsigned long align_mask = get_align_mask();

        addr = (addr + align_mask) & ~align_mask;
        return addr | get_align_bits();
}
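
/*
 * Worked example with hypothetical values: suppose va_align.mask is
 * 0x7000 (bits [12:15), a 32 KB aliasing window) and the per-boot
 * random va_align.bits is 0x3000. An addr of 0x10000 survives the
 * round-up unchanged while 0x11000 becomes 0x18000; ORing in 0x3000
 * then yields 0x13000 and 0x1b000 respectively, so the vDSO always
 * lands on the same boot-time slide within its 32 KB window.
 */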

static int __init control_va_addr_alignment(char *str)
{
        /* guard against enabling this on other CPU families */
        if (va_align.flags < 0)
                return 1;

        if (*str == 0)
                return 1;

        if (*str == '=')
                str++;

        if (!strcmp(str, "32"))
                va_align.flags = ALIGN_VA_32;
        else if (!strcmp(str, "64"))
                va_align.flags = ALIGN_VA_64;
        else if (!strcmp(str, "off"))
                va_align.flags = 0;
        else if (!strcmp(str, "on"))
                va_align.flags = ALIGN_VA_32 | ALIGN_VA_64;
        else
                return 0;

        return 1;
}
__setup("align_va_addr", control_va_addr_alignment);
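
/*
 * The __setup() hook above turns the handler into the "align_va_addr="
 * boot parameter: "32", "64", "on" or "off" selects whether 32-bit
 * tasks, 64-bit tasks, both, or neither get the alignment. Per the
 * guard at the top of the handler, the option is silently ignored on
 * CPU families where va_align.flags stays negative.
 */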

SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
                unsigned long, prot, unsigned long, flags,
                unsigned long, fd, unsigned long, off)
{
        if (off & ~PAGE_MASK)
                return -EINVAL;

        return ksys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
}
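
/*
 * Example: the raw x86-64 mmap syscall takes a byte offset, so with
 * 4 KB pages a caller's off must have its low 12 bits clear; an off of
 * 8192 is forwarded to ksys_mmap_pgoff() as pgoff 2 (8192 >> PAGE_SHIFT).
 */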

static void find_start_end(unsigned long addr, unsigned long flags,
                           unsigned long *begin, unsigned long *end)
{
        if (!in_32bit_syscall() && (flags & MAP_32BIT)) {
                /*
                 * This is usually used to map code in the small model,
                 * so it needs to be in the first 31 bits. Limit it to
                 * that. This means we need to move the unmapped base
                 * down for this case. This can give conflicts with the
                 * heap, but we assume that glibc malloc knows how to
                 * fall back to mmap. Give it 1 GB of playground for
                 * now. -AK
                 */
                *begin = 0x40000000;
                *end = 0x80000000;
                if (current->flags & PF_RANDOMIZE) {
                        *begin = randomize_page(*begin, 0x02000000);
                }
                return;
        }

        *begin = get_mmap_base(1);
        if (in_32bit_syscall())
                *end = task_size_32bit();
        else
                *end = task_size_64bit(addr > DEFAULT_MAP_WINDOW);
}
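
/*
 * The resulting search windows: a 64-bit task passing MAP_32BIT gets
 * [1 GB, 2 GB), with the start randomized by up to 32 MB (0x02000000)
 * when ASLR is enabled; all other requests search from the bottom-up
 * mmap base, get_mmap_base(1), up to the task size, where the 64-bit
 * limit only crosses DEFAULT_MAP_WINDOW if the hint address already did.
 */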

unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
                       unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        struct vm_unmapped_area_info info;
        unsigned long begin, end;

        if (flags & MAP_FIXED)
                return addr;

        find_start_end(addr, flags, &begin, &end);

        if (len > end)
                return -ENOMEM;

        if (addr) {
                addr = PAGE_ALIGN(addr);
                vma = find_vma(mm, addr);
                if (end - len >= addr &&
                    (!vma || addr + len <= vm_start_gap(vma)))
                        return addr;
        }

        info.flags = 0;
        info.length = len;
        info.low_limit = begin;
        info.high_limit = end;
        info.align_mask = 0;
        info.align_offset = pgoff << PAGE_SHIFT;
        if (filp) {
                info.align_mask = get_align_mask();
                info.align_offset += get_align_bits();
        }
        return vm_unmapped_area(&info);
}
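
/*
 * A note on the info fields above: align_offset starts at
 * pgoff << PAGE_SHIFT so that, when a file mapping gets a non-zero
 * align_mask, the chosen address keeps a fixed relationship to the
 * file offset (the same file data always ends up with the same
 * alignment); the per-boot random bits are then added on top.
 */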

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                               const unsigned long len, const unsigned long pgoff,
                               const unsigned long flags)
{
        struct vm_area_struct *vma;
        struct mm_struct *mm = current->mm;
        unsigned long addr = addr0;
        struct vm_unmapped_area_info info;

        /* requested length too big for entire address space */
        if (len > TASK_SIZE)
                return -ENOMEM;

        /* No address checking. See comment at mmap_address_hint_valid() */
        if (flags & MAP_FIXED)
                return addr;

        /* for MAP_32BIT mappings we force the legacy mmap base */
        if (!in_32bit_syscall() && (flags & MAP_32BIT))
                goto bottomup;

        /* requesting a specific address */
        if (addr) {
                addr &= PAGE_MASK;
                if (!mmap_address_hint_valid(addr, len))
                        goto get_unmapped_area;

                vma = find_vma(mm, addr);
                if (!vma || addr + len <= vm_start_gap(vma))
                        return addr;
        }
get_unmapped_area:

        info.flags = VM_UNMAPPED_AREA_TOPDOWN;
        info.length = len;
        info.low_limit = PAGE_SIZE;
        info.high_limit = get_mmap_base(0);

        /*
         * If the hint address is above DEFAULT_MAP_WINDOW, look for an
         * unmapped area in the full address space.
         *
         * The !in_32bit_syscall() check avoids high addresses for x32
         * (and makes this a no-op on native i386).
         */
        if (addr > DEFAULT_MAP_WINDOW && !in_32bit_syscall())
                info.high_limit += TASK_SIZE_MAX - DEFAULT_MAP_WINDOW;

        info.align_mask = 0;
        info.align_offset = pgoff << PAGE_SHIFT;
        if (filp) {
                info.align_mask = get_align_mask();
                info.align_offset += get_align_bits();
        }
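        /*
         * vm_unmapped_area() returns either a page-aligned address or a
         * negative errno, and only an errno value has bits set below
         * PAGE_SHIFT, so the "addr & ~PAGE_MASK" test below cleanly
         * separates success from failure.
         */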
        addr = vm_unmapped_area(&info);
        if (!(addr & ~PAGE_MASK))
                return addr;
        VM_BUG_ON(addr != -ENOMEM);

bottomup:
        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here. This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
        return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
}