x86/vdso: Replace calculate_addr in map_vdso() with addr
arch/x86/entry/vdso/vma.c
/*
 * Copyright 2007 Andi Kleen, SUSE Labs.
 * Subject to the GPL, v.2
 *
 * This contains most of the x86 vDSO kernel-side code.
 */
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <linux/cpu.h>
#include <linux/ptrace.h>
#include <asm/pvclock.h>
#include <asm/vgtod.h>
#include <asm/proto.h>
#include <asm/vdso.h>
#include <asm/vvar.h>
#include <asm/page.h>
#include <asm/desc.h>
#include <asm/cpufeature.h>

#if defined(CONFIG_X86_64)
unsigned int __read_mostly vdso64_enabled = 1;
#endif

void __init init_vdso_image(const struct vdso_image *image)
{
	BUG_ON(image->size % PAGE_SIZE != 0);

	apply_alternatives((struct alt_instr *)(image->data + image->alt),
			   (struct alt_instr *)(image->data + image->alt +
						image->alt_len));
}

struct linux_binprm;

/*
 * Put the vdso above the (randomized) stack with another randomized
 * offset.  This way there is no hole in the middle of address space.
 * To save memory make sure it is still in the same PTE as the stack
 * top.  This doesn't give that many random bits.
 *
 * Note that this algorithm is imperfect: the distribution of the vdso
 * start address within a PMD is biased toward the end.
 *
 * Only used for the 64-bit and x32 vdsos.
 */
static unsigned long vdso_addr(unsigned long start, unsigned len)
{
#ifdef CONFIG_X86_32
	return 0;
#else
	unsigned long addr, end;
	unsigned offset;

	/*
	 * Round up the start address.  It can start out unaligned as a result
	 * of stack start randomization.
	 */
	start = PAGE_ALIGN(start);

	/* Round the lowest possible end address up to a PMD boundary. */
	end = (start + len + PMD_SIZE - 1) & PMD_MASK;
	if (end >= TASK_SIZE_MAX)
		end = TASK_SIZE_MAX;
	end -= len;

	if (end > start) {
		offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
		addr = start + (offset << PAGE_SHIFT);
	} else {
		addr = start;
	}

	/*
	 * Forcibly align the final address in case we have a hardware
	 * issue that requires alignment for performance reasons.
	 */
	addr = align_vdso_addr(addr);

	return addr;
#endif
}
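
/*
 * Worked example of the bias noted above: the candidate range always
 * ends at "PMD-rounded end - len", a fixed point just below a PMD
 * boundary, while its low end moves with the (randomized) stack top.
 * Candidate addresses near the end of a PMD therefore fall inside
 * more possible ranges than those near the start, so they are chosen
 * more often.
 */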

/* Fault handler for the vdso text mapping: hand back the backing image page. */
static int vdso_fault(const struct vm_special_mapping *sm,
		      struct vm_area_struct *vma, struct vm_fault *vmf)
{
	const struct vdso_image *image = vma->vm_mm->context.vdso_image;

	if (!image || (vmf->pgoff << PAGE_SHIFT) >= image->size)
		return VM_FAULT_SIGBUS;

	vmf->page = virt_to_page(image->data + (vmf->pgoff << PAGE_SHIFT));
	get_page(vmf->page);
	return 0;
}

static void vdso_fix_landing(const struct vdso_image *image,
		struct vm_area_struct *new_vma)
{
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
	if (in_ia32_syscall() && image == &vdso_image_32) {
		struct pt_regs *regs = current_pt_regs();
		unsigned long vdso_land = image->sym_int80_landing_pad;
		unsigned long old_land_addr = vdso_land +
			(unsigned long)current->mm->context.vdso;
		/* Fix up the userspace landing address; see do_fast_syscall_32() */
		if (regs->ip == old_land_addr)
			regs->ip = new_vma->vm_start + vdso_land;
	}
#endif
}

/* Called when the vdso VMA is moved by mremap(): track the new location. */
static int vdso_mremap(const struct vm_special_mapping *sm,
		struct vm_area_struct *new_vma)
{
	unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
	const struct vdso_image *image = current->mm->context.vdso_image;

	if (image->size != new_size)
		return -EINVAL;

	if (WARN_ON_ONCE(current->mm != new_vma->vm_mm))
		return -EFAULT;

	vdso_fix_landing(image, new_vma);
	current->mm->context.vdso = (void __user *)new_vma->vm_start;

	return 0;
}

static int vvar_fault(const struct vm_special_mapping *sm,
		      struct vm_area_struct *vma, struct vm_fault *vmf)
{
	const struct vdso_image *image = vma->vm_mm->context.vdso_image;
	long sym_offset;
	int ret = -EFAULT;

	if (!image)
		return VM_FAULT_SIGBUS;

	sym_offset = (long)(vmf->pgoff << PAGE_SHIFT) +
		image->sym_vvar_start;

	/*
	 * Sanity check: a symbol offset of zero means that the page
	 * does not exist for this vdso image, not that the page is at
	 * offset zero relative to the text mapping.  This should be
	 * impossible here, because sym_offset should only be zero for
	 * the page past the end of the vvar mapping.
	 */
	if (sym_offset == 0)
		return VM_FAULT_SIGBUS;

	if (sym_offset == image->sym_vvar_page) {
		ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address,
				    __pa_symbol(&__vvar_page) >> PAGE_SHIFT);
	} else if (sym_offset == image->sym_pvclock_page) {
		struct pvclock_vsyscall_time_info *pvti =
			pvclock_pvti_cpu0_va();
		if (pvti && vclock_was_used(VCLOCK_PVCLOCK)) {
			ret = vm_insert_pfn(
				vma,
				(unsigned long)vmf->virtual_address,
				__pa(pvti) >> PAGE_SHIFT);
		}
	}

	if (ret == 0 || ret == -EBUSY)
		return VM_FAULT_NOPAGE;

	return VM_FAULT_SIGBUS;
}
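
/*
 * Illustration of the offset arithmetic above (hypothetical layout):
 * if sym_vvar_start were -3 * PAGE_SIZE, sym_vvar_page were
 * -3 * PAGE_SIZE and sym_pvclock_page were -2 * PAGE_SIZE, a fault at
 * pgoff 0 would give sym_offset == sym_vvar_page and insert the
 * __vvar_page pfn, while a fault at pgoff 1 would resolve to the
 * pvclock page.
 */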

/*
 * Add vdso and vvar mappings to the current process.
 * @image          - blob to map
 * @addr           - request a specific address (zero to map at a free address)
 */
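/*
 * Sketch of the resulting layout (sym_vvar_start is negative, so the
 * vvar area sits directly below the vdso text):
 *
 *	addr                    -> vvar area, -sym_vvar_start bytes
 *	addr - sym_vvar_start   -> vdso text, image->size bytes
 */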
static int map_vdso(const struct vdso_image *image, unsigned long addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long text_start;
	int ret = 0;

	static const struct vm_special_mapping vdso_mapping = {
		.name = "[vdso]",
		.fault = vdso_fault,
		.mremap = vdso_mremap,
	};
	static const struct vm_special_mapping vvar_mapping = {
		.name = "[vvar]",
		.fault = vvar_fault,
	};

	if (down_write_killable(&mm->mmap_sem))
		return -EINTR;

	addr = get_unmapped_area(NULL, addr,
				 image->size - image->sym_vvar_start, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	text_start = addr - image->sym_vvar_start;
	current->mm->context.vdso = (void __user *)text_start;
	current->mm->context.vdso_image = image;

	/*
	 * MAYWRITE to allow gdb to COW and set breakpoints
	 */
	vma = _install_special_mapping(mm,
				       text_start,
				       image->size,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       &vdso_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

	vma = _install_special_mapping(mm,
				       addr,
				       -image->sym_vvar_start,
				       VM_READ|VM_MAYREAD|VM_IO|VM_DONTDUMP|
				       VM_PFNMAP,
				       &vvar_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		do_munmap(mm, text_start, image->size);
	}

up_fail:
	if (ret) {
		current->mm->context.vdso = NULL;
		current->mm->context.vdso_image = NULL;
	}

	up_write(&mm->mmap_sem);
	return ret;
}

static int map_vdso_randomized(const struct vdso_image *image)
{
	unsigned long addr = vdso_addr(current->mm->start_stack,
				 image->size - image->sym_vvar_start);
	return map_vdso(image, addr);
}

#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
static int load_vdso32(void)
{
	if (vdso32_enabled != 1)  /* Other values all mean "disabled" */
		return 0;

	return map_vdso(&vdso_image_32, 0);
}
#endif

#ifdef CONFIG_X86_64
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	if (!vdso64_enabled)
		return 0;

	return map_vdso_randomized(&vdso_image_64);
}

#ifdef CONFIG_COMPAT
int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
				       int uses_interp)
{
#ifdef CONFIG_X86_X32_ABI
	if (test_thread_flag(TIF_X32)) {
		if (!vdso64_enabled)
			return 0;
		return map_vdso_randomized(&vdso_image_x32);
	}
#endif
#ifdef CONFIG_IA32_EMULATION
	return load_vdso32();
#else
	return 0;
#endif
}
#endif
#else
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	return load_vdso32();
}
#endif

#ifdef CONFIG_X86_64
static __init int vdso_setup(char *s)
{
	vdso64_enabled = simple_strtoul(s, NULL, 0);
	return 0;
}
__setup("vdso=", vdso_setup);
#endif

#ifdef CONFIG_X86_64
static void vgetcpu_cpu_init(void *arg)
{
	int cpu = smp_processor_id();
	struct desc_struct d = { };
	unsigned long node = 0;
#ifdef CONFIG_NUMA
	node = cpu_to_node(cpu);
#endif
	if (static_cpu_has(X86_FEATURE_RDTSCP))
		write_rdtscp_aux((node << 12) | cpu);

	/*
	 * Store cpu number in limit so that it can be loaded
	 * quickly in user space in vgetcpu. (12 bits for the CPU
	 * and 8 bits for the node)
	 */
	d.limit0 = cpu | ((node & 0xf) << 12);
	d.limit = node >> 4;
	d.type = 5;		/* RO data, expand down, accessed */
	d.dpl = 3;		/* Visible to user code */
	d.s = 1;		/* Not a system segment */
	d.p = 1;		/* Present */
	d.d = 1;		/* 32-bit */

	write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S);
}
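
/*
 * For illustration only, not part of this file: user space (e.g. the
 * vdso's __vdso_getcpu()) can read the encoding above back either
 * from IA32_TSC_AUX via rdtscp or from the segment limit via lsl.
 * A minimal sketch, assuming the per-cpu selector
 * GDT_ENTRY_PER_CPU * 8 + 3 (__PER_CPU_SEG):
 *
 *	unsigned int p;
 *	asm("lsl %1, %0" : "=r" (p) : "r" (GDT_ENTRY_PER_CPU * 8 + 3));
 *	cpu  = p & 0xfff;	// low 12 bits
 *	node = p >> 12;		// next 8 bits
 */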

static int vgetcpu_online(unsigned int cpu)
{
	return smp_call_function_single(cpu, vgetcpu_cpu_init, NULL, 1);
}

static int __init init_vdso(void)
{
	init_vdso_image(&vdso_image_64);

#ifdef CONFIG_X86_X32_ABI
	init_vdso_image(&vdso_image_x32);
#endif

	/* notifier priority > KVM */
	return cpuhp_setup_state(CPUHP_AP_X86_VDSO_VMA_ONLINE,
				 "AP_X86_VDSO_VMA_ONLINE", vgetcpu_online, NULL);
}
subsys_initcall(init_vdso);
#endif /* CONFIG_X86_64 */