// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2007 Andi Kleen, SUSE Labs.
 *
 * This contains most of the x86 vDSO kernel-side code.
 */
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <linux/cpu.h>
#include <linux/ptrace.h>
#include <linux/vdso_datastore.h>

#include <asm/pvclock.h>
#include <asm/vgtod.h>
#include <asm/proto.h>
#include <asm/vdso.h>
#include <asm/tlb.h>
#include <asm/page.h>
#include <asm/desc.h>
#include <asm/cpufeature.h>
#include <asm/vdso/vsyscall.h>
#include <clocksource/hyperv_timer.h>

static_assert(VDSO_NR_PAGES + VDSO_NR_VCLOCK_PAGES == __VDSO_PAGES);

unsigned int vclocks_used __read_mostly;

#if defined(CONFIG_X86_64)
unsigned int __read_mostly vdso64_enabled = 1;
#endif

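/*
 * Called once per vDSO image at boot: sanity-check the blob and patch in
 * the alternative instructions for the running CPU.
 */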
int __init init_vdso_image(const struct vdso_image *image)
{
	BUILD_BUG_ON(VDSO_CLOCKMODE_MAX >= 32);
	BUG_ON(image->size % PAGE_SIZE != 0);

	apply_alternatives((struct alt_instr *)(image->data + image->alt),
			   (struct alt_instr *)(image->data + image->alt +
						image->alt_len));

	return 0;
}

struct linux_binprm;

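/*
 * Fault handler for the [vdso] mapping: hand back the page of the
 * kernel-side image blob that backs the faulting offset.
 */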
static vm_fault_t vdso_fault(const struct vm_special_mapping *sm,
			     struct vm_area_struct *vma, struct vm_fault *vmf)
{
	const struct vdso_image *image = vma->vm_mm->context.vdso_image;

	if (!image || (vmf->pgoff << PAGE_SHIFT) >= image->size)
		return VM_FAULT_SIGBUS;

	vmf->page = virt_to_page(image->data + (vmf->pgoff << PAGE_SHIFT));
	get_page(vmf->page);
	return 0;
}

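/*
 * If a 32-bit task is stopped in the int80 landing pad while its vDSO is
 * being mremap()ed, point the saved IP at the same landing pad inside the
 * new mapping (see do_fast_syscall_32()).
 */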
static void vdso_fix_landing(const struct vdso_image *image,
			     struct vm_area_struct *new_vma)
{
	if (in_ia32_syscall() && image == &vdso_image_32) {
		struct pt_regs *regs = current_pt_regs();
		unsigned long vdso_land = image->sym_int80_landing_pad;
		unsigned long old_land_addr = vdso_land +
			(unsigned long)current->mm->context.vdso;

		/* Fixing userspace landing - look at do_fast_syscall_32 */
		if (regs->ip == old_land_addr)
			regs->ip = new_vma->vm_start + vdso_land;
	}
}

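/*
 * mremap() of the vDSO text: update the cached base address and, for
 * 32-bit tasks, fix up the syscall landing pad in the saved registers.
 */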
static int vdso_mremap(const struct vm_special_mapping *sm,
		       struct vm_area_struct *new_vma)
{
	const struct vdso_image *image = current->mm->context.vdso_image;

	vdso_fix_landing(image, new_vma);
	current->mm->context.vdso = (void __user *)new_vma->vm_start;

	return 0;
}

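/*
 * Fault handler for the [vvar_vclock] mapping: insert the pvclock or
 * Hyper-V TSC page, but only if the corresponding clocksource was ever
 * used as a vDSO clock mode.
 */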
static vm_fault_t vvar_vclock_fault(const struct vm_special_mapping *sm,
				    struct vm_area_struct *vma, struct vm_fault *vmf)
{
	switch (vmf->pgoff) {
#ifdef CONFIG_PARAVIRT_CLOCK
	case VDSO_PAGE_PVCLOCK_OFFSET:
	{
		struct pvclock_vsyscall_time_info *pvti =
			pvclock_get_pvti_cpu0_va();

		if (pvti && vclock_was_used(VDSO_CLOCKMODE_PVCLOCK))
			return vmf_insert_pfn_prot(vma, vmf->address,
					__pa(pvti) >> PAGE_SHIFT,
					pgprot_decrypted(vma->vm_page_prot));
		break;
	}
#endif /* CONFIG_PARAVIRT_CLOCK */
#ifdef CONFIG_HYPERV_TIMER
	case VDSO_PAGE_HVCLOCK_OFFSET:
	{
		unsigned long pfn = hv_get_tsc_pfn();

		if (pfn && vclock_was_used(VDSO_CLOCKMODE_HVCLOCK))
			return vmf_insert_pfn(vma, vmf->address, pfn);
		break;
	}
#endif /* CONFIG_HYPERV_TIMER */
	}

	return VM_FAULT_SIGBUS;
}

static const struct vm_special_mapping vdso_mapping = {
	.name = "[vdso]",
	.fault = vdso_fault,
	.mremap = vdso_mremap,
};
static const struct vm_special_mapping vvar_vclock_mapping = {
	.name = "[vvar_vclock]",
	.fault = vvar_vclock_fault,
};

/*
 * Add vdso and vvar mappings to current process.
 * @image - blob to map
 * @addr - request a specific address (zero to map at free addr)
 */
static int map_vdso(const struct vdso_image *image, unsigned long addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long text_start;
	int ret = 0;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

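	/*
	 * Reserve one contiguous region: the vvar/vclock data pages occupy
	 * the first __VDSO_PAGES pages, the vDSO text follows at text_start.
	 */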
	addr = get_unmapped_area(NULL, addr,
				 image->size + __VDSO_PAGES * PAGE_SIZE, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	text_start = addr + __VDSO_PAGES * PAGE_SIZE;

	/*
	 * MAYWRITE to allow gdb to COW and set breakpoints
	 */
	vma = _install_special_mapping(mm,
				       text_start,
				       image->size,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
				       VM_SEALED_SYSMAP,
				       &vdso_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

	vma = vdso_install_vvar_mapping(mm, addr);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		do_munmap(mm, text_start, image->size, NULL);
		goto up_fail;
	}

	vma = _install_special_mapping(mm,
				       VDSO_VCLOCK_PAGES_START(addr),
				       VDSO_NR_VCLOCK_PAGES * PAGE_SIZE,
				       VM_READ|VM_MAYREAD|VM_IO|VM_DONTDUMP|
				       VM_PFNMAP|VM_SEALED_SYSMAP,
				       &vvar_vclock_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		do_munmap(mm, text_start, image->size, NULL);
		do_munmap(mm, addr, image->size, NULL);
		goto up_fail;
	}

	current->mm->context.vdso = (void __user *)text_start;
	current->mm->context.vdso_image = image;

up_fail:
	mmap_write_unlock(mm);
	return ret;
}

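/*
 * Map the vDSO at a caller-supplied address, but only if this mm has no
 * vdso/vvar mappings yet (reached via the ARCH_MAP_VDSO_* arch_prctl()
 * path).
 */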
int map_vdso_once(const struct vdso_image *image, unsigned long addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);

	mmap_write_lock(mm);
	/*
	 * Check if we have already mapped the vdso blob - fail to prevent
	 * userspace from abusing install_special_mapping, which may not do
	 * accounting and rlimits right.
	 * We could search the VMAs near context.vdso, but this is a slow
	 * path, so explicitly check all VMAs to be completely sure.
	 */
	for_each_vma(vmi, vma) {
		if (vma_is_special_mapping(vma, &vdso_mapping) ||
				vma_is_special_mapping(vma, &vdso_vvar_mapping) ||
				vma_is_special_mapping(vma, &vvar_vclock_mapping)) {
			mmap_write_unlock(mm);
			return -EEXIST;
		}
	}
	mmap_write_unlock(mm);

	return map_vdso(image, addr);
}

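/* Map the 32-bit vDSO unless it has been disabled via vdso32_enabled. */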
static int load_vdso32(void)
{
	if (vdso32_enabled != 1)  /* Other values all mean "disabled" */
		return 0;

	return map_vdso(&vdso_image_32, 0);
}

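/*
 * Called by the ELF loader at execve() time to map the vDSO appropriate
 * for the native ABI of the new program.
 */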
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	if (IS_ENABLED(CONFIG_X86_64)) {
		if (!vdso64_enabled)
			return 0;

		return map_vdso(&vdso_image_64, 0);
	}

	return load_vdso32();
}

#ifdef CONFIG_COMPAT
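/* Same as above, but for compat (x32 and IA-32) programs. */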
int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
				       int uses_interp, bool x32)
{
	if (IS_ENABLED(CONFIG_X86_X32_ABI) && x32) {
		if (!vdso64_enabled)
			return 0;
		return map_vdso(&vdso_image_x32, 0);
	}

	if (IS_ENABLED(CONFIG_IA32_EMULATION))
		return load_vdso32();

	return 0;
}
#endif

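/*
 * Report whether the task's saved IP points at one of the 32-bit vDSO
 * sigreturn landing pads, i.e. whether the syscall in flight is a
 * vDSO-issued sigreturn.
 */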
bool arch_syscall_is_vdso_sigreturn(struct pt_regs *regs)
{
	const struct vdso_image *image = current->mm->context.vdso_image;
	unsigned long vdso = (unsigned long) current->mm->context.vdso;

	if (in_ia32_syscall() && image == &vdso_image_32) {
		if (regs->ip == vdso + image->sym_vdso32_sigreturn_landing_pad ||
		    regs->ip == vdso + image->sym_vdso32_rt_sigreturn_landing_pad)
			return true;
	}
	return false;
}

#ifdef CONFIG_X86_64
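/* Parse the "vdso=" boot parameter: 0 disables the 64-bit vDSO. */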
static __init int vdso_setup(char *s)
{
	vdso64_enabled = simple_strtoul(s, NULL, 0);
	return 1;
}
__setup("vdso=", vdso_setup);
#endif /* CONFIG_X86_64 */