// SPDX-License-Identifier: GPL-2.0
/*
 * x86_64 specific EFI support functions
 * Based on Extensible Firmware Interface Specification version 1.0
 *
 * Copyright (C) 2005-2008 Intel Co.
 *	Fenghua Yu <fenghua.yu@intel.com>
 *	Bibo Mao <bibo.mao@intel.com>
 *	Chandramouli Narayanan <mouli@linux.intel.com>
 *	Huang Ying <ying.huang@intel.com>
 *
 * Code to convert EFI to E820 map has been implemented in elilo bootloader
 * based on an EFI patch by Edgar Hucek. Based on the E820 map, the page table
 * is set up appropriately for EFI runtime code.
 * - mouli 06/14/2007.
 *
 */

#define pr_fmt(fmt) "efi: " fmt

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/memblock.h>
#include <linux/ioport.h>
#include <linux/mc146818rtc.h>
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/ucs2_string.h>
#include <linux/cc_platform.h>
#include <linux/sched/task.h>

#include <asm/setup.h>
#include <asm/page.h>
#include <asm/e820/api.h>
#include <asm/tlbflush.h>
#include <asm/proto.h>
#include <asm/efi.h>
#include <asm/cacheflush.h>
#include <asm/fixmap.h>
#include <asm/realmode.h>
#include <asm/time.h>
#include <asm/pgalloc.h>
#include <asm/sev.h>

/*
 * We allocate runtime services regions top-down, starting from -4G, i.e.
 * 0xffff_ffff_0000_0000 and limit EFI VA mapping space to 64G.
 */
static u64 efi_va = EFI_VA_START;
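/* The mm that was active before efi_enter_mm(); restored by efi_leave_mm(). */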
static struct mm_struct *efi_prev_mm;

/*
 * We need our own copy of the higher levels of the page tables
 * because we want to avoid inserting EFI region mappings (EFI_VA_END
 * to EFI_VA_START) into the standard kernel page tables. Everything
 * else can be shared, see efi_sync_low_kernel_mappings().
 *
 * We don't want the pgd on the pgd_list and cannot use pgd_alloc() for the
 * allocation.
 */
int __init efi_alloc_page_tables(void)
{
	pgd_t *pgd, *efi_pgd;
	p4d_t *p4d;
	pud_t *pud;
	gfp_t gfp_mask;

	gfp_mask = GFP_KERNEL | __GFP_ZERO;
	efi_pgd = (pgd_t *)__get_free_pages(gfp_mask, pgd_allocation_order());
	if (!efi_pgd)
		goto fail;

	pgd = efi_pgd + pgd_index(EFI_VA_END);
	p4d = p4d_alloc(&init_mm, pgd, EFI_VA_END);
	if (!p4d)
		goto free_pgd;

	pud = pud_alloc(&init_mm, p4d, EFI_VA_END);
	if (!pud)
		goto free_p4d;

	efi_mm.pgd = efi_pgd;
	mm_init_cpumask(&efi_mm);
	init_new_context(NULL, &efi_mm);
	set_notrack_mm(&efi_mm);

	return 0;

free_p4d:
	if (pgtable_l5_enabled())
		free_page((unsigned long)pgd_page_vaddr(*pgd));
free_pgd:
	free_pages((unsigned long)efi_pgd, pgd_allocation_order());
fail:
	return -ENOMEM;
}

/*
 * Add low kernel mappings for passing arguments to EFI functions.
 */
void efi_sync_low_kernel_mappings(void)
{
	unsigned num_entries;
	pgd_t *pgd_k, *pgd_efi;
	p4d_t *p4d_k, *p4d_efi;
	pud_t *pud_k, *pud_efi;
	pgd_t *efi_pgd = efi_mm.pgd;

	pgd_efi = efi_pgd + pgd_index(PAGE_OFFSET);
	pgd_k = pgd_offset_k(PAGE_OFFSET);

	num_entries = pgd_index(EFI_VA_END) - pgd_index(PAGE_OFFSET);
	memcpy(pgd_efi, pgd_k, sizeof(pgd_t) * num_entries);

	pgd_efi = efi_pgd + pgd_index(EFI_VA_END);
	pgd_k = pgd_offset_k(EFI_VA_END);
	p4d_efi = p4d_offset(pgd_efi, 0);
	p4d_k = p4d_offset(pgd_k, 0);

	num_entries = p4d_index(EFI_VA_END);
	memcpy(p4d_efi, p4d_k, sizeof(p4d_t) * num_entries);

	/*
	 * We share all the PUD entries apart from those that map the
	 * EFI regions. Copy around them.
	 */
	BUILD_BUG_ON((EFI_VA_START & ~PUD_MASK) != 0);
	BUILD_BUG_ON((EFI_VA_END & ~PUD_MASK) != 0);

	p4d_efi = p4d_offset(pgd_efi, EFI_VA_END);
	p4d_k = p4d_offset(pgd_k, EFI_VA_END);
	pud_efi = pud_offset(p4d_efi, 0);
	pud_k = pud_offset(p4d_k, 0);

	num_entries = pud_index(EFI_VA_END);
	memcpy(pud_efi, pud_k, sizeof(pud_t) * num_entries);

	pud_efi = pud_offset(p4d_efi, EFI_VA_START);
	pud_k = pud_offset(p4d_k, EFI_VA_START);

	num_entries = PTRS_PER_PUD - pud_index(EFI_VA_START);
	memcpy(pud_efi, pud_k, sizeof(pud_t) * num_entries);
}

/*
 * Wrapper for slow_virt_to_phys() that handles NULL addresses.
 */
static inline phys_addr_t
virt_to_phys_or_null_size(void *va, unsigned long size)
{
	phys_addr_t pa;

	if (!va)
		return 0;

	if (virt_addr_valid(va))
		return virt_to_phys(va);

	pa = slow_virt_to_phys(va);

	/* check if the object crosses a page boundary */
	if (WARN_ON((pa ^ (pa + size - 1)) & PAGE_MASK))
		return 0;

	return pa;
}

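/*
 * Convenience form that derives the size from the pointee type, so it is
 * only suitable for single objects; variable-length buffers must call
 * virt_to_phys_or_null_size() with an explicit size.
 */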
#define virt_to_phys_or_null(addr)				\
	virt_to_phys_or_null_size((addr), sizeof(*(addr)))

int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
{
	extern const u8 __efi64_thunk_ret_tramp[];
	unsigned long pfn, text, pf, rodata, tramp;
	struct page *page;
	unsigned npages;
	pgd_t *pgd = efi_mm.pgd;

	/*
	 * It can happen that the physical address of new_memmap lands in memory
	 * which is not mapped in the EFI page table. Therefore we need to go
	 * and ident-map those pages containing the map before calling
	 * phys_efi_set_virtual_address_map().
	 */
	pfn = pa_memmap >> PAGE_SHIFT;
	pf = _PAGE_NX | _PAGE_RW | _PAGE_ENC;
	if (kernel_map_pages_in_pgd(pgd, pfn, pa_memmap, num_pages, pf)) {
		pr_err("Error ident-mapping new memmap (0x%lx)!\n", pa_memmap);
		return 1;
	}

	/*
	 * Certain firmware versions are way too sentimental and still believe
	 * they are exclusive and unquestionable owners of the first physical page,
	 * even though they explicitly mark it as EFI_CONVENTIONAL_MEMORY
	 * (but then write-access it later during SetVirtualAddressMap()).
	 *
	 * Create a 1:1 mapping for this page, to avoid triple faults during early
	 * boot with such firmware. We are free to hand this page to the BIOS,
	 * as trim_bios_range() will reserve the first page and isolate it away
	 * from memory allocators anyway.
	 */
	if (kernel_map_pages_in_pgd(pgd, 0x0, 0x0, 1, pf)) {
		pr_err("Failed to create 1:1 mapping for the first page!\n");
		return 1;
	}

	/*
	 * When SEV-ES is active, the GHCB as set by the kernel will be used
	 * by firmware. Create a 1:1 unencrypted mapping for each GHCB.
	 */
	if (sev_es_efi_map_ghcbs_cas(pgd)) {
		pr_err("Failed to create 1:1 mapping for the GHCBs and CAs!\n");
		return 1;
	}

	/*
	 * When making calls to the firmware everything needs to be 1:1
	 * mapped and addressable with 32-bit pointers. Map the kernel
	 * text and allocate a new stack because we can't rely on the
	 * stack pointer being < 4GB.
	 */
	if (!efi_is_mixed())
		return 0;

	page = alloc_page(GFP_KERNEL|__GFP_DMA32);
	if (!page) {
		pr_err("Unable to allocate EFI runtime stack < 4GB\n");
		return 1;
	}

	efi_mixed_mode_stack_pa = page_to_phys(page + 1); /* stack grows down */

	npages = (_etext - _text) >> PAGE_SHIFT;
	text = __pa(_text);

	if (kernel_unmap_pages_in_pgd(pgd, text, npages)) {
		pr_err("Failed to unmap kernel text 1:1 mapping\n");
		return 1;
	}

	npages = (__end_rodata - __start_rodata) >> PAGE_SHIFT;
	rodata = __pa(__start_rodata);
	pfn = rodata >> PAGE_SHIFT;

	pf = _PAGE_NX | _PAGE_ENC;
	if (kernel_map_pages_in_pgd(pgd, pfn, rodata, npages, pf)) {
		pr_err("Failed to map kernel rodata 1:1\n");
		return 1;
	}

	tramp = __pa(__efi64_thunk_ret_tramp);
	pfn = tramp >> PAGE_SHIFT;

	pf = _PAGE_ENC;
	if (kernel_map_pages_in_pgd(pgd, pfn, tramp, 1, pf)) {
		pr_err("Failed to map mixed mode return trampoline\n");
		return 1;
	}

	return 0;
}

static void __init __map_region(efi_memory_desc_t *md, u64 va)
{
	unsigned long flags = _PAGE_RW;
	unsigned long pfn;
	pgd_t *pgd = efi_mm.pgd;

	/*
	 * EFI_RUNTIME_SERVICES_CODE regions typically cover PE/COFF
	 * executable images in memory that consist of both R-X and
	 * RW- sections, so we cannot apply read-only or non-exec
	 * permissions just yet. However, modern EFI systems provide
	 * a memory attributes table that describes those sections
	 * with the appropriate restricted permissions, which are
	 * applied in efi_runtime_update_mappings() below. All other
	 * regions can be mapped non-executable at this point, with
	 * the exception of boot services code regions, but those will
	 * be unmapped again entirely in efi_free_boot_services().
	 */
	if (md->type != EFI_BOOT_SERVICES_CODE &&
	    md->type != EFI_RUNTIME_SERVICES_CODE)
		flags |= _PAGE_NX;

	if (!(md->attribute & EFI_MEMORY_WB))
		flags |= _PAGE_PCD;

	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT) &&
	    md->type != EFI_MEMORY_MAPPED_IO)
		flags |= _PAGE_ENC;

	pfn = md->phys_addr >> PAGE_SHIFT;
	if (kernel_map_pages_in_pgd(pgd, pfn, va, md->num_pages, flags))
		pr_warn("Error mapping PA 0x%llx -> VA 0x%llx!\n",
			md->phys_addr, va);
}

void __init efi_map_region(efi_memory_desc_t *md)
{
	unsigned long size = md->num_pages << PAGE_SHIFT;
	u64 pa = md->phys_addr;

	/*
	 * Make sure the 1:1 mappings are present as a catch-all for b0rked
	 * firmware which doesn't update all internal pointers after switching
	 * to virtual mode and would otherwise crap on us.
	 */
	__map_region(md, md->phys_addr);

	/*
	 * Enforce the 1:1 mapping as the default virtual address when
	 * booting in EFI mixed mode, because even though we may be
	 * running a 64-bit kernel, the firmware may only be 32-bit.
	 */
	if (efi_is_mixed()) {
		md->virt_addr = md->phys_addr;
		return;
	}

	efi_va -= size;

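	/*
	 * Keep the VA's offset within its 2M page equal to the PA's offset,
	 * so that PMD-level (2M) mappings remain possible. For example
	 * (hypothetical address), pa = 0x4034_5000 has offset 0x14_5000
	 * within its 2M page, so the chosen efi_va gets the same offset.
	 */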
	/* Is PA 2M-aligned? */
	if (!(pa & (PMD_SIZE - 1))) {
		efi_va &= PMD_MASK;
	} else {
		u64 pa_offset = pa & (PMD_SIZE - 1);
		u64 prev_va = efi_va;

		/* get us the same offset within this 2M page */
		efi_va = (efi_va & PMD_MASK) + pa_offset;

		if (efi_va > prev_va)
			efi_va -= PMD_SIZE;
	}

	if (efi_va < EFI_VA_END) {
		pr_warn(FW_WARN "VA address range overflow!\n");
		return;
	}

	/* Do the VA map */
	__map_region(md, efi_va);
	md->virt_addr = efi_va;
}

/*
 * kexec kernel will use efi_map_region_fixed to map efi runtime memory ranges.
 * md->virt_addr is the original virtual address which had been mapped in kexec
 * 1st kernel.
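 * Both the 1:1 and the original VA mapping are recreated, mirroring what
 * efi_map_region() established in the first kernel.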
 */
void __init efi_map_region_fixed(efi_memory_desc_t *md)
{
	__map_region(md, md->phys_addr);
	__map_region(md, md->virt_addr);
}

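/*
 * Record where the EFI setup_data payload (struct efi_setup_data, passed
 * by a kexec'd kernel) lives; skip past the generic setup_data header.
 */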
void __init parse_efi_setup(u64 phys_addr, u32 data_len)
{
	efi_setup = phys_addr + sizeof(struct setup_data);
}

static int __init efi_update_mappings(efi_memory_desc_t *md, unsigned long pf)
{
	unsigned long pfn;
	pgd_t *pgd = efi_mm.pgd;
	int err1, err2;

	/* Update the 1:1 mapping */
	pfn = md->phys_addr >> PAGE_SHIFT;
	err1 = kernel_map_pages_in_pgd(pgd, pfn, md->phys_addr, md->num_pages, pf);
	if (err1) {
		pr_err("Error while updating 1:1 mapping PA 0x%llx -> VA 0x%llx!\n",
		       md->phys_addr, md->virt_addr);
	}

	err2 = kernel_map_pages_in_pgd(pgd, pfn, md->virt_addr, md->num_pages, pf);
	if (err2) {
		pr_err("Error while updating VA mapping PA 0x%llx -> VA 0x%llx!\n",
		       md->phys_addr, md->virt_addr);
	}

	return err1 || err2;
}

bool efi_disable_ibt_for_runtime __ro_after_init = true;

static int __init efi_update_mem_attr(struct mm_struct *mm, efi_memory_desc_t *md,
				      bool has_ibt)
{
	unsigned long pf = 0;

	efi_disable_ibt_for_runtime |= !has_ibt;

	if (md->attribute & EFI_MEMORY_XP)
		pf |= _PAGE_NX;

	if (!(md->attribute & EFI_MEMORY_RO))
		pf |= _PAGE_RW;

	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
		pf |= _PAGE_ENC;

	return efi_update_mappings(md, pf);
}

void __init efi_runtime_update_mappings(void)
{
	if (efi_enabled(EFI_MEM_ATTR)) {
		efi_disable_ibt_for_runtime = false;
		efi_memattr_apply_permissions(NULL, efi_update_mem_attr);
	}
}

void __init efi_dump_pagetable(void)
{
#ifdef CONFIG_EFI_PGT_DUMP
	ptdump_walk_pgd_level(NULL, &efi_mm);
#endif
}

/*
 * Makes the calling thread switch to/from efi_mm context. Can be used
 * in a kernel thread and user context. Preemption needs to remain disabled
 * while the EFI-mm is borrowed. mmgrab()/mmdrop() is not used because the mm
 * cannot change under us.
 * It should be ensured that there are no concurrent calls to this function.
 */
static void efi_enter_mm(void)
{
	efi_prev_mm = use_temporary_mm(&efi_mm);
}

static void efi_leave_mm(void)
{
	unuse_temporary_mm(efi_prev_mm);
}

void arch_efi_call_virt_setup(void)
{
	efi_sync_low_kernel_mappings();
	efi_fpu_begin();
	firmware_restrict_branch_speculation_start();
	efi_enter_mm();
}

void arch_efi_call_virt_teardown(void)
{
	efi_leave_mm();
	firmware_restrict_branch_speculation_end();
	efi_fpu_end();
}

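/* Serializes all mixed-mode runtime service calls and their argument setup. */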
static DEFINE_SPINLOCK(efi_runtime_lock);

/*
 * DS and ES contain user values. We need to save them.
 * The 32-bit EFI code needs a valid DS, ES, and SS. There's no
 * need to save the old SS: __KERNEL_DS is always acceptable.
 */
#define __efi_thunk(func, ...)						\
({									\
	unsigned short __ds, __es;					\
	efi_status_t ____s;						\
									\
	savesegment(ds, __ds);						\
	savesegment(es, __es);						\
									\
	loadsegment(ss, __KERNEL_DS);					\
	loadsegment(ds, __KERNEL_DS);					\
	loadsegment(es, __KERNEL_DS);					\
									\
	____s = efi64_thunk(efi.runtime->mixed_mode.func, __VA_ARGS__);	\
									\
	loadsegment(ds, __ds);						\
	loadsegment(es, __es);						\
									\
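	/*								\
	 * A 32-bit firmware call flags errors in bit 31, while the	\
	 * 64-bit efi_status_t carries the error flag in bit 63; the	\
	 * XOR below clears bit 31 and sets bit 63 when it was set.	\
	 */								\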
	____s ^= (____s & BIT(31)) | (____s & BIT_ULL(31)) << 32;	\
	____s;								\
})

/*
 * Switch to the EFI page tables early so that we can access the 1:1
 * runtime services mappings which are not mapped in any other page
 * tables.
 *
 * Also, disable interrupts because the IDT points to 64-bit handlers,
 * which aren't going to function correctly when we switch to 32-bit.
 */
#define efi_thunk(func...)						\
({									\
	efi_status_t __s;						\
									\
	arch_efi_call_virt_setup();					\
									\
	__s = __efi_thunk(func);					\
									\
	arch_efi_call_virt_teardown();					\
									\
	__s;								\
})

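/*
 * Switch a 32-bit firmware into virtual mode. Open-codes the page table
 * switch and thunk call instead of using efi_thunk(), so that interrupts
 * can stay disabled across the entire call.
 */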
static efi_status_t __init __no_sanitize_address
efi_thunk_set_virtual_address_map(unsigned long memory_map_size,
				  unsigned long descriptor_size,
				  u32 descriptor_version,
				  efi_memory_desc_t *virtual_map)
{
	efi_status_t status;
	unsigned long flags;

	efi_sync_low_kernel_mappings();
	local_irq_save(flags);

	efi_enter_mm();

	status = __efi_thunk(set_virtual_address_map, memory_map_size,
			     descriptor_size, descriptor_version, virtual_map);

	efi_leave_mm();
	local_irq_restore(flags);

	return status;
}

static efi_status_t efi_thunk_get_time(efi_time_t *tm, efi_time_cap_t *tc)
{
	return EFI_UNSUPPORTED;
}

static efi_status_t efi_thunk_set_time(efi_time_t *tm)
{
	return EFI_UNSUPPORTED;
}

static efi_status_t
efi_thunk_get_wakeup_time(efi_bool_t *enabled, efi_bool_t *pending,
			  efi_time_t *tm)
{
	return EFI_UNSUPPORTED;
}

static efi_status_t
efi_thunk_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
{
	return EFI_UNSUPPORTED;
}

static unsigned long efi_name_size(efi_char16_t *name)
{
	return ucs2_strsize(name, EFI_VAR_NAME_LEN) + 1;
}

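/*
 * The vendor GUID is copied to an on-stack buffer, aligned to its own size,
 * so that the 16-byte object cannot straddle a page boundary and
 * virt_to_phys_or_null() is guaranteed to resolve it.
 */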
static efi_status_t
efi_thunk_get_variable(efi_char16_t *name, efi_guid_t *vendor,
		       u32 *attr, unsigned long *data_size, void *data)
{
	u8 buf[24] __aligned(8);
	efi_guid_t *vnd = PTR_ALIGN((efi_guid_t *)buf, sizeof(*vnd));
	efi_status_t status;
	u32 phys_name, phys_vendor, phys_attr;
	u32 phys_data_size, phys_data;
	unsigned long flags;

	spin_lock_irqsave(&efi_runtime_lock, flags);

	*vnd = *vendor;

	phys_data_size = virt_to_phys_or_null(data_size);
	phys_vendor = virt_to_phys_or_null(vnd);
	phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
	phys_attr = virt_to_phys_or_null(attr);
	phys_data = virt_to_phys_or_null_size(data, *data_size);

	if (!phys_name || (data && !phys_data))
		status = EFI_INVALID_PARAMETER;
	else
		status = efi_thunk(get_variable, phys_name, phys_vendor,
				   phys_attr, phys_data_size, phys_data);

	spin_unlock_irqrestore(&efi_runtime_lock, flags);

	return status;
}

static efi_status_t
efi_thunk_set_variable(efi_char16_t *name, efi_guid_t *vendor,
		       u32 attr, unsigned long data_size, void *data)
{
	u8 buf[24] __aligned(8);
	efi_guid_t *vnd = PTR_ALIGN((efi_guid_t *)buf, sizeof(*vnd));
	u32 phys_name, phys_vendor, phys_data;
	efi_status_t status;
	unsigned long flags;

	spin_lock_irqsave(&efi_runtime_lock, flags);

	*vnd = *vendor;

	phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
	phys_vendor = virt_to_phys_or_null(vnd);
	phys_data = virt_to_phys_or_null_size(data, data_size);

	if (!phys_name || (data && !phys_data))
		status = EFI_INVALID_PARAMETER;
	else
		status = efi_thunk(set_variable, phys_name, phys_vendor,
				   attr, data_size, phys_data);

	spin_unlock_irqrestore(&efi_runtime_lock, flags);

	return status;
}

static efi_status_t
efi_thunk_set_variable_nonblocking(efi_char16_t *name, efi_guid_t *vendor,
				   u32 attr, unsigned long data_size,
				   void *data)
{
	u8 buf[24] __aligned(8);
	efi_guid_t *vnd = PTR_ALIGN((efi_guid_t *)buf, sizeof(*vnd));
	u32 phys_name, phys_vendor, phys_data;
	efi_status_t status;
	unsigned long flags;

	if (!spin_trylock_irqsave(&efi_runtime_lock, flags))
		return EFI_NOT_READY;

	*vnd = *vendor;

	phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
	phys_vendor = virt_to_phys_or_null(vnd);
	phys_data = virt_to_phys_or_null_size(data, data_size);

	if (!phys_name || (data && !phys_data))
		status = EFI_INVALID_PARAMETER;
	else
		status = efi_thunk(set_variable, phys_name, phys_vendor,
				   attr, data_size, phys_data);

	spin_unlock_irqrestore(&efi_runtime_lock, flags);

	return status;
}

static efi_status_t
efi_thunk_get_next_variable(unsigned long *name_size,
			    efi_char16_t *name,
			    efi_guid_t *vendor)
{
	u8 buf[24] __aligned(8);
	efi_guid_t *vnd = PTR_ALIGN((efi_guid_t *)buf, sizeof(*vnd));
	efi_status_t status;
	u32 phys_name_size, phys_name, phys_vendor;
	unsigned long flags;

	spin_lock_irqsave(&efi_runtime_lock, flags);

	*vnd = *vendor;

	phys_name_size = virt_to_phys_or_null(name_size);
	phys_vendor = virt_to_phys_or_null(vnd);
	phys_name = virt_to_phys_or_null_size(name, *name_size);

	if (!phys_name)
		status = EFI_INVALID_PARAMETER;
	else
		status = efi_thunk(get_next_variable, phys_name_size,
				   phys_name, phys_vendor);

	spin_unlock_irqrestore(&efi_runtime_lock, flags);

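	/*
	 * GetNextVariableName() updates the vendor GUID in place;
	 * copy the result back to the caller's buffer.
	 */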
	*vendor = *vnd;
	return status;
}

static efi_status_t
efi_thunk_get_next_high_mono_count(u32 *count)
{
	return EFI_UNSUPPORTED;
}

static void
efi_thunk_reset_system(int reset_type, efi_status_t status,
		       unsigned long data_size, efi_char16_t *data)
{
	u32 phys_data;
	unsigned long flags;

	spin_lock_irqsave(&efi_runtime_lock, flags);

	phys_data = virt_to_phys_or_null_size(data, data_size);

	efi_thunk(reset_system, reset_type, status, data_size, phys_data);

	spin_unlock_irqrestore(&efi_runtime_lock, flags);
}

static efi_status_t
efi_thunk_update_capsule(efi_capsule_header_t **capsules,
			 unsigned long count, unsigned long sg_list)
{
	/*
	 * To properly support this function we would need to repackage
	 * 'capsules' because the firmware doesn't understand 64-bit
	 * pointers.
	 */
	return EFI_UNSUPPORTED;
}

static efi_status_t
efi_thunk_query_variable_info(u32 attr, u64 *storage_space,
			      u64 *remaining_space,
			      u64 *max_variable_size)
{
	efi_status_t status;
	u32 phys_storage, phys_remaining, phys_max;
	unsigned long flags;

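	/* QueryVariableInfo() was only introduced in UEFI 2.0. */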
	if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
		return EFI_UNSUPPORTED;

	spin_lock_irqsave(&efi_runtime_lock, flags);

	phys_storage = virt_to_phys_or_null(storage_space);
	phys_remaining = virt_to_phys_or_null(remaining_space);
	phys_max = virt_to_phys_or_null(max_variable_size);

	status = efi_thunk(query_variable_info, attr, phys_storage,
			   phys_remaining, phys_max);

	spin_unlock_irqrestore(&efi_runtime_lock, flags);

	return status;
}

static efi_status_t
efi_thunk_query_variable_info_nonblocking(u32 attr, u64 *storage_space,
					  u64 *remaining_space,
					  u64 *max_variable_size)
{
	efi_status_t status;
	u32 phys_storage, phys_remaining, phys_max;
	unsigned long flags;

	if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
		return EFI_UNSUPPORTED;

	if (!spin_trylock_irqsave(&efi_runtime_lock, flags))
		return EFI_NOT_READY;

	phys_storage = virt_to_phys_or_null(storage_space);
	phys_remaining = virt_to_phys_or_null(remaining_space);
	phys_max = virt_to_phys_or_null(max_variable_size);

	status = efi_thunk(query_variable_info, attr, phys_storage,
			   phys_remaining, phys_max);

	spin_unlock_irqrestore(&efi_runtime_lock, flags);

	return status;
}

static efi_status_t
efi_thunk_query_capsule_caps(efi_capsule_header_t **capsules,
			     unsigned long count, u64 *max_size,
			     int *reset_type)
{
	/*
	 * To properly support this function we would need to repackage
	 * 'capsules' because the firmware doesn't understand 64-bit
	 * pointers.
	 */
	return EFI_UNSUPPORTED;
}

void __init efi_thunk_runtime_setup(void)
{
	if (!IS_ENABLED(CONFIG_EFI_MIXED))
		return;

	efi.get_time = efi_thunk_get_time;
	efi.set_time = efi_thunk_set_time;
	efi.get_wakeup_time = efi_thunk_get_wakeup_time;
	efi.set_wakeup_time = efi_thunk_set_wakeup_time;
	efi.get_variable = efi_thunk_get_variable;
	efi.get_next_variable = efi_thunk_get_next_variable;
	efi.set_variable = efi_thunk_set_variable;
	efi.set_variable_nonblocking = efi_thunk_set_variable_nonblocking;
	efi.get_next_high_mono_count = efi_thunk_get_next_high_mono_count;
	efi.reset_system = efi_thunk_reset_system;
	efi.query_variable_info = efi_thunk_query_variable_info;
	efi.query_variable_info_nonblocking = efi_thunk_query_variable_info_nonblocking;
	efi.update_capsule = efi_thunk_update_capsule;
	efi.query_capsule_caps = efi_thunk_query_capsule_caps;
}

efi_status_t __init __no_sanitize_address
efi_set_virtual_address_map(unsigned long memory_map_size,
			    unsigned long descriptor_size,
			    u32 descriptor_version,
			    efi_memory_desc_t *virtual_map,
			    unsigned long systab_phys)
{
	const efi_system_table_t *systab = (efi_system_table_t *)systab_phys;
	efi_status_t status;
	unsigned long flags;

	if (efi_is_mixed())
		return efi_thunk_set_virtual_address_map(memory_map_size,
							 descriptor_size,
							 descriptor_version,
							 virtual_map);
	efi_enter_mm();

	efi_fpu_begin();

	/* Disable interrupts around EFI calls: */
	local_irq_save(flags);
	status = arch_efi_call_virt(efi.runtime, set_virtual_address_map,
				    memory_map_size, descriptor_size,
				    descriptor_version, virtual_map);
	local_irq_restore(flags);

	efi_fpu_end();

	/* grab the virtually remapped EFI runtime services table pointer */
	efi.runtime = READ_ONCE(systab->runtime);

	efi_leave_mm();

	return status;
}