// SPDX-License-Identifier: GPL-2.0-only
/*
 * Hibernation support for x86-64
 *
 * Copyright (c) 2007 Rafael J. Wysocki <rjw@sisk.pl>
 * Copyright (c) 2002 Pavel Machek <pavel@ucw.cz>
 * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
 */

#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/suspend.h>
#include <linux/scatterlist.h>
#include <linux/kdebug.h>
#include <linux/pgtable.h>

#include <crypto/hash.h>

#include <asm/e820/api.h>
#include <asm/init.h>
#include <asm/proto.h>
#include <asm/page.h>
#include <asm/mtrr.h>
#include <asm/sections.h>
#include <asm/suspend.h>
#include <asm/tlbflush.h>

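/*
 * Create the single kernel text mapping needed for the final jump to the
 * image kernel.  All page-table pages are taken from get_safe_page(), so
 * they cannot collide with pages of the image being restored.
 */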
static int set_up_temporary_text_mapping(pgd_t *pgd)
{
	pmd_t *pmd;
	pud_t *pud;
	p4d_t *p4d = NULL;
	pgprot_t pgtable_prot = __pgprot(_KERNPG_TABLE);
	pgprot_t pmd_text_prot = __pgprot(__PAGE_KERNEL_LARGE_EXEC);

	/* Filter out unsupported __PAGE_KERNEL* bits: */
	pgprot_val(pmd_text_prot) &= __default_kernel_pte_mask;
	pgprot_val(pgtable_prot)  &= __default_kernel_pte_mask;

	/*
	 * The new mapping only has to cover the page containing the image
	 * kernel's entry point (jump_address_phys), because the switch over to
	 * it is carried out by relocated code running from a page allocated
	 * specifically for this purpose and covered by the identity mapping, so
	 * the temporary kernel text mapping is only needed for the final jump.
	 * Moreover, in that mapping the virtual address of the image kernel's
	 * entry point must be the same as its virtual address in the image
	 * kernel (restore_jump_address), so the image kernel's
	 * restore_registers() code doesn't find itself in a different area of
	 * the virtual address space after switching over to the original page
	 * tables used by the image kernel.
	 */

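	/*
	 * With 5-level paging there is an extra p4d level between the pgd
	 * and the pud; allocate a page for it only when that mode was
	 * enabled at boot.
	 */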
	if (pgtable_l5_enabled()) {
		p4d = (p4d_t *)get_safe_page(GFP_ATOMIC);
		if (!p4d)
			return -ENOMEM;
	}

	pud = (pud_t *)get_safe_page(GFP_ATOMIC);
	if (!pud)
		return -ENOMEM;

	pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
	if (!pmd)
		return -ENOMEM;

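	/*
	 * Wire up the levels bottom-up: a large executable PMD entry
	 * covering jump_address_phys, a pud entry pointing at the pmd page,
	 * and pgd (plus p4d, if present) entries above that.
	 */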
	set_pmd(pmd + pmd_index(restore_jump_address),
		__pmd((jump_address_phys & PMD_MASK) | pgprot_val(pmd_text_prot)));
	set_pud(pud + pud_index(restore_jump_address),
		__pud(__pa(pmd) | pgprot_val(pgtable_prot)));
	if (p4d) {
		p4d_t new_p4d = __p4d(__pa(pud) | pgprot_val(pgtable_prot));
		pgd_t new_pgd = __pgd(__pa(p4d) | pgprot_val(pgtable_prot));

		set_p4d(p4d + p4d_index(restore_jump_address), new_p4d);
		set_pgd(pgd + pgd_index(restore_jump_address), new_pgd);
	} else {
		/* No p4d for 4-level paging: point the pgd to the pud page table */
		pgd_t new_pgd = __pgd(__pa(pud) | pgprot_val(pgtable_prot));

		set_pgd(pgd + pgd_index(restore_jump_address), new_pgd);
	}

	return 0;
}

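/*
 * Page-table allocation callback handed to kernel_ident_mapping_init()
 * below; get_safe_page() returns a free page that is not part of the
 * hibernation image, so building the tables cannot corrupt image data.
 */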
static void *alloc_pgt_page(void *context)
{
	return (void *)get_safe_page(GFP_ATOMIC);
}

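/*
 * Build the temporary page tables used during the switch-over: the kernel
 * text mapping created above plus a rebuilt direct mapping of all ranges
 * recorded in pfn_mapped.  The root is published in temp_pgt for the
 * assembly restore code.
 */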
static int set_up_temporary_mappings(void)
{
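	/*
	 * Mapping parameters for kernel_ident_mapping_init(): allocate
	 * page-table pages from safe pages, use large executable pages,
	 * and offset the mapping by __PAGE_OFFSET to reproduce the
	 * direct-map layout.
	 */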
	struct x86_mapping_info info = {
		.alloc_pgt_page	= alloc_pgt_page,
		.page_flag	= __PAGE_KERNEL_LARGE_EXEC,
		.offset		= __PAGE_OFFSET,
	};
	unsigned long mstart, mend;
	pgd_t *pgd;
	int result;
	int i;

	pgd = (pgd_t *)get_safe_page(GFP_ATOMIC);
	if (!pgd)
		return -ENOMEM;

	/* Prepare a temporary mapping for the kernel text */
	result = set_up_temporary_text_mapping(pgd);
	if (result)
		return result;

	/* Set up the direct mapping from scratch */
	for (i = 0; i < nr_pfn_mapped; i++) {
		mstart = pfn_mapped[i].start << PAGE_SHIFT;
		mend   = pfn_mapped[i].end << PAGE_SHIFT;

		result = kernel_ident_mapping_init(&info, pgd, mstart, mend);
		if (result)
			return result;
	}

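	/* Hand the physical address of the root table to the asm code. */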
	temp_pgt = __pa(pgd);
	return 0;
}

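/*
 * Called by the hibernation core once the image has been loaded, to hand
 * control over to the architecture-specific switch-over code.
 */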
asmlinkage int swsusp_arch_resume(void)
{
	int error;

	/* We have got enough memory and from now on we cannot recover */
	error = set_up_temporary_mappings();
	if (error)
		return error;

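	/*
	 * Copy the switch-over code to a safe page, so that it cannot be
	 * overwritten while it runs and copies the image pages into place.
	 */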
	error = relocate_restore_code();
	if (error)
		return error;

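	/*
	 * Switch to the temporary page tables, restore the image pages and
	 * jump to the image kernel's entry point; on success, execution
	 * continues in the restored kernel.
	 */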
	restore_image();
	return 0;
}