// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/x86_64/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2000  Pavel Machek <pavel@ucw.cz>
 *  Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/dma-mapping.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/memremap.h>
#include <linux/nmi.h>
#include <linux/gfp.h>
#include <linux/kcore.h>
#include <linux/bootmem_info.h>

#include <asm/processor.h>
#include <asm/bios_ebda.h>
#include <linux/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820/api.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/sections.h>
#include <asm/kdebug.h>
#include <asm/numa.h>
#include <asm/set_memory.h>
#include <asm/init.h>
#include <asm/uv/uv.h>
#include <asm/setup.h>
#include <asm/ftrace.h>

#include "mm_internal.h"

#include "ident_map.c"

#define DEFINE_POPULATE(fname, type1, type2, init)		\
static inline void fname##_init(struct mm_struct *mm,		\
		type1##_t *arg1, type2##_t *arg2, bool init)	\
{								\
	if (init)						\
		fname##_safe(mm, arg1, arg2);			\
	else							\
		fname(mm, arg1, arg2);				\
}

DEFINE_POPULATE(p4d_populate, p4d, pud, init)
DEFINE_POPULATE(pgd_populate, pgd, p4d, init)
DEFINE_POPULATE(pud_populate, pud, pmd, init)
DEFINE_POPULATE(pmd_populate_kernel, pmd, pte, init)

#define DEFINE_ENTRY(type1, type2, init)			\
static inline void set_##type1##_init(type1##_t *arg1,		\
			type2##_t arg2, bool init)		\
{								\
	if (init)						\
		set_##type1##_safe(arg1, arg2);			\
	else							\
		set_##type1(arg1, arg2);			\
}

DEFINE_ENTRY(p4d, p4d, init)
DEFINE_ENTRY(pud, pud, init)
DEFINE_ENTRY(pmd, pmd, init)
DEFINE_ENTRY(pte, pte, init)
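
/*
 * For illustration (a sketch, not extra code in this file),
 * DEFINE_ENTRY(pte, pte, init) above generates roughly:
 *
 *	static inline void set_pte_init(pte_t *arg1, pte_t arg2, bool init)
 *	{
 *		if (init)
 *			set_pte_safe(arg1, arg2);
 *		else
 *			set_pte(arg1, arg2);
 *	}
 *
 * The _safe variants warn once if a present, different entry would be
 * silently overwritten, which is the behaviour wanted while the tables
 * are first being built (init == true).
 */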

static inline pgprot_t prot_sethuge(pgprot_t prot)
{
	WARN_ON_ONCE(pgprot_val(prot) & _PAGE_PAT);

	return __pgprot(pgprot_val(prot) | _PAGE_PSE);
}
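
/*
 * Note on the WARN_ON_ONCE() above: in a 4K PTE the PAT bit is bit 7,
 * but in a 2M/1G leaf entry bit 7 is PSE and the PAT bit moves to
 * bit 12. OR-ing _PAGE_PSE into a prot that still carries the 4K PAT
 * bit would therefore silently change the requested memory type.
 */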

/*
 * NOTE: pagetable_init() allocates all of the fixmap pagetables
 * contiguously in physical memory, so we can cache the location of the
 * first one and move around without rechecking the pgd every time.
 */

/* Bits supported by the hardware: */
pteval_t __supported_pte_mask __read_mostly = ~0;
/* Bits allowed in normal kernel mappings: */
pteval_t __default_kernel_pte_mask __read_mostly = ~0;
EXPORT_SYMBOL_GPL(__supported_pte_mask);
/* Used in PAGE_KERNEL_* macros which are reasonably used out-of-tree: */
EXPORT_SYMBOL(__default_kernel_pte_mask);

int force_personality32;

/*
 * noexec32=on|off
 * Control the non-executable heap for 32-bit processes.
 *
 * on	PROT_READ does not imply PROT_EXEC for 32-bit processes (default)
 * off	PROT_READ implies PROT_EXEC
 */
static int __init nonx32_setup(char *str)
{
	if (!strcmp(str, "on"))
		force_personality32 &= ~READ_IMPLIES_EXEC;
	else if (!strcmp(str, "off"))
		force_personality32 |= READ_IMPLIES_EXEC;
	return 1;
}
__setup("noexec32=", nonx32_setup);
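
/*
 * Example: booting with "noexec32=off" on the kernel command line makes
 * PROT_READ imply PROT_EXEC for 32-bit processes (the legacy behaviour),
 * while "noexec32=on" keeps the default non-executable heap.
 */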

static void sync_global_pgds_l5(unsigned long start, unsigned long end)
{
	unsigned long addr;

	for (addr = start; addr <= end; addr = ALIGN(addr + 1, PGDIR_SIZE)) {
		const pgd_t *pgd_ref = pgd_offset_k(addr);
		struct page *page;

		/* Check for overflow */
		if (addr < start)
			break;

		if (pgd_none(*pgd_ref))
			continue;

		spin_lock(&pgd_lock);
		list_for_each_entry(page, &pgd_list, lru) {
			pgd_t *pgd;
			spinlock_t *pgt_lock;

			pgd = (pgd_t *)page_address(page) + pgd_index(addr);
			/* the pgt_lock only for Xen */
			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
			spin_lock(pgt_lock);

			if (!pgd_none(*pgd_ref) && !pgd_none(*pgd))
				BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));

			if (pgd_none(*pgd))
				set_pgd(pgd, *pgd_ref);

			spin_unlock(pgt_lock);
		}
		spin_unlock(&pgd_lock);
	}
}

static void sync_global_pgds_l4(unsigned long start, unsigned long end)
{
	unsigned long addr;

	for (addr = start; addr <= end; addr = ALIGN(addr + 1, PGDIR_SIZE)) {
		pgd_t *pgd_ref = pgd_offset_k(addr);
		const p4d_t *p4d_ref;
		struct page *page;

		/*
		 * With folded p4d, pgd_none() is always false, so we need
		 * to handle the synchronization at the p4d level.
		 */
		MAYBE_BUILD_BUG_ON(pgd_none(*pgd_ref));
		p4d_ref = p4d_offset(pgd_ref, addr);

		if (p4d_none(*p4d_ref))
			continue;

		spin_lock(&pgd_lock);
		list_for_each_entry(page, &pgd_list, lru) {
			pgd_t *pgd;
			p4d_t *p4d;
			spinlock_t *pgt_lock;

			pgd = (pgd_t *)page_address(page) + pgd_index(addr);
			p4d = p4d_offset(pgd, addr);
			/* the pgt_lock only for Xen */
			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
			spin_lock(pgt_lock);

			if (!p4d_none(*p4d_ref) && !p4d_none(*p4d))
				BUG_ON(p4d_pgtable(*p4d)
				       != p4d_pgtable(*p4d_ref));

			if (p4d_none(*p4d))
				set_p4d(p4d, *p4d_ref);

			spin_unlock(pgt_lock);
		}
		spin_unlock(&pgd_lock);
	}
}

/*
 * When memory is added, make sure that all processes' MMs get suitable
 * PGD entries in their local PGD-level page.
 */
static void sync_global_pgds(unsigned long start, unsigned long end)
{
	if (pgtable_l5_enabled())
		sync_global_pgds_l5(start, end);
	else
		sync_global_pgds_l4(start, end);
}

/*
 * Make kernel mappings visible in all page tables in the system.
 * This is unnecessary only when the init task populates kernel mappings
 * during the boot process: in that case, all processes originating from
 * the init task copy the kernel mappings, so there is no issue.
 * Otherwise, missing synchronization could lead to kernel crashes due
 * to missing page table entries for certain kernel mappings.
 *
 * Synchronization is performed at the top level, which is the PGD in
 * 5-level paging systems. In 4-level paging systems, however,
 * pgd_populate() is a no-op, so synchronization is done at the P4D level.
 * sync_global_pgds() handles this difference between paging levels.
 */
void arch_sync_kernel_mappings(unsigned long start, unsigned long end)
{
	sync_global_pgds(start, end);
}

/*
 * NOTE: This function is marked __ref because it calls the __init
 * function memblock_alloc(). It's safe to do that ONLY while
 * after_bootmem == 0.
 */
static __ref void *spp_getpage(void)
{
	void *ptr;

	if (after_bootmem)
		ptr = (void *) get_zeroed_page(GFP_ATOMIC);
	else
		ptr = memblock_alloc(PAGE_SIZE, PAGE_SIZE);

	if (!ptr || ((unsigned long)ptr & ~PAGE_MASK)) {
		panic("set_pte_phys: cannot allocate page data %s\n",
		      after_bootmem ? "after bootmem" : "");
	}

	pr_debug("spp_getpage %p\n", ptr);

	return ptr;
}

static p4d_t *fill_p4d(pgd_t *pgd, unsigned long vaddr)
{
	if (pgd_none(*pgd)) {
		p4d_t *p4d = (p4d_t *)spp_getpage();
		pgd_populate(&init_mm, pgd, p4d);
		if (p4d != p4d_offset(pgd, 0))
			printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
			       p4d, p4d_offset(pgd, 0));
	}
	return p4d_offset(pgd, vaddr);
}

static pud_t *fill_pud(p4d_t *p4d, unsigned long vaddr)
{
	if (p4d_none(*p4d)) {
		pud_t *pud = (pud_t *)spp_getpage();
		p4d_populate(&init_mm, p4d, pud);
		if (pud != pud_offset(p4d, 0))
			printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
			       pud, pud_offset(p4d, 0));
	}
	return pud_offset(p4d, vaddr);
}

static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
{
	if (pud_none(*pud)) {
		pmd_t *pmd = (pmd_t *) spp_getpage();
		pud_populate(&init_mm, pud, pmd);
		if (pmd != pmd_offset(pud, 0))
			printk(KERN_ERR "PAGETABLE BUG #02! %p <-> %p\n",
			       pmd, pmd_offset(pud, 0));
	}
	return pmd_offset(pud, vaddr);
}

static pte_t *fill_pte(pmd_t *pmd, unsigned long vaddr)
{
	if (pmd_none(*pmd)) {
		pte_t *pte = (pte_t *) spp_getpage();
		pmd_populate_kernel(&init_mm, pmd, pte);
		if (pte != pte_offset_kernel(pmd, 0))
			printk(KERN_ERR "PAGETABLE BUG #03!\n");
	}
	return pte_offset_kernel(pmd, vaddr);
}
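
/*
 * The fill_*() helpers above are designed to be chained: each one
 * allocates the next-level table on demand and returns the entry for
 * vaddr, so a caller can walk pgd -> p4d -> pud -> pmd -> pte and end
 * up with a usable PTE pointer, as set_pte_vaddr() and
 * populate_extra_pte() below demonstrate.
 */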

static void __set_pte_vaddr(pud_t *pud, unsigned long vaddr, pte_t new_pte)
{
	pmd_t *pmd = fill_pmd(pud, vaddr);
	pte_t *pte = fill_pte(pmd, vaddr);

	set_pte(pte, new_pte);

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	flush_tlb_one_kernel(vaddr);
}

void set_pte_vaddr_p4d(p4d_t *p4d_page, unsigned long vaddr, pte_t new_pte)
{
	p4d_t *p4d = p4d_page + p4d_index(vaddr);
	pud_t *pud = fill_pud(p4d, vaddr);

	__set_pte_vaddr(pud, vaddr, new_pte);
}

void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
{
	pud_t *pud = pud_page + pud_index(vaddr);

	__set_pte_vaddr(pud, vaddr, new_pte);
}

void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
{
	pgd_t *pgd;
	p4d_t *p4d_page;

	pr_debug("set_pte_vaddr %lx to %lx\n", vaddr, native_pte_val(pteval));

	pgd = pgd_offset_k(vaddr);
	if (pgd_none(*pgd)) {
		printk(KERN_ERR
		       "PGD FIXMAP MISSING, it should be setup in head.S!\n");
		return;
	}

	p4d_page = p4d_offset(pgd, 0);
	set_pte_vaddr_p4d(p4d_page, vaddr, pteval);
}

pmd_t * __init populate_extra_pmd(unsigned long vaddr)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;

	pgd = pgd_offset_k(vaddr);
	p4d = fill_p4d(pgd, vaddr);
	pud = fill_pud(p4d, vaddr);
	return fill_pmd(pud, vaddr);
}

pte_t * __init populate_extra_pte(unsigned long vaddr)
{
	pmd_t *pmd;

	pmd = populate_extra_pmd(vaddr);
	return fill_pte(pmd, vaddr);
}

/*
 * Create large page table mappings for a range of physical addresses.
 */
static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
					enum page_cache_mode cache)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pgprot_t prot;

	pgprot_val(prot) = pgprot_val(PAGE_KERNEL_LARGE) |
			   protval_4k_2_large(cachemode2protval(cache));
	BUG_ON((phys & ~PMD_MASK) || (size & ~PMD_MASK));
	for (; size; phys += PMD_SIZE, size -= PMD_SIZE) {
		pgd = pgd_offset_k((unsigned long)__va(phys));
		if (pgd_none(*pgd)) {
			p4d = (p4d_t *) spp_getpage();
			set_pgd(pgd, __pgd(__pa(p4d) | _KERNPG_TABLE |
					   _PAGE_USER));
		}
		p4d = p4d_offset(pgd, (unsigned long)__va(phys));
		if (p4d_none(*p4d)) {
			pud = (pud_t *) spp_getpage();
			set_p4d(p4d, __p4d(__pa(pud) | _KERNPG_TABLE |
					   _PAGE_USER));
		}
		pud = pud_offset(p4d, (unsigned long)__va(phys));
		if (pud_none(*pud)) {
			pmd = (pmd_t *) spp_getpage();
			set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
					   _PAGE_USER));
		}
		pmd = pmd_offset(pud, phys);
		BUG_ON(!pmd_none(*pmd));
		set_pmd(pmd, __pmd(phys | pgprot_val(prot)));
	}
}

void __init init_extra_mapping_wb(unsigned long phys, unsigned long size)
{
	__init_extra_mapping(phys, size, _PAGE_CACHE_MODE_WB);
}

void __init init_extra_mapping_uc(unsigned long phys, unsigned long size)
{
	__init_extra_mapping(phys, size, _PAGE_CACHE_MODE_UC);
}

/*
 * The head.S code sets up the kernel high mapping:
 *
 *   from __START_KERNEL_map to __START_KERNEL_map + size (== _end-_text)
 *
 * phys_base holds the negative offset to the kernel, which is added
 * to the compile time generated pmds. This results in invalid pmds up
 * to the point where we hit the physaddr 0 mapping.
 *
 * We limit the mappings to the region from _text to _brk_end.  _brk_end
 * is rounded up to the 2MB boundary. This catches the invalid pmds as
 * well, as they are located before _text:
 */
void __init cleanup_highmap(void)
{
	unsigned long vaddr = __START_KERNEL_map;
	unsigned long vaddr_end = __START_KERNEL_map + KERNEL_IMAGE_SIZE;
	unsigned long end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
	pmd_t *pmd = level2_kernel_pgt;

	/*
	 * Native path, max_pfn_mapped is not set yet.
	 * Xen has valid max_pfn_mapped set in
	 *	arch/x86/xen/mmu.c:xen_setup_kernel_pagetable().
	 */
	if (max_pfn_mapped)
		vaddr_end = __START_KERNEL_map + (max_pfn_mapped << PAGE_SHIFT);

	for (; vaddr + PMD_SIZE - 1 < vaddr_end; pmd++, vaddr += PMD_SIZE) {
		if (pmd_none(*pmd))
			continue;
		if (vaddr < (unsigned long) _text || vaddr > end)
			set_pmd(pmd, __pmd(0));
	}
}
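
/*
 * Example for the loop above: level2_kernel_pgt covers the high kernel
 * mapping in 2MB PMDs. Every populated PMD whose vaddr lies below _text
 * or above the 2MB-rounded _brk_end is zapped, which also removes the
 * invalid phys_base-offset PMDs located before _text (see the comment
 * above cleanup_highmap()).
 */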

/*
 * Create PTE level page table mapping for physical addresses.
 * It returns the last physical address mapped.
 */
static unsigned long __meminit
phys_pte_init(pte_t *pte_page, unsigned long paddr, unsigned long paddr_end,
	      pgprot_t prot, bool init)
{
	unsigned long pages = 0, paddr_next;
	unsigned long paddr_last = paddr_end;
	pte_t *pte;
	int i;

	pte = pte_page + pte_index(paddr);
	i = pte_index(paddr);

	for (; i < PTRS_PER_PTE; i++, paddr = paddr_next, pte++) {
		paddr_next = (paddr & PAGE_MASK) + PAGE_SIZE;
		if (paddr >= paddr_end) {
			if (!after_bootmem &&
			    !e820__mapped_any(paddr & PAGE_MASK, paddr_next,
					      E820_TYPE_RAM) &&
			    !e820__mapped_any(paddr & PAGE_MASK, paddr_next,
					      E820_TYPE_ACPI))
				set_pte_init(pte, __pte(0), init);
			continue;
		}

		/*
		 * We will re-use the existing mapping.
		 * Xen for example has some special requirements, like mapping
		 * pagetable pages as RO. So assume that whoever pre-set up
		 * these mappings knew what they were doing.
		 */
		if (!pte_none(*pte)) {
			if (!after_bootmem)
				pages++;
			continue;
		}

		if (0)
			pr_info("   pte=%p addr=%lx pte=%016lx\n", pte, paddr,
				pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL).pte);
		pages++;
		set_pte_init(pte, pfn_pte(paddr >> PAGE_SHIFT, prot), init);
		paddr_last = (paddr & PAGE_MASK) + PAGE_SIZE;
	}

	update_page_count(PG_LEVEL_4K, pages);

	return paddr_last;
}

/*
 * Create PMD level page table mapping for physical addresses. The virtual
 * and physical addresses have to be aligned at this level.
 * It returns the last physical address mapped.
 */
static unsigned long __meminit
phys_pmd_init(pmd_t *pmd_page, unsigned long paddr, unsigned long paddr_end,
	      unsigned long page_size_mask, pgprot_t prot, bool init)
{
	unsigned long pages = 0, paddr_next;
	unsigned long paddr_last = paddr_end;

	int i = pmd_index(paddr);

	for (; i < PTRS_PER_PMD; i++, paddr = paddr_next) {
		pmd_t *pmd = pmd_page + pmd_index(paddr);
		pte_t *pte;
		pgprot_t new_prot = prot;

		paddr_next = (paddr & PMD_MASK) + PMD_SIZE;
		if (paddr >= paddr_end) {
			if (!after_bootmem &&
			    !e820__mapped_any(paddr & PMD_MASK, paddr_next,
					      E820_TYPE_RAM) &&
			    !e820__mapped_any(paddr & PMD_MASK, paddr_next,
					      E820_TYPE_ACPI))
				set_pmd_init(pmd, __pmd(0), init);
			continue;
		}

		if (!pmd_none(*pmd)) {
			if (!pmd_leaf(*pmd)) {
				spin_lock(&init_mm.page_table_lock);
				pte = (pte_t *)pmd_page_vaddr(*pmd);
				paddr_last = phys_pte_init(pte, paddr,
							   paddr_end, prot,
							   init);
				spin_unlock(&init_mm.page_table_lock);
				continue;
			}
			/*
			 * If we are ok with PG_LEVEL_2M mapping, then we will
			 * use the existing mapping.
			 *
			 * Otherwise, we will split the large page mapping but
			 * use the same existing protection bits except for
			 * large page, so that we don't violate Intel's TLB
			 * Application note (317080) which says, while changing
			 * the page sizes, new and old translations should
			 * not differ with respect to page frame and
			 * attributes.
			 */
			if (page_size_mask & (1 << PG_LEVEL_2M)) {
				if (!after_bootmem)
					pages++;
				paddr_last = paddr_next;
				continue;
			}
			new_prot = pte_pgprot(pte_clrhuge(*(pte_t *)pmd));
		}

		if (page_size_mask & (1<<PG_LEVEL_2M)) {
			pages++;
			spin_lock(&init_mm.page_table_lock);
			set_pmd_init(pmd,
				     pfn_pmd(paddr >> PAGE_SHIFT,
					     prot_sethuge(prot)),
				     init);
			spin_unlock(&init_mm.page_table_lock);
			paddr_last = paddr_next;
			continue;
		}

		pte = alloc_low_page();
		paddr_last = phys_pte_init(pte, paddr, paddr_end, new_prot, init);

		spin_lock(&init_mm.page_table_lock);
		pmd_populate_kernel_init(&init_mm, pmd, pte, init);
		spin_unlock(&init_mm.page_table_lock);
	}
	update_page_count(PG_LEVEL_2M, pages);
	return paddr_last;
}

/*
 * Create PUD level page table mapping for physical addresses. The virtual
 * and physical addresses do not have to be aligned at this level. KASLR can
 * randomize virtual addresses up to this level.
 * It returns the last physical address mapped.
 */
static unsigned long __meminit
phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
	      unsigned long page_size_mask, pgprot_t _prot, bool init)
{
	unsigned long pages = 0, paddr_next;
	unsigned long paddr_last = paddr_end;
	unsigned long vaddr = (unsigned long)__va(paddr);
	int i = pud_index(vaddr);

	for (; i < PTRS_PER_PUD; i++, paddr = paddr_next) {
		pud_t *pud;
		pmd_t *pmd;
		pgprot_t prot = _prot;

		vaddr = (unsigned long)__va(paddr);
		pud = pud_page + pud_index(vaddr);
		paddr_next = (paddr & PUD_MASK) + PUD_SIZE;

		if (paddr >= paddr_end) {
			if (!after_bootmem &&
			    !e820__mapped_any(paddr & PUD_MASK, paddr_next,
					      E820_TYPE_RAM) &&
			    !e820__mapped_any(paddr & PUD_MASK, paddr_next,
					      E820_TYPE_ACPI))
				set_pud_init(pud, __pud(0), init);
			continue;
		}

		if (!pud_none(*pud)) {
			if (!pud_leaf(*pud)) {
				pmd = pmd_offset(pud, 0);
				paddr_last = phys_pmd_init(pmd, paddr,
							   paddr_end,
							   page_size_mask,
							   prot, init);
				continue;
			}
			/*
			 * If we are ok with PG_LEVEL_1G mapping, then we will
			 * use the existing mapping.
			 *
			 * Otherwise, we will split the gbpage mapping but use
			 * the same existing protection bits except for large
			 * page, so that we don't violate Intel's TLB
			 * Application note (317080) which says, while changing
			 * the page sizes, new and old translations should
			 * not differ with respect to page frame and
			 * attributes.
			 */
			if (page_size_mask & (1 << PG_LEVEL_1G)) {
				if (!after_bootmem)
					pages++;
				paddr_last = paddr_next;
				continue;
			}
			prot = pte_pgprot(pte_clrhuge(*(pte_t *)pud));
		}

		if (page_size_mask & (1<<PG_LEVEL_1G)) {
			pages++;
			spin_lock(&init_mm.page_table_lock);
			set_pud_init(pud,
				     pfn_pud(paddr >> PAGE_SHIFT,
					     prot_sethuge(prot)),
				     init);
			spin_unlock(&init_mm.page_table_lock);
			paddr_last = paddr_next;
			continue;
		}

		pmd = alloc_low_page();
		paddr_last = phys_pmd_init(pmd, paddr, paddr_end,
					   page_size_mask, prot, init);

		spin_lock(&init_mm.page_table_lock);
		pud_populate_init(&init_mm, pud, pmd, init);
		spin_unlock(&init_mm.page_table_lock);
	}

	update_page_count(PG_LEVEL_1G, pages);

	return paddr_last;
}
| 692 | static unsigned long __meminit | 
|---|
| 693 | phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end, | 
|---|
| 694 | unsigned long page_size_mask, pgprot_t prot, bool init) | 
|---|
| 695 | { | 
|---|
| 696 | unsigned long vaddr, vaddr_end, vaddr_next, paddr_next, paddr_last; | 
|---|
| 697 |  | 
|---|
| 698 | paddr_last = paddr_end; | 
|---|
| 699 | vaddr = (unsigned long)__va(paddr); | 
|---|
| 700 | vaddr_end = (unsigned long)__va(paddr_end); | 
|---|
| 701 |  | 
|---|
| 702 | if (!pgtable_l5_enabled()) | 
|---|
| 703 | return phys_pud_init(pud_page: (pud_t *) p4d_page, paddr, paddr_end, | 
|---|
| 704 | page_size_mask, prot: prot, init); | 
|---|
| 705 |  | 
|---|
| 706 | for (; vaddr < vaddr_end; vaddr = vaddr_next) { | 
|---|
| 707 | p4d_t *p4d = p4d_page + p4d_index(address: vaddr); | 
|---|
| 708 | pud_t *pud; | 
|---|
| 709 |  | 
|---|
| 710 | vaddr_next = (vaddr & P4D_MASK) + P4D_SIZE; | 
|---|
| 711 | paddr = __pa(vaddr); | 
|---|
| 712 |  | 
|---|
| 713 | if (paddr >= paddr_end) { | 
|---|
| 714 | paddr_next = __pa(vaddr_next); | 
|---|
| 715 | if (!after_bootmem && | 
|---|
| 716 | !e820__mapped_any(start: paddr & P4D_MASK, end: paddr_next, | 
|---|
| 717 | type: E820_TYPE_RAM) && | 
|---|
| 718 | !e820__mapped_any(start: paddr & P4D_MASK, end: paddr_next, | 
|---|
| 719 | type: E820_TYPE_ACPI)) | 
|---|
| 720 | set_p4d_init(arg1: p4d, __p4d(0), init); | 
|---|
| 721 | continue; | 
|---|
| 722 | } | 
|---|
| 723 |  | 
|---|
| 724 | if (!p4d_none(p4d: *p4d)) { | 
|---|
| 725 | pud = pud_offset(p4d, address: 0); | 
|---|
| 726 | paddr_last = phys_pud_init(pud_page: pud, paddr, __pa(vaddr_end), | 
|---|
| 727 | page_size_mask, prot: prot, init); | 
|---|
| 728 | continue; | 
|---|
| 729 | } | 
|---|
| 730 |  | 
|---|
| 731 | pud = alloc_low_page(); | 
|---|
| 732 | paddr_last = phys_pud_init(pud_page: pud, paddr, __pa(vaddr_end), | 
|---|
| 733 | page_size_mask, prot: prot, init); | 
|---|
| 734 |  | 
|---|
| 735 | spin_lock(lock: &init_mm.page_table_lock); | 
|---|
| 736 | p4d_populate_init(mm: &init_mm, arg1: p4d, arg2: pud, init); | 
|---|
| 737 | spin_unlock(lock: &init_mm.page_table_lock); | 
|---|
| 738 | } | 
|---|
| 739 |  | 
|---|
| 740 | return paddr_last; | 
|---|
| 741 | } | 
|---|
| 742 |  | 
|---|
| 743 | static unsigned long __meminit | 
|---|
| 744 | __kernel_physical_mapping_init(unsigned long paddr_start, | 
|---|
| 745 | unsigned long paddr_end, | 
|---|
| 746 | unsigned long page_size_mask, | 
|---|
| 747 | pgprot_t prot, bool init) | 
|---|
| 748 | { | 
|---|
| 749 | bool pgd_changed = false; | 
|---|
| 750 | unsigned long vaddr, vaddr_start, vaddr_end, vaddr_next, paddr_last; | 
|---|
| 751 |  | 
|---|
| 752 | paddr_last = paddr_end; | 
|---|
| 753 | vaddr = (unsigned long)__va(paddr_start); | 
|---|
| 754 | vaddr_end = (unsigned long)__va(paddr_end); | 
|---|
| 755 | vaddr_start = vaddr; | 
|---|
| 756 |  | 
|---|
| 757 | for (; vaddr < vaddr_end; vaddr = vaddr_next) { | 
|---|
| 758 | pgd_t *pgd = pgd_offset_k(vaddr); | 
|---|
| 759 | p4d_t *p4d; | 
|---|
| 760 |  | 
|---|
| 761 | vaddr_next = (vaddr & PGDIR_MASK) + PGDIR_SIZE; | 
|---|
| 762 |  | 
|---|
| 763 | if (pgd_val(*pgd)) { | 
|---|
| 764 | p4d = (p4d_t *)pgd_page_vaddr(pgd: *pgd); | 
|---|
| 765 | paddr_last = phys_p4d_init(p4d_page: p4d, __pa(vaddr), | 
|---|
| 766 | __pa(vaddr_end), | 
|---|
| 767 | page_size_mask, | 
|---|
| 768 | prot, init); | 
|---|
| 769 | continue; | 
|---|
| 770 | } | 
|---|
| 771 |  | 
|---|
| 772 | p4d = alloc_low_page(); | 
|---|
| 773 | paddr_last = phys_p4d_init(p4d_page: p4d, __pa(vaddr), __pa(vaddr_end), | 
|---|
| 774 | page_size_mask, prot, init); | 
|---|
| 775 |  | 
|---|
| 776 | spin_lock(lock: &init_mm.page_table_lock); | 
|---|
| 777 | if (pgtable_l5_enabled()) | 
|---|
| 778 | pgd_populate_init(mm: &init_mm, arg1: pgd, arg2: p4d, init); | 
|---|
| 779 | else | 
|---|
| 780 | p4d_populate_init(mm: &init_mm, arg1: p4d_offset(pgd, address: vaddr), | 
|---|
| 781 | arg2: (pud_t *) p4d, init); | 
|---|
| 782 |  | 
|---|
| 783 | spin_unlock(lock: &init_mm.page_table_lock); | 
|---|
| 784 | pgd_changed = true; | 
|---|
| 785 | } | 
|---|
| 786 |  | 
|---|
| 787 | if (pgd_changed) | 
|---|
| 788 | sync_global_pgds(start: vaddr_start, end: vaddr_end - 1); | 
|---|
| 789 |  | 
|---|
| 790 | return paddr_last; | 
|---|
| 791 | } | 
|---|
| 792 |  | 
|---|

/*
 * Create page table mappings for a range of physical memory. Note that
 * this can only be used to populate non-present entries.
 * The virtual and physical addresses have to be aligned on PMD level
 * down. It returns the last physical address mapped.
 */
unsigned long __meminit
kernel_physical_mapping_init(unsigned long paddr_start,
			     unsigned long paddr_end,
			     unsigned long page_size_mask, pgprot_t prot)
{
	return __kernel_physical_mapping_init(paddr_start, paddr_end,
					      page_size_mask, prot, true);
}

/*
 * This function is similar to kernel_physical_mapping_init() above with the
 * exception that it uses set_{pud,pmd}() instead of the set_{pud,pmd}_safe()
 * functions when updating the mapping. The caller is responsible for flushing
 * the TLBs after the function returns.
 */
unsigned long __meminit
kernel_physical_mapping_change(unsigned long paddr_start,
			       unsigned long paddr_end,
			       unsigned long page_size_mask)
{
	return __kernel_physical_mapping_init(paddr_start, paddr_end,
					      page_size_mask, PAGE_KERNEL,
					      false);
}
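
/*
 * Minimal usage sketch (hypothetical caller): because
 * kernel_physical_mapping_change() leaves stale TLB entries behind, a
 * caller re-mapping an already-mapped range is expected to flush
 * afterwards, e.g.:
 *
 *	kernel_physical_mapping_change(start, end, page_size_mask);
 *	flush_tlb_all();
 */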

#ifndef CONFIG_NUMA
static __always_inline void x86_numa_init(void)
{
	memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
}
#endif

void __init initmem_init(void)
{
	x86_numa_init();
}

void __init paging_init(void)
{
	sparse_init();

	/*
	 * Clear the default memory state for node 0.
	 * Note: don't use nodes_clear here; that really clears when NUMA
	 * support is not compiled in, and later node_set_state will not
	 * set it back.
	 */
	node_clear_state(0, N_MEMORY);
	node_clear_state(0, N_NORMAL_MEMORY);

	zone_sizes_init();
}

#define PAGE_UNUSED 0xFD

/*
 * The unused vmemmap range, which was not yet memset(PAGE_UNUSED), ranges
 * from unused_pmd_start to the next PMD_SIZE boundary.
 */
static unsigned long unused_pmd_start __meminitdata;

static void __meminit vmemmap_flush_unused_pmd(void)
{
	if (!unused_pmd_start)
		return;
	/*
	 * Mark [unused_pmd_start, next PMD boundary) as PAGE_UNUSED.
	 */
	memset((void *)unused_pmd_start, PAGE_UNUSED,
	       ALIGN(unused_pmd_start, PMD_SIZE) - unused_pmd_start);
	unused_pmd_start = 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
/* Returns true if the PMD is completely unused and thus it can be freed */
static bool __meminit vmemmap_pmd_is_unused(unsigned long addr, unsigned long end)
{
	unsigned long start = ALIGN_DOWN(addr, PMD_SIZE);

	/*
	 * Flush the unused range cache to ensure that memchr_inv() will work
	 * for the whole range.
	 */
	vmemmap_flush_unused_pmd();
	memset((void *)addr, PAGE_UNUSED, end - addr);

	return !memchr_inv((void *)start, PAGE_UNUSED, PMD_SIZE);
}
#endif

static void __meminit __vmemmap_use_sub_pmd(unsigned long start)
{
	/*
	 * As we expect to add in the same granularity as we remove, it's
	 * sufficient to mark only some piece used to block the memmap page from
	 * getting removed when removing some other adjacent memmap (just in
	 * case the first memmap never gets initialized e.g., because the memory
	 * block never gets onlined).
	 */
	memset((void *)start, 0, sizeof(struct page));
}

static void __meminit vmemmap_use_sub_pmd(unsigned long start, unsigned long end)
{
	/*
	 * We only optimize if the new used range directly follows the
	 * previously unused range (esp., when populating consecutive sections).
	 */
	if (unused_pmd_start == start) {
		if (likely(IS_ALIGNED(end, PMD_SIZE)))
			unused_pmd_start = 0;
		else
			unused_pmd_start = end;
		return;
	}

	/*
	 * If the range does not contiguously follow the previous one, make
	 * sure to mark the unused range of the previous one so it can be
	 * removed.
	 */
	vmemmap_flush_unused_pmd();
	__vmemmap_use_sub_pmd(start);
}


static void __meminit vmemmap_use_new_sub_pmd(unsigned long start, unsigned long end)
{
	const unsigned long page = ALIGN_DOWN(start, PMD_SIZE);

	vmemmap_flush_unused_pmd();

	/*
	 * Could be our memmap page is filled with PAGE_UNUSED already from a
	 * previous remove. Make sure to reset it.
	 */
	__vmemmap_use_sub_pmd(start);

	/*
	 * Mark with PAGE_UNUSED the unused parts of the new memmap range
	 */
	if (!IS_ALIGNED(start, PMD_SIZE))
		memset((void *)page, PAGE_UNUSED, start - page);

	/*
	 * We want to avoid memset(PAGE_UNUSED) when populating the vmemmap of
	 * consecutive sections. Remember for the last added PMD where the
	 * unused range begins.
	 */
	if (!IS_ALIGNED(end, PMD_SIZE))
		unused_pmd_start = end;
}
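
/*
 * Example (assuming 2MB vmemmap PMDs): when a hot-added section needs
 * memmap only for [start, end) in the middle of a PMD, the bytes before
 * start are memset to PAGE_UNUSED right away, while the tail from end
 * up to the PMD boundary is merely remembered in unused_pmd_start, so
 * a directly following section can reuse it without another memset.
 */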

/*
 * Memory hotplug specific functions
 */
#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * After memory hotplug the variables max_pfn, max_low_pfn and high_memory need
 * updating.
 */
static void update_end_of_memory_vars(u64 start, u64 size)
{
	unsigned long end_pfn = PFN_UP(start + size);

	if (end_pfn > max_pfn) {
		max_pfn = end_pfn;
		max_low_pfn = end_pfn;
		high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
	}
}

int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
	      struct mhp_params *params)
{
	unsigned long end = ((start_pfn + nr_pages) << PAGE_SHIFT) - 1;
	int ret;

	if (WARN_ON_ONCE(end > DIRECT_MAP_PHYSMEM_END))
		return -ERANGE;

	ret = __add_pages(nid, start_pfn, nr_pages, params);
	WARN_ON_ONCE(ret);

	/*
	 * Special case: add_pages() is called by memremap_pages() for adding device
	 * private pages. Do not bump up max_pfn in the device private path,
	 * because max_pfn changes affect dma_addressing_limited().
	 *
	 * dma_addressing_limited() returning true when max_pfn is the device's
	 * addressable memory can force device drivers to use bounce buffers
	 * and impact their performance negatively:
	 */
	if (!params->pgmap)
		/* update max_pfn, max_low_pfn and high_memory */
		update_end_of_memory_vars(start_pfn << PAGE_SHIFT, nr_pages << PAGE_SHIFT);

	return ret;
}

int arch_add_memory(int nid, u64 start, u64 size,
		    struct mhp_params *params)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	init_memory_mapping(start, start + size, params->pgprot);

	return add_pages(nid, start_pfn, nr_pages, params);
}

static void free_reserved_pages(struct page *page, unsigned long nr_pages)
{
	while (nr_pages--)
		free_reserved_page(page++);
}

static void __meminit free_pagetable(struct page *page, int order)
{
	/* bootmem page has reserved flag */
	if (PageReserved(page)) {
		unsigned long nr_pages = 1 << order;
#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
		enum bootmem_type type = bootmem_type(page);

		if (type == SECTION_INFO || type == MIX_SECTION_INFO) {
			while (nr_pages--)
				put_page_bootmem(page++);
		} else {
			free_reserved_pages(page, nr_pages);
		}
#else
		free_reserved_pages(page, nr_pages);
#endif
	} else {
		__free_pages(page, order);
	}
}

static void __meminit free_hugepage_table(struct page *page,
		struct vmem_altmap *altmap)
{
	if (altmap)
		vmem_altmap_free(altmap, PMD_SIZE / PAGE_SIZE);
	else
		free_pagetable(page, get_order(PMD_SIZE));
}

static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd)
{
	pte_t *pte;
	int i;

	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte = pte_start + i;
		if (!pte_none(*pte))
			return;
	}

	/* free a pte table */
	free_pagetable(pmd_page(*pmd), 0);
	spin_lock(&init_mm.page_table_lock);
	pmd_clear(pmd);
	spin_unlock(&init_mm.page_table_lock);
}

static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud)
{
	pmd_t *pmd;
	int i;

	for (i = 0; i < PTRS_PER_PMD; i++) {
		pmd = pmd_start + i;
		if (!pmd_none(*pmd))
			return;
	}

	/* free a pmd table */
	free_pagetable(pud_page(*pud), 0);
	spin_lock(&init_mm.page_table_lock);
	pud_clear(pud);
	spin_unlock(&init_mm.page_table_lock);
}

static void __meminit free_pud_table(pud_t *pud_start, p4d_t *p4d)
{
	pud_t *pud;
	int i;

	for (i = 0; i < PTRS_PER_PUD; i++) {
		pud = pud_start + i;
		if (!pud_none(*pud))
			return;
	}

	/* free a pud table */
	free_pagetable(p4d_page(*p4d), 0);
	spin_lock(&init_mm.page_table_lock);
	p4d_clear(p4d);
	spin_unlock(&init_mm.page_table_lock);
}

static void __meminit
remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end,
		 bool direct)
{
	unsigned long next, pages = 0;
	pte_t *pte;
	phys_addr_t phys_addr;

	pte = pte_start + pte_index(addr);
	for (; addr < end; addr = next, pte++) {
		next = (addr + PAGE_SIZE) & PAGE_MASK;
		if (next > end)
			next = end;

		if (!pte_present(*pte))
			continue;

		/*
		 * We mapped [0,1G) memory as identity mapping when
		 * initializing, in arch/x86/kernel/head_64.S. These
		 * pagetables cannot be removed.
		 */
		phys_addr = pte_val(*pte) + (addr & PAGE_MASK);
		if (phys_addr < (phys_addr_t)0x40000000)
			return;

		if (!direct)
			free_pagetable(pte_page(*pte), 0);

		spin_lock(&init_mm.page_table_lock);
		pte_clear(&init_mm, addr, pte);
		spin_unlock(&init_mm.page_table_lock);

		/* For non-direct mapping, pages means nothing. */
		pages++;
	}

	/* Call free_pte_table() in remove_pmd_table(). */
	flush_tlb_all();
	if (direct)
		update_page_count(PG_LEVEL_4K, -pages);
}

static void __meminit
remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end,
		 bool direct, struct vmem_altmap *altmap)
{
	unsigned long next, pages = 0;
	pte_t *pte_base;
	pmd_t *pmd;

	pmd = pmd_start + pmd_index(addr);
	for (; addr < end; addr = next, pmd++) {
		next = pmd_addr_end(addr, end);

		if (!pmd_present(*pmd))
			continue;

		if (pmd_leaf(*pmd)) {
			if (IS_ALIGNED(addr, PMD_SIZE) &&
			    IS_ALIGNED(next, PMD_SIZE)) {
				if (!direct)
					free_hugepage_table(pmd_page(*pmd),
							    altmap);

				spin_lock(&init_mm.page_table_lock);
				pmd_clear(pmd);
				spin_unlock(&init_mm.page_table_lock);
				pages++;
			} else if (vmemmap_pmd_is_unused(addr, next)) {
				free_hugepage_table(pmd_page(*pmd),
						    altmap);
				spin_lock(&init_mm.page_table_lock);
				pmd_clear(pmd);
				spin_unlock(&init_mm.page_table_lock);
			}
			continue;
		}

		pte_base = (pte_t *)pmd_page_vaddr(*pmd);
		remove_pte_table(pte_base, addr, next, direct);
		free_pte_table(pte_base, pmd);
	}

	/* Call free_pmd_table() in remove_pud_table(). */
	if (direct)
		update_page_count(PG_LEVEL_2M, -pages);
}

static void __meminit
remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end,
		 struct vmem_altmap *altmap, bool direct)
{
	unsigned long next, pages = 0;
	pmd_t *pmd_base;
	pud_t *pud;

	pud = pud_start + pud_index(addr);
	for (; addr < end; addr = next, pud++) {
		next = pud_addr_end(addr, end);

		if (!pud_present(*pud))
			continue;

		if (pud_leaf(*pud) &&
		    IS_ALIGNED(addr, PUD_SIZE) &&
		    IS_ALIGNED(next, PUD_SIZE)) {
			spin_lock(&init_mm.page_table_lock);
			pud_clear(pud);
			spin_unlock(&init_mm.page_table_lock);
			pages++;
			continue;
		}

		pmd_base = pmd_offset(pud, 0);
		remove_pmd_table(pmd_base, addr, next, direct, altmap);
		free_pmd_table(pmd_base, pud);
	}

	if (direct)
		update_page_count(PG_LEVEL_1G, -pages);
}

static void __meminit
remove_p4d_table(p4d_t *p4d_start, unsigned long addr, unsigned long end,
		 struct vmem_altmap *altmap, bool direct)
{
	unsigned long next, pages = 0;
	pud_t *pud_base;
	p4d_t *p4d;

	p4d = p4d_start + p4d_index(addr);
	for (; addr < end; addr = next, p4d++) {
		next = p4d_addr_end(addr, end);

		if (!p4d_present(*p4d))
			continue;

		BUILD_BUG_ON(p4d_leaf(*p4d));

		pud_base = pud_offset(p4d, 0);
		remove_pud_table(pud_base, addr, next, altmap, direct);
		/*
		 * For 4-level page tables we do not want to free PUDs, but in the
		 * 5-level case we should free them. This code will have to change
		 * to adapt for boot-time switching between 4 and 5 level page tables.
		 */
		if (pgtable_l5_enabled())
			free_pud_table(pud_base, p4d);
	}

	if (direct)
		update_page_count(PG_LEVEL_512G, -pages);
}

/* start and end are both virtual addresses. */
static void __meminit
remove_pagetable(unsigned long start, unsigned long end, bool direct,
		 struct vmem_altmap *altmap)
{
	unsigned long next;
	unsigned long addr;
	pgd_t *pgd;
	p4d_t *p4d;

	for (addr = start; addr < end; addr = next) {
		next = pgd_addr_end(addr, end);

		pgd = pgd_offset_k(addr);
		if (!pgd_present(*pgd))
			continue;

		p4d = p4d_offset(pgd, 0);
		remove_p4d_table(p4d, addr, next, altmap, direct);
	}

	flush_tlb_all();
}

void __ref vmemmap_free(unsigned long start, unsigned long end,
		struct vmem_altmap *altmap)
{
	VM_BUG_ON(!PAGE_ALIGNED(start));
	VM_BUG_ON(!PAGE_ALIGNED(end));

	remove_pagetable(start, end, false, altmap);
}

static void __meminit
kernel_physical_mapping_remove(unsigned long start, unsigned long end)
{
	start = (unsigned long)__va(start);
	end = (unsigned long)__va(end);

	remove_pagetable(start, end, true, NULL);
}

void __ref arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	__remove_pages(start_pfn, nr_pages, altmap);
	kernel_physical_mapping_remove(start, start + size);
}
#endif /* CONFIG_MEMORY_HOTPLUG */
| 1307 |  | 
|---|
| 1308 | static struct kcore_list kcore_vsyscall; | 
|---|
| 1309 |  | 
|---|
| 1310 | static void __init register_page_bootmem_info(void) | 
|---|
| 1311 | { | 
|---|
| 1312 | #if defined(CONFIG_NUMA) || defined(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP) | 
|---|
| 1313 | int i; | 
|---|
| 1314 |  | 
|---|
| 1315 | for_each_online_node(i) | 
|---|
| 1316 | register_page_bootmem_info_node(NODE_DATA(i)); | 
|---|
| 1317 | #endif | 
|---|
| 1318 | } | 
|---|
| 1319 |  | 
|---|
| 1320 | /* | 
|---|
| 1321 | * Pre-allocates page-table pages for the vmalloc area in the kernel page-table. | 
|---|
| 1322 | * Only the level which needs to be synchronized between all page-tables is | 
|---|
| 1323 | * allocated because the synchronization can be expensive. | 
|---|
| 1324 | */ | 
|---|
| 1325 | static void __init preallocate_vmalloc_pages(void) | 
|---|
| 1326 | { | 
|---|
| 1327 | unsigned long addr; | 
|---|
| 1328 | const char *lvl; | 
|---|
| 1329 |  | 
|---|
| 1330 | for (addr = VMALLOC_START; addr <= VMEMORY_END; addr = ALIGN(addr + 1, PGDIR_SIZE)) { | 
|---|
| 1331 | pgd_t *pgd = pgd_offset_k(addr); | 
|---|
| 1332 | p4d_t *p4d; | 
|---|
| 1333 | pud_t *pud; | 
|---|
| 1334 |  | 
|---|
| 1335 | lvl = "p4d"; | 
|---|
| 1336 | p4d = p4d_alloc(mm: &init_mm, pgd, address: addr); | 
|---|
| 1337 | if (!p4d) | 
|---|
| 1338 | goto failed; | 
|---|
| 1339 |  | 
|---|
| 1340 | if (pgtable_l5_enabled()) | 
|---|
| 1341 | continue; | 
|---|
| 1342 |  | 
|---|
| 1343 | /* | 
|---|
| 1344 | * The goal here is to allocate all possibly required | 
|---|
| 1345 | * hardware page tables pointed to by the top hardware | 
|---|
| 1346 | * level. | 
|---|
| 1347 | * | 
|---|
| 1348 | * On 4-level systems, the P4D layer is folded away and | 
|---|
| 1349 | * the above code does no preallocation.  Below, go down | 
|---|
| 1350 | * to the pud _software_ level to ensure the second | 
|---|
| 1351 | * hardware level is allocated on 4-level systems too. | 
|---|
| 1352 | */ | 
|---|
| 1353 | lvl = "pud"; | 
|---|
| 1354 | pud = pud_alloc(&init_mm, p4d, addr); | 
|---|
| 1355 | if (!pud) | 
|---|
| 1356 | goto failed; | 
|---|
| 1357 | } | 
|---|
| 1358 |  | 
|---|
| 1359 | return; | 
|---|
| 1360 |  | 
|---|
| 1361 | failed: | 
|---|
| 1362 |  | 
|---|
| 1363 | /* | 
|---|
| 1364 | * The pages have to be there now or they will be missing in | 
|---|
| 1365 | * process page-tables later. | 
|---|
| 1366 | */ | 
|---|
| 1367 | panic("Failed to pre-allocate %s pages for vmalloc area\n", lvl); | 
|---|
| 1368 | } | 
|---|
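
The stride in preallocate_vmalloc_pages() above, `ALIGN(addr + 1, PGDIR_SIZE)`, jumps to the next top-level slot even when the start address sits mid-slot, so every PGD (or P4D) entry in the range is visited exactly once. A minimal userspace sketch, with local stand-in constants:

```c
#include <stdio.h>

#define DEMO_PGDIR_SIZE  (1UL << 39)
#define DEMO_ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))   /* round up, power of two */

int main(void)
{
	unsigned long start = (5UL << 39) + 0x1000;  /* starts mid-slot in slot 5 */
	unsigned long end   = 8UL << 39;             /* inclusive bound, as in the kernel loop */
	unsigned long addr;
	int slots = 0;

	for (addr = start; addr <= end; addr = DEMO_ALIGN(addr + 1, DEMO_PGDIR_SIZE)) {
		printf("visit slot %lu\n", addr >> 39);
		slots++;
	}
	printf("%d slots visited\n", slots);    /* slots 5, 6, 7, 8 -> 4 visits */
	return 0;
}
```

The `+ 1` matters: a boundary-aligned `addr` must still advance to the *next* slot rather than re-align to itself and loop forever.
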
| 1369 |  | 
|---|
| 1370 | void __init arch_mm_preinit(void) | 
|---|
| 1371 | { | 
|---|
| 1372 | pci_iommu_alloc(); | 
|---|
| 1373 | } | 
|---|
| 1374 |  | 
|---|
| 1375 | void __init mem_init(void) | 
|---|
| 1376 | { | 
|---|
| 1377 | /* clear_bss() already cleared the empty_zero_page */ | 
|---|
| 1378 |  | 
|---|
| 1379 | after_bootmem = 1; | 
|---|
| 1380 | x86_init.hyper.init_after_bootmem(); | 
|---|
| 1381 |  | 
|---|
| 1382 | /* | 
|---|
| 1383 | * Must be done after boot memory is put on freelist, because here we | 
|---|
| 1384 | * might set fields in deferred struct pages that have not yet been | 
|---|
| 1385 | * initialized, and memblock_free_all() initializes all the reserved | 
|---|
| 1386 | * deferred pages for us. | 
|---|
| 1387 | */ | 
|---|
| 1388 | register_page_bootmem_info(); | 
|---|
| 1389 |  | 
|---|
| 1390 | /* Register memory areas for /proc/kcore */ | 
|---|
| 1391 | if (get_gate_vma(&init_mm)) | 
|---|
| 1392 | kclist_add(&kcore_vsyscall, (void *)VSYSCALL_ADDR, PAGE_SIZE, KCORE_USER); | 
|---|
| 1393 |  | 
|---|
| 1394 | preallocate_vmalloc_pages(); | 
|---|
| 1395 | } | 
|---|
| 1396 |  | 
|---|
| 1397 | int kernel_set_to_readonly; | 
|---|
| 1398 |  | 
|---|
| 1399 | void mark_rodata_ro(void) | 
|---|
| 1400 | { | 
|---|
| 1401 | unsigned long start = PFN_ALIGN(_text); | 
|---|
| 1402 | unsigned long rodata_start = PFN_ALIGN(__start_rodata); | 
|---|
| 1403 | unsigned long end = (unsigned long)__end_rodata_hpage_align; | 
|---|
| 1404 | unsigned long text_end = PFN_ALIGN(_etext); | 
|---|
| 1405 | unsigned long rodata_end = PFN_ALIGN(__end_rodata); | 
|---|
| 1406 | unsigned long all_end; | 
|---|
| 1407 |  | 
|---|
| 1408 | printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n", | 
|---|
| 1409 | (end - start) >> 10); | 
|---|
| 1410 | set_memory_ro(start, (end - start) >> PAGE_SHIFT); | 
|---|
| 1411 |  | 
|---|
| 1412 | kernel_set_to_readonly = 1; | 
|---|
| 1413 |  | 
|---|
| 1414 | /* | 
|---|
| 1415 | * The rodata/data/bss/brk section (but not the kernel text!) | 
|---|
| 1416 | * should also be not-executable. | 
|---|
| 1417 | * | 
|---|
| 1418 | * We align all_end to PMD_SIZE because the existing mapping | 
|---|
| 1419 | * is a full PMD. If we aligned _brk_end to PAGE_SIZE instead, we | 
|---|
| 1420 | * would split the PMD, and the remainder between _brk_end and the | 
|---|
| 1421 | * end of the PMD would remain mapped executable. | 
|---|
| 1422 | * | 
|---|
| 1423 | * Any PMD which was set up after the one which covers _brk_end | 
|---|
| 1424 | * has been zapped already via cleanup_highmem(). | 
|---|
| 1425 | */ | 
|---|
| 1426 | all_end = roundup((unsigned long)_brk_end, PMD_SIZE); | 
|---|
| 1427 | set_memory_nx(text_end, (all_end - text_end) >> PAGE_SHIFT); | 
|---|
| 1428 |  | 
|---|
| 1429 | set_ftrace_ops_ro(); | 
|---|
| 1430 |  | 
|---|
| 1431 | #ifdef CONFIG_CPA_DEBUG | 
|---|
| 1432 | printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, end); | 
|---|
| 1433 | set_memory_rw(start, (end-start) >> PAGE_SHIFT); | 
|---|
| 1434 |  | 
|---|
| 1435 | printk(KERN_INFO "Testing CPA: again\n"); | 
|---|
| 1436 | set_memory_ro(start, (end-start) >> PAGE_SHIFT); | 
|---|
| 1437 | #endif | 
|---|
| 1438 |  | 
|---|
| 1439 | free_kernel_image_pages("unused kernel image (text/rodata gap)", | 
|---|
| 1440 | (void *)text_end, (void *)rodata_start); | 
|---|
| 1441 | free_kernel_image_pages("unused kernel image (rodata/data gap)", | 
|---|
| 1442 | (void *)rodata_end, (void *)_sdata); | 
|---|
| 1443 | } | 
|---|
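
A worked example of the all_end computation in mark_rodata_ro() above: _brk_end is rounded up to the 2 MiB PMD that maps it, so set_memory_nx() covers the whole large page instead of forcing a split. The sample address below is hypothetical, and the roundup macro restates the kernel's semantics locally.

```c
#include <stdio.h>

#define DEMO_PMD_SIZE      (2UL << 20)                      /* 2 MiB */
#define DEMO_ROUNDUP(x, a) ((((x) + (a) - 1) / (a)) * (a))  /* same as kernel roundup() */

int main(void)
{
	unsigned long brk_end = 0xffffffff82a41000UL;  /* hypothetical _brk_end */
	unsigned long all_end = DEMO_ROUNDUP(brk_end, DEMO_PMD_SIZE);

	/* prints ..._brk_end=0xffffffff82a41000 -> all_end=0xffffffff82c00000 */
	printf("_brk_end=%#lx -> all_end=%#lx\n", brk_end, all_end);
	return 0;
}
```
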
| 1444 |  | 
|---|
| 1445 | /* | 
|---|
| 1446 | * Block size is the minimum amount of memory which can be hotplugged or | 
|---|
| 1447 | * hotremoved. It must be a power of two and must be equal to or larger than | 
|---|
| 1448 | * MIN_MEMORY_BLOCK_SIZE. | 
|---|
| 1449 | */ | 
|---|
| 1450 | #define MAX_BLOCK_SIZE (2UL << 30) | 
|---|
| 1451 |  | 
|---|
| 1452 | /* Amount of RAM needed to start using large blocks */ | 
|---|
| 1453 | #define MEM_SIZE_FOR_LARGE_BLOCK (64UL << 30) | 
|---|
| 1454 |  | 
|---|
| 1455 | /* Adjustable memory block size */ | 
|---|
| 1456 | static unsigned long set_memory_block_size; | 
|---|
| 1457 | int __init set_memory_block_size_order(unsigned int order) | 
|---|
| 1458 | { | 
|---|
| 1459 | unsigned long size = 1UL << order; | 
|---|
| 1460 |  | 
|---|
| 1461 | if (size > MEM_SIZE_FOR_LARGE_BLOCK || size < MIN_MEMORY_BLOCK_SIZE) | 
|---|
| 1462 | return -EINVAL; | 
|---|
| 1463 |  | 
|---|
| 1464 | set_memory_block_size = size; | 
|---|
| 1465 | return 0; | 
|---|
| 1466 | } | 
|---|
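
set_memory_block_size_order() above accepts only orders whose size lands in [MIN_MEMORY_BLOCK_SIZE, MEM_SIZE_FOR_LARGE_BLOCK]. On x86-64, MIN_MEMORY_BLOCK_SIZE is the 128 MiB section size, so only orders 27 through 36 pass; the sketch below restates both bounds as local constants under that assumption.

```c
#include <stdio.h>

#define DEMO_MIN_BLOCK (128UL << 20)   /* 128 MiB, assumed x86-64 section size */
#define DEMO_MAX_BLOCK (64UL << 30)    /* 64 GiB upper bound from above */

int main(void)
{
	for (unsigned int order = 20; order <= 40; order++) {
		unsigned long size = 1UL << order;

		/* mirrors the kernel check: reject too small or too large */
		if (size >= DEMO_MIN_BLOCK && size <= DEMO_MAX_BLOCK)
			printf("order %u -> %lu MiB: ok\n", order, size >> 20);
	}
	return 0;
}
```
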
| 1467 |  | 
|---|
| 1468 | static unsigned long probe_memory_block_size(void) | 
|---|
| 1469 | { | 
|---|
| 1470 | unsigned long boot_mem_end = max_pfn << PAGE_SHIFT; | 
|---|
| 1471 | unsigned long bz; | 
|---|
| 1472 |  | 
|---|
| 1473 | /* If memory block size has been set, then use it */ | 
|---|
| 1474 | bz = set_memory_block_size; | 
|---|
| 1475 | if (bz) | 
|---|
| 1476 | goto done; | 
|---|
| 1477 |  | 
|---|
| 1478 | /* Use regular block if RAM is smaller than MEM_SIZE_FOR_LARGE_BLOCK */ | 
|---|
| 1479 | if (boot_mem_end < MEM_SIZE_FOR_LARGE_BLOCK) { | 
|---|
| 1480 | bz = MIN_MEMORY_BLOCK_SIZE; | 
|---|
| 1481 | goto done; | 
|---|
| 1482 | } | 
|---|
| 1483 |  | 
|---|
| 1484 | /* | 
|---|
| 1485 | * When hotplug alignment is not a concern, maximize block size | 
|---|
| 1486 | * to minimize overhead. Otherwise, align to the lesser of the | 
|---|
| 1487 | * advised alignment and the end-of-memory alignment. | 
|---|
| 1488 | */ | 
|---|
| 1489 | bz = memory_block_advised_max_size(); | 
|---|
| 1490 | if (!bz) { | 
|---|
| 1491 | bz = MAX_BLOCK_SIZE; | 
|---|
| 1492 | if (!cpu_feature_enabled(X86_FEATURE_HYPERVISOR)) | 
|---|
| 1493 | goto done; | 
|---|
| 1494 | } else { | 
|---|
| 1495 | bz = max(min(bz, MAX_BLOCK_SIZE), MIN_MEMORY_BLOCK_SIZE); | 
|---|
| 1496 | } | 
|---|
| 1497 |  | 
|---|
| 1498 | /* Find the largest allowed block size that aligns to memory end */ | 
|---|
| 1499 | for (; bz > MIN_MEMORY_BLOCK_SIZE; bz >>= 1) { | 
|---|
| 1500 | if (IS_ALIGNED(boot_mem_end, bz)) | 
|---|
| 1501 | break; | 
|---|
| 1502 | } | 
|---|
| 1503 | done: | 
|---|
| 1504 | pr_info("x86/mm: Memory block size: %ldMB\n", bz >> 20); | 
|---|
| 1505 |  | 
|---|
| 1506 | return bz; | 
|---|
| 1507 | } | 
|---|
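
The final loop in probe_memory_block_size() above halves the candidate block size until it divides the end of boot memory, falling back to the minimum if nothing larger fits. A userspace sketch, with the 2 GiB/128 MiB constants restated locally (`%` is equivalent to IS_ALIGNED() for powers of two):

```c
#include <stdio.h>

#define DEMO_MIN_BLOCK (128UL << 20)   /* 128 MiB */
#define DEMO_MAX_BLOCK (2UL << 30)     /* 2 GiB   */

static unsigned long demo_probe(unsigned long boot_mem_end)
{
	unsigned long bz;

	/* largest power-of-two block that aligns to the end of memory */
	for (bz = DEMO_MAX_BLOCK; bz > DEMO_MIN_BLOCK; bz >>= 1) {
		if (boot_mem_end % bz == 0)
			break;
	}
	return bz;
}

int main(void)
{
	/* 65 GiB + 256 MiB of RAM: 2 GiB, 1 GiB and 512 MiB all fail; 256 MiB divides it. */
	unsigned long end = (65UL << 30) + (256UL << 20);

	printf("block size: %lu MiB\n", demo_probe(end) >> 20);
	return 0;
}
```
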
| 1508 |  | 
|---|
| 1509 | static unsigned long memory_block_size_probed; | 
|---|
| 1510 | unsigned long memory_block_size_bytes(void) | 
|---|
| 1511 | { | 
|---|
| 1512 | if (!memory_block_size_probed) | 
|---|
| 1513 | memory_block_size_probed = probe_memory_block_size(); | 
|---|
| 1514 |  | 
|---|
| 1515 | return memory_block_size_probed; | 
|---|
| 1516 | } | 
|---|
| 1517 |  | 
|---|
| 1518 | /* | 
|---|
| 1519 | * Initialise the sparsemem vmemmap using huge-pages at the PMD level. | 
|---|
| 1520 | */ | 
|---|
| 1521 | static long __meminitdata addr_start, addr_end; | 
|---|
| 1522 | static void __meminitdata *p_start, *p_end; | 
|---|
| 1523 | static int __meminitdata node_start; | 
|---|
| 1524 |  | 
|---|
| 1525 | void __meminit vmemmap_set_pmd(pmd_t *pmd, void *p, int node, | 
|---|
| 1526 | unsigned long addr, unsigned long next) | 
|---|
| 1527 | { | 
|---|
| 1528 | pte_t entry; | 
|---|
| 1529 |  | 
|---|
| 1530 | entry = pfn_pte(__pa(p) >> PAGE_SHIFT, | 
|---|
| 1531 | PAGE_KERNEL_LARGE); | 
|---|
| 1532 | set_pmd(pmd, __pmd(pte_val(entry))); | 
|---|
| 1533 |  | 
|---|
| 1534 | /* check to see if we have contiguous blocks */ | 
|---|
| 1535 | if (p_end != p || node_start != node) { | 
|---|
| 1536 | if (p_start) | 
|---|
| 1537 | pr_debug(" [%lx-%lx] PMD -> [%p-%p] on node %d\n", | 
|---|
| 1538 | addr_start, addr_end-1, p_start, p_end-1, node_start); | 
|---|
| 1539 | addr_start = addr; | 
|---|
| 1540 | node_start = node; | 
|---|
| 1541 | p_start = p; | 
|---|
| 1542 | } | 
|---|
| 1543 |  | 
|---|
| 1544 | addr_end = addr + PMD_SIZE; | 
|---|
| 1545 | p_end = p + PMD_SIZE; | 
|---|
| 1546 |  | 
|---|
| 1547 | if (!IS_ALIGNED(addr, PMD_SIZE) || | 
|---|
| 1548 | !IS_ALIGNED(next, PMD_SIZE)) | 
|---|
| 1549 | vmemmap_use_new_sub_pmd(addr, next); | 
|---|
| 1550 | } | 
|---|
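
vmemmap_set_pmd() above coalesces its debug output: it extends the current [p_start, p_end) run while each new chunk continues exactly where the previous one ended on the same node, and flushes one message when it does not. A minimal, purely illustrative sketch of that run-tracking:

```c
#include <stdio.h>
#include <stddef.h>

static char *p_start, *p_end;
static int node_start = -1;

static void demo_add_chunk(char *p, size_t len, int node)
{
	/* same test as the kernel code: discontiguous, or node changed? */
	if (p_end != p || node_start != node) {
		if (p_start)
			printf("run [%p-%p] on node %d\n",
			       (void *)p_start, (void *)(p_end - 1), node_start);
		p_start = p;
		node_start = node;
	}
	p_end = p + len;   /* always extend the current run */
}

int main(void)
{
	static char buf[4096];

	demo_add_chunk(buf, 1024, 0);          /* starts a run */
	demo_add_chunk(buf + 1024, 1024, 0);   /* contiguous: silently extends it */
	demo_add_chunk(buf + 3072, 1024, 0);   /* gap: flushes the first run */
	return 0;
}
```

vmemmap_populate_print_last() further below plays the role of the final flush, printing whatever run is still open once population finishes.
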
| 1551 |  | 
|---|
| 1552 | int __meminit vmemmap_check_pmd(pmd_t *pmd, int node, | 
|---|
| 1553 | unsigned long addr, unsigned long next) | 
|---|
| 1554 | { | 
|---|
| 1555 | int large = pmd_leaf(*pmd); | 
|---|
| 1556 |  | 
|---|
| 1557 | if (pmd_leaf(*pmd)) { | 
|---|
| 1558 | vmemmap_verify((pte_t *)pmd, node, addr, next); | 
|---|
| 1559 | vmemmap_use_sub_pmd(addr, next); | 
|---|
| 1560 | } | 
|---|
| 1561 |  | 
|---|
| 1562 | return large; | 
|---|
| 1563 | } | 
|---|
| 1564 |  | 
|---|
| 1565 | int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node, | 
|---|
| 1566 | struct vmem_altmap *altmap) | 
|---|
| 1567 | { | 
|---|
| 1568 | int err; | 
|---|
| 1569 |  | 
|---|
| 1570 | VM_BUG_ON(!PAGE_ALIGNED(start)); | 
|---|
| 1571 | VM_BUG_ON(!PAGE_ALIGNED(end)); | 
|---|
| 1572 |  | 
|---|
| 1573 | if (end - start < PAGES_PER_SECTION * sizeof(struct page)) | 
|---|
| 1574 | err = vmemmap_populate_basepages(start, end, node, NULL); | 
|---|
| 1575 | else if (boot_cpu_has(X86_FEATURE_PSE)) | 
|---|
| 1576 | err = vmemmap_populate_hugepages(start, end, node, altmap); | 
|---|
| 1577 | else if (altmap) { | 
|---|
| 1578 | pr_err_once("%s: no cpu support for altmap allocations\n", | 
|---|
| 1579 | __func__); | 
|---|
| 1580 | err = -ENOMEM; | 
|---|
| 1581 | } else | 
|---|
| 1582 | err = vmemmap_populate_basepages(start, end, node, NULL); | 
|---|
| 1583 | if (!err) | 
|---|
| 1584 | sync_global_pgds(start, end - 1); | 
|---|
| 1585 | return err; | 
|---|
| 1586 | } | 
|---|
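
The partial-section test in vmemmap_populate() above compares the range against one section's worth of memmap. A back-of-the-envelope check, assuming x86-64's typical 128 MiB sections, 4 KiB pages, and the common 64-byte struct page (the struct size is config-dependent, so treat it as an assumption):

```c
#include <stdio.h>

int main(void)
{
	unsigned long section_bytes  = 128UL << 20;         /* 128 MiB section */
	unsigned long pages_per_sec  = section_bytes >> 12; /* 32768 pages */
	unsigned long struct_page_sz = 64;                  /* assumed sizeof(struct page) */
	unsigned long memmap_bytes   = pages_per_sec * struct_page_sz;

	/* 32768 * 64 = 2 MiB: a full section's memmap fills exactly one PMD. */
	printf("memmap per section: %lu KiB\n", memmap_bytes >> 10);
	return 0;
}
```

Under those assumptions a full section's memmap is exactly one 2 MiB PMD, which is why anything smaller is mapped with base pages rather than a partially used huge page.
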
| 1587 |  | 
|---|
| 1588 | #ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE | 
|---|
| 1589 | void register_page_bootmem_memmap(unsigned long section_nr, | 
|---|
| 1590 | struct page *start_page, unsigned long nr_pages) | 
|---|
| 1591 | { | 
|---|
| 1592 | unsigned long addr = (unsigned long)start_page; | 
|---|
| 1593 | unsigned long end = (unsigned long)(start_page + nr_pages); | 
|---|
| 1594 | unsigned long next; | 
|---|
| 1595 | pgd_t *pgd; | 
|---|
| 1596 | p4d_t *p4d; | 
|---|
| 1597 | pud_t *pud; | 
|---|
| 1598 | pmd_t *pmd; | 
|---|
| 1599 | unsigned int nr_pmd_pages; | 
|---|
| 1600 | struct page *page; | 
|---|
| 1601 |  | 
|---|
| 1602 | for (; addr < end; addr = next) { | 
|---|
| 1603 | pte_t *pte = NULL; | 
|---|
| 1604 |  | 
|---|
| 1605 | pgd = pgd_offset_k(addr); | 
|---|
| 1606 | if (pgd_none(*pgd)) { | 
|---|
| 1607 | next = (addr + PAGE_SIZE) & PAGE_MASK; | 
|---|
| 1608 | continue; | 
|---|
| 1609 | } | 
|---|
| 1610 | get_page_bootmem(section_nr, pgd_page(*pgd), MIX_SECTION_INFO); | 
|---|
| 1611 |  | 
|---|
| 1612 | p4d = p4d_offset(pgd, addr); | 
|---|
| 1613 | if (p4d_none(*p4d)) { | 
|---|
| 1614 | next = (addr + PAGE_SIZE) & PAGE_MASK; | 
|---|
| 1615 | continue; | 
|---|
| 1616 | } | 
|---|
| 1617 | get_page_bootmem(section_nr, p4d_page(*p4d), MIX_SECTION_INFO); | 
|---|
| 1618 |  | 
|---|
| 1619 | pud = pud_offset(p4d, addr); | 
|---|
| 1620 | if (pud_none(*pud)) { | 
|---|
| 1621 | next = (addr + PAGE_SIZE) & PAGE_MASK; | 
|---|
| 1622 | continue; | 
|---|
| 1623 | } | 
|---|
| 1624 | get_page_bootmem(section_nr, pud_page(*pud), MIX_SECTION_INFO); | 
|---|
| 1625 |  | 
|---|
| 1626 | pmd = pmd_offset(pud, addr); | 
|---|
| 1627 | if (pmd_none(*pmd)) { | 
|---|
| 1628 | next = (addr + PAGE_SIZE) & PAGE_MASK; | 
|---|
| 1629 | continue; | 
|---|
| 1630 | } | 
|---|
| 1631 |  | 
|---|
| 1632 | if (!boot_cpu_has(X86_FEATURE_PSE) || !pmd_leaf(*pmd)) { | 
|---|
| 1633 | next = (addr + PAGE_SIZE) & PAGE_MASK; | 
|---|
| 1634 | get_page_bootmem(section_nr, pmd_page(*pmd), | 
|---|
| 1635 | MIX_SECTION_INFO); | 
|---|
| 1636 |  | 
|---|
| 1637 | pte = pte_offset_kernel(pmd, addr); | 
|---|
| 1638 | if (pte_none(*pte)) | 
|---|
| 1639 | continue; | 
|---|
| 1640 | get_page_bootmem(section_nr, pte_page(*pte), | 
|---|
| 1641 | SECTION_INFO); | 
|---|
| 1642 | } else { | 
|---|
| 1643 | next = pmd_addr_end(addr, end); | 
|---|
| 1644 | nr_pmd_pages = (next - addr) >> PAGE_SHIFT; | 
|---|
| 1645 | page = pmd_page(*pmd); | 
|---|
| 1646 | while (nr_pmd_pages--) | 
|---|
| 1647 | get_page_bootmem(section_nr, page++, | 
|---|
| 1648 | SECTION_INFO); | 
|---|
| 1649 | } | 
|---|
| 1650 | } | 
|---|
| 1651 | } | 
|---|
| 1652 | #endif | 
|---|
| 1653 |  | 
|---|
| 1654 | void __meminit vmemmap_populate_print_last(void) | 
|---|
| 1655 | { | 
|---|
| 1656 | if (p_start) { | 
|---|
| 1657 | pr_debug(" [%lx-%lx] PMD -> [%p-%p] on node %d\n", | 
|---|
| 1658 | addr_start, addr_end-1, p_start, p_end-1, node_start); | 
|---|
| 1659 | p_start = NULL; | 
|---|
| 1660 | p_end = NULL; | 
|---|
| 1661 | node_start = 0; | 
|---|
| 1662 | } | 
|---|
| 1663 | } | 
|---|
| 1664 |  | 
|---|