// SPDX-License-Identifier: GPL-2.0-only
/*
 * crash.c - kernel crash support code.
 * Copyright (C) 2002-2004 Eric Biederman <ebiederm@xmission.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/buildid.h>
#include <linux/init.h>
#include <linux/utsname.h>
#include <linux/vmalloc.h>
#include <linux/sizes.h>
#include <linux/kexec.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/cpuhotplug.h>
#include <linux/memblock.h>
#include <linux/kmemleak.h>
#include <linux/crash_core.h>
#include <linux/reboot.h>
#include <linux/btf.h>
#include <linux/objtool.h>
#include <linux/delay.h>
#include <linux/panic.h>

#include <asm/page.h>
#include <asm/sections.h>

#include <crypto/sha1.h>

#include "kallsyms_internal.h"
#include "kexec_internal.h"

/* Per cpu memory for storing cpu states in case of system crash. */
note_buf_t __percpu *crash_notes;

/*
 * Time to wait for possible DMA to finish before starting the kdump kernel
 * when a CMA reservation is used.
 */
#define CMA_DMA_TIMEOUT_SEC 10

#ifdef CONFIG_CRASH_DUMP

int kimage_crash_copy_vmcoreinfo(struct kimage *image)
{
	struct page *vmcoreinfo_page;
	void *safecopy;

	if (!IS_ENABLED(CONFIG_CRASH_DUMP))
		return 0;
	if (image->type != KEXEC_TYPE_CRASH)
		return 0;

	/*
	 * For kdump, allocate one vmcoreinfo safe copy from the
	 * crash memory. Because arch_kexec_protect_crashkres() is
	 * called after the kexec syscall, the copy is naturally
	 * protected from write (even read) access under the kernel
	 * direct mapping. On the other hand, it still has to be
	 * written when a crash happens, to generate the vmcoreinfo
	 * note, hence the reliance on vmap for this purpose.
	 */
	vmcoreinfo_page = kimage_alloc_control_pages(image, 0);
	if (!vmcoreinfo_page) {
		pr_warn("Could not allocate vmcoreinfo buffer\n");
		return -ENOMEM;
	}
	safecopy = vmap(&vmcoreinfo_page, 1, VM_MAP, PAGE_KERNEL);
	if (!safecopy) {
		pr_warn("Could not vmap vmcoreinfo buffer\n");
		return -ENOMEM;
	}

	image->vmcoreinfo_data_copy = safecopy;
	crash_update_vmcoreinfo_safecopy(safecopy);

	return 0;
}

int kexec_should_crash(struct task_struct *p)
{
	/*
	 * If crash_kexec_post_notifiers is enabled, don't run
	 * crash_kexec() here yet, which must be run after panic
	 * notifiers in panic().
	 */
	if (crash_kexec_post_notifiers)
		return 0;
	/*
	 * There are 4 panic() calls in make_task_dead() path, each of which
	 * corresponds to each of these 4 conditions.
	 */
	if (in_interrupt() || !p->pid || is_global_init(p) || panic_on_oops)
		return 1;
	return 0;
}

int kexec_crash_loaded(void)
{
	return !!kexec_crash_image;
}
EXPORT_SYMBOL_GPL(kexec_crash_loaded);

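/*
 * A CMA-backed crashkernel area may still be the target of in-flight DMA
 * set up by the first kernel. Give that DMA a fixed grace period to
 * finish so it cannot corrupt the capture kernel once it starts.
 */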
static void crash_cma_clear_pending_dma(void)
{
	if (!crashk_cma_cnt)
		return;

	mdelay(CMA_DMA_TIMEOUT_SEC * 1000);
}

/*
 * No panic_cpu check version of crash_kexec(). This function is called
 * only when panic_cpu holds the current CPU number; this is the only CPU
 * which processes crash_kexec routines.
 */
void __noclone __crash_kexec(struct pt_regs *regs)
{
	/*
	 * Take the kexec_lock here to prevent sys_kexec_load
	 * running on one cpu from replacing the crash kernel
	 * we are using after a panic on a different cpu.
	 *
	 * If the crash kernel was not located in a fixed area
	 * of memory the xchg(&kexec_crash_image) would be
	 * sufficient. But since I reuse the memory...
	 */
	if (kexec_trylock()) {
		if (kexec_crash_image) {
			struct pt_regs fixed_regs;

			crash_setup_regs(&fixed_regs, regs);
			crash_save_vmcoreinfo();
			machine_crash_shutdown(&fixed_regs);
			crash_cma_clear_pending_dma();
			machine_kexec(kexec_crash_image);
		}
		kexec_unlock();
	}
}
STACK_FRAME_NON_STANDARD(__crash_kexec);

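/* Annotated __bpf_kfunc so it can be exposed to BPF programs as a kfunc. */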
__bpf_kfunc void crash_kexec(struct pt_regs *regs)
{
	if (panic_try_start()) {
		/* This is the 1st CPU which comes here, so go ahead. */
		__crash_kexec(regs);

		/*
		 * Reset panic_cpu to allow another panic()/crash_kexec()
		 * call.
		 */
		panic_reset();
	}
}

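/*
 * An unreserved crash resource has start == end == 0; report its size as
 * 0 rather than the single byte resource_size() would compute for it.
 */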
static inline resource_size_t crash_resource_size(const struct resource *res)
{
	return !res->end ? 0 : resource_size(res);
}

int crash_prepare_elf64_headers(struct crash_mem *mem, int need_kernel_map,
				void **addr, unsigned long *sz)
{
	Elf64_Ehdr *ehdr;
	Elf64_Phdr *phdr;
	unsigned long nr_cpus = num_possible_cpus(), nr_phdr, elf_sz;
	unsigned char *buf;
	unsigned int cpu, i;
	unsigned long long notes_addr;
	unsigned long mstart, mend;

	/* extra phdr for vmcoreinfo ELF note */
	nr_phdr = nr_cpus + 1;
	nr_phdr += mem->nr_ranges;

	/*
	 * kexec-tools creates an extra PT_LOAD phdr for kernel text mapping
	 * area (for example, ffffffff80000000 - ffffffffa0000000 on x86_64).
	 * I think this is required by tools like gdb. So same physical
	 * memory will be mapped in two ELF headers. One will contain kernel
	 * text virtual addresses and other will have __va(physical) addresses.
	 */
	nr_phdr++;
	elf_sz = sizeof(Elf64_Ehdr) + nr_phdr * sizeof(Elf64_Phdr);
	elf_sz = ALIGN(elf_sz, ELF_CORE_HEADER_ALIGN);

	buf = vzalloc(elf_sz);
	if (!buf)
		return -ENOMEM;

	ehdr = (Elf64_Ehdr *)buf;
	phdr = (Elf64_Phdr *)(ehdr + 1);
	memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
	ehdr->e_ident[EI_CLASS] = ELFCLASS64;
	ehdr->e_ident[EI_DATA] = ELFDATA2LSB;
	ehdr->e_ident[EI_VERSION] = EV_CURRENT;
	ehdr->e_ident[EI_OSABI] = ELF_OSABI;
	memset(ehdr->e_ident + EI_PAD, 0, EI_NIDENT - EI_PAD);
	ehdr->e_type = ET_CORE;
	ehdr->e_machine = ELF_ARCH;
	ehdr->e_version = EV_CURRENT;
	ehdr->e_phoff = sizeof(Elf64_Ehdr);
	ehdr->e_ehsize = sizeof(Elf64_Ehdr);
	ehdr->e_phentsize = sizeof(Elf64_Phdr);

	/* Prepare one phdr of type PT_NOTE for each possible CPU */
	for_each_possible_cpu(cpu) {
		phdr->p_type = PT_NOTE;
		notes_addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpu));
		phdr->p_offset = phdr->p_paddr = notes_addr;
		phdr->p_filesz = phdr->p_memsz = sizeof(note_buf_t);
		(ehdr->e_phnum)++;
		phdr++;
	}

	/* Prepare one PT_NOTE header for vmcoreinfo */
	phdr->p_type = PT_NOTE;
	phdr->p_offset = phdr->p_paddr = paddr_vmcoreinfo_note();
	phdr->p_filesz = phdr->p_memsz = VMCOREINFO_NOTE_SIZE;
	(ehdr->e_phnum)++;
	phdr++;

	/* Prepare PT_LOAD type program header for kernel text region */
	if (need_kernel_map) {
		phdr->p_type = PT_LOAD;
		phdr->p_flags = PF_R|PF_W|PF_X;
		phdr->p_vaddr = (unsigned long) _text;
		phdr->p_filesz = phdr->p_memsz = _end - _text;
		phdr->p_offset = phdr->p_paddr = __pa_symbol(_text);
		ehdr->e_phnum++;
		phdr++;
	}

	/* Go through all the ranges in mem->ranges[] and prepare phdr */
	for (i = 0; i < mem->nr_ranges; i++) {
		mstart = mem->ranges[i].start;
		mend = mem->ranges[i].end;

		phdr->p_type = PT_LOAD;
		phdr->p_flags = PF_R|PF_W|PF_X;
		phdr->p_offset = mstart;

		phdr->p_paddr = mstart;
		phdr->p_vaddr = (unsigned long) __va(mstart);
		phdr->p_filesz = phdr->p_memsz = mend - mstart + 1;
		phdr->p_align = 0;
		ehdr->e_phnum++;
#ifdef CONFIG_KEXEC_FILE
		kexec_dprintk("Crash PT_LOAD ELF header. phdr=%p vaddr=0x%llx, paddr=0x%llx, sz=0x%llx e_phnum=%d p_offset=0x%llx\n",
			      phdr, phdr->p_vaddr, phdr->p_paddr, phdr->p_filesz,
			      ehdr->e_phnum, phdr->p_offset);
#endif
		phdr++;
	}

	*addr = buf;
	*sz = elf_sz;
	return 0;
}

/**
 * crash_exclude_mem_range - exclude a mem range from existing ranges
 * @mem: mem->ranges contains an array of ranges sorted in ascending order
 * @mstart: the start of the to-be-excluded range
 * @mend: the end of the to-be-excluded range
 *
 * If you are unsure whether a range split will happen, to avoid function
 * call failure because of -ENOMEM, always make sure
 * mem->max_nr_ranges == mem->nr_ranges + 1
 * before calling the function each time.
 *
 * Returns 0 if a memory range is excluded successfully,
 * or -ENOMEM if mem->ranges doesn't have space to hold split ranges.
 */
int crash_exclude_mem_range(struct crash_mem *mem,
			    unsigned long long mstart, unsigned long long mend)
{
	int i;
	unsigned long long start, end, p_start, p_end;

	for (i = 0; i < mem->nr_ranges; i++) {
		start = mem->ranges[i].start;
		end = mem->ranges[i].end;
		p_start = mstart;
		p_end = mend;

		if (p_start > end)
			continue;

		/*
		 * Because the memory ranges in mem->ranges are stored in
		 * ascending order, when we detect `p_end < start`, we can
		 * immediately exit the for loop, as the subsequent memory
		 * ranges will definitely be outside the range we are looking
		 * for.
		 */
		if (p_end < start)
			break;

		/* Truncate any area outside of range */
		if (p_start < start)
			p_start = start;
		if (p_end > end)
			p_end = end;

		/* Found completely overlapping range */
		if (p_start == start && p_end == end) {
			memmove(&mem->ranges[i], &mem->ranges[i + 1],
				(mem->nr_ranges - (i + 1)) * sizeof(mem->ranges[i]));
			i--;
			mem->nr_ranges--;
		} else if (p_start > start && p_end < end) {
			/* Split original range */
			if (mem->nr_ranges >= mem->max_nr_ranges)
				return -ENOMEM;

			memmove(&mem->ranges[i + 2], &mem->ranges[i + 1],
				(mem->nr_ranges - (i + 1)) * sizeof(mem->ranges[i]));

			mem->ranges[i].end = p_start - 1;
			mem->ranges[i + 1].start = p_end + 1;
			mem->ranges[i + 1].end = end;

			i++;
			mem->nr_ranges++;
		} else if (p_start != start)
			mem->ranges[i].end = p_start - 1;
		else
			mem->ranges[i].start = p_end + 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(crash_exclude_mem_range);

ssize_t crash_get_memory_size(void)
{
	ssize_t size = 0;

	if (!kexec_trylock())
		return -EBUSY;

	size += crash_resource_size(&crashk_res);
	size += crash_resource_size(&crashk_low_res);

	kexec_unlock();
	return size;
}

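/*
 * Shrink @old_res to @new_size bytes and hand the freed tail back to the
 * system as regular RAM. A @new_size of 0 releases the whole resource.
 */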
static int __crash_shrink_memory(struct resource *old_res,
				 unsigned long new_size)
{
	struct resource *ram_res;

	ram_res = kzalloc(sizeof(*ram_res), GFP_KERNEL);
	if (!ram_res)
		return -ENOMEM;

	ram_res->start = old_res->start + new_size;
	ram_res->end = old_res->end;
	ram_res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;
	ram_res->name = "System RAM";

	if (!new_size) {
		release_resource(old_res);
		old_res->start = 0;
		old_res->end = 0;
	} else {
		crashk_res.end = ram_res->start - 1;
	}

	crash_free_reserved_phys_range(ram_res->start, ram_res->end);
	insert_resource(&iomem_resource, ram_res);

	return 0;
}

int crash_shrink_memory(unsigned long new_size)
{
	int ret = 0;
	unsigned long old_size, low_size;

	if (!kexec_trylock())
		return -EBUSY;

	if (kexec_crash_image) {
		ret = -ENOENT;
		goto unlock;
	}

	low_size = crash_resource_size(&crashk_low_res);
	old_size = crash_resource_size(&crashk_res) + low_size;
	new_size = roundup(new_size, KEXEC_CRASH_MEM_ALIGN);
	if (new_size >= old_size) {
		ret = (new_size == old_size) ? 0 : -EINVAL;
		goto unlock;
	}

	/*
	 * (low_size > new_size) implies that low_size is greater than zero.
	 * This also means that if low_size is zero, the else branch is taken.
	 *
	 * If low_size is greater than 0, (low_size > new_size) indicates that
	 * crashk_low_res also needs to be shrunk. Otherwise, only crashk_res
	 * needs to be shrunk.
	 */
	if (low_size > new_size) {
		ret = __crash_shrink_memory(&crashk_res, 0);
		if (ret)
			goto unlock;

		ret = __crash_shrink_memory(&crashk_low_res, new_size);
	} else {
		ret = __crash_shrink_memory(&crashk_res, new_size - low_size);
	}

	/* Swap crashk_res and crashk_low_res if needed */
	if (!crashk_res.end && crashk_low_res.end) {
		crashk_res.start = crashk_low_res.start;
		crashk_res.end = crashk_low_res.end;
		release_resource(&crashk_low_res);
		crashk_low_res.start = 0;
		crashk_low_res.end = 0;
		insert_resource(&iomem_resource, &crashk_res);
	}

unlock:
	kexec_unlock();
	return ret;
}

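/*
 * Save the register state of @cpu into its per-cpu crash note as an
 * NT_PRSTATUS ELF note, terminated by an empty note.
 */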
void crash_save_cpu(struct pt_regs *regs, int cpu)
{
	struct elf_prstatus prstatus;
	u32 *buf;

	if ((cpu < 0) || (cpu >= nr_cpu_ids))
		return;

	/*
	 * Using ELF notes here is opportunistic.
	 * I need a well defined structure format
	 * for the data I pass, and I need tags
	 * on the data to indicate what information I have
	 * squirrelled away. ELF notes happen to provide
	 * all of that, so there is no need to invent something new.
	 */
	buf = (u32 *)per_cpu_ptr(crash_notes, cpu);
	if (!buf)
		return;
	memset(&prstatus, 0, sizeof(prstatus));
	prstatus.common.pr_pid = current->pid;
	elf_core_copy_regs(&prstatus.pr_reg, regs);
	buf = append_elf_note(buf, NN_PRSTATUS, NT_PRSTATUS,
			      &prstatus, sizeof(prstatus));
	final_note(buf);
}

static int __init crash_notes_memory_init(void)
{
	/* Allocate memory for saving cpu registers. */
	size_t size, align;

	/*
	 * crash_notes could be allocated across 2 vmalloc pages when percpu
	 * is vmalloc based. vmalloc doesn't guarantee that 2 continuous
	 * vmalloc pages are also on 2 continuous physical pages. In that
	 * case the 2nd half of crash_notes in the 2nd page could be lost,
	 * since only the starting address and size of crash_notes are
	 * exported through sysfs. Here, round up the size of crash_notes
	 * to the nearest power of two and pass it to __alloc_percpu as the
	 * align value. This makes sure crash_notes is allocated inside one
	 * physical page.
	 */
	size = sizeof(note_buf_t);
	align = min(roundup_pow_of_two(sizeof(note_buf_t)), PAGE_SIZE);

	/*
	 * Break compile if size is bigger than PAGE_SIZE since crash_notes
	 * definitely will be in 2 pages with that.
	 */
	BUILD_BUG_ON(size > PAGE_SIZE);

	crash_notes = __alloc_percpu(size, align);
	if (!crash_notes) {
		pr_warn("Memory allocation for saving cpu register states failed\n");
		return -ENOMEM;
	}
	return 0;
}
subsys_initcall(crash_notes_memory_init);

#endif /* CONFIG_CRASH_DUMP */

#ifdef CONFIG_CRASH_HOTPLUG
#undef pr_fmt
#define pr_fmt(fmt) "crash hp: " fmt

/*
 * Unlike kexec/kdump loading/unloading/jumping/shrinking, which rarely
 * happen, many crash hotplug events may be notified during one short
 * period, e.g. when one memory board is hot added and its memory regions
 * come online. So the mutex __crash_hotplug_lock is used to serialize
 * the crash hotplug handling specifically.
 */
static DEFINE_MUTEX(__crash_hotplug_lock);
#define crash_hotplug_lock() mutex_lock(&__crash_hotplug_lock)
#define crash_hotplug_unlock() mutex_unlock(&__crash_hotplug_lock)

/*
 * This routine is utilized when the crash_hotplug sysfs node is read.
 * It reflects the kernel's ability/permission to update the kdump
 * image directly.
 */
int crash_check_hotplug_support(void)
{
	int rc = 0;

	crash_hotplug_lock();
	/* Obtain lock while reading crash information */
	if (!kexec_trylock()) {
		if (!kexec_in_progress)
			pr_info("kexec_trylock() failed, kdump image may be inaccurate\n");
		crash_hotplug_unlock();
		return 0;
	}
	if (kexec_crash_image)
		rc = kexec_crash_image->hotplug_support;
	/* Release lock now that read is complete */
	kexec_unlock();
	crash_hotplug_unlock();

	return rc;
}

/*
 * To accurately reflect hot un/plug changes of CPU and Memory resources
 * (including onlining and offlining of those resources), the relevant
 * kexec segments must be updated with the latest CPU and Memory resources.
 *
 * Architectures must ensure two things for all segments that need
 * updating during hotplug events:
 *
 * 1. Segments must be large enough to accommodate a growing number of
 *    resources.
 * 2. Exclude the segments from SHA verification.
 *
 * For example, on most architectures, the elfcorehdr (which is passed
 * to the crash kernel via the elfcorehdr= parameter) must include the
 * new list of CPUs and memory. To make changes to the elfcorehdr, it
 * should be large enough to permit a growing number of CPU and Memory
 * resources. One can estimate the elfcorehdr memory size based on
 * NR_CPUS_DEFAULT and CRASH_MAX_MEMORY_RANGES. The elfcorehdr is
 * excluded from SHA verification by default if the architecture
 * supports crash hotplug.
 */
static void crash_handle_hotplug_event(unsigned int hp_action, unsigned int cpu, void *arg)
{
	struct kimage *image;

	crash_hotplug_lock();
	/* Obtain lock while changing crash information */
	if (!kexec_trylock()) {
		if (!kexec_in_progress)
			pr_info("kexec_trylock() failed, kdump image may be inaccurate\n");
		crash_hotplug_unlock();
		return;
	}

	/* Nothing to do if no kdump image is loaded */
	if (!kexec_crash_image)
		goto out;

	image = kexec_crash_image;

	/* Check that kexec segments update is permitted */
	if (!image->hotplug_support)
		goto out;

	if (hp_action == KEXEC_CRASH_HP_ADD_CPU ||
	    hp_action == KEXEC_CRASH_HP_REMOVE_CPU)
		pr_debug("hp_action %u, cpu %u\n", hp_action, cpu);
	else
		pr_debug("hp_action %u\n", hp_action);

	/*
	 * The elfcorehdr_index is set to -1 when the struct kimage
	 * is allocated. Find the segment containing the elfcorehdr,
	 * if not already found.
	 */
	if (image->elfcorehdr_index < 0) {
		unsigned long mem;
		unsigned char *ptr;
		unsigned int n;

		for (n = 0; n < image->nr_segments; n++) {
			mem = image->segment[n].mem;
			ptr = kmap_local_page(pfn_to_page(mem >> PAGE_SHIFT));
			if (ptr) {
				/* The segment containing elfcorehdr */
				if (memcmp(ptr, ELFMAG, SELFMAG) == 0)
					image->elfcorehdr_index = (int)n;
				kunmap_local(ptr);
			}
		}
	}

	if (image->elfcorehdr_index < 0) {
		pr_err("unable to locate elfcorehdr segment\n");
		goto out;
	}

	/* Needed in order for the segments to be updated */
	arch_kexec_unprotect_crashkres();

	/* Differentiate between normal load and hotplug update */
	image->hp_action = hp_action;

	/* Now invoke arch-specific update handler */
	arch_crash_handle_hotplug_event(image, arg);

	/* No longer handling a hotplug event */
	image->hp_action = KEXEC_CRASH_HP_NONE;
	image->elfcorehdr_updated = true;

	/* Change back to read-only */
	arch_kexec_protect_crashkres();

	/* Errors in the callback are not a reason to roll back state */
out:
	/* Release lock now that update is complete */
	kexec_unlock();
	crash_hotplug_unlock();
}

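/* Translate memory hotplug notifications into crash hotplug events. */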
static int crash_memhp_notifier(struct notifier_block *nb, unsigned long val, void *arg)
{
	switch (val) {
	case MEM_ONLINE:
		crash_handle_hotplug_event(KEXEC_CRASH_HP_ADD_MEMORY,
					   KEXEC_CRASH_HP_INVALID_CPU, arg);
		break;

	case MEM_OFFLINE:
		crash_handle_hotplug_event(KEXEC_CRASH_HP_REMOVE_MEMORY,
					   KEXEC_CRASH_HP_INVALID_CPU, arg);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block crash_memhp_nb = {
	.notifier_call = crash_memhp_notifier,
	.priority = 0
};

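/* CPU hotplug callbacks: update the kdump image as CPUs come and go. */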
static int crash_cpuhp_online(unsigned int cpu)
{
	crash_handle_hotplug_event(KEXEC_CRASH_HP_ADD_CPU, cpu, NULL);
	return 0;
}

static int crash_cpuhp_offline(unsigned int cpu)
{
	crash_handle_hotplug_event(KEXEC_CRASH_HP_REMOVE_CPU, cpu, NULL);
	return 0;
}

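/* Register for memory and CPU hotplug notifications when configured. */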
static int __init crash_hotplug_init(void)
{
	int result = 0;

	if (IS_ENABLED(CONFIG_MEMORY_HOTPLUG))
		register_memory_notifier(&crash_memhp_nb);

	if (IS_ENABLED(CONFIG_HOTPLUG_CPU)) {
		result = cpuhp_setup_state_nocalls(CPUHP_BP_PREPARE_DYN,
						   "crash/cpuhp",
						   crash_cpuhp_online,
						   crash_cpuhp_offline);
	}

	return result;
}

subsys_initcall(crash_hotplug_init);
#endif /* CONFIG_CRASH_HOTPLUG */