// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/percpu.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <linux/pfn.h>
#include <linux/stackprotector.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/desc.h>
#include <asm/setup.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#include <asm/highmem.h>
#include <asm/proto.h>
#include <asm/cpumask.h>
#include <asm/cpu.h>

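/*
 * Cache-hot per-CPU bookkeeping: cpu_number caches this CPU's id (it backs
 * smp_processor_id() on x86) and this_cpu_off holds the CPU's offset from
 * the percpu base, which the this_cpu_*() accessors add to a percpu
 * address to reach this CPU's copy.
 */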
DEFINE_PER_CPU_CACHE_HOT(int, cpu_number);
EXPORT_PER_CPU_SYMBOL(cpu_number);

DEFINE_PER_CPU_CACHE_HOT(unsigned long, this_cpu_off);
EXPORT_PER_CPU_SYMBOL(this_cpu_off);

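/*
 * Offset of each possible CPU's percpu area from the percpu base, indexed
 * by CPU number; per_cpu_offset(cpu) resolves to this array.  It is filled
 * once in setup_per_cpu_areas() below and never modified again, hence
 * __ro_after_init.
 */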
unsigned long __per_cpu_offset[NR_CPUS] __ro_after_init;
EXPORT_SYMBOL(__per_cpu_offset);

/*
 * On x86_64 symbols referenced from code should be reachable using
 * 32bit relocations.  Reserve space for static percpu variables in
 * modules so that they are always served from the first chunk which
 * is located at the percpu segment base.  On x86_32, anything can
 * address anywhere.  No need to reserve space in the first chunk.
 */
#ifdef CONFIG_X86_64
#define PERCPU_FIRST_CHUNK_RESERVE	PERCPU_MODULE_RESERVE
#else
#define PERCPU_FIRST_CHUNK_RESERVE	0
#endif
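/*
 * Note: static percpu variables in modules are then carved out of this
 * reserved region at module load time (via __alloc_reserved_percpu());
 * if the reserve is exhausted, the module fails to load.
 */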

#ifdef CONFIG_X86_32
/**
 * pcpu_need_numa - determine whether percpu allocation needs to consider NUMA
 *
 * If NUMA is not configured or there is only one NUMA node available,
 * there is no reason to consider NUMA.  This function determines
 * whether percpu allocation should consider NUMA or not.
 *
 * RETURNS:
 * true if NUMA should be considered; otherwise, false.
 */
static bool __init pcpu_need_numa(void)
{
#ifdef CONFIG_NUMA
	pg_data_t *last = NULL;
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		int node = early_cpu_to_node(cpu);

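		/* Two distinct online pgdats => CPUs span multiple nodes. */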
		if (node_online(node) && NODE_DATA(node) &&
		    last && last != NODE_DATA(node))
			return true;

		last = NODE_DATA(node);
	}
#endif
	return false;
}
#endif
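/*
 * CPU distance callback for first-chunk setup: the percpu allocator uses
 * it to decide which CPUs are close enough to share an allocation group.
 * LOCAL_DISTANCE and REMOTE_DISTANCE are the generic SLIT-style distance
 * values from <linux/topology.h>.
 */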
static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
{
#ifdef CONFIG_NUMA
	if (early_cpu_to_node(from) == early_cpu_to_node(to))
		return LOCAL_DISTANCE;
	else
		return REMOTE_DISTANCE;
#else
	return LOCAL_DISTANCE;
#endif
}

static int __init pcpu_cpu_to_node(int cpu)
{
	return early_cpu_to_node(cpu);
}
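/*
 * Arch hook called by the generic page-first-chunk code: make sure the
 * kernel page tables can hold a PTE for @addr before the corresponding
 * percpu page is mapped there.
 */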
void __init pcpu_populate_pte(unsigned long addr)
{
	populate_extra_pte(addr);
}
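/*
 * On 32-bit, percpu data is reached through the %fs segment: install a
 * writable data segment whose base is this CPU's percpu offset into the
 * CPU's GDT.  64-bit addresses percpu data via the GS base MSR instead,
 * so this is a no-op there.
 */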
static inline void setup_percpu_segment(int cpu)
{
#ifdef CONFIG_X86_32
	struct desc_struct d = GDT_ENTRY_INIT(DESC_DATA32,
					      per_cpu_offset(cpu), 0xFFFFF);

	write_gdt_entry(get_cpu_gdt_rw(cpu), GDT_ENTRY_PERCPU, &d, DESCTYPE_S);
#endif
}
void __init setup_per_cpu_areas(void)
{
	unsigned int cpu;
	unsigned long delta;
	int rc;

	pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%u nr_node_ids:%u\n",
		NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);

	/*
	 * Allocate percpu area.  Embedding allocator is our favorite;
	 * however, on NUMA configurations, it can result in very
	 * sparse unit mapping and vmalloc area isn't spacious enough
	 * on 32bit.  Use page in that case.
	 */
#ifdef CONFIG_X86_32
	if (pcpu_chosen_fc == PCPU_FC_AUTO && pcpu_need_numa())
		pcpu_chosen_fc = PCPU_FC_PAGE;
#endif
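	/* pcpu_chosen_fc reflects the percpu_alloc= boot parameter, if set. */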
	rc = -EINVAL;
	if (pcpu_chosen_fc != PCPU_FC_PAGE) {
		const size_t dyn_size = PERCPU_MODULE_RESERVE +
			PERCPU_DYNAMIC_RESERVE - PERCPU_FIRST_CHUNK_RESERVE;
		size_t atom_size;
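		/*
		 * dyn_size arithmetic: on 64-bit PERCPU_FIRST_CHUNK_RESERVE
		 * equals PERCPU_MODULE_RESERVE, so module space lives in the
		 * reserved region and only PERCPU_DYNAMIC_RESERVE stays
		 * dynamic; on 32-bit the reserve is 0 and module allocations
		 * come out of the dynamic area instead.
		 */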
		/*
		 * On 64bit, use PMD_SIZE for atom_size so that embedded
		 * percpu areas are aligned to PMD.  This, in the future,
		 * can also allow using PMD mappings in vmalloc area.  Use
		 * PAGE_SIZE on 32bit as vmalloc space is highly contended
		 * and large vmalloc area allocs can easily fail.
		 */
#ifdef CONFIG_X86_64
		atom_size = PMD_SIZE;
#else
		atom_size = PAGE_SIZE;
#endif
		rc = pcpu_embed_first_chunk(PERCPU_FIRST_CHUNK_RESERVE,
					    dyn_size, atom_size,
					    pcpu_cpu_distance,
					    pcpu_cpu_to_node);
		if (rc < 0)
			pr_warn("%s allocator failed (%d), falling back to page size\n",
				pcpu_fc_names[pcpu_chosen_fc], rc);
	}
	if (rc < 0)
		rc = pcpu_page_first_chunk(PERCPU_FIRST_CHUNK_RESERVE,
					   pcpu_cpu_to_node);
	if (rc < 0)
		panic("cannot initialize percpu area (err=%d)", rc);

	/* alrighty, percpu areas up and running */
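	/*
	 * pcpu_base_addr is where the allocator actually placed the first
	 * chunk, while __per_cpu_start is the link-time address of the
	 * .data..percpu template.  delta rebases a link-time percpu address
	 * to the runtime area; each CPU then adds its own unit offset.
	 */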
	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu) {
		per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
		per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
		per_cpu(cpu_number, cpu) = cpu;
		setup_percpu_segment(cpu);
		/*
		 * Copy data used in early init routines from the
		 * initial arrays to the per cpu data areas.  These
		 * arrays then become expendable and the *_early_ptr's
		 * are zeroed indicating that the static arrays are
		 * gone.
		 */
#ifdef CONFIG_X86_LOCAL_APIC
		per_cpu(x86_cpu_to_apicid, cpu) =
			early_per_cpu_map(x86_cpu_to_apicid, cpu);
		per_cpu(x86_cpu_to_acpiid, cpu) =
			early_per_cpu_map(x86_cpu_to_acpiid, cpu);
#endif
#ifdef CONFIG_NUMA
		per_cpu(x86_cpu_to_node_map, cpu) =
			early_per_cpu_map(x86_cpu_to_node_map, cpu);
		/*
		 * Ensure that the boot cpu numa_node is correct when the boot
		 * cpu is on a node that doesn't have memory installed.
		 * Also cpu_up() will call cpu_to_node() for APs when
		 * MEMORY_HOTPLUG is defined, before per_cpu(numa_node) is set
		 * up later with c_init aka intel_init/amd_init.
		 * So set them all (boot cpu and all APs).
		 */
		set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
#endif
		/*
		 * Up to this point, the boot CPU has been using .init.data
		 * area.  Reload any changed state for the boot CPU.
		 */
		if (!cpu)
			switch_gdt_and_percpu_base(cpu);
	}

	/* indicate the early static arrays will soon be gone */
#ifdef CONFIG_X86_LOCAL_APIC
	early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
	early_per_cpu_ptr(x86_cpu_to_acpiid) = NULL;
#endif
#ifdef CONFIG_NUMA
	early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif
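	/*
	 * early_per_cpu() tests these pointers; once they are NULL it reads
	 * the real percpu copies populated in the loop above.
	 */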

	/* Setup node to cpumask map */
	setup_node_to_cpumask_map();

	/* Setup cpu initialized, callin, callout masks */
	setup_cpu_local_masks();

	/*
	 * Sync back kernel address range again.  We already did this in
	 * setup_arch(), but percpu data also needs to be available in
	 * the smpboot asm and arch_sync_kernel_mappings() doesn't sync to
	 * swapper_pg_dir on 32-bit.  The per-cpu mappings need to be
	 * available there too.
	 *
	 * FIXME: Can the later sync in setup_cpu_entry_areas() replace
	 * this call?
	 */
	sync_initial_page_table();
}