// SPDX-License-Identifier: GPL-2.0-only
#include <linux/export.h>
#include <linux/bitops.h>
#include <linux/elf.h>
#include <linux/mm.h>

#include <linux/io.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/random.h>
#include <linux/topology.h>
#include <linux/platform_data/x86/amd-fch.h>
#include <asm/processor.h>
#include <asm/apic.h>
#include <asm/cacheinfo.h>
#include <asm/cpu.h>
#include <asm/cpu_device_id.h>
#include <asm/spec-ctrl.h>
#include <asm/smp.h>
#include <asm/numa.h>
#include <asm/pci-direct.h>
#include <asm/delay.h>
#include <asm/debugreg.h>
#include <asm/resctrl.h>
#include <asm/msr.h>
#include <asm/sev.h>

#ifdef CONFIG_X86_64
# include <asm/mmconfig.h>
#endif

#include "cpu.h"

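/*
 * Max number of pages INVLPGB can invalidate in one shot. Refined from
 * CPUID leaf 0x80000008 EDX in cpu_detect_tlb_amd() below.
 */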
u16 invlpgb_count_max __ro_after_init = 1;

static inline int rdmsrq_amd_safe(unsigned msr, u64 *p)
{
        u32 gprs[8] = { 0 };
        int err;

        WARN_ONCE((boot_cpu_data.x86 != 0xf),
                  "%s should only be used on K8!\n", __func__);

        gprs[1] = msr;
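        /*
         * 0x9c5a203a is the magic "password" value AMD requires in EDI
         * to unlock access to certain K8-only MSRs.
         */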
        gprs[7] = 0x9c5a203a;

        err = rdmsr_safe_regs(gprs);

        *p = gprs[0] | ((u64)gprs[2] << 32);

        return err;
}

static inline int wrmsrq_amd_safe(unsigned msr, u64 val)
{
        u32 gprs[8] = { 0 };

        WARN_ONCE((boot_cpu_data.x86 != 0xf),
                  "%s should only be used on K8!\n", __func__);

        gprs[0] = (u32)val;
        gprs[1] = msr;
        gprs[2] = val >> 32;
        gprs[7] = 0x9c5a203a;

        return wrmsr_safe_regs(gprs);
}

/*
 * B step AMD K6 before B 9730xxxx have hardware bugs that can cause
 * misexecution of code under Linux. Owners of such processors should
 * contact AMD for precise details and a CPU swap.
 *
 * See	http://www.multimania.com/poulot/k6bug.html
 * and	section 2.6.2 of "AMD-K6 Processor Revision Guide - Model 6"
 *	(Publication # 21266  Issue Date: August 1998)
 *
 * The following test is erm.. interesting. AMD neglected to up
 * the chip setting when fixing the bug but they also tweaked some
 * performance at the same time..
 */

#ifdef CONFIG_X86_32
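/*
 * vide() is an intentionally empty function which serves only as an
 * indirect-call target for the erratum timing check in init_amd_k6().
 */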
extern __visible void vide(void);
__asm__(".text\n"
        ".globl vide\n"
        ".type vide, @function\n"
        ".align 4\n"
        "vide: ret\n");
#endif

static void init_amd_k5(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
        /*
         * General Systems BIOSen alias the cpu frequency registers
         * of the Elan at 0x000df000. Unfortunately, one of the Linux
         * drivers subsequently pokes it, and changes the CPU speed.
         * Workaround : Remove the unneeded alias.
         */
#define CBAR		(0xfffc) /* Configuration Base Address  (32-bit) */
#define CBAR_ENB	(0x80000000)
#define CBAR_KEY	(0X000000CB)
        if (c->x86_model == 9 || c->x86_model == 10) {
                if (inl(CBAR) & CBAR_ENB)
                        outl(0 | CBAR_KEY, CBAR);
        }
#endif
}

static void init_amd_k6(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
        u32 l, h;
        int mbytes = get_num_physpages() >> (20-PAGE_SHIFT);

        if (c->x86_model < 6) {
                /* Based on AMD doc 20734R - June 2000 */
                if (c->x86_model == 0) {
                        clear_cpu_cap(c, X86_FEATURE_APIC);
                        set_cpu_cap(c, X86_FEATURE_PGE);
                }
                return;
        }

        if (c->x86_model == 6 && c->x86_stepping == 1) {
                const int K6_BUG_LOOP = 1000000;
                int n;
                void (*f_vide)(void);
                u64 d, d2;

                pr_info("AMD K6 stepping B detected - ");

                /*
                 * It looks like AMD fixed the 2.6.2 bug and improved indirect
                 * calls at the same time.
                 */

                n = K6_BUG_LOOP;
                f_vide = vide;
                OPTIMIZER_HIDE_VAR(f_vide);
                d = rdtsc();
                while (n--)
                        f_vide();
                d2 = rdtsc();
                d = d2-d;

                if (d > 20*K6_BUG_LOOP)
                        pr_cont("system stability may be impaired when more than 32 MB are used.\n");
                else
                        pr_cont("probably OK (after B9730xxxx).\n");
        }

        /* K6 with old style WHCR */
        if (c->x86_model < 8 ||
           (c->x86_model == 8 && c->x86_stepping < 8)) {
                /* We can only write allocate on the low 508Mb */
                if (mbytes > 508)
                        mbytes = 508;

                rdmsr(MSR_K6_WHCR, l, h);
                if ((l&0x0000FFFF) == 0) {
                        unsigned long flags;
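                        /*
                         * Old-style WHCR lives in the low 16 bits: bit 0
                         * enables write allocation and the limit, in 4 MB
                         * units, is encoded starting at bit 1.
                         */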
                        l = (1<<0)|((mbytes/4)<<1);
                        local_irq_save(flags);
                        wbinvd();
                        wrmsr(MSR_K6_WHCR, l, h);
                        local_irq_restore(flags);
                        pr_info("Enabling old style K6 write allocation for %d Mb\n",
                                mbytes);
                }
                return;
        }

        if ((c->x86_model == 8 && c->x86_stepping > 7) ||
             c->x86_model == 9 || c->x86_model == 13) {
                /* The more serious chips .. */

                if (mbytes > 4092)
                        mbytes = 4092;

                rdmsr(MSR_K6_WHCR, l, h);
                if ((l&0xFFFF0000) == 0) {
                        unsigned long flags;
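                        /*
                         * New-style WHCR lives in the high bits: bit 16
                         * enables write allocation and the limit, in 4 MB
                         * units, is encoded starting at bit 22.
                         */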
                        l = ((mbytes>>2)<<22)|(1<<16);
                        local_irq_save(flags);
                        wbinvd();
                        wrmsr(MSR_K6_WHCR, l, h);
                        local_irq_restore(flags);
                        pr_info("Enabling new style K6 write allocation for %d Mb\n",
                                mbytes);
                }

                return;
        }

        if (c->x86_model == 10) {
                /* AMD Geode LX is model 10 */
                /* placeholder for any needed mods */
                return;
        }
#endif
}

static void init_amd_k7(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
        u32 l, h;

        /*
         * Bit 15 of Athlon specific MSR 15 needs to be 0 to enable SSE
         * on Palomino/Morgan/Barton CPUs.
         * If the BIOS didn't enable it already, enable it here.
         */
        if (c->x86_model >= 6 && c->x86_model <= 10) {
                if (!cpu_has(c, X86_FEATURE_XMM)) {
                        pr_info("Enabling disabled K7/SSE Support.\n");
                        msr_clear_bit(MSR_K7_HWCR, 15);
                        set_cpu_cap(c, X86_FEATURE_XMM);
                }
        }

        /*
         * It's been determined by AMD that Athlons since model 8 stepping 1
         * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx,
         * as per AMD technical note 27212 0.2.
         */
        if ((c->x86_model == 8 && c->x86_stepping >= 1) || (c->x86_model > 8)) {
                rdmsr(MSR_K7_CLK_CTL, l, h);
                if ((l & 0xfff00000) != 0x20000000) {
                        pr_info("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
                                l, ((l & 0x000fffff)|0x20000000));
                        wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
                }
        }

        /* Is this call coming from identify_secondary_cpu()? */
        if (!c->cpu_index)
                return;

        /*
         * Certain Athlons might work (for various values of 'work') in SMP
         * but they are not certified as MP capable.
         */
        /* Athlon 660/661 is valid. */
        if ((c->x86_model == 6) && ((c->x86_stepping == 0) ||
            (c->x86_stepping == 1)))
                return;

        /* Duron 670 is valid */
        if ((c->x86_model == 7) && (c->x86_stepping == 0))
                return;

        /*
         * Athlon 662, Duron 671, and Athlon >model 7 have the MP capability
         * bit. It's worth noting that the A5 stepping (662) of some
         * Athlon XPs have the MP bit set.
         * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
         * more.
         */
        if (((c->x86_model == 6) && (c->x86_stepping >= 2)) ||
            ((c->x86_model == 7) && (c->x86_stepping >= 1)) ||
             (c->x86_model > 7))
                if (cpu_has(c, X86_FEATURE_MP))
                        return;

        /* If we get here, not a certified SMP capable AMD system. */

        /*
         * Don't taint if we are running SMP kernel on a single non-MP
         * approved Athlon
         */
        WARN_ONCE(1, "WARNING: This combination of AMD"
                     " processors is not suitable for SMP.\n");
        add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
#endif
}

#ifdef CONFIG_NUMA
/*
 * To workaround broken NUMA config.  Read the comment in
 * srat_detect_node().
 */
static int nearby_node(int apicid)
{
        int i, node;

        for (i = apicid - 1; i >= 0; i--) {
                node = __apicid_to_node[i];
                if (node != NUMA_NO_NODE && node_online(node))
                        return node;
        }
        for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
                node = __apicid_to_node[i];
                if (node != NUMA_NO_NODE && node_online(node))
                        return node;
        }
        return first_node(node_online_map); /* Shouldn't happen */
}
#endif

static void srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
        int cpu = smp_processor_id();
        int node;
        unsigned apicid = c->topo.apicid;

        node = numa_cpu_node(cpu);
        if (node == NUMA_NO_NODE)
                node = per_cpu_llc_id(cpu);

        /*
         * On multi-fabric platform (e.g. Numascale NumaChip) a
         * platform-specific handler needs to be called to fixup some
         * IDs of the CPU.
         */
        if (x86_cpuinit.fixup_cpu_id)
                x86_cpuinit.fixup_cpu_id(c, node);

        if (!node_online(node)) {
                /*
                 * Two possibilities here:
                 *
                 * - The CPU is missing memory and no node was created.  In
                 *   that case try picking one from a nearby CPU.
                 *
                 * - The APIC IDs differ from the HyperTransport node IDs
                 *   which the K8 northbridge parsing fills in.  Assume
                 *   they are all increased by a constant offset, but in
                 *   the same order as the HT nodeids.  If that doesn't
                 *   result in a usable node fall back to the path for the
                 *   previous case.
                 *
                 * This workaround operates directly on the mapping between
                 * APIC ID and NUMA node, assuming certain relationship
                 * between APIC ID, HT node ID and NUMA topology.  As going
                 * through CPU mapping may alter the outcome, directly
                 * access __apicid_to_node[].
                 */
                int ht_nodeid = c->topo.initial_apicid;

                if (__apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
                        node = __apicid_to_node[ht_nodeid];
                /* Pick a nearby node */
                if (!node_online(node))
                        node = nearby_node(apicid);
        }
        numa_set_node(cpu, node);
#endif
}

static void bsp_determine_snp(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_ARCH_HAS_CC_PLATFORM
        cc_vendor = CC_VENDOR_AMD;

        if (cpu_has(c, X86_FEATURE_SEV_SNP)) {
                /*
                 * RMP table entry format is not architectural and is defined by the
                 * per-processor PPR. Restrict SNP support on the known CPU models
                 * for which the RMP table entry format is currently defined or for
                 * processors which support the architecturally defined RMPREAD
                 * instruction.
                 */
                if (!cpu_has(c, X86_FEATURE_HYPERVISOR) &&
                    (cpu_feature_enabled(X86_FEATURE_ZEN3) ||
                     cpu_feature_enabled(X86_FEATURE_ZEN4) ||
                     cpu_feature_enabled(X86_FEATURE_RMPREAD)) &&
                    snp_probe_rmptable_info()) {
                        cc_platform_set(CC_ATTR_HOST_SEV_SNP);
                } else {
                        setup_clear_cpu_cap(X86_FEATURE_SEV_SNP);
                        cc_platform_clear(CC_ATTR_HOST_SEV_SNP);
                }
        }
#endif
}

#define ZEN_MODEL_STEP_UCODE(fam, model, step, ucode) \
        X86_MATCH_VFM_STEPS(VFM_MAKE(X86_VENDOR_AMD, fam, model), \
                            step, step, ucode)

static const struct x86_cpu_id amd_tsa_microcode[] = {
        ZEN_MODEL_STEP_UCODE(0x19, 0x01, 0x1, 0x0a0011d7),
        ZEN_MODEL_STEP_UCODE(0x19, 0x01, 0x2, 0x0a00123b),
        ZEN_MODEL_STEP_UCODE(0x19, 0x08, 0x2, 0x0a00820d),
        ZEN_MODEL_STEP_UCODE(0x19, 0x11, 0x1, 0x0a10114c),
        ZEN_MODEL_STEP_UCODE(0x19, 0x11, 0x2, 0x0a10124c),
        ZEN_MODEL_STEP_UCODE(0x19, 0x18, 0x1, 0x0a108109),
        ZEN_MODEL_STEP_UCODE(0x19, 0x21, 0x0, 0x0a20102e),
        ZEN_MODEL_STEP_UCODE(0x19, 0x21, 0x2, 0x0a201211),
        ZEN_MODEL_STEP_UCODE(0x19, 0x44, 0x1, 0x0a404108),
        ZEN_MODEL_STEP_UCODE(0x19, 0x50, 0x0, 0x0a500012),
        ZEN_MODEL_STEP_UCODE(0x19, 0x61, 0x2, 0x0a60120a),
        ZEN_MODEL_STEP_UCODE(0x19, 0x74, 0x1, 0x0a704108),
        ZEN_MODEL_STEP_UCODE(0x19, 0x75, 0x2, 0x0a705208),
        ZEN_MODEL_STEP_UCODE(0x19, 0x78, 0x0, 0x0a708008),
        ZEN_MODEL_STEP_UCODE(0x19, 0x7c, 0x0, 0x0a70c008),
        ZEN_MODEL_STEP_UCODE(0x19, 0xa0, 0x2, 0x0aa00216),
        {},
};

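/*
 * TSA (Transient Scheduler Attacks): Zen3/Zen4 parts need at least the
 * microcode revisions listed above for the VERW-based clearing
 * mitigation; other Zen generations are not affected and can have the
 * TSA_SQ_NO/TSA_L1_NO bits forced directly.
 */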
static void tsa_init(struct cpuinfo_x86 *c)
{
        if (cpu_has(c, X86_FEATURE_HYPERVISOR))
                return;

        if (cpu_has(c, X86_FEATURE_ZEN3) ||
            cpu_has(c, X86_FEATURE_ZEN4)) {
                if (x86_match_min_microcode_rev(amd_tsa_microcode))
                        setup_force_cpu_cap(X86_FEATURE_VERW_CLEAR);
                else
                        pr_debug("%s: current revision: 0x%x\n", __func__, c->microcode);
        } else {
                setup_force_cpu_cap(X86_FEATURE_TSA_SQ_NO);
                setup_force_cpu_cap(X86_FEATURE_TSA_L1_NO);
        }
}

static void bsp_init_amd(struct cpuinfo_x86 *c)
{
        if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {

                if (c->x86 > 0x10 ||
                    (c->x86 == 0x10 && c->x86_model >= 0x2)) {
                        u64 val;

                        rdmsrq(MSR_K7_HWCR, val);
                        if (!(val & BIT(24)))
                                pr_warn(FW_BUG "TSC doesn't count with P0 frequency!\n");
                }
        }

        if (c->x86 == 0x15) {
                unsigned long upperbit;
                u32 cpuid, assoc;

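                /*
                 * CPUID Fn8000_0005 EDX describes the L1 instruction
                 * cache: size in KB in bits [31:24], associativity in
                 * bits [23:16]. The per-way size computed here sets the
                 * VA alignment granularity used to avoid I-cache aliasing
                 * conflicts between the two cores of a Bulldozer module.
                 */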
                cpuid    = cpuid_edx(0x80000005);
                assoc    = cpuid >> 16 & 0xff;
                upperbit = ((cpuid >> 24) << 10) / assoc;

                va_align.mask     = (upperbit - 1) & PAGE_MASK;
                va_align.flags    = ALIGN_VA_32 | ALIGN_VA_64;

                /* A random value per boot for bit slice [12:upper_bit) */
                va_align.bits = get_random_u32() & va_align.mask;
        }

        if (cpu_has(c, X86_FEATURE_MWAITX))
                use_mwaitx_delay();

        if (!boot_cpu_has(X86_FEATURE_AMD_SSBD) &&
            !boot_cpu_has(X86_FEATURE_VIRT_SSBD) &&
            c->x86 >= 0x15 && c->x86 <= 0x17) {
                unsigned int bit;

                switch (c->x86) {
                case 0x15: bit = 54; break;
                case 0x16: bit = 33; break;
                case 0x17: bit = 10; break;
                default: return;
                }
                /*
                 * Try to cache the base value so further operations can
                 * avoid RMW. If that faults, do not enable SSBD.
                 */
                if (!rdmsrq_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
                        setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD);
                        setup_force_cpu_cap(X86_FEATURE_SSBD);
                        x86_amd_ls_cfg_ssbd_mask = 1ULL << bit;
                }
        }

        resctrl_cpu_detect(c);

        /* Figure out Zen generations: */
        switch (c->x86) {
        case 0x17:
                switch (c->x86_model) {
                case 0x00 ... 0x2f:
                case 0x50 ... 0x5f:
                        setup_force_cpu_cap(X86_FEATURE_ZEN1);
                        break;
                case 0x30 ... 0x4f:
                case 0x60 ... 0x7f:
                case 0x90 ... 0x91:
                case 0xa0 ... 0xaf:
                        setup_force_cpu_cap(X86_FEATURE_ZEN2);
                        break;
                default:
                        goto warn;
                }
                break;

        case 0x19:
                switch (c->x86_model) {
                case 0x00 ... 0x0f:
                case 0x20 ... 0x5f:
                        setup_force_cpu_cap(X86_FEATURE_ZEN3);
                        break;
                case 0x10 ... 0x1f:
                case 0x60 ... 0xaf:
                        setup_force_cpu_cap(X86_FEATURE_ZEN4);
                        break;
                default:
                        goto warn;
                }
                break;

        case 0x1a:
                switch (c->x86_model) {
                case 0x00 ... 0x2f:
                case 0x40 ... 0x4f:
                case 0x60 ... 0x7f:
                        setup_force_cpu_cap(X86_FEATURE_ZEN5);
                        break;
                case 0x50 ... 0x5f:
                case 0x90 ... 0xaf:
                case 0xc0 ... 0xcf:
                        setup_force_cpu_cap(X86_FEATURE_ZEN6);
                        break;
                default:
                        goto warn;
                }
                break;

        default:
                break;
        }

        bsp_determine_snp(c);
        tsa_init(c);

        if (cpu_has(c, X86_FEATURE_GP_ON_USER_CPUID))
                setup_force_cpu_cap(X86_FEATURE_CPUID_FAULT);

        return;

warn:
        WARN_ONCE(1, "Family 0x%x, model: 0x%x??\n", c->x86, c->x86_model);
}

static void early_detect_mem_encrypt(struct cpuinfo_x86 *c)
{
        u64 msr;

        /*
         * Mark that WBINVD is needed during kexec on processors that
         * support SME. This provides support for performing a successful
         * kexec when going from SME inactive to SME active (or vice-versa).
         *
         * The cache must be cleared so that if there are entries with the
         * same physical address, both with and without the encryption bit,
         * they don't race each other when flushed and potentially end up
         * with the wrong entry being committed to memory.
         *
         * Test the CPUID bit directly because with mem_encrypt=off the
         * BSP will clear the X86_FEATURE_SME bit and the APs will not
         * see it set after that.
         */
        if (c->extended_cpuid_level >= 0x8000001f && (cpuid_eax(0x8000001f) & BIT(0)))
                __this_cpu_write(cache_state_incoherent, true);

        /*
         * BIOS support is required for SME and SEV.
         *   For SME: If BIOS has enabled SME then adjust x86_phys_bits by
         *            the SME physical address space reduction value.
         *            If BIOS has not enabled SME then don't advertise the
         *            SME feature (set in scattered.c).
         *            If the kernel has not enabled SME via any means then
         *            don't advertise the SME feature.
         *   For SEV: If BIOS has not enabled SEV then don't advertise SEV and
         *            any additional functionality based on it.
         *
         *   In all cases, since support for SME and SEV requires long mode,
         *   don't advertise the feature under CONFIG_X86_32.
         */
        if (cpu_has(c, X86_FEATURE_SME) || cpu_has(c, X86_FEATURE_SEV)) {
                /* Check if memory encryption is enabled */
                rdmsrq(MSR_AMD64_SYSCFG, msr);
                if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT))
                        goto clear_all;

                /*
                 * Always adjust physical address bits. Even though this
                 * will be a value above 32-bits this is still done for
                 * CONFIG_X86_32 so that accurate values are reported.
                 */
                c->x86_phys_bits -= (cpuid_ebx(0x8000001f) >> 6) & 0x3f;

                if (IS_ENABLED(CONFIG_X86_32))
                        goto clear_all;

                if (!sme_me_mask)
                        setup_clear_cpu_cap(X86_FEATURE_SME);

                rdmsrq(MSR_K7_HWCR, msr);
                if (!(msr & MSR_K7_HWCR_SMMLOCK))
                        goto clear_sev;

                return;

clear_all:
                setup_clear_cpu_cap(X86_FEATURE_SME);
clear_sev:
                setup_clear_cpu_cap(X86_FEATURE_SEV);
                setup_clear_cpu_cap(X86_FEATURE_SEV_ES);
                setup_clear_cpu_cap(X86_FEATURE_SEV_SNP);
        }
}

static void early_init_amd(struct cpuinfo_x86 *c)
{
        u32 dummy;

        if (c->x86 >= 0xf)
                set_cpu_cap(c, X86_FEATURE_K8);

        rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);

        /*
         * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
         * with P/T states and does not stop in deep C-states
         */
        if (c->x86_power & (1 << 8)) {
                set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
                set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
        }

        /* Bit 12 of 8000_0007 edx is accumulated power mechanism. */
        if (c->x86_power & BIT(12))
                set_cpu_cap(c, X86_FEATURE_ACC_POWER);

        /* Bit 14 indicates the Runtime Average Power Limit interface. */
        if (c->x86_power & BIT(14))
                set_cpu_cap(c, X86_FEATURE_RAPL);

#ifdef CONFIG_X86_64
        set_cpu_cap(c, X86_FEATURE_SYSCALL32);
#else
        /* Set MTRR capability flag if appropriate */
        if (c->x86 == 5)
                if (c->x86_model == 13 || c->x86_model == 9 ||
                    (c->x86_model == 8 && c->x86_stepping >= 8))
                        set_cpu_cap(c, X86_FEATURE_K6_MTRR);
#endif
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
        /*
         * ApicID can always be treated as an 8-bit value for AMD APIC versions
         * >= 0x10, but even old K8s came out of reset with version 0x10. So, we
         * can safely set X86_FEATURE_EXTD_APICID unconditionally for families
         * after 16h.
         */
        if (boot_cpu_has(X86_FEATURE_APIC)) {
                if (c->x86 > 0x16)
                        set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
                else if (c->x86 >= 0xf) {
                        /* check CPU config space for extended APIC ID */
                        unsigned int val;

                        val = read_pci_config(0, 24, 0, 0x68);
                        if ((val >> 17 & 0x3) == 0x3)
                                set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
                }
        }
#endif

        /*
         * This is only needed to tell the kernel whether to use VMCALL
         * and VMMCALL.  VMMCALL is never executed except under virt, so
         * we can set it unconditionally.
         */
        set_cpu_cap(c, X86_FEATURE_VMMCALL);

        /* F16h erratum 793, CVE-2013-6885 */
        if (c->x86 == 0x16 && c->x86_model <= 0xf)
                msr_set_bit(MSR_AMD64_LS_CFG, 15);

        early_detect_mem_encrypt(c);

        if (!cpu_has(c, X86_FEATURE_HYPERVISOR) && !cpu_has(c, X86_FEATURE_IBPB_BRTYPE)) {
                if (c->x86 == 0x17 && boot_cpu_has(X86_FEATURE_AMD_IBPB))
                        setup_force_cpu_cap(X86_FEATURE_IBPB_BRTYPE);
                else if (c->x86 >= 0x19 && !wrmsrq_safe(MSR_IA32_PRED_CMD, PRED_CMD_SBPB)) {
                        setup_force_cpu_cap(X86_FEATURE_IBPB_BRTYPE);
                        setup_force_cpu_cap(X86_FEATURE_SBPB);
                }
        }
}

static void init_amd_k8(struct cpuinfo_x86 *c)
{
        u32 level;
        u64 value;

        /* On C+ stepping K8 rep microcode works well for copy/memset */
        level = cpuid_eax(1);
        if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
                set_cpu_cap(c, X86_FEATURE_REP_GOOD);

        /*
         * Some BIOSes incorrectly force this feature, but only K8 revision D
         * (model = 0x14) and later actually support it.
         * (AMD Erratum #110, docId: 25759).
         */
        if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM) && !cpu_has(c, X86_FEATURE_HYPERVISOR)) {
                clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
                if (!rdmsrq_amd_safe(0xc001100d, &value)) {
                        value &= ~BIT_64(32);
                        wrmsrq_amd_safe(0xc001100d, value);
                }
        }

        if (!c->x86_model_id[0])
                strscpy(c->x86_model_id, "Hammer");

#ifdef CONFIG_SMP
        /*
         * Disable TLB flush filter by setting HWCR.FFDIS on K8
         * bit 6 of msr C001_0015
         *
         * Errata 63 for SH-B3 steppings
         * Errata 122 for all steppings (F+ have it disabled by default)
         */
        msr_set_bit(MSR_K7_HWCR, 6);
#endif
        set_cpu_bug(c, X86_BUG_SWAPGS_FENCE);

        /*
         * Check models and steppings affected by erratum 400. This is
         * used to select the proper idle routine and to enable the
         * check whether the machine is affected in arch_post_acpi_subsys_init()
         * which sets the X86_BUG_AMD_APIC_C1E bug depending on the MSR check.
         */
        if (c->x86_model > 0x41 ||
            (c->x86_model == 0x41 && c->x86_stepping >= 0x2))
                setup_force_cpu_bug(X86_BUG_AMD_E400);
}

static void init_amd_gh(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_MMCONF_FAM10H
        /* do this for boot cpu */
        if (c == &boot_cpu_data)
                check_enable_amd_mmconf_dmi();

        fam10h_check_enable_mmcfg();
#endif

        /*
         * Disable GART TLB Walk Errors on Fam10h. We do this here because this
         * is always needed when GART is enabled, even in a kernel which has no
         * MCE support built in. BIOS should disable GartTlbWlk Errors already.
         * If it doesn't, we do it here as suggested by the BKDG.
         *
         * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012
         */
        msr_set_bit(MSR_AMD64_MCx_MASK(4), 10);

        /*
         * On family 10h BIOS may not have properly enabled WC+ support, causing
         * it to be converted to CD memtype. This may result in performance
         * degradation for certain nested-paging guests. Prevent this conversion
         * by clearing bit 24 in MSR_AMD64_BU_CFG2.
         *
         * NOTE: we want to use the _safe accessors so as not to #GP kvm
         * guests on older kvm hosts.
         */
        msr_clear_bit(MSR_AMD64_BU_CFG2, 24);

        set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);

        /*
         * Check models and steppings affected by erratum 400. This is
         * used to select the proper idle routine and to enable the
         * check whether the machine is affected in arch_post_acpi_subsys_init()
         * which sets the X86_BUG_AMD_APIC_C1E bug depending on the MSR check.
         */
        if (c->x86_model > 0x2 ||
            (c->x86_model == 0x2 && c->x86_stepping >= 0x1))
                setup_force_cpu_bug(X86_BUG_AMD_E400);
}

static void init_amd_ln(struct cpuinfo_x86 *c)
{
        /*
         * Apply erratum 665 fix unconditionally so machines without a BIOS
         * fix work.
         */
        msr_set_bit(MSR_AMD64_DE_CFG, 31);
}

static bool rdrand_force;

static int __init rdrand_cmdline(char *str)
{
        if (!str)
                return -EINVAL;

        if (!strcmp(str, "force"))
                rdrand_force = true;
        else
                return -EINVAL;

        return 0;
}
early_param("rdrand", rdrand_cmdline);

static void clear_rdrand_cpuid_bit(struct cpuinfo_x86 *c)
{
        /*
         * Saving of the MSR used to hide the RDRAND support during
         * suspend/resume is done by arch/x86/power/cpu.c, which is
         * dependent on CONFIG_PM_SLEEP.
         */
        if (!IS_ENABLED(CONFIG_PM_SLEEP))
                return;

        /*
         * The self-test can clear X86_FEATURE_RDRAND, so check for
         * RDRAND support using the CPUID function directly.
         */
        if (!(cpuid_ecx(1) & BIT(30)) || rdrand_force)
                return;

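        /*
         * MSR C001_1004 (MSR_AMD64_CPUID_FN_1) overrides what CPUID
         * Fn0000_0001 reports; bit 62 maps to ECX[30], the RDRAND bit.
         */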
        msr_clear_bit(MSR_AMD64_CPUID_FN_1, 62);

        /*
         * Verify that the CPUID change has occurred in case the kernel is
         * running virtualized and the hypervisor doesn't support the MSR.
         */
        if (cpuid_ecx(1) & BIT(30)) {
                pr_info_once("BIOS may not properly restore RDRAND after suspend, but hypervisor does not support hiding RDRAND via CPUID.\n");
                return;
        }

        clear_cpu_cap(c, X86_FEATURE_RDRAND);
        pr_info_once("BIOS may not properly restore RDRAND after suspend, hiding RDRAND via CPUID. Use rdrand=force to reenable.\n");
}

static void init_amd_jg(struct cpuinfo_x86 *c)
{
        /*
         * Some BIOS implementations do not restore proper RDRAND support
         * across suspend and resume. Check on whether to hide the RDRAND
         * instruction support via CPUID.
         */
        clear_rdrand_cpuid_bit(c);
}

static void init_amd_bd(struct cpuinfo_x86 *c)
{
        u64 value;

        /*
         * The way access filter has a performance penalty on some workloads.
         * Disable it on the affected CPUs.
         */
        if ((c->x86_model >= 0x02) && (c->x86_model < 0x20)) {
                if (!rdmsrq_safe(MSR_F15H_IC_CFG, &value) && !(value & 0x1E)) {
                        value |= 0x1E;
                        wrmsrq_safe(MSR_F15H_IC_CFG, value);
                }
        }

        /*
         * Some BIOS implementations do not restore proper RDRAND support
         * across suspend and resume. Check on whether to hide the RDRAND
         * instruction support via CPUID.
         */
        clear_rdrand_cpuid_bit(c);
}

static const struct x86_cpu_id erratum_1386_microcode[] = {
        X86_MATCH_VFM_STEPS(VFM_MAKE(X86_VENDOR_AMD, 0x17, 0x01), 0x2, 0x2, 0x0800126e),
        X86_MATCH_VFM_STEPS(VFM_MAKE(X86_VENDOR_AMD, 0x17, 0x31), 0x0, 0x0, 0x08301052),
        {}
};

static void fix_erratum_1386(struct cpuinfo_x86 *c)
{
        /*
         * Work around Erratum 1386.  The XSAVES instruction malfunctions in
         * certain circumstances on Zen1/2 uarch, and not all parts have had
         * updated microcode at the time of writing (March 2023).
         *
         * Affected parts all have no supervisor XSAVE states, meaning that
         * the XSAVEC instruction (which works fine) is equivalent.
         *
         * Clear the feature flag only on microcode revisions which
         * don't have the fix.
         */
        if (x86_match_min_microcode_rev(erratum_1386_microcode))
                return;

        clear_cpu_cap(c, X86_FEATURE_XSAVES);
}

void init_spectral_chicken(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_MITIGATION_UNRET_ENTRY
        u64 value;

        /*
         * On Zen2 we offer this chicken (bit) on the altar of Speculation.
         *
         * This suppresses speculation from the middle of a basic block, i.e. it
         * suppresses non-branch predictions.
         */
        if (!cpu_has(c, X86_FEATURE_HYPERVISOR)) {
                if (!rdmsrq_safe(MSR_ZEN2_SPECTRAL_CHICKEN, &value)) {
                        value |= MSR_ZEN2_SPECTRAL_CHICKEN_BIT;
                        wrmsrq_safe(MSR_ZEN2_SPECTRAL_CHICKEN, value);
                }
        }
#endif
}

static void init_amd_zen_common(void)
{
        setup_force_cpu_cap(X86_FEATURE_ZEN);
#ifdef CONFIG_NUMA
        node_reclaim_distance = 32;
#endif
}

static void init_amd_zen1(struct cpuinfo_x86 *c)
{
        fix_erratum_1386(c);

        /* Fix up CPUID bits, but only if not virtualised. */
        if (!cpu_has(c, X86_FEATURE_HYPERVISOR)) {

                /* Erratum 1076: CPB feature bit not being set in CPUID. */
                if (!cpu_has(c, X86_FEATURE_CPB))
                        set_cpu_cap(c, X86_FEATURE_CPB);
        }

        pr_notice_once("AMD Zen1 DIV0 bug detected. Disable SMT for full protection.\n");
        setup_force_cpu_bug(X86_BUG_DIV0);

        /*
         * Turn off the Instructions Retired free counter on machines that are
         * susceptible to erratum #1054 "Instructions Retired Performance
         * Counter May Be Inaccurate".
         */
        if (c->x86_model < 0x30) {
                msr_clear_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT);
                clear_cpu_cap(c, X86_FEATURE_IRPERF);
        }
}

static bool cpu_has_zenbleed_microcode(void)
{
        u32 good_rev = 0;

        switch (boot_cpu_data.x86_model) {
        case 0x30 ... 0x3f: good_rev = 0x0830107b; break;
        case 0x60 ... 0x67: good_rev = 0x0860010c; break;
        case 0x68 ... 0x6f: good_rev = 0x08608107; break;
        case 0x70 ... 0x7f: good_rev = 0x08701033; break;
        case 0xa0 ... 0xaf: good_rev = 0x08a00009; break;

        default:
                return false;
        }

        if (boot_cpu_data.microcode < good_rev)
                return false;

        return true;
}

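/*
 * Zenbleed (CVE-2023-20593): if fixed microcode is not loaded, fall back
 * to the DE_CFG chicken bit, which disables the affected floating point
 * optimization at some performance cost.
 */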
static void zen2_zenbleed_check(struct cpuinfo_x86 *c)
{
        if (cpu_has(c, X86_FEATURE_HYPERVISOR))
                return;

        if (!cpu_has(c, X86_FEATURE_AVX))
                return;

        if (!cpu_has_zenbleed_microcode()) {
                pr_notice_once("Zenbleed: please update your microcode for the most optimal fix\n");
                msr_set_bit(MSR_AMD64_DE_CFG, MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT);
        } else {
                msr_clear_bit(MSR_AMD64_DE_CFG, MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT);
        }
}

static void init_amd_zen2(struct cpuinfo_x86 *c)
{
        init_spectral_chicken(c);
        fix_erratum_1386(c);
        zen2_zenbleed_check(c);

        /* Disable RDSEED on AMD Cyan Skillfish because of an error. */
        if (c->x86_model == 0x47 && c->x86_stepping == 0x0) {
                clear_cpu_cap(c, X86_FEATURE_RDSEED);
                msr_clear_bit(MSR_AMD64_CPUID_FN_7, 18);
                pr_emerg("RDSEED is not reliable on this platform; disabling.\n");
        }

        /* Correct misconfigured CPUID on some clients. */
        clear_cpu_cap(c, X86_FEATURE_INVLPGB);
}

static void init_amd_zen3(struct cpuinfo_x86 *c)
{
        if (!cpu_has(c, X86_FEATURE_HYPERVISOR)) {
                /*
                 * Zen3 (Fam19 model < 0x10) parts are not susceptible to
                 * Branch Type Confusion, but predate the allocation of the
                 * BTC_NO bit.
                 */
                if (!cpu_has(c, X86_FEATURE_BTC_NO))
                        set_cpu_cap(c, X86_FEATURE_BTC_NO);
        }
}

static void init_amd_zen4(struct cpuinfo_x86 *c)
{
        if (!cpu_has(c, X86_FEATURE_HYPERVISOR))
                msr_set_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_SHARED_BTB_FIX_BIT);

        /*
         * These Zen4 SoCs advertise support for virtualized VMLOAD/VMSAVE
         * in some BIOS versions but they can lead to random host reboots.
         */
        switch (c->x86_model) {
        case 0x18 ... 0x1f:
        case 0x60 ... 0x7f:
                clear_cpu_cap(c, X86_FEATURE_V_VMSAVE_VMLOAD);
                break;
        }
}

static void init_amd_zen5(struct cpuinfo_x86 *c)
{
}

static void init_amd(struct cpuinfo_x86 *c)
{
        u64 vm_cr;

        early_init_amd(c);

        /*
         * Bit 31 in normal CPUID used for nonstandard 3DNow ID;
         * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway
         */
        clear_cpu_cap(c, 0*32+31);

        if (c->x86 >= 0x10)
                set_cpu_cap(c, X86_FEATURE_REP_GOOD);

        /* AMD FSRM also implies FSRS */
        if (cpu_has(c, X86_FEATURE_FSRM))
                set_cpu_cap(c, X86_FEATURE_FSRS);

        /* K6s report MCEs but don't actually have all the MSRs */
        if (c->x86 < 6)
                clear_cpu_cap(c, X86_FEATURE_MCE);

        switch (c->x86) {
        case 4:    init_amd_k5(c); break;
        case 5:    init_amd_k6(c); break;
        case 6:    init_amd_k7(c); break;
        case 0xf:  init_amd_k8(c); break;
        case 0x10: init_amd_gh(c); break;
        case 0x12: init_amd_ln(c); break;
        case 0x15: init_amd_bd(c); break;
        case 0x16: init_amd_jg(c); break;
        }

        /*
         * Save up on some future enablement work and do common Zen
         * settings.
         */
        if (c->x86 >= 0x17)
                init_amd_zen_common();

        if (boot_cpu_has(X86_FEATURE_ZEN1))
                init_amd_zen1(c);
        else if (boot_cpu_has(X86_FEATURE_ZEN2))
                init_amd_zen2(c);
        else if (boot_cpu_has(X86_FEATURE_ZEN3))
                init_amd_zen3(c);
        else if (boot_cpu_has(X86_FEATURE_ZEN4))
                init_amd_zen4(c);
        else if (boot_cpu_has(X86_FEATURE_ZEN5))
                init_amd_zen5(c);

        /*
         * Enable workaround for FXSAVE leak on CPUs
         * without the XSaveErPtr feature
         */
        if ((c->x86 >= 6) && (!cpu_has(c, X86_FEATURE_XSAVEERPTR)))
                set_cpu_bug(c, X86_BUG_FXSAVE_LEAK);

        cpu_detect_cache_sizes(c);

        srat_detect_node(c);

        init_amd_cacheinfo(c);

        if (cpu_has(c, X86_FEATURE_SVM)) {
                rdmsrq(MSR_VM_CR, vm_cr);
                if (vm_cr & SVM_VM_CR_SVM_DIS_MASK) {
                        pr_notice_once("SVM disabled (by BIOS) in MSR_VM_CR\n");
                        clear_cpu_cap(c, X86_FEATURE_SVM);
                }
        }

        if (!cpu_has(c, X86_FEATURE_LFENCE_RDTSC) && cpu_has(c, X86_FEATURE_XMM2)) {
                /*
                 * Use LFENCE for execution serialization.  On families which
                 * don't have that MSR, LFENCE is already serializing.
                 * msr_set_bit() uses the safe accessors, too, even if the MSR
                 * is not present.
                 */
                msr_set_bit(MSR_AMD64_DE_CFG,
                            MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT);

                /* A serializing LFENCE stops RDTSC speculation */
                set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
        }

        /*
         * Family 0x12 and above processors have APIC timer
         * running in deep C states.
         */
        if (c->x86 > 0x11)
                set_cpu_cap(c, X86_FEATURE_ARAT);

        /* 3DNow or LM implies PREFETCHW */
        if (!cpu_has(c, X86_FEATURE_3DNOWPREFETCH))
                if (cpu_has(c, X86_FEATURE_3DNOW) || cpu_has(c, X86_FEATURE_LM))
                        set_cpu_cap(c, X86_FEATURE_3DNOWPREFETCH);

        /* AMD CPUs don't reset SS attributes on SYSRET, Xen does. */
        if (!cpu_feature_enabled(X86_FEATURE_XENPV))
                set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);

        /* Enable the Instructions Retired free counter */
        if (cpu_has(c, X86_FEATURE_IRPERF))
                msr_set_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT);

        check_null_seg_clears_base(c);

        /*
         * Make sure EFER[AIBRSE - Automatic IBRS Enable] is set. The APs are brought up
         * using the trampoline code and as part of it, MSR_EFER gets prepared there in
         * order to be replicated onto them. Regardless, set it here again, if not set,
         * to protect against any future refactoring/code reorganization which might
         * miss setting this important bit.
         */
        if (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
            cpu_has(c, X86_FEATURE_AUTOIBRS))
                WARN_ON_ONCE(msr_set_bit(MSR_EFER, _EFER_AUTOIBRS) < 0);

        /* AMD CPUs don't need fencing after x2APIC/TSC_DEADLINE MSR writes. */
        clear_cpu_cap(c, X86_FEATURE_APIC_MSRS_FENCE);

        /* Enable Translation Cache Extension */
        if (cpu_has(c, X86_FEATURE_TCE))
                msr_set_bit(MSR_EFER, _EFER_TCE);
}

#ifdef CONFIG_X86_32
static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
        /* AMD errata T13 (order #21922) */
        if (c->x86 == 6) {
                /* Duron Rev A0 */
                if (c->x86_model == 3 && c->x86_stepping == 0)
                        size = 64;
                /* Tbird rev A1/A2 */
                if (c->x86_model == 4 &&
                    (c->x86_stepping == 0 || c->x86_stepping == 1))
                        size = 256;
        }
        return size;
}
#endif

static void cpu_detect_tlb_amd(struct cpuinfo_x86 *c)
{
        u32 ebx, eax, ecx, edx;
        u16 mask = 0xfff;

        if (c->x86 < 0xf)
                return;

        if (c->extended_cpuid_level < 0x80000006)
                return;

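        /*
         * CPUID Fn8000_0006 EBX reports the L2 TLB for 4K pages: data
         * entries in bits [27:16], instruction entries in bits [11:0].
         */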
        cpuid(0x80000006, &eax, &ebx, &ecx, &edx);

        tlb_lld_4k = (ebx >> 16) & mask;
        tlb_lli_4k = ebx & mask;

        /*
         * K8 doesn't have 2M/4M entries in the L2 TLB so read out the L1 TLB
         * characteristics from the CPUID function 0x80000005 instead.
         */
        if (c->x86 == 0xf) {
                cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
                mask = 0xff;
        }

        /* Handle DTLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
        if (!((eax >> 16) & mask))
                tlb_lld_2m = (cpuid_eax(0x80000005) >> 16) & 0xff;
        else
                tlb_lld_2m = (eax >> 16) & mask;

        /* a 4M entry uses two 2M entries */
        tlb_lld_4m = tlb_lld_2m >> 1;

        /* Handle ITLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
        if (!(eax & mask)) {
                /* Erratum 658 */
                if (c->x86 == 0x15 && c->x86_model <= 0x1f) {
                        tlb_lli_2m = 1024;
                } else {
                        cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
                        tlb_lli_2m = eax & 0xff;
                }
        } else
                tlb_lli_2m = eax & mask;

        tlb_lli_4m = tlb_lli_2m >> 1;

        /* Max number of pages INVLPGB can invalidate in one shot */
        if (cpu_has(c, X86_FEATURE_INVLPGB))
                invlpgb_count_max = (cpuid_edx(0x80000008) & 0xffff) + 1;
}

static const struct cpu_dev amd_cpu_dev = {
        .c_vendor	= "AMD",
        .c_ident	= { "AuthenticAMD" },
#ifdef CONFIG_X86_32
        .legacy_models = {
                { .family = 4, .model_names =
                  {
                        [3]  = "486 DX/2",
                        [7]  = "486 DX/2-WB",
                        [8]  = "486 DX/4",
                        [9]  = "486 DX/4-WB",
                        [14] = "Am5x86-WT",
                        [15] = "Am5x86-WB"
                  }
                },
        },
        .legacy_cache_size = amd_size_cache,
#endif
        .c_early_init   = early_init_amd,
        .c_detect_tlb	= cpu_detect_tlb_amd,
        .c_bsp_init	= bsp_init_amd,
        .c_init		= init_amd,
        .c_x86_vendor	= X86_VENDOR_AMD,
};

cpu_dev_register(amd_cpu_dev);

static DEFINE_PER_CPU_READ_MOSTLY(unsigned long[4], amd_dr_addr_mask);

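/*
 * DR0 has a dedicated address-mask MSR; the masks for DR1-DR3 live in
 * consecutive MSRs starting at MSR_F16H_DR1_ADDR_MASK.
 */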
static unsigned int amd_msr_dr_addr_masks[] = {
        MSR_F16H_DR0_ADDR_MASK,
        MSR_F16H_DR1_ADDR_MASK,
        MSR_F16H_DR1_ADDR_MASK + 1,
        MSR_F16H_DR1_ADDR_MASK + 2
};

void amd_set_dr_addr_mask(unsigned long mask, unsigned int dr)
{
        int cpu = smp_processor_id();

        if (!cpu_feature_enabled(X86_FEATURE_BPEXT))
                return;

        if (WARN_ON_ONCE(dr >= ARRAY_SIZE(amd_msr_dr_addr_masks)))
                return;

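        /* Skip the (relatively slow) MSR write if the mask is unchanged. */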
        if (per_cpu(amd_dr_addr_mask, cpu)[dr] == mask)
                return;

        wrmsrq(amd_msr_dr_addr_masks[dr], mask);
        per_cpu(amd_dr_addr_mask, cpu)[dr] = mask;
}

unsigned long amd_get_dr_addr_mask(unsigned int dr)
{
        if (!cpu_feature_enabled(X86_FEATURE_BPEXT))
                return 0;

        if (WARN_ON_ONCE(dr >= ARRAY_SIZE(amd_msr_dr_addr_masks)))
                return 0;

        return per_cpu(amd_dr_addr_mask[dr], smp_processor_id());
}
EXPORT_SYMBOL_GPL(amd_get_dr_addr_mask);

static void zenbleed_check_cpu(void *unused)
{
        struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());

        zen2_zenbleed_check(c);
}

void amd_check_microcode(void)
{
        if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
                return;

        if (cpu_feature_enabled(X86_FEATURE_ZEN2))
                on_each_cpu(zenbleed_check_cpu, NULL, 1);
}

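/*
 * Human-readable decode of the FCH S5/reset status register; bit
 * positions without a string here are simply not reported.
 */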
static const char * const s5_reset_reason_txt[] = {
        [0]  = "thermal pin BP_THERMTRIP_L was tripped",
        [1]  = "power button was pressed for 4 seconds",
        [2]  = "shutdown pin was tripped",
        [4]  = "remote ASF power off command was received",
        [9]  = "internal CPU thermal limit was tripped",
        [16] = "system reset pin BP_SYS_RST_L was tripped",
        [17] = "software issued PCI reset",
        [18] = "software wrote 0x4 to reset control register 0xCF9",
        [19] = "software wrote 0x6 to reset control register 0xCF9",
        [20] = "software wrote 0xE to reset control register 0xCF9",
        [21] = "ACPI power state transition occurred",
        [22] = "keyboard reset pin KB_RST_L was tripped",
        [23] = "internal CPU shutdown event occurred",
        [24] = "system failed to boot before failed boot timer expired",
        [25] = "hardware watchdog timer expired",
        [26] = "remote ASF reset command was received",
        [27] = "an uncorrected error caused a data fabric sync flood event",
        [29] = "FCH and MP1 failed warm reset handshake",
        [30] = "a parity error occurred",
        [31] = "a software sync flood event occurred",
};

static __init int print_s5_reset_status_mmio(void)
{
        void __iomem *addr;
        u32 value;
        int i;

        if (!cpu_feature_enabled(X86_FEATURE_ZEN))
                return 0;

        addr = ioremap(FCH_PM_BASE + FCH_PM_S5_RESET_STATUS, sizeof(value));
        if (!addr)
                return 0;

        value = ioread32(addr);
        iounmap(addr);

        /* Value with "all bits set" is an error response and should be ignored. */
        if (value == U32_MAX)
                return 0;

        for (i = 0; i < ARRAY_SIZE(s5_reset_reason_txt); i++) {
                if (!(value & BIT(i)))
                        continue;

                if (s5_reset_reason_txt[i]) {
                        pr_info("x86/amd: Previous system reset reason [0x%08x]: %s\n",
                                value, s5_reset_reason_txt[i]);
                }
        }

        return 0;
}
late_initcall(print_s5_reset_status_mmio);
|---|