/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _X86_MICROCODE_INTERNAL_H
#define _X86_MICROCODE_INTERNAL_H

#include <linux/earlycpio.h>
#include <linux/initrd.h>

#include <asm/cpu.h>
#include <asm/microcode.h>

struct device;

enum ucode_state {
	UCODE_OK	= 0,
	UCODE_NEW,
	UCODE_NEW_SAFE,
	UCODE_UPDATED,
	UCODE_NFOUND,
	UCODE_ERROR,
	UCODE_TIMEOUT,
	UCODE_OFFLINE,
};

struct microcode_ops {
	enum ucode_state (*request_microcode_fw)(int cpu, struct device *dev);
	void (*microcode_fini_cpu)(int cpu);

	/*
	 * The generic 'microcode_core' part guarantees that the callbacks
	 * below run on a target CPU when they are being called.
	 * See also the "Synchronization" section in microcode_core.c.
	 */
	enum ucode_state	(*apply_microcode)(int cpu);
	int			(*collect_cpu_info)(int cpu, struct cpu_signature *csig);
	void			(*finalize_late_load)(int result);
	unsigned int		nmi_safe	: 1,
				use_nmi		: 1;
};
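
/*
 * Illustrative sketch (the names below are hypothetical): a vendor driver
 * fills in this ops table and returns it from its init routine, which the
 * core loader then uses for all further loading:
 *
 *	static struct microcode_ops microcode_foo_ops = {
 *		.request_microcode_fw	= request_microcode_fw_foo,
 *		.collect_cpu_info	= collect_cpu_info_foo,
 *		.apply_microcode	= apply_microcode_foo,
 *	};
 *
 *	struct microcode_ops *init_foo_microcode(void)
 *	{
 *		return &microcode_foo_ops;
 *	}
 */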

struct early_load_data {
	u32 old_rev;
	u32 new_rev;
};

extern struct early_load_data early_data;
extern struct ucode_cpu_info ucode_cpu_info[];
extern u32 microcode_rev[NR_CPUS];
extern u32 base_rev;

struct cpio_data find_microcode_in_initrd(const char *path);

#define MAX_UCODE_COUNT 128

#define QCHAR(a, b, c, d) ((a) + ((b) << 8) + ((c) << 16) + ((d) << 24))
#define CPUID_INTEL1 QCHAR('G', 'e', 'n', 'u')
#define CPUID_INTEL2 QCHAR('i', 'n', 'e', 'I')
#define CPUID_INTEL3 QCHAR('n', 't', 'e', 'l')
#define CPUID_AMD1 QCHAR('A', 'u', 't', 'h')
#define CPUID_AMD2 QCHAR('e', 'n', 't', 'i')
#define CPUID_AMD3 QCHAR('c', 'A', 'M', 'D')

#define CPUID_IS(a, b, c, ebx, ecx, edx)	\
		(!(((ebx) ^ (a)) | ((edx) ^ (b)) | ((ecx) ^ (c))))
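
/*
 * Worked example: CPUID leaf 0 returns the vendor string in register
 * order EBX, EDX, ECX. For "GenuineIntel" that is EBX = "Genu",
 * EDX = "ineI", ECX = "ntel". x86 is little-endian, so each group of
 * four characters packs exactly as QCHAR() builds it, first character
 * in the lowest byte:
 *
 *	EBX == QCHAR('G', 'e', 'n', 'u') == CPUID_INTEL1
 *
 * CPUID_IS() XORs each register with its expected constant and ORs the
 * three results; the OR is zero only when all three registers match.
 */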

/*
 * During the early microcode loading phase on the BSP, boot_cpu_data is
 * not set up yet, so x86_cpuid_vendor() is used to get the vendor id for
 * the BSP.
 *
 * In the 32-bit AP case, accessing boot_cpu_data needs a linear address.
 * To simplify the code, x86_cpuid_vendor() is used for APs as well.
 *
 * x86_cpuid_vendor() reads the vendor information directly from CPUID.
 */
static inline int x86_cpuid_vendor(void)
{
	u32 eax = 0x00000000;
	u32 ebx, ecx = 0, edx;

	native_cpuid(&eax, &ebx, &ecx, &edx);

	if (CPUID_IS(CPUID_INTEL1, CPUID_INTEL2, CPUID_INTEL3, ebx, ecx, edx))
		return X86_VENDOR_INTEL;

	if (CPUID_IS(CPUID_AMD1, CPUID_AMD2, CPUID_AMD3, ebx, ecx, edx))
		return X86_VENDOR_AMD;

	return X86_VENDOR_UNKNOWN;
}

static inline unsigned int x86_cpuid_family(void)
{
	u32 eax = 0x00000001;
	u32 ebx, ecx = 0, edx;

	native_cpuid(&eax, &ebx, &ecx, &edx);

	return x86_family(eax);
}
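
/*
 * Typical early-boot usage (a sketch of how the core loader dispatches
 * before boot_cpu_data is populated):
 *
 *	if (x86_cpuid_vendor() == X86_VENDOR_AMD)
 *		load_ucode_amd_bsp(&early_data, x86_cpuid_family());
 */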

extern bool force_minrev;

#ifdef CONFIG_CPU_SUP_AMD
void load_ucode_amd_bsp(struct early_load_data *ed, unsigned int family);
void load_ucode_amd_ap(unsigned int family);
void reload_ucode_amd(unsigned int cpu);
struct microcode_ops *init_amd_microcode(void);
void exit_amd_microcode(void);
#else /* CONFIG_CPU_SUP_AMD */
static inline void load_ucode_amd_bsp(struct early_load_data *ed, unsigned int family) { }
static inline void load_ucode_amd_ap(unsigned int family) { }
static inline void reload_ucode_amd(unsigned int cpu) { }
static inline struct microcode_ops *init_amd_microcode(void) { return NULL; }
static inline void exit_amd_microcode(void) { }
#endif /* !CONFIG_CPU_SUP_AMD */

#ifdef CONFIG_CPU_SUP_INTEL
void load_ucode_intel_bsp(struct early_load_data *ed);
void load_ucode_intel_ap(void);
void reload_ucode_intel(void);
struct microcode_ops *init_intel_microcode(void);
#else /* CONFIG_CPU_SUP_INTEL */
static inline void load_ucode_intel_bsp(struct early_load_data *ed) { }
static inline void load_ucode_intel_ap(void) { }
static inline void reload_ucode_intel(void) { }
static inline struct microcode_ops *init_intel_microcode(void) { return NULL; }
#endif /* !CONFIG_CPU_SUP_INTEL */

#define ucode_dbg(fmt, ...)					\
({								\
	if (IS_ENABLED(CONFIG_MICROCODE_DBG))			\
		pr_info(fmt, ##__VA_ARGS__);			\
})
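
/*
 * ucode_dbg() takes pr_info()-style arguments; the call is optimized
 * away unless CONFIG_MICROCODE_DBG is enabled, while the arguments are
 * still type-checked in all configurations, e.g.:
 *
 *	ucode_dbg("CPU%d: new patch revision: 0x%x\n", cpu, rev);
 */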

#endif /* _X86_MICROCODE_INTERNAL_H */