/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_MCE_H
#define _ASM_X86_MCE_H

#include <uapi/asm/mce.h>

/*
 * Machine Check support for x86
 */

/* MCG_CAP register defines */
#define MCG_BANKCNT_MASK	0xff         /* Number of Banks */
#define MCG_CTL_P		BIT_ULL(8)   /* MCG_CTL register available */
#define MCG_EXT_P		BIT_ULL(9)   /* Extended registers available */
#define MCG_CMCI_P		BIT_ULL(10)  /* CMCI supported */
#define MCG_SEAM_NR		BIT_ULL(12)  /* MCG_STATUS_SEAM_NR supported */
#define MCG_EXT_CNT_MASK	0xff0000     /* Number of Extended registers */
#define MCG_EXT_CNT_SHIFT	16
#define MCG_EXT_CNT(c)		(((c) & MCG_EXT_CNT_MASK) >> MCG_EXT_CNT_SHIFT)
#define MCG_SER_P		BIT_ULL(24)  /* MCA recovery/new status bits */
#define MCG_ELOG_P		BIT_ULL(26)  /* Extended error log supported */
#define MCG_LMCE_P		BIT_ULL(27)  /* Local machine check supported */
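
/*
 * Illustrative sketch (not part of this header): MCG_CAP is normally read
 * once per CPU and the fields above pulled out of it, e.g.:
 *
 *	u64 cap;
 *
 *	rdmsrl(MSR_IA32_MCG_CAP, cap);
 *	nr_banks = cap & MCG_BANKCNT_MASK;
 *	nr_ext   = MCG_EXT_CNT(cap);
 *	has_lmce = !!(cap & MCG_LMCE_P);
 */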
|---|

/* MCG_STATUS register defines */
#define MCG_STATUS_RIPV		BIT_ULL(0)   /* restart ip valid */
#define MCG_STATUS_EIPV		BIT_ULL(1)   /* ip points to correct instruction */
#define MCG_STATUS_MCIP		BIT_ULL(2)   /* machine check in progress */
#define MCG_STATUS_LMCES	BIT_ULL(3)   /* LMCE signaled */
#define MCG_STATUS_SEAM_NR	BIT_ULL(12)  /* Machine check inside SEAM non-root mode */

/* MCG_EXT_CTL register defines */
#define MCG_EXT_CTL_LMCE_EN	BIT_ULL(0) /* Enable LMCE */

/* MCi_STATUS register defines */
#define MCI_STATUS_VAL		BIT_ULL(63)  /* valid error */
#define MCI_STATUS_OVER		BIT_ULL(62)  /* previous errors lost */
#define MCI_STATUS_UC		BIT_ULL(61)  /* uncorrected error */
#define MCI_STATUS_EN		BIT_ULL(60)  /* error enabled */
#define MCI_STATUS_MISCV	BIT_ULL(59)  /* misc error reg. valid */
#define MCI_STATUS_ADDRV	BIT_ULL(58)  /* addr reg. valid */
#define MCI_STATUS_PCC		BIT_ULL(57)  /* processor context corrupt */
#define MCI_STATUS_S		BIT_ULL(56)  /* Signaled machine check */
#define MCI_STATUS_AR		BIT_ULL(55)  /* Action required */
#define MCI_STATUS_CEC_SHIFT	38           /* Corrected Error Count */
#define MCI_STATUS_CEC_MASK	GENMASK_ULL(52,38)
#define MCI_STATUS_CEC(c)	(((c) & MCI_STATUS_CEC_MASK) >> MCI_STATUS_CEC_SHIFT)
#define MCI_STATUS_MSCOD(m)	(((m) >> 16) & 0xffff)
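
/*
 * Illustrative sketch (not part of this header): a poller interested only
 * in valid, corrected errors might filter and decode MCi_STATUS like this:
 *
 *	if ((status & MCI_STATUS_VAL) && !(status & MCI_STATUS_UC)) {
 *		u16 cec   = MCI_STATUS_CEC(status);	// corrected error count
 *		u16 mscod = MCI_STATUS_MSCOD(status);	// model-specific code
 *		...
 *	}
 */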
|---|

/* AMD-specific bits */
#define MCI_STATUS_TCC		BIT_ULL(55)  /* Task context corrupt */
#define MCI_STATUS_SYNDV	BIT_ULL(53)  /* synd reg. valid */
#define MCI_STATUS_DEFERRED	BIT_ULL(44)  /* uncorrected error, deferred exception */
#define MCI_STATUS_POISON	BIT_ULL(43)  /* access poisonous data */
#define MCI_STATUS_SCRUB	BIT_ULL(40)  /* Error detected during scrub operation */

/*
 * The McaX field, if set, indicates that a given bank supports MCA extensions:
 *  - Deferred error interrupt type is specifiable by bank.
 *  - MCx_MISC0[BlkPtr] field indicates presence of extended MISC registers,
 *    but should not be used to determine MSR numbers.
 *  - TCC bit is present in MCx_STATUS.
 */
#define MCI_CONFIG_MCAX		0x1
#define MCI_CONFIG_FRUTEXT	BIT_ULL(9)
#define MCI_IPID_MCATYPE	0xFFFF0000
#define MCI_IPID_HWID		0xFFF
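
/*
 * Illustrative sketch (not part of this header), assuming the SMCA per-bank
 * MSR helpers defined further below: whether a bank implements the MCAX
 * extensions is read from its MCA_CONFIG register, e.g.:
 *
 *	u64 cfg;
 *
 *	rdmsrl(MSR_AMD64_SMCA_MCx_CONFIG(bank), cfg);
 *	if (cfg & MCI_CONFIG_MCAX)
 *		... bank supports the extensions listed above ...
 */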
|---|

/*
 * Note that the full MCACOD field of IA32_MCi_STATUS MSR is
 * bits 15:0.  But bit 12 is the 'F' bit, defined for corrected
 * errors to indicate that errors are being filtered by hardware.
 * We should mask out bit 12 when looking for specific signatures
 * of uncorrected errors - so the F bit is deliberately skipped
 * in this #define.
 */
#define MCACOD		  0xefff     /* MCA Error Code */

/* Architecturally defined codes from SDM Vol. 3B Chapter 15 */
#define MCACOD_SCRUB	0x00C0	/* 0xC0-0xCF Memory Scrubbing */
#define MCACOD_SCRUBMSK	0xeff0	/* Skip bit 12 ('F' bit) */
#define MCACOD_L3WB	0x017A	/* L3 Explicit Writeback */
#define MCACOD_DATA	0x0134	/* Data Load */
#define MCACOD_INSTR	0x0150	/* Instruction Fetch */
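
/*
 * Illustrative sketch (not part of this header): because MCACOD already
 * skips bit 12, comparing a masked status against one of the codes above
 * matches whether or not hardware set the 'F' bit, e.g.:
 *
 *	if ((m->status & MCACOD) == MCACOD_DATA)
 *		... data-load error signature ...
 *
 * Scrub errors occupy the range 0xC0-0xCF, hence the separate
 * MCACOD_SCRUBMSK mask:
 *
 *	if ((m->status & MCACOD_SCRUBMSK) == MCACOD_SCRUB)
 *		... memory-scrubbing error signature ...
 */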
|---|

/* MCi_MISC register defines */
#define MCI_MISC_ADDR_LSB(m)	((m) & 0x3f)
#define MCI_MISC_ADDR_MODE(m)	(((m) >> 6) & 7)
#define  MCI_MISC_ADDR_SEGOFF	0	/* segment offset */
#define  MCI_MISC_ADDR_LINEAR	1	/* linear address */
#define  MCI_MISC_ADDR_PHYS	2	/* physical address */
#define  MCI_MISC_ADDR_MEM	3	/* memory address */
#define  MCI_MISC_ADDR_GENERIC	7	/* generic */
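
/*
 * Illustrative sketch (not part of this header): MCi_MISC carries both the
 * address mode and the least significant valid address bit, so a reported
 * address is typically qualified along these lines:
 *
 *	if (MCI_MISC_ADDR_MODE(m->misc) == MCI_MISC_ADDR_PHYS) {
 *		u8 lsb = MCI_MISC_ADDR_LSB(m->misc);
 *
 *		// bits below 'lsb' are not meaningful for this address
 *		addr = m->addr & ~((1ULL << lsb) - 1);
 *	}
 */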
|---|

/* MCi_ADDR register defines */
#define MCI_ADDR_PHYSADDR	GENMASK_ULL(boot_cpu_data.x86_phys_bits - 1, 0)

/* CTL2 register defines */
#define MCI_CTL2_CMCI_EN		BIT_ULL(30)
#define MCI_CTL2_CMCI_THRESHOLD_MASK	0x7fffULL

#define MCJ_CTX_MASK		3
#define MCJ_CTX(flags)		((flags) & MCJ_CTX_MASK)
#define MCJ_CTX_RANDOM		0    /* inject context: random */
#define MCJ_CTX_PROCESS		0x1  /* inject context: process */
#define MCJ_CTX_IRQ		0x2  /* inject context: IRQ */
#define MCJ_NMI_BROADCAST	0x4  /* do NMI broadcasting */
#define MCJ_EXCEPTION		0x8  /* raise as exception */
#define MCJ_IRQ_BROADCAST	0x10 /* do IRQ broadcasting */

#define MCE_OVERFLOW 0		/* bit 0 in flags means overflow */

#define MCE_LOG_MIN_LEN 32U
#define MCE_LOG_SIGNATURE	"MACHINECHECK"

/* AMD Scalable MCA */
#define MSR_AMD64_SMCA_MC0_CTL		0xc0002000
#define MSR_AMD64_SMCA_MC0_STATUS	0xc0002001
#define MSR_AMD64_SMCA_MC0_ADDR		0xc0002002
#define MSR_AMD64_SMCA_MC0_MISC0	0xc0002003
#define MSR_AMD64_SMCA_MC0_CONFIG	0xc0002004
#define MSR_AMD64_SMCA_MC0_IPID		0xc0002005
#define MSR_AMD64_SMCA_MC0_SYND		0xc0002006
#define MSR_AMD64_SMCA_MC0_DESTAT	0xc0002008
#define MSR_AMD64_SMCA_MC0_DEADDR	0xc0002009
#define MSR_AMD64_SMCA_MC0_MISC1	0xc000200a
/* Registers MISC2 to MISC4 are at offsets B to D. */
#define MSR_AMD64_SMCA_MC0_SYND1	0xc000200e
#define MSR_AMD64_SMCA_MC0_SYND2	0xc000200f
#define MSR_AMD64_SMCA_MCx_CTL(x)	(MSR_AMD64_SMCA_MC0_CTL + 0x10*(x))
#define MSR_AMD64_SMCA_MCx_STATUS(x)	(MSR_AMD64_SMCA_MC0_STATUS + 0x10*(x))
#define MSR_AMD64_SMCA_MCx_ADDR(x)	(MSR_AMD64_SMCA_MC0_ADDR + 0x10*(x))
#define MSR_AMD64_SMCA_MCx_MISC(x)	(MSR_AMD64_SMCA_MC0_MISC0 + 0x10*(x))
#define MSR_AMD64_SMCA_MCx_CONFIG(x)	(MSR_AMD64_SMCA_MC0_CONFIG + 0x10*(x))
#define MSR_AMD64_SMCA_MCx_IPID(x)	(MSR_AMD64_SMCA_MC0_IPID + 0x10*(x))
#define MSR_AMD64_SMCA_MCx_SYND(x)	(MSR_AMD64_SMCA_MC0_SYND + 0x10*(x))
#define MSR_AMD64_SMCA_MCx_DESTAT(x)	(MSR_AMD64_SMCA_MC0_DESTAT + 0x10*(x))
#define MSR_AMD64_SMCA_MCx_DEADDR(x)	(MSR_AMD64_SMCA_MC0_DEADDR + 0x10*(x))
#define MSR_AMD64_SMCA_MCx_MISCy(x, y)	((MSR_AMD64_SMCA_MC0_MISC1 + (y)) + (0x10*(x)))
#define MSR_AMD64_SMCA_MCx_SYND1(x)	(MSR_AMD64_SMCA_MC0_SYND1 + 0x10*(x))
#define MSR_AMD64_SMCA_MCx_SYND2(x)	(MSR_AMD64_SMCA_MC0_SYND2 + 0x10*(x))

#define XEC(x, mask)			(((x) >> 16) & (mask))
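
/*
 * Illustrative sketch (not part of this header): SMCA banks are spaced
 * 16 MSRs apart, so the MCx_*() helpers above resolve a per-bank register
 * directly, e.g. for bank 5:
 *
 *	u64 status, synd;
 *
 *	rdmsrl(MSR_AMD64_SMCA_MCx_STATUS(5), status);		// MSR 0xc0002051
 *	if (status & MCI_STATUS_SYNDV)
 *		rdmsrl(MSR_AMD64_SMCA_MCx_SYND(5), synd);	// MSR 0xc0002056
 */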
|---|

/* mce.kflags flag bits for logging etc. */
#define	MCE_HANDLED_CEC		BIT_ULL(0)
#define	MCE_HANDLED_UC		BIT_ULL(1)
#define	MCE_HANDLED_EXTLOG	BIT_ULL(2)
#define	MCE_HANDLED_NFIT	BIT_ULL(3)
#define	MCE_HANDLED_EDAC	BIT_ULL(4)
#define	MCE_HANDLED_MCELOG	BIT_ULL(5)

/*
 * Indicates an MCE which has happened in kernel space but from
 * which the kernel can recover simply by executing fixup_exception()
 * so that an error is returned to the caller of the function that
 * hit the machine check.
 */
#define MCE_IN_KERNEL_RECOV	BIT_ULL(6)

/*
 * Indicates an MCE that happened in kernel space while copying data
 * from user. In this case fixup_exception() gets the kernel to the
 * error exit for the copy function. Machine check handler can then
 * treat it like a fault taken in user mode.
 */
#define MCE_IN_KERNEL_COPYIN	BIT_ULL(7)
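
/*
 * Illustrative sketch (not part of this header): consumers of a logged
 * record can tell the two in-kernel recovery cases above apart via
 * mce.kflags, e.g.:
 *
 *	if (m->kflags & MCE_IN_KERNEL_COPYIN)
 *		... fault hit while copying from user space ...
 *	else if (m->kflags & MCE_IN_KERNEL_RECOV)
 *		... kernel recovered via an exception fixup ...
 */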
|---|

/*
 * This structure contains all data related to the MCE log.  Also
 * carries a signature to make it easier to find from external
 * debugging tools.  Each entry is only valid when its finished flag
 * is set.
 */
struct mce_log_buffer {
	char signature[12]; /* "MACHINECHECK" */
	unsigned len;	    /* = elements in .entry[] */
	unsigned next;
	unsigned flags;
	unsigned recordlen;	/* length of struct mce */
	struct mce entry[];
};
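
/*
 * Illustrative sketch (not part of this header), relying on the 'finished'
 * flag mentioned in the comment above: a reader scanning the buffer would
 * only trust completed records, e.g.:
 *
 *	for (i = 0; i < log->len; i++) {
 *		struct mce *m = &log->entry[i];
 *
 *		if (m->finished)
 *			... consume record ...
 *	}
 */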
|---|

/* Highest last */
enum mce_notifier_prios {
	MCE_PRIO_LOWEST,
	MCE_PRIO_MCELOG,
	MCE_PRIO_EDAC,
	MCE_PRIO_NFIT,
	MCE_PRIO_EXTLOG,
	MCE_PRIO_UC,
	MCE_PRIO_EARLY,
	MCE_PRIO_CEC,
	MCE_PRIO_HIGHEST = MCE_PRIO_CEC
};

/**
 * struct mce_hw_err - Hardware Error Record.
 * @m:		Machine Check record.
 * @vendor:	Vendor-specific error information.
 *
 * Vendor-specific fields should not be added to struct mce. Instead, vendors
 * should export their vendor-specific data through their structure in the
 * vendor union below.
 *
 * AMD's vendor data is parsed by error decoding tools for supplemental error
 * information. Thus, current offsets of existing fields must be maintained.
 * Only add new fields at the end of AMD's vendor structure.
 */
struct mce_hw_err {
	struct mce m;

	union vendor_info {
		struct {
			u64 synd1;		/* MCA_SYND1 MSR */
			u64 synd2;		/* MCA_SYND2 MSR */
		} amd;
	} vendor;
};

#define	to_mce_hw_err(mce) container_of(mce, struct mce_hw_err, m)

struct notifier_block;
extern void mce_register_decode_chain(struct notifier_block *nb);
extern void mce_unregister_decode_chain(struct notifier_block *nb);
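
/*
 * Illustrative sketch (not part of this header), assuming the decode chain
 * still passes a struct mce pointer as the notifier data: a decoder hooks
 * into the chain with one of the priorities above and can reach the
 * enclosing record via to_mce_hw_err(), e.g.:
 *
 *	static int my_decode_mce(struct notifier_block *nb, unsigned long val,
 *				 void *data)
 *	{
 *		struct mce *m = (struct mce *)data;
 *		struct mce_hw_err *err = to_mce_hw_err(m);
 *
 *		... decode, then mark the record as handled ...
 *		m->kflags |= MCE_HANDLED_EDAC;
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_mce_dec_nb = {
 *		.notifier_call	= my_decode_mce,
 *		.priority	= MCE_PRIO_EDAC,
 *	};
 *
 *	mce_register_decode_chain(&my_mce_dec_nb);
 */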
|---|

#include <linux/percpu.h>
#include <linux/atomic.h>

extern int mce_p5_enabled;

#ifdef CONFIG_ARCH_HAS_COPY_MC
extern void enable_copy_mc_fragile(void);
unsigned long __must_check copy_mc_fragile(void *dst, const void *src, unsigned cnt);
#else
static inline void enable_copy_mc_fragile(void)
{
}
#endif

struct cper_ia_proc_ctx;

#ifdef CONFIG_X86_MCE
int mcheck_init(void);
void mca_bsp_init(struct cpuinfo_x86 *c);
void mcheck_cpu_init(struct cpuinfo_x86 *c);
void mcheck_cpu_clear(struct cpuinfo_x86 *c);
int apei_smca_report_x86_error(struct cper_ia_proc_ctx *ctx_info,
			       u64 lapic_id);
#else
static inline int mcheck_init(void) { return 0; }
static inline void mca_bsp_init(struct cpuinfo_x86 *c) {}
static inline void mcheck_cpu_init(struct cpuinfo_x86 *c) {}
static inline void mcheck_cpu_clear(struct cpuinfo_x86 *c) {}
static inline int apei_smca_report_x86_error(struct cper_ia_proc_ctx *ctx_info,
					     u64 lapic_id) { return -EINVAL; }
#endif

void mce_prep_record(struct mce_hw_err *err);
void mce_log(struct mce_hw_err *err);
DECLARE_PER_CPU(struct device *, mce_device);

/* Maximum number of MCA banks per CPU. */
#define MAX_NR_BANKS 64

#ifdef CONFIG_X86_MCE_INTEL
void mce_intel_feature_init(struct cpuinfo_x86 *c);
void mce_intel_feature_clear(struct cpuinfo_x86 *c);
void cmci_clear(void);
void cmci_reenable(void);
void cmci_rediscover(void);
void cmci_recheck(void);
#else
static inline void mce_intel_feature_init(struct cpuinfo_x86 *c) { }
static inline void mce_intel_feature_clear(struct cpuinfo_x86 *c) { }
static inline void cmci_clear(void) {}
static inline void cmci_reenable(void) {}
static inline void cmci_rediscover(void) {}
static inline void cmci_recheck(void) {}
#endif

bool mce_available(struct cpuinfo_x86 *c);
bool mce_is_memory_error(struct mce *m);
bool mce_is_correctable(struct mce *m);
bool mce_usable_address(struct mce *m);

DECLARE_PER_CPU(unsigned, mce_exception_count);
DECLARE_PER_CPU(unsigned, mce_poll_count);

typedef DECLARE_BITMAP(mce_banks_t, MAX_NR_BANKS);
DECLARE_PER_CPU(mce_banks_t, mce_poll_banks);

enum mcp_flags {
	MCP_TIMESTAMP	= BIT(0),	/* log time stamp */
	MCP_UC		= BIT(1),	/* log uncorrected errors */
	MCP_QUEUE_LOG	= BIT(2),	/* only queue to genpool */
};

void machine_check_poll(enum mcp_flags flags, mce_banks_t *b);
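
/*
 * Illustrative sketch (not part of this header): a periodic poll of this
 * CPU's banks, with timestamped records, could look like:
 *
 *	if (mce_available(this_cpu_ptr(&cpu_info)))
 *		machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_poll_banks));
 */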
|---|

DECLARE_PER_CPU(struct mce, injectm);

/* Disable CMCI/polling for MCA bank claimed by firmware */
extern void mce_disable_bank(int bank);

/*
 * Exception handler
 */
void do_machine_check(struct pt_regs *pt_regs);

/*
 * Threshold handler
 */
extern void (*mce_threshold_vector)(void);

/* Deferred error interrupt handler */
extern void (*deferred_error_int_vector)(void);

/*
 * Used by APEI to report memory error via /dev/mcelog
 */

struct cper_sec_mem_err;
extern void apei_mce_report_mem_error(int corrected,
				      struct cper_sec_mem_err *mem_err);

/*
 * Enumerate new IP types and HWID values in AMD processors which support
 * Scalable MCA.
 */
#ifdef CONFIG_X86_MCE_AMD

/* These may be used by multiple smca_hwid_mcatypes */
enum smca_bank_types {
	SMCA_LS = 0,	/* Load Store */
	SMCA_LS_V2,
	SMCA_IF,	/* Instruction Fetch */
	SMCA_L2_CACHE,	/* L2 Cache */
	SMCA_DE,	/* Decoder Unit */
	SMCA_RESERVED,	/* Reserved */
	SMCA_EX,	/* Execution Unit */
	SMCA_FP,	/* Floating Point */
	SMCA_L3_CACHE,	/* L3 Cache */
	SMCA_CS,	/* Coherent Slave */
	SMCA_CS_V2,
	SMCA_PIE,	/* Power, Interrupts, etc. */
	SMCA_UMC,	/* Unified Memory Controller */
	SMCA_UMC_V2,
	SMCA_MA_LLC,	/* Memory Attached Last Level Cache */
	SMCA_PB,	/* Parameter Block */
	SMCA_PSP,	/* Platform Security Processor */
	SMCA_PSP_V2,
	SMCA_SMU,	/* System Management Unit */
	SMCA_SMU_V2,
	SMCA_MP5,	/* Microprocessor 5 Unit */
	SMCA_MPDMA,	/* MPDMA Unit */
	SMCA_NBIO,	/* Northbridge IO Unit */
	SMCA_PCIE,	/* PCI Express Unit */
	SMCA_PCIE_V2,
	SMCA_XGMI_PCS,	/* xGMI PCS Unit */
	SMCA_NBIF,	/* NBIF Unit */
	SMCA_SHUB,	/* System HUB Unit */
	SMCA_SATA,	/* SATA Unit */
	SMCA_USB,	/* USB Unit */
	SMCA_USR_DP,	/* Ultra Short Reach Data Plane Controller */
	SMCA_USR_CP,	/* Ultra Short Reach Control Plane Controller */
	SMCA_GMI_PCS,	/* GMI PCS Unit */
	SMCA_XGMI_PHY,	/* xGMI PHY Unit */
	SMCA_WAFL_PHY,	/* WAFL PHY Unit */
	SMCA_GMI_PHY,	/* GMI PHY Unit */
	N_SMCA_BANK_TYPES
};

extern bool amd_mce_is_memory_error(struct mce *m);

void mce_amd_feature_init(struct cpuinfo_x86 *c);
enum smca_bank_types smca_get_bank_type(unsigned int cpu, unsigned int bank);
#else
static inline bool amd_mce_is_memory_error(struct mce *m)		{ return false; }
static inline void mce_amd_feature_init(struct cpuinfo_x86 *c)		{ }
#endif

unsigned long copy_mc_fragile_handle_tail(char *to, char *from, unsigned len);

#endif /* _ASM_X86_MCE_H */