| 1 | // SPDX-License-Identifier: GPL-2.0-only | 
|---|
| 2 | /* | 
|---|
| 3 | * Per core/cpu state | 
|---|
| 4 | * | 
|---|
| 5 | * Used to coordinate shared registers between HT threads or | 
|---|
| 6 | * among events on a single PMU. | 
|---|
| 7 | */ | 
|---|
| 8 |  | 
|---|
| 9 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | 
|---|
| 10 |  | 
|---|
| 11 | #include <linux/stddef.h> | 
|---|
| 12 | #include <linux/types.h> | 
|---|
| 13 | #include <linux/init.h> | 
|---|
| 14 | #include <linux/slab.h> | 
|---|
| 15 | #include <linux/export.h> | 
|---|
| 16 | #include <linux/nmi.h> | 
|---|
| 17 | #include <linux/kvm_host.h> | 
|---|
| 18 |  | 
|---|
| 19 | #include <asm/cpufeature.h> | 
|---|
| 20 | #include <asm/debugreg.h> | 
|---|
| 21 | #include <asm/hardirq.h> | 
|---|
| 22 | #include <asm/intel-family.h> | 
|---|
| 23 | #include <asm/intel_pt.h> | 
|---|
| 24 | #include <asm/apic.h> | 
|---|
| 25 | #include <asm/cpu_device_id.h> | 
|---|
| 26 | #include <asm/msr.h> | 
|---|
| 27 |  | 
|---|
| 28 | #include "../perf_event.h" | 
|---|
| 29 |  | 
|---|
| 30 | /* | 
|---|
| 31 | * Intel PerfMon, used on Core and later. | 
|---|
| 32 | */ | 
|---|
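|  | /* | 
|---|
|  | * Each entry packs the umask into bits 15:8 and the event select into bits | 
|---|
|  | * 7:0; 0x0300 is a pseudo-encoding resolved onto the CPU_CLK_UNHALTED.REF | 
|---|
|  | * fixed counter (see the FIXED_EVENT_CONSTRAINT(0x0300, 2) entries below). | 
|---|
|  | */ | 
|---|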
| 33 | static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly = | 
|---|
| 34 | { | 
|---|
| 35 | [PERF_COUNT_HW_CPU_CYCLES]		= 0x003c, | 
|---|
| 36 | [PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0, | 
|---|
| 37 | [PERF_COUNT_HW_CACHE_REFERENCES]	= 0x4f2e, | 
|---|
| 38 | [PERF_COUNT_HW_CACHE_MISSES]		= 0x412e, | 
|---|
| 39 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4, | 
|---|
| 40 | [PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5, | 
|---|
| 41 | [PERF_COUNT_HW_BUS_CYCLES]		= 0x013c, | 
|---|
| 42 | [PERF_COUNT_HW_REF_CPU_CYCLES]		= 0x0300, /* pseudo-encoding */ | 
|---|
| 43 | }; | 
|---|
| 44 |  | 
|---|
| 45 | static struct event_constraint intel_core_event_constraints[] __read_mostly = | 
|---|
| 46 | { | 
|---|
| 47 | INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */ | 
|---|
| 48 | INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */ | 
|---|
| 49 | INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */ | 
|---|
| 50 | INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */ | 
|---|
| 51 | INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */ | 
|---|
| 52 | INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */ | 
|---|
| 53 | EVENT_CONSTRAINT_END | 
|---|
| 54 | }; | 
|---|
| 55 |  | 
|---|
| 56 | static struct event_constraint intel_core2_event_constraints[] __read_mostly = | 
|---|
| 57 | { | 
|---|
| 58 | FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ | 
|---|
| 59 | FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ | 
|---|
| 60 | FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */ | 
|---|
| 61 | INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */ | 
|---|
| 62 | INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */ | 
|---|
| 63 | INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */ | 
|---|
| 64 | INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */ | 
|---|
| 65 | INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */ | 
|---|
| 66 | INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */ | 
|---|
| 67 | INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */ | 
|---|
| 68 | INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */ | 
|---|
| 69 | INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */ | 
|---|
| 70 | INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */ | 
|---|
| 71 | EVENT_CONSTRAINT_END | 
|---|
| 72 | }; | 
|---|
| 73 |  | 
|---|
| 74 | static struct event_constraint intel_nehalem_event_constraints[] __read_mostly = | 
|---|
| 75 | { | 
|---|
| 76 | FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ | 
|---|
| 77 | FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ | 
|---|
| 78 | FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */ | 
|---|
| 79 | INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */ | 
|---|
| 80 | INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */ | 
|---|
| 81 | INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */ | 
|---|
| 82 | INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */ | 
|---|
| 83 | INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */ | 
|---|
| 84 | INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */ | 
|---|
| 85 | INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */ | 
|---|
| 86 | INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */ | 
|---|
| 87 | EVENT_CONSTRAINT_END | 
|---|
| 88 | }; | 
|---|
| 89 |  | 
|---|
| 90 | static struct extra_reg intel_nehalem_extra_regs[] __read_mostly = | 
|---|
| 91 | { | 
|---|
| 92 | /* must define OFFCORE_RSP_X first, see intel_fixup_er() */ | 
|---|
| 93 | INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0), | 
|---|
| 94 | INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b), | 
|---|
| 95 | EVENT_EXTRA_END | 
|---|
| 96 | }; | 
|---|
| 97 |  | 
|---|
| 98 | static struct event_constraint intel_westmere_event_constraints[] __read_mostly = | 
|---|
| 99 | { | 
|---|
| 100 | FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ | 
|---|
| 101 | FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ | 
|---|
| 102 | FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */ | 
|---|
| 103 | INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */ | 
|---|
| 104 | INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */ | 
|---|
| 105 | INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */ | 
|---|
| 106 | INTEL_EVENT_CONSTRAINT(0xb3, 0x1), /* SNOOPQ_REQUEST_OUTSTANDING */ | 
|---|
| 107 | EVENT_CONSTRAINT_END | 
|---|
| 108 | }; | 
|---|
| 109 |  | 
|---|
| 110 | static struct event_constraint intel_snb_event_constraints[] __read_mostly = | 
|---|
| 111 | { | 
|---|
| 112 | FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ | 
|---|
| 113 | FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ | 
|---|
| 114 | FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */ | 
|---|
| 115 | INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */ | 
|---|
| 116 | INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */ | 
|---|
| 117 | INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */ | 
|---|
| 118 | INTEL_UEVENT_CONSTRAINT(0x06a3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */ | 
|---|
| 119 | INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */ | 
|---|
| 120 | INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */ | 
|---|
| 121 | INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */ | 
|---|
| 122 | INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */ | 
|---|
| 123 | INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */ | 
|---|
| 124 |  | 
|---|
| 125 | /* | 
|---|
| 126 | * When HT is off these events can only run on the bottom 4 counters | 
|---|
| 127 | * When HT is on, they are impacted by the HT bug and require EXCL access | 
|---|
| 128 | */ | 
|---|
| 129 | INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */ | 
|---|
| 130 | INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */ | 
|---|
| 131 | INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */ | 
|---|
| 132 | INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */ | 
|---|
| 133 |  | 
|---|
| 134 | EVENT_CONSTRAINT_END | 
|---|
| 135 | }; | 
|---|
| 136 |  | 
|---|
| 137 | static struct event_constraint intel_ivb_event_constraints[] __read_mostly = | 
|---|
| 138 | { | 
|---|
| 139 | FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ | 
|---|
| 140 | FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ | 
|---|
| 141 | FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */ | 
|---|
| 142 | INTEL_UEVENT_CONSTRAINT(0x0148, 0x4), /* L1D_PEND_MISS.PENDING */ | 
|---|
| 143 | INTEL_UEVENT_CONSTRAINT(0x0279, 0xf), /* IDQ.EMPTY */ | 
|---|
| 144 | INTEL_UEVENT_CONSTRAINT(0x019c, 0xf), /* IDQ_UOPS_NOT_DELIVERED.CORE */ | 
|---|
| 145 | INTEL_UEVENT_CONSTRAINT(0x02a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_LDM_PENDING */ | 
|---|
| 146 | INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */ | 
|---|
| 147 | INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */ | 
|---|
| 148 | INTEL_UEVENT_CONSTRAINT(0x06a3, 0xf), /* CYCLE_ACTIVITY.STALLS_LDM_PENDING */ | 
|---|
| 149 | INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */ | 
|---|
| 150 | INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */ | 
|---|
| 151 | INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */ | 
|---|
| 152 |  | 
|---|
| 153 | /* | 
|---|
| 154 | * When HT is off these events can only run on the bottom 4 counters | 
|---|
| 155 | * When HT is on, they are impacted by the HT bug and require EXCL access | 
|---|
| 156 | */ | 
|---|
| 157 | INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */ | 
|---|
| 158 | INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */ | 
|---|
| 159 | INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */ | 
|---|
| 160 | INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */ | 
|---|
| 161 |  | 
|---|
| 162 | EVENT_CONSTRAINT_END | 
|---|
| 163 | }; | 
|---|
| 164 |  | 
|---|
| 165 | static struct extra_reg intel_westmere_extra_regs[] __read_mostly = | 
|---|
| 166 | { | 
|---|
| 167 | /* must define OFFCORE_RSP_X first, see intel_fixup_er() */ | 
|---|
| 168 | INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0), | 
|---|
| 169 | INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1), | 
|---|
| 170 | INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b), | 
|---|
| 171 | EVENT_EXTRA_END | 
|---|
| 172 | }; | 
|---|
| 173 |  | 
|---|
| 174 | static struct event_constraint intel_v1_event_constraints[] __read_mostly = | 
|---|
| 175 | { | 
|---|
| 176 | EVENT_CONSTRAINT_END | 
|---|
| 177 | }; | 
|---|
| 178 |  | 
|---|
| 179 | static struct event_constraint intel_gen_event_constraints[] __read_mostly = | 
|---|
| 180 | { | 
|---|
| 181 | FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ | 
|---|
| 182 | FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ | 
|---|
| 183 | FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */ | 
|---|
| 184 | EVENT_CONSTRAINT_END | 
|---|
| 185 | }; | 
|---|
| 186 |  | 
|---|
| 187 | static struct event_constraint intel_v5_gen_event_constraints[] __read_mostly = | 
|---|
| 188 | { | 
|---|
| 189 | FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ | 
|---|
| 190 | FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ | 
|---|
| 191 | FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */ | 
|---|
| 192 | FIXED_EVENT_CONSTRAINT(0x0400, 3), /* SLOTS */ | 
|---|
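|  | /* | 
|---|
|  | * Fixed counters 4-15 below use the pseudo-encodings 0x0500-0x1000; no | 
|---|
|  | * architectural event names are defined for them in this table. | 
|---|
|  | */ | 
|---|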
| 193 | FIXED_EVENT_CONSTRAINT(0x0500, 4), | 
|---|
| 194 | FIXED_EVENT_CONSTRAINT(0x0600, 5), | 
|---|
| 195 | FIXED_EVENT_CONSTRAINT(0x0700, 6), | 
|---|
| 196 | FIXED_EVENT_CONSTRAINT(0x0800, 7), | 
|---|
| 197 | FIXED_EVENT_CONSTRAINT(0x0900, 8), | 
|---|
| 198 | FIXED_EVENT_CONSTRAINT(0x0a00, 9), | 
|---|
| 199 | FIXED_EVENT_CONSTRAINT(0x0b00, 10), | 
|---|
| 200 | FIXED_EVENT_CONSTRAINT(0x0c00, 11), | 
|---|
| 201 | FIXED_EVENT_CONSTRAINT(0x0d00, 12), | 
|---|
| 202 | FIXED_EVENT_CONSTRAINT(0x0e00, 13), | 
|---|
| 203 | FIXED_EVENT_CONSTRAINT(0x0f00, 14), | 
|---|
| 204 | FIXED_EVENT_CONSTRAINT(0x1000, 15), | 
|---|
| 205 | EVENT_CONSTRAINT_END | 
|---|
| 206 | }; | 
|---|
| 207 |  | 
|---|
| 208 | static struct event_constraint intel_slm_event_constraints[] __read_mostly = | 
|---|
| 209 | { | 
|---|
| 210 | FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ | 
|---|
| 211 | FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ | 
|---|
| 212 | FIXED_EVENT_CONSTRAINT(0x0300, 2), /* pseudo CPU_CLK_UNHALTED.REF */ | 
|---|
| 213 | EVENT_CONSTRAINT_END | 
|---|
| 214 | }; | 
|---|
| 215 |  | 
|---|
| 216 | static struct event_constraint intel_grt_event_constraints[] __read_mostly = { | 
|---|
| 217 | FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ | 
|---|
| 218 | FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ | 
|---|
| 219 | FIXED_EVENT_CONSTRAINT(0x0300, 2), /* pseudo CPU_CLK_UNHALTED.REF */ | 
|---|
| 220 | FIXED_EVENT_CONSTRAINT(0x013c, 2), /* CPU_CLK_UNHALTED.REF_TSC_P */ | 
|---|
| 221 | EVENT_CONSTRAINT_END | 
|---|
| 222 | }; | 
|---|
| 223 |  | 
|---|
| 224 | static struct event_constraint intel_skt_event_constraints[] __read_mostly = { | 
|---|
| 225 | FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ | 
|---|
| 226 | FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ | 
|---|
| 227 | FIXED_EVENT_CONSTRAINT(0x0300, 2), /* pseudo CPU_CLK_UNHALTED.REF */ | 
|---|
| 228 | FIXED_EVENT_CONSTRAINT(0x013c, 2), /* CPU_CLK_UNHALTED.REF_TSC_P */ | 
|---|
| 229 | FIXED_EVENT_CONSTRAINT(0x0073, 4), /* TOPDOWN_BAD_SPECULATION.ALL */ | 
|---|
| 230 | FIXED_EVENT_CONSTRAINT(0x019c, 5), /* TOPDOWN_FE_BOUND.ALL */ | 
|---|
| 231 | FIXED_EVENT_CONSTRAINT(0x02c2, 6), /* TOPDOWN_RETIRING.ALL */ | 
|---|
| 232 | EVENT_CONSTRAINT_END | 
|---|
| 233 | }; | 
|---|
| 234 |  | 
|---|
| 235 | static struct event_constraint intel_skl_event_constraints[] = { | 
|---|
| 236 | FIXED_EVENT_CONSTRAINT(0x00c0, 0),	/* INST_RETIRED.ANY */ | 
|---|
| 237 | FIXED_EVENT_CONSTRAINT(0x003c, 1),	/* CPU_CLK_UNHALTED.CORE */ | 
|---|
| 238 | FIXED_EVENT_CONSTRAINT(0x0300, 2),	/* CPU_CLK_UNHALTED.REF */ | 
|---|
| 239 | INTEL_UEVENT_CONSTRAINT(0x1c0, 0x2),	/* INST_RETIRED.PREC_DIST */ | 
|---|
| 240 |  | 
|---|
| 241 | /* | 
|---|
| 242 | * when HT is off, these can only run on the bottom 4 counters | 
|---|
| 243 | */ | 
|---|
| 244 | INTEL_EVENT_CONSTRAINT(0xd0, 0xf),	/* MEM_INST_RETIRED.* */ | 
|---|
| 245 | INTEL_EVENT_CONSTRAINT(0xd1, 0xf),	/* MEM_LOAD_RETIRED.* */ | 
|---|
| 246 | INTEL_EVENT_CONSTRAINT(0xd2, 0xf),	/* MEM_LOAD_L3_HIT_RETIRED.* */ | 
|---|
| 247 | INTEL_EVENT_CONSTRAINT(0xcd, 0xf),	/* MEM_TRANS_RETIRED.* */ | 
|---|
| 248 | INTEL_EVENT_CONSTRAINT(0xc6, 0xf),	/* FRONTEND_RETIRED.* */ | 
|---|
| 249 |  | 
|---|
| 250 | EVENT_CONSTRAINT_END | 
|---|
| 251 | }; | 
|---|
| 252 |  | 
|---|
| 253 | static struct extra_reg intel_knl_extra_regs[] __read_mostly = { | 
|---|
| 254 | INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x799ffbb6e7ull, RSP_0), | 
|---|
| 255 | INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x399ffbffe7ull, RSP_1), | 
|---|
| 256 | EVENT_EXTRA_END | 
|---|
| 257 | }; | 
|---|
| 258 |  | 
|---|
| 259 | static struct extra_reg intel_snb_extra_regs[] __read_mostly = { | 
|---|
| 260 | /* must define OFFCORE_RSP_X first, see intel_fixup_er() */ | 
|---|
| 261 | INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0), | 
|---|
| 262 | INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1), | 
|---|
| 263 | INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd), | 
|---|
| 264 | EVENT_EXTRA_END | 
|---|
| 265 | }; | 
|---|
| 266 |  | 
|---|
| 267 | static struct extra_reg intel_snbep_extra_regs[] __read_mostly = { | 
|---|
| 268 | /* must define OFFCORE_RSP_X first, see intel_fixup_er() */ | 
|---|
| 269 | INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0), | 
|---|
| 270 | INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1), | 
|---|
| 271 | INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd), | 
|---|
| 272 | EVENT_EXTRA_END | 
|---|
| 273 | }; | 
|---|
| 274 |  | 
|---|
| 275 | static struct extra_reg intel_skl_extra_regs[] __read_mostly = { | 
|---|
| 276 | INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0), | 
|---|
| 277 | INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1), | 
|---|
| 278 | INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd), | 
|---|
| 279 | /* | 
|---|
| 280 | * Note that the low 8 bits of the eventsel code do not form a contiguous | 
|---|
| 281 | * field; they contain some bits that #GP when set. These are masked out. | 
|---|
| 282 | */ | 
|---|
| 283 | INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE), | 
|---|
| 284 | EVENT_EXTRA_END | 
|---|
| 285 | }; | 
|---|
| 286 |  | 
|---|
| 287 | static struct event_constraint intel_icl_event_constraints[] = { | 
|---|
| 288 | FIXED_EVENT_CONSTRAINT(0x00c0, 0),	/* INST_RETIRED.ANY */ | 
|---|
| 289 | FIXED_EVENT_CONSTRAINT(0x01c0, 0),	/* old INST_RETIRED.PREC_DIST */ | 
|---|
| 290 | FIXED_EVENT_CONSTRAINT(0x0100, 0),	/* INST_RETIRED.PREC_DIST */ | 
|---|
| 291 | FIXED_EVENT_CONSTRAINT(0x003c, 1),	/* CPU_CLK_UNHALTED.CORE */ | 
|---|
| 292 | FIXED_EVENT_CONSTRAINT(0x0300, 2),	/* CPU_CLK_UNHALTED.REF */ | 
|---|
| 293 | FIXED_EVENT_CONSTRAINT(0x0400, 3),	/* SLOTS */ | 
|---|
| 294 | METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_RETIRING, 0), | 
|---|
| 295 | METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BAD_SPEC, 1), | 
|---|
| 296 | METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FE_BOUND, 2), | 
|---|
| 297 | METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BE_BOUND, 3), | 
|---|
| 298 | INTEL_EVENT_CONSTRAINT_RANGE(0x03, 0x0a, 0xf), | 
|---|
| 299 | INTEL_EVENT_CONSTRAINT_RANGE(0x1f, 0x28, 0xf), | 
|---|
| 300 | INTEL_EVENT_CONSTRAINT(0x32, 0xf),	/* SW_PREFETCH_ACCESS.* */ | 
|---|
| 301 | INTEL_EVENT_CONSTRAINT_RANGE(0x48, 0x56, 0xf), | 
|---|
| 302 | INTEL_EVENT_CONSTRAINT_RANGE(0x60, 0x8b, 0xf), | 
|---|
| 303 | INTEL_UEVENT_CONSTRAINT(0x04a3, 0xff),  /* CYCLE_ACTIVITY.STALLS_TOTAL */ | 
|---|
| 304 | INTEL_UEVENT_CONSTRAINT(0x10a3, 0xff),  /* CYCLE_ACTIVITY.CYCLES_MEM_ANY */ | 
|---|
| 305 | INTEL_UEVENT_CONSTRAINT(0x14a3, 0xff),  /* CYCLE_ACTIVITY.STALLS_MEM_ANY */ | 
|---|
| 306 | INTEL_EVENT_CONSTRAINT(0xa3, 0xf),      /* CYCLE_ACTIVITY.* */ | 
|---|
| 307 | INTEL_EVENT_CONSTRAINT_RANGE(0xa8, 0xb0, 0xf), | 
|---|
| 308 | INTEL_EVENT_CONSTRAINT_RANGE(0xb7, 0xbd, 0xf), | 
|---|
| 309 | INTEL_EVENT_CONSTRAINT_RANGE(0xd0, 0xe6, 0xf), | 
|---|
| 310 | INTEL_EVENT_CONSTRAINT(0xef, 0xf), | 
|---|
| 311 | INTEL_EVENT_CONSTRAINT_RANGE(0xf0, 0xf4, 0xf), | 
|---|
| 312 | EVENT_CONSTRAINT_END | 
|---|
| 313 | }; | 
|---|
| 314 |  | 
|---|
| 315 | static struct extra_reg intel_icl_extra_regs[] __read_mostly = { | 
|---|
| 316 | INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffffbfffull, RSP_0), | 
|---|
| 317 | INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffffbfffull, RSP_1), | 
|---|
| 318 | INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd), | 
|---|
| 319 | INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE), | 
|---|
| 320 | EVENT_EXTRA_END | 
|---|
| 321 | }; | 
|---|
| 322 |  | 
|---|
| 323 | static struct extra_reg intel_glc_extra_regs[] __read_mostly = { | 
|---|
| 324 | INTEL_UEVENT_EXTRA_REG(0x012a, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0), | 
|---|
| 325 | INTEL_UEVENT_EXTRA_REG(0x012b, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1), | 
|---|
| 326 | INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd), | 
|---|
| 327 | INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff1f, FE), | 
|---|
| 328 | INTEL_UEVENT_EXTRA_REG(0x40ad, MSR_PEBS_FRONTEND, 0x7, FE), | 
|---|
| 329 | INTEL_UEVENT_EXTRA_REG(0x04c2, MSR_PEBS_FRONTEND, 0x8, FE), | 
|---|
| 330 | EVENT_EXTRA_END | 
|---|
| 331 | }; | 
|---|
| 332 |  | 
|---|
| 333 | static struct event_constraint intel_glc_event_constraints[] = { | 
|---|
| 334 | FIXED_EVENT_CONSTRAINT(0x00c0, 0),	/* INST_RETIRED.ANY */ | 
|---|
| 335 | FIXED_EVENT_CONSTRAINT(0x0100, 0),	/* INST_RETIRED.PREC_DIST */ | 
|---|
| 336 | FIXED_EVENT_CONSTRAINT(0x003c, 1),	/* CPU_CLK_UNHALTED.CORE */ | 
|---|
| 337 | FIXED_EVENT_CONSTRAINT(0x0300, 2),	/* CPU_CLK_UNHALTED.REF */ | 
|---|
| 338 | FIXED_EVENT_CONSTRAINT(0x013c, 2),	/* CPU_CLK_UNHALTED.REF_TSC_P */ | 
|---|
| 339 | FIXED_EVENT_CONSTRAINT(0x0400, 3),	/* SLOTS */ | 
|---|
| 340 | METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_RETIRING, 0), | 
|---|
| 341 | METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BAD_SPEC, 1), | 
|---|
| 342 | METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FE_BOUND, 2), | 
|---|
| 343 | METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BE_BOUND, 3), | 
|---|
| 344 | METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_HEAVY_OPS, 4), | 
|---|
| 345 | METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BR_MISPREDICT, 5), | 
|---|
| 346 | METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FETCH_LAT, 6), | 
|---|
| 347 | METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_MEM_BOUND, 7), | 
|---|
| 348 |  | 
|---|
| 349 | INTEL_EVENT_CONSTRAINT(0x2e, 0xff), | 
|---|
| 350 | INTEL_EVENT_CONSTRAINT(0x3c, 0xff), | 
|---|
| 351 | /* | 
|---|
| 352 | * Generally event codes < 0x90 are restricted to counters 0-3. | 
|---|
| 353 | * The 0x2E and 0x3C are exceptions, which have no restriction. | 
|---|
| 354 | */ | 
|---|
| 355 | INTEL_EVENT_CONSTRAINT_RANGE(0x01, 0x8f, 0xf), | 
|---|
| 356 |  | 
|---|
| 357 | INTEL_UEVENT_CONSTRAINT(0x01a3, 0xf), | 
|---|
| 358 | INTEL_UEVENT_CONSTRAINT(0x02a3, 0xf), | 
|---|
| 359 | INTEL_UEVENT_CONSTRAINT(0x08a3, 0xf), | 
|---|
| 360 | INTEL_UEVENT_CONSTRAINT(0x04a4, 0x1), | 
|---|
| 361 | INTEL_UEVENT_CONSTRAINT(0x08a4, 0x1), | 
|---|
| 362 | INTEL_UEVENT_CONSTRAINT(0x02cd, 0x1), | 
|---|
| 363 | INTEL_EVENT_CONSTRAINT(0xce, 0x1), | 
|---|
| 364 | INTEL_EVENT_CONSTRAINT_RANGE(0xd0, 0xdf, 0xf), | 
|---|
| 365 | /* | 
|---|
| 366 | * Generally event codes >= 0x90 are likely to have no restrictions. | 
|---|
| 367 | * The exceptions are defined above. | 
|---|
| 368 | */ | 
|---|
| 369 | INTEL_EVENT_CONSTRAINT_RANGE(0x90, 0xfe, 0xff), | 
|---|
| 370 |  | 
|---|
| 371 | EVENT_CONSTRAINT_END | 
|---|
| 372 | }; | 
|---|
| 373 |  | 
|---|
| 374 | static struct extra_reg intel_rwc_extra_regs[] __read_mostly = { | 
|---|
| 375 | INTEL_UEVENT_EXTRA_REG(0x012a, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0), | 
|---|
| 376 | INTEL_UEVENT_EXTRA_REG(0x012b, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1), | 
|---|
| 377 | INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd), | 
|---|
| 378 | INTEL_UEVENT_EXTRA_REG(0x02c6, MSR_PEBS_FRONTEND, 0x9, FE), | 
|---|
| 379 | INTEL_UEVENT_EXTRA_REG(0x03c6, MSR_PEBS_FRONTEND, 0x7fff1f, FE), | 
|---|
| 380 | INTEL_UEVENT_EXTRA_REG(0x40ad, MSR_PEBS_FRONTEND, 0x7, FE), | 
|---|
| 381 | INTEL_UEVENT_EXTRA_REG(0x04c2, MSR_PEBS_FRONTEND, 0x8, FE), | 
|---|
| 382 | EVENT_EXTRA_END | 
|---|
| 383 | }; | 
|---|
| 384 |  | 
|---|
| 385 | static struct event_constraint intel_lnc_event_constraints[] = { | 
|---|
| 386 | FIXED_EVENT_CONSTRAINT(0x00c0, 0),	/* INST_RETIRED.ANY */ | 
|---|
| 387 | FIXED_EVENT_CONSTRAINT(0x0100, 0),	/* INST_RETIRED.PREC_DIST */ | 
|---|
| 388 | FIXED_EVENT_CONSTRAINT(0x003c, 1),	/* CPU_CLK_UNHALTED.CORE */ | 
|---|
| 389 | FIXED_EVENT_CONSTRAINT(0x0300, 2),	/* CPU_CLK_UNHALTED.REF */ | 
|---|
| 390 | FIXED_EVENT_CONSTRAINT(0x013c, 2),	/* CPU_CLK_UNHALTED.REF_TSC_P */ | 
|---|
| 391 | FIXED_EVENT_CONSTRAINT(0x0400, 3),	/* SLOTS */ | 
|---|
| 392 | METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_RETIRING, 0), | 
|---|
| 393 | METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BAD_SPEC, 1), | 
|---|
| 394 | METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FE_BOUND, 2), | 
|---|
| 395 | METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BE_BOUND, 3), | 
|---|
| 396 | METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_HEAVY_OPS, 4), | 
|---|
| 397 | METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BR_MISPREDICT, 5), | 
|---|
| 398 | METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FETCH_LAT, 6), | 
|---|
| 399 | METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_MEM_BOUND, 7), | 
|---|
| 400 |  | 
|---|
| 401 | INTEL_EVENT_CONSTRAINT(0x20, 0xf), | 
|---|
| 402 |  | 
|---|
| 403 | INTEL_UEVENT_CONSTRAINT(0x012a, 0xf), | 
|---|
| 404 | INTEL_UEVENT_CONSTRAINT(0x012b, 0xf), | 
|---|
| 405 | INTEL_UEVENT_CONSTRAINT(0x0148, 0x4), | 
|---|
| 406 | INTEL_UEVENT_CONSTRAINT(0x0175, 0x4), | 
|---|
| 407 |  | 
|---|
| 408 | INTEL_EVENT_CONSTRAINT(0x2e, 0x3ff), | 
|---|
| 409 | INTEL_EVENT_CONSTRAINT(0x3c, 0x3ff), | 
|---|
| 410 |  | 
|---|
| 411 | INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4), | 
|---|
| 412 | INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4), | 
|---|
| 413 | INTEL_UEVENT_CONSTRAINT(0x04a4, 0x1), | 
|---|
| 414 | INTEL_UEVENT_CONSTRAINT(0x08a4, 0x1), | 
|---|
| 415 | INTEL_UEVENT_CONSTRAINT(0x10a4, 0x1), | 
|---|
| 416 | INTEL_UEVENT_CONSTRAINT(0x01b1, 0x8), | 
|---|
| 417 | INTEL_UEVENT_CONSTRAINT(0x01cd, 0x3fc), | 
|---|
| 418 | INTEL_UEVENT_CONSTRAINT(0x02cd, 0x3), | 
|---|
| 419 |  | 
|---|
| 420 | INTEL_EVENT_CONSTRAINT_RANGE(0xd0, 0xdf, 0xf), | 
|---|
| 421 |  | 
|---|
| 422 | INTEL_UEVENT_CONSTRAINT(0x00e0, 0xf), | 
|---|
| 423 |  | 
|---|
| 424 | EVENT_CONSTRAINT_END | 
|---|
| 425 | }; | 
|---|
| 426 |  | 
|---|
| 427 | static struct extra_reg intel_lnc_extra_regs[] __read_mostly = { | 
|---|
| 428 | INTEL_UEVENT_EXTRA_REG(0x012a, MSR_OFFCORE_RSP_0, 0xfffffffffffull, RSP_0), | 
|---|
| 429 | INTEL_UEVENT_EXTRA_REG(0x012b, MSR_OFFCORE_RSP_1, 0xfffffffffffull, RSP_1), | 
|---|
| 430 | INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd), | 
|---|
| 431 | INTEL_UEVENT_EXTRA_REG(0x02c6, MSR_PEBS_FRONTEND, 0x9, FE), | 
|---|
| 432 | INTEL_UEVENT_EXTRA_REG(0x03c6, MSR_PEBS_FRONTEND, 0x7fff1f, FE), | 
|---|
| 433 | INTEL_UEVENT_EXTRA_REG(0x40ad, MSR_PEBS_FRONTEND, 0xf, FE), | 
|---|
| 434 | INTEL_UEVENT_EXTRA_REG(0x04c2, MSR_PEBS_FRONTEND, 0x8, FE), | 
|---|
| 435 | EVENT_EXTRA_END | 
|---|
| 436 | }; | 
|---|
| 437 |  | 
|---|
| 438 | EVENT_ATTR_STR(mem-loads,	mem_ld_nhm, "event=0x0b,umask=0x10,ldlat=3"); | 
|---|
| 439 | EVENT_ATTR_STR(mem-loads,	mem_ld_snb, "event=0xcd,umask=0x1,ldlat=3"); | 
|---|
| 440 | EVENT_ATTR_STR(mem-stores,	mem_st_snb, "event=0xcd,umask=0x2"); | 
|---|
| 441 |  | 
|---|
| 442 | static struct attribute *nhm_mem_events_attrs[] = { | 
|---|
| 443 | EVENT_PTR(mem_ld_nhm), | 
|---|
| 444 | NULL, | 
|---|
| 445 | }; | 
|---|
| 446 |  | 
|---|
| 447 | /* | 
|---|
| 448 | * topdown events for Intel Core CPUs. | 
|---|
| 449 | * | 
|---|
| 450 | * The events are all counted in slots; a slot is a free issue slot in a | 
|---|
| 451 | * 4-wide pipeline. Some events are already reported in slots; for cycle | 
|---|
| 452 | * events we multiply by the pipeline width (4). | 
|---|
| 453 | * | 
|---|
| 454 | * With Hyper Threading on, topdown metrics are either summed or averaged | 
|---|
| 455 | * between the threads of a core: (count_t0 + count_t1). | 
|---|
| 456 | * | 
|---|
| 457 | * For the average case the metric is always scaled to pipeline width, | 
|---|
| 458 | * so we use factor 2 ((count_t0 + count_t1) / 2 * 4) | 
|---|
| 459 | */ | 
|---|
| 460 |  | 
|---|
| 461 | EVENT_ATTR_STR_HT(topdown-total-slots, td_total_slots, | 
|---|
| 462 | "event=0x3c,umask=0x0",			/* cpu_clk_unhalted.thread */ | 
|---|
| 463 | "event=0x3c,umask=0x0,any=1");		/* cpu_clk_unhalted.thread_any */ | 
|---|
| 464 | EVENT_ATTR_STR_HT(topdown-total-slots.scale, td_total_slots_scale, "4", "2"); | 
|---|
| 465 | EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued, | 
|---|
| 466 | "event=0xe,umask=0x1");			/* uops_issued.any */ | 
|---|
| 467 | EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired, | 
|---|
| 468 | "event=0xc2,umask=0x2");		/* uops_retired.retire_slots */ | 
|---|
| 469 | EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles, | 
|---|
| 470 | "event=0x9c,umask=0x1");		/* idq_uops_not_delivered_core */ | 
|---|
| 471 | EVENT_ATTR_STR_HT(topdown-recovery-bubbles, td_recovery_bubbles, | 
|---|
| 472 | "event=0xd,umask=0x3,cmask=1",		/* int_misc.recovery_cycles */ | 
|---|
| 473 | "event=0xd,umask=0x3,cmask=1,any=1");	/* int_misc.recovery_cycles_any */ | 
|---|
| 474 | EVENT_ATTR_STR_HT(topdown-recovery-bubbles.scale, td_recovery_bubbles_scale, | 
|---|
| 475 | "4", "2"); | 
|---|
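|  | /* | 
|---|
|  | * The ".scale" strings above are exported through sysfs so that the perf tool | 
|---|
|  | * can scale cycle counts to slots: by 4 with HT off and by 2 with HT on, | 
|---|
|  | * matching the (count_t0 + count_t1) / 2 * 4 formula described above. | 
|---|
|  | */ | 
|---|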
| 476 |  | 
|---|
| 477 | EVENT_ATTR_STR(slots,			slots, "event=0x00,umask=0x4"); | 
|---|
| 478 | EVENT_ATTR_STR(topdown-retiring,	td_retiring, "event=0x00,umask=0x80"); | 
|---|
| 479 | EVENT_ATTR_STR(topdown-bad-spec,	td_bad_spec, "event=0x00,umask=0x81"); | 
|---|
| 480 | EVENT_ATTR_STR(topdown-fe-bound,	td_fe_bound, "event=0x00,umask=0x82"); | 
|---|
| 481 | EVENT_ATTR_STR(topdown-be-bound,	td_be_bound, "event=0x00,umask=0x83"); | 
|---|
| 482 | EVENT_ATTR_STR(topdown-heavy-ops,	td_heavy_ops, "event=0x00,umask=0x84"); | 
|---|
| 483 | EVENT_ATTR_STR(topdown-br-mispredict,	td_br_mispredict, "event=0x00,umask=0x85"); | 
|---|
| 484 | EVENT_ATTR_STR(topdown-fetch-lat,	td_fetch_lat, "event=0x00,umask=0x86"); | 
|---|
| 485 | EVENT_ATTR_STR(topdown-mem-bound,	td_mem_bound, "event=0x00,umask=0x87"); | 
|---|
| 486 |  | 
|---|
| 487 | static struct attribute *snb_events_attrs[] = { | 
|---|
| 488 | EVENT_PTR(td_slots_issued), | 
|---|
| 489 | EVENT_PTR(td_slots_retired), | 
|---|
| 490 | EVENT_PTR(td_fetch_bubbles), | 
|---|
| 491 | EVENT_PTR(td_total_slots), | 
|---|
| 492 | EVENT_PTR(td_total_slots_scale), | 
|---|
| 493 | EVENT_PTR(td_recovery_bubbles), | 
|---|
| 494 | EVENT_PTR(td_recovery_bubbles_scale), | 
|---|
| 495 | NULL, | 
|---|
| 496 | }; | 
|---|
| 497 |  | 
|---|
| 498 | static struct attribute *snb_mem_events_attrs[] = { | 
|---|
| 499 | EVENT_PTR(mem_ld_snb), | 
|---|
| 500 | EVENT_PTR(mem_st_snb), | 
|---|
| 501 | NULL, | 
|---|
| 502 | }; | 
|---|
| 503 |  | 
|---|
| 504 | static struct event_constraint intel_hsw_event_constraints[] = { | 
|---|
| 505 | FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ | 
|---|
| 506 | FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ | 
|---|
| 507 | FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */ | 
|---|
| 508 | INTEL_UEVENT_CONSTRAINT(0x148, 0x4),	/* L1D_PEND_MISS.PENDING */ | 
|---|
| 509 | INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */ | 
|---|
| 510 | INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */ | 
|---|
| 511 | /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */ | 
|---|
| 512 | INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4), | 
|---|
| 513 | /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */ | 
|---|
| 514 | INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4), | 
|---|
| 515 | /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */ | 
|---|
| 516 | INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), | 
|---|
| 517 |  | 
|---|
| 518 | /* | 
|---|
| 519 | * When HT is off these events can only run on the bottom 4 counters | 
|---|
| 520 | * When HT is on, they are impacted by the HT bug and require EXCL access | 
|---|
| 521 | */ | 
|---|
| 522 | INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */ | 
|---|
| 523 | INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */ | 
|---|
| 524 | INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */ | 
|---|
| 525 | INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */ | 
|---|
| 526 |  | 
|---|
| 527 | EVENT_CONSTRAINT_END | 
|---|
| 528 | }; | 
|---|
| 529 |  | 
|---|
| 530 | static struct event_constraint intel_bdw_event_constraints[] = { | 
|---|
| 531 | FIXED_EVENT_CONSTRAINT(0x00c0, 0),	/* INST_RETIRED.ANY */ | 
|---|
| 532 | FIXED_EVENT_CONSTRAINT(0x003c, 1),	/* CPU_CLK_UNHALTED.CORE */ | 
|---|
| 533 | FIXED_EVENT_CONSTRAINT(0x0300, 2),	/* CPU_CLK_UNHALTED.REF */ | 
|---|
| 534 | INTEL_UEVENT_CONSTRAINT(0x148, 0x4),	/* L1D_PEND_MISS.PENDING */ | 
|---|
| 535 | INTEL_UBIT_EVENT_CONSTRAINT(0x8a3, 0x4),	/* CYCLE_ACTIVITY.CYCLES_L1D_MISS */ | 
|---|
| 536 | /* | 
|---|
| 537 | * when HT is off, these can only run on the bottom 4 counters | 
|---|
| 538 | */ | 
|---|
| 539 | INTEL_EVENT_CONSTRAINT(0xd0, 0xf),	/* MEM_INST_RETIRED.* */ | 
|---|
| 540 | INTEL_EVENT_CONSTRAINT(0xd1, 0xf),	/* MEM_LOAD_RETIRED.* */ | 
|---|
| 541 | INTEL_EVENT_CONSTRAINT(0xd2, 0xf),	/* MEM_LOAD_L3_HIT_RETIRED.* */ | 
|---|
| 542 | INTEL_EVENT_CONSTRAINT(0xcd, 0xf),	/* MEM_TRANS_RETIRED.* */ | 
|---|
| 543 | EVENT_CONSTRAINT_END | 
|---|
| 544 | }; | 
|---|
| 545 |  | 
|---|
| 546 | static u64 intel_pmu_event_map(int hw_event) | 
|---|
| 547 | { | 
|---|
| 548 | return intel_perfmon_event_map[hw_event]; | 
|---|
| 549 | } | 
|---|
| 550 |  | 
|---|
| 551 | static __initconst const u64 glc_hw_cache_event_ids | 
|---|
| 552 | [PERF_COUNT_HW_CACHE_MAX] | 
|---|
| 553 | [PERF_COUNT_HW_CACHE_OP_MAX] | 
|---|
| 554 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = | 
|---|
| 555 | { | 
|---|
| 556 | [ C(L1D ) ] = { | 
|---|
| 557 | [ C(OP_READ) ] = { | 
|---|
| 558 | [ C(RESULT_ACCESS) ] = 0x81d0, | 
|---|
| 559 | [ C(RESULT_MISS)   ] = 0xe124, | 
|---|
| 560 | }, | 
|---|
| 561 | [ C(OP_WRITE) ] = { | 
|---|
| 562 | [ C(RESULT_ACCESS) ] = 0x82d0, | 
|---|
| 563 | }, | 
|---|
| 564 | }, | 
|---|
| 565 | [ C(L1I ) ] = { | 
|---|
| 566 | [ C(OP_READ) ] = { | 
|---|
| 567 | [ C(RESULT_MISS)   ] = 0xe424, | 
|---|
| 568 | }, | 
|---|
| 569 | [ C(OP_WRITE) ] = { | 
|---|
| 570 | [ C(RESULT_ACCESS) ] = -1, | 
|---|
| 571 | [ C(RESULT_MISS)   ] = -1, | 
|---|
| 572 | }, | 
|---|
| 573 | }, | 
|---|
| 574 | [ C(LL  ) ] = { | 
|---|
| 575 | [ C(OP_READ) ] = { | 
|---|
| 576 | [ C(RESULT_ACCESS) ] = 0x12a, | 
|---|
| 577 | [ C(RESULT_MISS)   ] = 0x12a, | 
|---|
| 578 | }, | 
|---|
| 579 | [ C(OP_WRITE) ] = { | 
|---|
| 580 | [ C(RESULT_ACCESS) ] = 0x12a, | 
|---|
| 581 | [ C(RESULT_MISS)   ] = 0x12a, | 
|---|
| 582 | }, | 
|---|
| 583 | }, | 
|---|
| 584 | [ C(DTLB) ] = { | 
|---|
| 585 | [ C(OP_READ) ] = { | 
|---|
| 586 | [ C(RESULT_ACCESS) ] = 0x81d0, | 
|---|
| 587 | [ C(RESULT_MISS)   ] = 0xe12, | 
|---|
| 588 | }, | 
|---|
| 589 | [ C(OP_WRITE) ] = { | 
|---|
| 590 | [ C(RESULT_ACCESS) ] = 0x82d0, | 
|---|
| 591 | [ C(RESULT_MISS)   ] = 0xe13, | 
|---|
| 592 | }, | 
|---|
| 593 | }, | 
|---|
| 594 | [ C(ITLB) ] = { | 
|---|
| 595 | [ C(OP_READ) ] = { | 
|---|
| 596 | [ C(RESULT_ACCESS) ] = -1, | 
|---|
| 597 | [ C(RESULT_MISS)   ] = 0xe11, | 
|---|
| 598 | }, | 
|---|
| 599 | [ C(OP_WRITE) ] = { | 
|---|
| 600 | [ C(RESULT_ACCESS) ] = -1, | 
|---|
| 601 | [ C(RESULT_MISS)   ] = -1, | 
|---|
| 602 | }, | 
|---|
| 603 | [ C(OP_PREFETCH) ] = { | 
|---|
| 604 | [ C(RESULT_ACCESS) ] = -1, | 
|---|
| 605 | [ C(RESULT_MISS)   ] = -1, | 
|---|
| 606 | }, | 
|---|
| 607 | }, | 
|---|
| 608 | [ C(BPU ) ] = { | 
|---|
| 609 | [ C(OP_READ) ] = { | 
|---|
| 610 | [ C(RESULT_ACCESS) ] = 0x4c4, | 
|---|
| 611 | [ C(RESULT_MISS)   ] = 0x4c5, | 
|---|
| 612 | }, | 
|---|
| 613 | [ C(OP_WRITE) ] = { | 
|---|
| 614 | [ C(RESULT_ACCESS) ] = -1, | 
|---|
| 615 | [ C(RESULT_MISS)   ] = -1, | 
|---|
| 616 | }, | 
|---|
| 617 | [ C(OP_PREFETCH) ] = { | 
|---|
| 618 | [ C(RESULT_ACCESS) ] = -1, | 
|---|
| 619 | [ C(RESULT_MISS)   ] = -1, | 
|---|
| 620 | }, | 
|---|
| 621 | }, | 
|---|
| 622 | [ C(NODE) ] = { | 
|---|
| 623 | [ C(OP_READ) ] = { | 
|---|
| 624 | [ C(RESULT_ACCESS) ] = 0x12a, | 
|---|
| 625 | [ C(RESULT_MISS)   ] = 0x12a, | 
|---|
| 626 | }, | 
|---|
| 627 | }, | 
|---|
| 628 | }; | 
|---|
| 629 |  | 
|---|
| 630 | static __initconst const u64 glc_hw_cache_extra_regs | 
|---|
| 631 | [PERF_COUNT_HW_CACHE_MAX] | 
|---|
| 632 | [PERF_COUNT_HW_CACHE_OP_MAX] | 
|---|
| 633 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = | 
|---|
| 634 | { | 
|---|
| 635 | [ C(LL  ) ] = { | 
|---|
| 636 | [ C(OP_READ) ] = { | 
|---|
| 637 | [ C(RESULT_ACCESS) ] = 0x10001, | 
|---|
| 638 | [ C(RESULT_MISS)   ] = 0x3fbfc00001, | 
|---|
| 639 | }, | 
|---|
| 640 | [ C(OP_WRITE) ] = { | 
|---|
| 641 | [ C(RESULT_ACCESS) ] = 0x3f3ffc0002, | 
|---|
| 642 | [ C(RESULT_MISS)   ] = 0x3f3fc00002, | 
|---|
| 643 | }, | 
|---|
| 644 | }, | 
|---|
| 645 | [ C(NODE) ] = { | 
|---|
| 646 | [ C(OP_READ) ] = { | 
|---|
| 647 | [ C(RESULT_ACCESS) ] = 0x10c000001, | 
|---|
| 648 | [ C(RESULT_MISS)   ] = 0x3fb3000001, | 
|---|
| 649 | }, | 
|---|
| 650 | }, | 
|---|
| 651 | }; | 
|---|
| 652 |  | 
|---|
| 653 | /* | 
|---|
| 654 | * Notes on the events: | 
|---|
| 655 | * - data reads do not include code reads (comparable to earlier tables) | 
|---|
| 656 | * - data counts include speculative execution (except L1 write, dtlb, bpu) | 
|---|
| 657 | * - remote node access includes remote memory, remote cache, remote mmio. | 
|---|
| 658 | * - prefetches are not included in the counts. | 
|---|
| 659 | * - icache miss does not include decoded icache | 
|---|
| 660 | */ | 
|---|
| 661 |  | 
|---|
| 662 | #define SKL_DEMAND_DATA_RD		BIT_ULL(0) | 
|---|
| 663 | #define SKL_DEMAND_RFO			BIT_ULL(1) | 
|---|
| 664 | #define SKL_ANY_RESPONSE		BIT_ULL(16) | 
|---|
| 665 | #define SKL_SUPPLIER_NONE		BIT_ULL(17) | 
|---|
| 666 | #define SKL_L3_MISS_LOCAL_DRAM		BIT_ULL(26) | 
|---|
| 667 | #define SKL_L3_MISS_REMOTE_HOP0_DRAM	BIT_ULL(27) | 
|---|
| 668 | #define SKL_L3_MISS_REMOTE_HOP1_DRAM	BIT_ULL(28) | 
|---|
| 669 | #define SKL_L3_MISS_REMOTE_HOP2P_DRAM	BIT_ULL(29) | 
|---|
| 670 | #define SKL_L3_MISS			(SKL_L3_MISS_LOCAL_DRAM| \ | 
|---|
| 671 | SKL_L3_MISS_REMOTE_HOP0_DRAM| \ | 
|---|
| 672 | SKL_L3_MISS_REMOTE_HOP1_DRAM| \ | 
|---|
| 673 | SKL_L3_MISS_REMOTE_HOP2P_DRAM) | 
|---|
| 674 | #define SKL_SPL_HIT			BIT_ULL(30) | 
|---|
| 675 | #define SKL_SNOOP_NONE			BIT_ULL(31) | 
|---|
| 676 | #define SKL_SNOOP_NOT_NEEDED		BIT_ULL(32) | 
|---|
| 677 | #define SKL_SNOOP_MISS			BIT_ULL(33) | 
|---|
| 678 | #define SKL_SNOOP_HIT_NO_FWD		BIT_ULL(34) | 
|---|
| 679 | #define SKL_SNOOP_HIT_WITH_FWD		BIT_ULL(35) | 
|---|
| 680 | #define SKL_SNOOP_HITM			BIT_ULL(36) | 
|---|
| 681 | #define SKL_SNOOP_NON_DRAM		BIT_ULL(37) | 
|---|
| 682 | #define SKL_ANY_SNOOP			(SKL_SPL_HIT|SKL_SNOOP_NONE| \ | 
|---|
| 683 | SKL_SNOOP_NOT_NEEDED|SKL_SNOOP_MISS| \ | 
|---|
| 684 | SKL_SNOOP_HIT_NO_FWD|SKL_SNOOP_HIT_WITH_FWD| \ | 
|---|
| 685 | SKL_SNOOP_HITM|SKL_SNOOP_NON_DRAM) | 
|---|
| 686 | #define SKL_DEMAND_READ			SKL_DEMAND_DATA_RD | 
|---|
| 687 | #define SKL_SNOOP_DRAM			(SKL_SNOOP_NONE| \ | 
|---|
| 688 | SKL_SNOOP_NOT_NEEDED|SKL_SNOOP_MISS| \ | 
|---|
| 689 | SKL_SNOOP_HIT_NO_FWD|SKL_SNOOP_HIT_WITH_FWD| \ | 
|---|
| 690 | SKL_SNOOP_HITM|SKL_SPL_HIT) | 
|---|
| 691 | #define SKL_DEMAND_WRITE		SKL_DEMAND_RFO | 
|---|
| 692 | #define SKL_LLC_ACCESS			SKL_ANY_RESPONSE | 
|---|
| 693 | #define SKL_L3_MISS_REMOTE		(SKL_L3_MISS_REMOTE_HOP0_DRAM| \ | 
|---|
| 694 | SKL_L3_MISS_REMOTE_HOP1_DRAM| \ | 
|---|
| 695 | SKL_L3_MISS_REMOTE_HOP2P_DRAM) | 
|---|
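|  | /* | 
|---|
|  | * The SKL_* response bits above are OR'ed together in skl_hw_cache_extra_regs | 
|---|
|  | * below and programmed into MSR_OFFCORE_RSP_x through the extra_reg mechanism. | 
|---|
|  | */ | 
|---|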
| 696 |  | 
|---|
| 697 | static __initconst const u64 skl_hw_cache_event_ids | 
|---|
| 698 | [PERF_COUNT_HW_CACHE_MAX] | 
|---|
| 699 | [PERF_COUNT_HW_CACHE_OP_MAX] | 
|---|
| 700 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = | 
|---|
| 701 | { | 
|---|
| 702 | [ C(L1D ) ] = { | 
|---|
| 703 | [ C(OP_READ) ] = { | 
|---|
| 704 | [ C(RESULT_ACCESS) ] = 0x81d0,	/* MEM_INST_RETIRED.ALL_LOADS */ | 
|---|
| 705 | [ C(RESULT_MISS)   ] = 0x151,	/* L1D.REPLACEMENT */ | 
|---|
| 706 | }, | 
|---|
| 707 | [ C(OP_WRITE) ] = { | 
|---|
| 708 | [ C(RESULT_ACCESS) ] = 0x82d0,	/* MEM_INST_RETIRED.ALL_STORES */ | 
|---|
| 709 | [ C(RESULT_MISS)   ] = 0x0, | 
|---|
| 710 | }, | 
|---|
| 711 | [ C(OP_PREFETCH) ] = { | 
|---|
| 712 | [ C(RESULT_ACCESS) ] = 0x0, | 
|---|
| 713 | [ C(RESULT_MISS)   ] = 0x0, | 
|---|
| 714 | }, | 
|---|
| 715 | }, | 
|---|
| 716 | [ C(L1I ) ] = { | 
|---|
| 717 | [ C(OP_READ) ] = { | 
|---|
| 718 | [ C(RESULT_ACCESS) ] = 0x0, | 
|---|
| 719 | [ C(RESULT_MISS)   ] = 0x283,	/* ICACHE_64B.MISS */ | 
|---|
| 720 | }, | 
|---|
| 721 | [ C(OP_WRITE) ] = { | 
|---|
| 722 | [ C(RESULT_ACCESS) ] = -1, | 
|---|
| 723 | [ C(RESULT_MISS)   ] = -1, | 
|---|
| 724 | }, | 
|---|
| 725 | [ C(OP_PREFETCH) ] = { | 
|---|
| 726 | [ C(RESULT_ACCESS) ] = 0x0, | 
|---|
| 727 | [ C(RESULT_MISS)   ] = 0x0, | 
|---|
| 728 | }, | 
|---|
| 729 | }, | 
|---|
| 730 | [ C(LL  ) ] = { | 
|---|
| 731 | [ C(OP_READ) ] = { | 
|---|
| 732 | [ C(RESULT_ACCESS) ] = 0x1b7,	/* OFFCORE_RESPONSE */ | 
|---|
| 733 | [ C(RESULT_MISS)   ] = 0x1b7,	/* OFFCORE_RESPONSE */ | 
|---|
| 734 | }, | 
|---|
| 735 | [ C(OP_WRITE) ] = { | 
|---|
| 736 | [ C(RESULT_ACCESS) ] = 0x1b7,	/* OFFCORE_RESPONSE */ | 
|---|
| 737 | [ C(RESULT_MISS)   ] = 0x1b7,	/* OFFCORE_RESPONSE */ | 
|---|
| 738 | }, | 
|---|
| 739 | [ C(OP_PREFETCH) ] = { | 
|---|
| 740 | [ C(RESULT_ACCESS) ] = 0x0, | 
|---|
| 741 | [ C(RESULT_MISS)   ] = 0x0, | 
|---|
| 742 | }, | 
|---|
| 743 | }, | 
|---|
| 744 | [ C(DTLB) ] = { | 
|---|
| 745 | [ C(OP_READ) ] = { | 
|---|
| 746 | [ C(RESULT_ACCESS) ] = 0x81d0,	/* MEM_INST_RETIRED.ALL_LOADS */ | 
|---|
| 747 | [ C(RESULT_MISS)   ] = 0xe08,	/* DTLB_LOAD_MISSES.WALK_COMPLETED */ | 
|---|
| 748 | }, | 
|---|
| 749 | [ C(OP_WRITE) ] = { | 
|---|
| 750 | [ C(RESULT_ACCESS) ] = 0x82d0,	/* MEM_INST_RETIRED.ALL_STORES */ | 
|---|
| 751 | [ C(RESULT_MISS)   ] = 0xe49,	/* DTLB_STORE_MISSES.WALK_COMPLETED */ | 
|---|
| 752 | }, | 
|---|
| 753 | [ C(OP_PREFETCH) ] = { | 
|---|
| 754 | [ C(RESULT_ACCESS) ] = 0x0, | 
|---|
| 755 | [ C(RESULT_MISS)   ] = 0x0, | 
|---|
| 756 | }, | 
|---|
| 757 | }, | 
|---|
| 758 | [ C(ITLB) ] = { | 
|---|
| 759 | [ C(OP_READ) ] = { | 
|---|
| 760 | [ C(RESULT_ACCESS) ] = 0x2085,	/* ITLB_MISSES.STLB_HIT */ | 
|---|
| 761 | [ C(RESULT_MISS)   ] = 0xe85,	/* ITLB_MISSES.WALK_COMPLETED */ | 
|---|
| 762 | }, | 
|---|
| 763 | [ C(OP_WRITE) ] = { | 
|---|
| 764 | [ C(RESULT_ACCESS) ] = -1, | 
|---|
| 765 | [ C(RESULT_MISS)   ] = -1, | 
|---|
| 766 | }, | 
|---|
| 767 | [ C(OP_PREFETCH) ] = { | 
|---|
| 768 | [ C(RESULT_ACCESS) ] = -1, | 
|---|
| 769 | [ C(RESULT_MISS)   ] = -1, | 
|---|
| 770 | }, | 
|---|
| 771 | }, | 
|---|
| 772 | [ C(BPU ) ] = { | 
|---|
| 773 | [ C(OP_READ) ] = { | 
|---|
| 774 | [ C(RESULT_ACCESS) ] = 0xc4,	/* BR_INST_RETIRED.ALL_BRANCHES */ | 
|---|
| 775 | [ C(RESULT_MISS)   ] = 0xc5,	/* BR_MISP_RETIRED.ALL_BRANCHES */ | 
|---|
| 776 | }, | 
|---|
| 777 | [ C(OP_WRITE) ] = { | 
|---|
| 778 | [ C(RESULT_ACCESS) ] = -1, | 
|---|
| 779 | [ C(RESULT_MISS)   ] = -1, | 
|---|
| 780 | }, | 
|---|
| 781 | [ C(OP_PREFETCH) ] = { | 
|---|
| 782 | [ C(RESULT_ACCESS) ] = -1, | 
|---|
| 783 | [ C(RESULT_MISS)   ] = -1, | 
|---|
| 784 | }, | 
|---|
| 785 | }, | 
|---|
| 786 | [ C(NODE) ] = { | 
|---|
| 787 | [ C(OP_READ) ] = { | 
|---|
| 788 | [ C(RESULT_ACCESS) ] = 0x1b7,	/* OFFCORE_RESPONSE */ | 
|---|
| 789 | [ C(RESULT_MISS)   ] = 0x1b7,	/* OFFCORE_RESPONSE */ | 
|---|
| 790 | }, | 
|---|
| 791 | [ C(OP_WRITE) ] = { | 
|---|
| 792 | [ C(RESULT_ACCESS) ] = 0x1b7,	/* OFFCORE_RESPONSE */ | 
|---|
| 793 | [ C(RESULT_MISS)   ] = 0x1b7,	/* OFFCORE_RESPONSE */ | 
|---|
| 794 | }, | 
|---|
| 795 | [ C(OP_PREFETCH) ] = { | 
|---|
| 796 | [ C(RESULT_ACCESS) ] = 0x0, | 
|---|
| 797 | [ C(RESULT_MISS)   ] = 0x0, | 
|---|
| 798 | }, | 
|---|
| 799 | }, | 
|---|
| 800 | }; | 
|---|
| 801 |  | 
|---|
| 802 | static __initconst const u64 skl_hw_cache_extra_regs | 
|---|
| 803 | [PERF_COUNT_HW_CACHE_MAX] | 
|---|
| 804 | [PERF_COUNT_HW_CACHE_OP_MAX] | 
|---|
| 805 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = | 
|---|
| 806 | { | 
|---|
| 807 | [ C(LL  ) ] = { | 
|---|
| 808 | [ C(OP_READ) ] = { | 
|---|
| 809 | [ C(RESULT_ACCESS) ] = SKL_DEMAND_READ| | 
|---|
| 810 | SKL_LLC_ACCESS|SKL_ANY_SNOOP, | 
|---|
| 811 | [ C(RESULT_MISS)   ] = SKL_DEMAND_READ| | 
|---|
| 812 | SKL_L3_MISS|SKL_ANY_SNOOP| | 
|---|
| 813 | SKL_SUPPLIER_NONE, | 
|---|
| 814 | }, | 
|---|
| 815 | [ C(OP_WRITE) ] = { | 
|---|
| 816 | [ C(RESULT_ACCESS) ] = SKL_DEMAND_WRITE| | 
|---|
| 817 | SKL_LLC_ACCESS|SKL_ANY_SNOOP, | 
|---|
| 818 | [ C(RESULT_MISS)   ] = SKL_DEMAND_WRITE| | 
|---|
| 819 | SKL_L3_MISS|SKL_ANY_SNOOP| | 
|---|
| 820 | SKL_SUPPLIER_NONE, | 
|---|
| 821 | }, | 
|---|
| 822 | [ C(OP_PREFETCH) ] = { | 
|---|
| 823 | [ C(RESULT_ACCESS) ] = 0x0, | 
|---|
| 824 | [ C(RESULT_MISS)   ] = 0x0, | 
|---|
| 825 | }, | 
|---|
| 826 | }, | 
|---|
| 827 | [ C(NODE) ] = { | 
|---|
| 828 | [ C(OP_READ) ] = { | 
|---|
| 829 | [ C(RESULT_ACCESS) ] = SKL_DEMAND_READ| | 
|---|
| 830 | SKL_L3_MISS_LOCAL_DRAM|SKL_SNOOP_DRAM, | 
|---|
| 831 | [ C(RESULT_MISS)   ] = SKL_DEMAND_READ| | 
|---|
| 832 | SKL_L3_MISS_REMOTE|SKL_SNOOP_DRAM, | 
|---|
| 833 | }, | 
|---|
| 834 | [ C(OP_WRITE) ] = { | 
|---|
| 835 | [ C(RESULT_ACCESS) ] = SKL_DEMAND_WRITE| | 
|---|
| 836 | SKL_L3_MISS_LOCAL_DRAM|SKL_SNOOP_DRAM, | 
|---|
| 837 | [ C(RESULT_MISS)   ] = SKL_DEMAND_WRITE| | 
|---|
| 838 | SKL_L3_MISS_REMOTE|SKL_SNOOP_DRAM, | 
|---|
| 839 | }, | 
|---|
| 840 | [ C(OP_PREFETCH) ] = { | 
|---|
| 841 | [ C(RESULT_ACCESS) ] = 0x0, | 
|---|
| 842 | [ C(RESULT_MISS)   ] = 0x0, | 
|---|
| 843 | }, | 
|---|
| 844 | }, | 
|---|
| 845 | }; | 
|---|
| 846 |  | 
|---|
| 847 | #define SNB_DMND_DATA_RD	(1ULL << 0) | 
|---|
| 848 | #define SNB_DMND_RFO		(1ULL << 1) | 
|---|
| 849 | #define SNB_DMND_IFETCH		(1ULL << 2) | 
|---|
| 850 | #define SNB_DMND_WB		(1ULL << 3) | 
|---|
| 851 | #define SNB_PF_DATA_RD		(1ULL << 4) | 
|---|
| 852 | #define SNB_PF_RFO		(1ULL << 5) | 
|---|
| 853 | #define SNB_PF_IFETCH		(1ULL << 6) | 
|---|
| 854 | #define SNB_LLC_DATA_RD		(1ULL << 7) | 
|---|
| 855 | #define SNB_LLC_RFO		(1ULL << 8) | 
|---|
| 856 | #define SNB_LLC_IFETCH		(1ULL << 9) | 
|---|
| 857 | #define SNB_BUS_LOCKS		(1ULL << 10) | 
|---|
| 858 | #define SNB_STRM_ST		(1ULL << 11) | 
|---|
| 859 | #define SNB_OTHER		(1ULL << 15) | 
|---|
| 860 | #define SNB_RESP_ANY		(1ULL << 16) | 
|---|
| 861 | #define SNB_NO_SUPP		(1ULL << 17) | 
|---|
| 862 | #define SNB_LLC_HITM		(1ULL << 18) | 
|---|
| 863 | #define SNB_LLC_HITE		(1ULL << 19) | 
|---|
| 864 | #define SNB_LLC_HITS		(1ULL << 20) | 
|---|
| 865 | #define SNB_LLC_HITF		(1ULL << 21) | 
|---|
| 866 | #define SNB_LOCAL		(1ULL << 22) | 
|---|
| 867 | #define SNB_REMOTE		(0xffULL << 23) | 
|---|
| 868 | #define SNB_SNP_NONE		(1ULL << 31) | 
|---|
| 869 | #define SNB_SNP_NOT_NEEDED	(1ULL << 32) | 
|---|
| 870 | #define SNB_SNP_MISS		(1ULL << 33) | 
|---|
| 871 | #define SNB_NO_FWD		(1ULL << 34) | 
|---|
| 872 | #define SNB_SNP_FWD		(1ULL << 35) | 
|---|
| 873 | #define SNB_HITM		(1ULL << 36) | 
|---|
| 874 | #define SNB_NON_DRAM		(1ULL << 37) | 
|---|
| 875 |  | 
|---|
| 876 | #define SNB_DMND_READ		(SNB_DMND_DATA_RD|SNB_LLC_DATA_RD) | 
|---|
| 877 | #define SNB_DMND_WRITE		(SNB_DMND_RFO|SNB_LLC_RFO) | 
|---|
| 878 | #define SNB_DMND_PREFETCH	(SNB_PF_DATA_RD|SNB_PF_RFO) | 
|---|
| 879 |  | 
|---|
| 880 | #define SNB_SNP_ANY		(SNB_SNP_NONE|SNB_SNP_NOT_NEEDED| \ | 
|---|
| 881 | SNB_SNP_MISS|SNB_NO_FWD|SNB_SNP_FWD| \ | 
|---|
| 882 | SNB_HITM) | 
|---|
| 883 |  | 
|---|
| 884 | #define SNB_DRAM_ANY		(SNB_LOCAL|SNB_REMOTE|SNB_SNP_ANY) | 
|---|
| 885 | #define SNB_DRAM_REMOTE		(SNB_REMOTE|SNB_SNP_ANY) | 
|---|
| 886 |  | 
|---|
| 887 | #define SNB_L3_ACCESS		SNB_RESP_ANY | 
|---|
| 888 | #define SNB_L3_MISS		(SNB_DRAM_ANY|SNB_NON_DRAM) | 
|---|
| 889 |  | 
|---|
| 890 | static __initconst const u64 snb_hw_cache_extra_regs | 
|---|
| 891 | [PERF_COUNT_HW_CACHE_MAX] | 
|---|
| 892 | [PERF_COUNT_HW_CACHE_OP_MAX] | 
|---|
| 893 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = | 
|---|
| 894 | { | 
|---|
| 895 | [ C(LL  ) ] = { | 
|---|
| 896 | [ C(OP_READ) ] = { | 
|---|
| 897 | [ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_L3_ACCESS, | 
|---|
| 898 | [ C(RESULT_MISS)   ] = SNB_DMND_READ|SNB_L3_MISS, | 
|---|
| 899 | }, | 
|---|
| 900 | [ C(OP_WRITE) ] = { | 
|---|
| 901 | [ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_L3_ACCESS, | 
|---|
| 902 | [ C(RESULT_MISS)   ] = SNB_DMND_WRITE|SNB_L3_MISS, | 
|---|
| 903 | }, | 
|---|
| 904 | [ C(OP_PREFETCH) ] = { | 
|---|
| 905 | [ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_L3_ACCESS, | 
|---|
| 906 | [ C(RESULT_MISS)   ] = SNB_DMND_PREFETCH|SNB_L3_MISS, | 
|---|
| 907 | }, | 
|---|
| 908 | }, | 
|---|
| 909 | [ C(NODE) ] = { | 
|---|
| 910 | [ C(OP_READ) ] = { | 
|---|
| 911 | [ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_DRAM_ANY, | 
|---|
| 912 | [ C(RESULT_MISS)   ] = SNB_DMND_READ|SNB_DRAM_REMOTE, | 
|---|
| 913 | }, | 
|---|
| 914 | [ C(OP_WRITE) ] = { | 
|---|
| 915 | [ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_DRAM_ANY, | 
|---|
| 916 | [ C(RESULT_MISS)   ] = SNB_DMND_WRITE|SNB_DRAM_REMOTE, | 
|---|
| 917 | }, | 
|---|
| 918 | [ C(OP_PREFETCH) ] = { | 
|---|
| 919 | [ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_DRAM_ANY, | 
|---|
| 920 | [ C(RESULT_MISS)   ] = SNB_DMND_PREFETCH|SNB_DRAM_REMOTE, | 
|---|
| 921 | }, | 
|---|
| 922 | }, | 
|---|
| 923 | }; | 
|---|
| 924 |  | 
|---|
| 925 | static __initconst const u64 snb_hw_cache_event_ids | 
|---|
| 926 | [PERF_COUNT_HW_CACHE_MAX] | 
|---|
| 927 | [PERF_COUNT_HW_CACHE_OP_MAX] | 
|---|
| 928 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = | 
|---|
| 929 | { | 
|---|
| 930 | [ C(L1D) ] = { | 
|---|
| 931 | [ C(OP_READ) ] = { | 
|---|
| 932 | [ C(RESULT_ACCESS) ] = 0xf1d0, /* MEM_UOP_RETIRED.LOADS        */ | 
|---|
| 933 | [ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPLACEMENT              */ | 
|---|
| 934 | }, | 
|---|
| 935 | [ C(OP_WRITE) ] = { | 
|---|
| 936 | [ C(RESULT_ACCESS) ] = 0xf2d0, /* MEM_UOP_RETIRED.STORES       */ | 
|---|
| 937 | [ C(RESULT_MISS)   ] = 0x0851, /* L1D.ALL_M_REPLACEMENT        */ | 
|---|
| 938 | }, | 
|---|
| 939 | [ C(OP_PREFETCH) ] = { | 
|---|
| 940 | [ C(RESULT_ACCESS) ] = 0x0, | 
|---|
| 941 | [ C(RESULT_MISS)   ] = 0x024e, /* HW_PRE_REQ.DL1_MISS          */ | 
|---|
| 942 | }, | 
|---|
| 943 | }, | 
|---|
| 944 | [ C(L1I ) ] = { | 
|---|
| 945 | [ C(OP_READ) ] = { | 
|---|
| 946 | [ C(RESULT_ACCESS) ] = 0x0, | 
|---|
| 947 | [ C(RESULT_MISS)   ] = 0x0280, /* ICACHE.MISSES */ | 
|---|
| 948 | }, | 
|---|
| 949 | [ C(OP_WRITE) ] = { | 
|---|
| 950 | [ C(RESULT_ACCESS) ] = -1, | 
|---|
| 951 | [ C(RESULT_MISS)   ] = -1, | 
|---|
| 952 | }, | 
|---|
| 953 | [ C(OP_PREFETCH) ] = { | 
|---|
| 954 | [ C(RESULT_ACCESS) ] = 0x0, | 
|---|
| 955 | [ C(RESULT_MISS)   ] = 0x0, | 
|---|
| 956 | }, | 
|---|
| 957 | }, | 
|---|
| 958 | [ C(LL  ) ] = { | 
|---|
| 959 | [ C(OP_READ) ] = { | 
|---|
| 960 | /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */ | 
|---|
| 961 | [ C(RESULT_ACCESS) ] = 0x01b7, | 
|---|
| 962 | /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */ | 
|---|
| 963 | [ C(RESULT_MISS)   ] = 0x01b7, | 
|---|
| 964 | }, | 
|---|
| 965 | [ C(OP_WRITE) ] = { | 
|---|
| 966 | /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */ | 
|---|
| 967 | [ C(RESULT_ACCESS) ] = 0x01b7, | 
|---|
| 968 | /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */ | 
|---|
| 969 | [ C(RESULT_MISS)   ] = 0x01b7, | 
|---|
| 970 | }, | 
|---|
| 971 | [ C(OP_PREFETCH) ] = { | 
|---|
| 972 | /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */ | 
|---|
| 973 | [ C(RESULT_ACCESS) ] = 0x01b7, | 
|---|
| 974 | /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */ | 
|---|
| 975 | [ C(RESULT_MISS)   ] = 0x01b7, | 
|---|
| 976 | }, | 
|---|
| 977 | }, | 
|---|
| 978 | [ C(DTLB) ] = { | 
|---|
| 979 | [ C(OP_READ) ] = { | 
|---|
| 980 | [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOP_RETIRED.ALL_LOADS */ | 
|---|
| 981 | [ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.CAUSES_A_WALK */ | 
|---|
| 982 | }, | 
|---|
| 983 | [ C(OP_WRITE) ] = { | 
|---|
| 984 | [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOP_RETIRED.ALL_STORES */ | 
|---|
| 985 | [ C(RESULT_MISS)   ] = 0x0149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */ | 
|---|
| 986 | }, | 
|---|
| 987 | [ C(OP_PREFETCH) ] = { | 
|---|
| 988 | [ C(RESULT_ACCESS) ] = 0x0, | 
|---|
| 989 | [ C(RESULT_MISS)   ] = 0x0, | 
|---|
| 990 | }, | 
|---|
| 991 | }, | 
|---|
| 992 | [ C(ITLB) ] = { | 
|---|
| 993 | [ C(OP_READ) ] = { | 
|---|
| 994 | [ C(RESULT_ACCESS) ] = 0x1085, /* ITLB_MISSES.STLB_HIT         */ | 
|---|
| 995 | [ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISSES.CAUSES_A_WALK    */ | 
|---|
| 996 | }, | 
|---|
| 997 | [ C(OP_WRITE) ] = { | 
|---|
| 998 | [ C(RESULT_ACCESS) ] = -1, | 
|---|
| 999 | [ C(RESULT_MISS)   ] = -1, | 
|---|
| 1000 | }, | 
|---|
| 1001 | [ C(OP_PREFETCH) ] = { | 
|---|
| 1002 | [ C(RESULT_ACCESS) ] = -1, | 
|---|
| 1003 | [ C(RESULT_MISS)   ] = -1, | 
|---|
| 1004 | }, | 
|---|
| 1005 | }, | 
|---|
| 1006 | [ C(BPU ) ] = { | 
|---|
| 1007 | [ C(OP_READ) ] = { | 
|---|
| 1008 | [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */ | 
|---|
| 1009 | [ C(RESULT_MISS)   ] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */ | 
|---|
| 1010 | }, | 
|---|
| 1011 | [ C(OP_WRITE) ] = { | 
|---|
| 1012 | [ C(RESULT_ACCESS) ] = -1, | 
|---|
| 1013 | [ C(RESULT_MISS)   ] = -1, | 
|---|
| 1014 | }, | 
|---|
| 1015 | [ C(OP_PREFETCH) ] = { | 
|---|
| 1016 | [ C(RESULT_ACCESS) ] = -1, | 
|---|
| 1017 | [ C(RESULT_MISS)   ] = -1, | 
|---|
| 1018 | }, | 
|---|
| 1019 | }, | 
|---|
| 1020 | [ C(NODE) ] = { | 
|---|
| 1021 | [ C(OP_READ) ] = { | 
|---|
| 1022 | [ C(RESULT_ACCESS) ] = 0x01b7, | 
|---|
| 1023 | [ C(RESULT_MISS)   ] = 0x01b7, | 
|---|
| 1024 | }, | 
|---|
| 1025 | [ C(OP_WRITE) ] = { | 
|---|
| 1026 | [ C(RESULT_ACCESS) ] = 0x01b7, | 
|---|
| 1027 | [ C(RESULT_MISS)   ] = 0x01b7, | 
|---|
| 1028 | }, | 
|---|
| 1029 | [ C(OP_PREFETCH) ] = { | 
|---|
| 1030 | [ C(RESULT_ACCESS) ] = 0x01b7, | 
|---|
| 1031 | [ C(RESULT_MISS)   ] = 0x01b7, | 
|---|
| 1032 | }, | 
|---|
| 1033 | }, | 
|---|
| 1034 |  | 
|---|
| 1035 | }; | 
|---|
| 1036 |  | 
|---|
| 1037 | /* | 
|---|
| 1038 | * Notes on the events: | 
|---|
| 1039 | * - data reads do not include code reads (comparable to earlier tables) | 
|---|
| 1040 | * - data counts include speculative execution (except L1 write, dtlb, bpu) | 
|---|
| 1041 | * - remote node access includes remote memory, remote cache, remote mmio. | 
|---|
| 1042 | * - prefetches are not included in the counts because they are not | 
|---|
| 1043 | *   reliably counted. | 
|---|
| 1044 | */ | 
|---|
| 1045 |  | 
|---|
| 1046 | #define HSW_DEMAND_DATA_RD		BIT_ULL(0) | 
|---|
| 1047 | #define HSW_DEMAND_RFO			BIT_ULL(1) | 
|---|
| 1048 | #define HSW_ANY_RESPONSE		BIT_ULL(16) | 
|---|
| 1049 | #define HSW_SUPPLIER_NONE		BIT_ULL(17) | 
|---|
| 1050 | #define HSW_L3_MISS_LOCAL_DRAM		BIT_ULL(22) | 
|---|
| 1051 | #define HSW_L3_MISS_REMOTE_HOP0		BIT_ULL(27) | 
|---|
| 1052 | #define HSW_L3_MISS_REMOTE_HOP1		BIT_ULL(28) | 
|---|
| 1053 | #define HSW_L3_MISS_REMOTE_HOP2P	BIT_ULL(29) | 
|---|
| 1054 | #define HSW_L3_MISS			(HSW_L3_MISS_LOCAL_DRAM| \ | 
|---|
| 1055 | HSW_L3_MISS_REMOTE_HOP0|HSW_L3_MISS_REMOTE_HOP1| \ | 
|---|
| 1056 | HSW_L3_MISS_REMOTE_HOP2P) | 
|---|
| 1057 | #define HSW_SNOOP_NONE			BIT_ULL(31) | 
|---|
| 1058 | #define HSW_SNOOP_NOT_NEEDED		BIT_ULL(32) | 
|---|
| 1059 | #define HSW_SNOOP_MISS			BIT_ULL(33) | 
|---|
| 1060 | #define HSW_SNOOP_HIT_NO_FWD		BIT_ULL(34) | 
|---|
| 1061 | #define HSW_SNOOP_HIT_WITH_FWD		BIT_ULL(35) | 
|---|
| 1062 | #define HSW_SNOOP_HITM			BIT_ULL(36) | 
|---|
| 1063 | #define HSW_SNOOP_NON_DRAM		BIT_ULL(37) | 
|---|
| 1064 | #define HSW_ANY_SNOOP			(HSW_SNOOP_NONE| \ | 
|---|
| 1065 | HSW_SNOOP_NOT_NEEDED|HSW_SNOOP_MISS| \ | 
|---|
| 1066 | HSW_SNOOP_HIT_NO_FWD|HSW_SNOOP_HIT_WITH_FWD| \ | 
|---|
| 1067 | HSW_SNOOP_HITM|HSW_SNOOP_NON_DRAM) | 
|---|
| 1068 | #define HSW_SNOOP_DRAM			(HSW_ANY_SNOOP & ~HSW_SNOOP_NON_DRAM) | 
|---|
| 1069 | #define HSW_DEMAND_READ			HSW_DEMAND_DATA_RD | 
|---|
| 1070 | #define HSW_DEMAND_WRITE		HSW_DEMAND_RFO | 
|---|
| 1071 | #define HSW_L3_MISS_REMOTE		(HSW_L3_MISS_REMOTE_HOP0|\ | 
|---|
| 1072 | HSW_L3_MISS_REMOTE_HOP1|HSW_L3_MISS_REMOTE_HOP2P) | 
|---|
| 1073 | #define HSW_LLC_ACCESS			HSW_ANY_RESPONSE | 
|---|
| 1074 |  | 
|---|
| 1075 | #define BDW_L3_MISS_LOCAL		BIT(26) | 
|---|
| 1076 | #define BDW_L3_MISS			(BDW_L3_MISS_LOCAL| \ | 
|---|
| 1077 | HSW_L3_MISS_REMOTE_HOP0|HSW_L3_MISS_REMOTE_HOP1| \ | 
|---|
| 1078 | HSW_L3_MISS_REMOTE_HOP2P) | 
|---|
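|  | /* | 
|---|
|  | * As with the SKL_ bits above, these HSW_ and BDW_ bits are OR'ed into the | 
|---|
|  | * MSR_OFFCORE_RSP_x values used by hsw_hw_cache_extra_regs below. | 
|---|
|  | */ | 
|---|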
| 1079 |  | 
|---|
| 1080 |  | 
|---|
| 1081 | static __initconst const u64 hsw_hw_cache_event_ids | 
|---|
| 1082 | [PERF_COUNT_HW_CACHE_MAX] | 
|---|
| 1083 | [PERF_COUNT_HW_CACHE_OP_MAX] | 
|---|
| 1084 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = | 
|---|
| 1085 | { | 
|---|
| 1086 | [ C(L1D ) ] = { | 
|---|
| 1087 | [ C(OP_READ) ] = { | 
|---|
| 1088 | [ C(RESULT_ACCESS) ] = 0x81d0,	/* MEM_UOPS_RETIRED.ALL_LOADS */ | 
|---|
| 1089 | [ C(RESULT_MISS)   ] = 0x151,	/* L1D.REPLACEMENT */ | 
|---|
| 1090 | }, | 
|---|
| 1091 | [ C(OP_WRITE) ] = { | 
|---|
| 1092 | [ C(RESULT_ACCESS) ] = 0x82d0,	/* MEM_UOPS_RETIRED.ALL_STORES */ | 
|---|
| 1093 | [ C(RESULT_MISS)   ] = 0x0, | 
|---|
| 1094 | }, | 
|---|
| 1095 | [ C(OP_PREFETCH) ] = { | 
|---|
| 1096 | [ C(RESULT_ACCESS) ] = 0x0, | 
|---|
| 1097 | [ C(RESULT_MISS)   ] = 0x0, | 
|---|
| 1098 | }, | 
|---|
| 1099 | }, | 
|---|
| 1100 | [ C(L1I ) ] = { | 
|---|
| 1101 | [ C(OP_READ) ] = { | 
|---|
| 1102 | [ C(RESULT_ACCESS) ] = 0x0, | 
|---|
| 1103 | [ C(RESULT_MISS)   ] = 0x280,	/* ICACHE.MISSES */ | 
|---|
| 1104 | }, | 
|---|
| 1105 | [ C(OP_WRITE) ] = { | 
|---|
| 1106 | [ C(RESULT_ACCESS) ] = -1, | 
|---|
| 1107 | [ C(RESULT_MISS)   ] = -1, | 
|---|
| 1108 | }, | 
|---|
| 1109 | [ C(OP_PREFETCH) ] = { | 
|---|
| 1110 | [ C(RESULT_ACCESS) ] = 0x0, | 
|---|
| 1111 | [ C(RESULT_MISS)   ] = 0x0, | 
|---|
| 1112 | }, | 
|---|
| 1113 | }, | 
|---|
| 1114 | [ C(LL  ) ] = { | 
|---|
| 1115 | [ C(OP_READ) ] = { | 
|---|
| 1116 | [ C(RESULT_ACCESS) ] = 0x1b7,	/* OFFCORE_RESPONSE */ | 
|---|
| 1117 | [ C(RESULT_MISS)   ] = 0x1b7,	/* OFFCORE_RESPONSE */ | 
|---|
| 1118 | }, | 
|---|
| 1119 | [ C(OP_WRITE) ] = { | 
|---|
| 1120 | [ C(RESULT_ACCESS) ] = 0x1b7,	/* OFFCORE_RESPONSE */ | 
|---|
| 1121 | [ C(RESULT_MISS)   ] = 0x1b7,	/* OFFCORE_RESPONSE */ | 
|---|
| 1122 | }, | 
|---|
| 1123 | [ C(OP_PREFETCH) ] = { | 
|---|
| 1124 | [ C(RESULT_ACCESS) ] = 0x0, | 
|---|
| 1125 | [ C(RESULT_MISS)   ] = 0x0, | 
|---|
| 1126 | }, | 
|---|
| 1127 | }, | 
|---|
| 1128 | [ C(DTLB) ] = { | 
|---|
| 1129 | [ C(OP_READ) ] = { | 
|---|
| 1130 | [ C(RESULT_ACCESS) ] = 0x81d0,	/* MEM_UOPS_RETIRED.ALL_LOADS */ | 
|---|
| 1131 | [ C(RESULT_MISS)   ] = 0x108,	/* DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK */ | 
|---|
| 1132 | }, | 
|---|
| 1133 | [ C(OP_WRITE) ] = { | 
|---|
| 1134 | [ C(RESULT_ACCESS) ] = 0x82d0,	/* MEM_UOPS_RETIRED.ALL_STORES */ | 
|---|
| 1135 | [ C(RESULT_MISS)   ] = 0x149,	/* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */ | 
|---|
| 1136 | }, | 
|---|
| 1137 | [ C(OP_PREFETCH) ] = { | 
|---|
| 1138 | [ C(RESULT_ACCESS) ] = 0x0, | 
|---|
| 1139 | [ C(RESULT_MISS)   ] = 0x0, | 
|---|
| 1140 | }, | 
|---|
| 1141 | }, | 
|---|
| 1142 | [ C(ITLB) ] = { | 
|---|
| 1143 | [ C(OP_READ) ] = { | 
|---|
| 1144 | [ C(RESULT_ACCESS) ] = 0x6085,	/* ITLB_MISSES.STLB_HIT */ | 
|---|
| 1145 | [ C(RESULT_MISS)   ] = 0x185,	/* ITLB_MISSES.MISS_CAUSES_A_WALK */ | 
|---|
| 1146 | }, | 
|---|
| 1147 | [ C(OP_WRITE) ] = { | 
|---|
| 1148 | [ C(RESULT_ACCESS) ] = -1, | 
|---|
| 1149 | [ C(RESULT_MISS)   ] = -1, | 
|---|
| 1150 | }, | 
|---|
| 1151 | [ C(OP_PREFETCH) ] = { | 
|---|
| 1152 | [ C(RESULT_ACCESS) ] = -1, | 
|---|
| 1153 | [ C(RESULT_MISS)   ] = -1, | 
|---|
| 1154 | }, | 
|---|
| 1155 | }, | 
|---|
| 1156 | [ C(BPU ) ] = { | 
|---|
| 1157 | [ C(OP_READ) ] = { | 
|---|
| 1158 | [ C(RESULT_ACCESS) ] = 0xc4,	/* BR_INST_RETIRED.ALL_BRANCHES */ | 
|---|
| 1159 | [ C(RESULT_MISS)   ] = 0xc5,	/* BR_MISP_RETIRED.ALL_BRANCHES */ | 
|---|
| 1160 | }, | 
|---|
| 1161 | [ C(OP_WRITE) ] = { | 
|---|
| 1162 | [ C(RESULT_ACCESS) ] = -1, | 
|---|
| 1163 | [ C(RESULT_MISS)   ] = -1, | 
|---|
| 1164 | }, | 
|---|
| 1165 | [ C(OP_PREFETCH) ] = { | 
|---|
| 1166 | [ C(RESULT_ACCESS) ] = -1, | 
|---|
| 1167 | [ C(RESULT_MISS)   ] = -1, | 
|---|
| 1168 | }, | 
|---|
| 1169 | }, | 
|---|
| 1170 | [ C(NODE) ] = { | 
|---|
| 1171 | [ C(OP_READ) ] = { | 
|---|
| 1172 | [ C(RESULT_ACCESS) ] = 0x1b7,	/* OFFCORE_RESPONSE */ | 
|---|
| 1173 | [ C(RESULT_MISS)   ] = 0x1b7,	/* OFFCORE_RESPONSE */ | 
|---|
| 1174 | }, | 
|---|
| 1175 | [ C(OP_WRITE) ] = { | 
|---|
| 1176 | [ C(RESULT_ACCESS) ] = 0x1b7,	/* OFFCORE_RESPONSE */ | 
|---|
| 1177 | [ C(RESULT_MISS)   ] = 0x1b7,	/* OFFCORE_RESPONSE */ | 
|---|
| 1178 | }, | 
|---|
| 1179 | [ C(OP_PREFETCH) ] = { | 
|---|
| 1180 | [ C(RESULT_ACCESS) ] = 0x0, | 
|---|
| 1181 | [ C(RESULT_MISS)   ] = 0x0, | 
|---|
| 1182 | }, | 
|---|
| 1183 | }, | 
|---|
| 1184 | }; | 
|---|
| 1185 |  | 
|---|
| 1186 | static __initconst const u64 hsw_hw_cache_extra_regs | 
|---|
| 1187 | [PERF_COUNT_HW_CACHE_MAX] | 
|---|
| 1188 | [PERF_COUNT_HW_CACHE_OP_MAX] | 
|---|
| 1189 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = | 
|---|
| 1190 | { | 
|---|
| 1191 | [ C(LL  ) ] = { | 
|---|
| 1192 | [ C(OP_READ) ] = { | 
|---|
| 1193 | [ C(RESULT_ACCESS) ] = HSW_DEMAND_READ| | 
|---|
| 1194 | HSW_LLC_ACCESS, | 
|---|
| 1195 | [ C(RESULT_MISS)   ] = HSW_DEMAND_READ| | 
|---|
| 1196 | HSW_L3_MISS|HSW_ANY_SNOOP, | 
|---|
| 1197 | }, | 
|---|
| 1198 | [ C(OP_WRITE) ] = { | 
|---|
| 1199 | [ C(RESULT_ACCESS) ] = HSW_DEMAND_WRITE| | 
|---|
| 1200 | HSW_LLC_ACCESS, | 
|---|
| 1201 | [ C(RESULT_MISS)   ] = HSW_DEMAND_WRITE| | 
|---|
| 1202 | HSW_L3_MISS|HSW_ANY_SNOOP, | 
|---|
| 1203 | }, | 
|---|
| 1204 | [ C(OP_PREFETCH) ] = { | 
|---|
| 1205 | [ C(RESULT_ACCESS) ] = 0x0, | 
|---|
| 1206 | [ C(RESULT_MISS)   ] = 0x0, | 
|---|
| 1207 | }, | 
|---|
| 1208 | }, | 
|---|
| 1209 | [ C(NODE) ] = { | 
|---|
| 1210 | [ C(OP_READ) ] = { | 
|---|
| 1211 | [ C(RESULT_ACCESS) ] = HSW_DEMAND_READ| | 
|---|
| 1212 | HSW_L3_MISS_LOCAL_DRAM| | 
|---|
| 1213 | HSW_SNOOP_DRAM, | 
|---|
| 1214 | [ C(RESULT_MISS)   ] = HSW_DEMAND_READ| | 
|---|
| 1215 | HSW_L3_MISS_REMOTE| | 
|---|
| 1216 | HSW_SNOOP_DRAM, | 
|---|
| 1217 | }, | 
|---|
| 1218 | [ C(OP_WRITE) ] = { | 
|---|
| 1219 | [ C(RESULT_ACCESS) ] = HSW_DEMAND_WRITE| | 
|---|
| 1220 | HSW_L3_MISS_LOCAL_DRAM| | 
|---|
| 1221 | HSW_SNOOP_DRAM, | 
|---|
| 1222 | [ C(RESULT_MISS)   ] = HSW_DEMAND_WRITE| | 
|---|
| 1223 | HSW_L3_MISS_REMOTE| | 
|---|
| 1224 | HSW_SNOOP_DRAM, | 
|---|
| 1225 | }, | 
|---|
| 1226 | [ C(OP_PREFETCH) ] = { | 
|---|
| 1227 | [ C(RESULT_ACCESS) ] = 0x0, | 
|---|
| 1228 | [ C(RESULT_MISS)   ] = 0x0, | 
|---|
| 1229 | }, | 
|---|
| 1230 | }, | 
|---|
| 1231 | }; | 
|---|
| 1232 |  | 
|---|
| 1233 | static __initconst const u64 westmere_hw_cache_event_ids | 
|---|
| 1234 | [PERF_COUNT_HW_CACHE_MAX] | 
|---|
| 1235 | [PERF_COUNT_HW_CACHE_OP_MAX] | 
|---|
| 1236 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = | 
|---|
| 1237 | { | 
|---|
| 1238 | [ C(L1D) ] = { | 
|---|
| 1239 | [ C(OP_READ) ] = { | 
|---|
| 1240 | [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS       */ | 
|---|
| 1241 | [ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPL                     */ | 
|---|
| 1242 | }, | 
|---|
| 1243 | [ C(OP_WRITE) ] = { | 
|---|
| 1244 | [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES      */ | 
|---|
| 1245 | [ C(RESULT_MISS)   ] = 0x0251, /* L1D.M_REPL                   */ | 
|---|
| 1246 | }, | 
|---|
| 1247 | [ C(OP_PREFETCH) ] = { | 
|---|
| 1248 | [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS        */ | 
|---|
| 1249 | [ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS            */ | 
|---|
| 1250 | }, | 
|---|
| 1251 | }, | 
|---|
| 1252 | [ C(L1I ) ] = { | 
|---|
| 1253 | [ C(OP_READ) ] = { | 
|---|
| 1254 | [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS                    */ | 
|---|
| 1255 | [ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                   */ | 
|---|
| 1256 | }, | 
|---|
| 1257 | [ C(OP_WRITE) ] = { | 
|---|
| 1258 | [ C(RESULT_ACCESS) ] = -1, | 
|---|
| 1259 | [ C(RESULT_MISS)   ] = -1, | 
|---|
| 1260 | }, | 
|---|
| 1261 | [ C(OP_PREFETCH) ] = { | 
|---|
| 1262 | [ C(RESULT_ACCESS) ] = 0x0, | 
|---|
| 1263 | [ C(RESULT_MISS)   ] = 0x0, | 
|---|
| 1264 | }, | 
|---|
| 1265 | }, | 
|---|
| 1266 | [ C(LL  ) ] = { | 
|---|
| 1267 | [ C(OP_READ) ] = { | 
|---|
| 1268 | /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */ | 
|---|
| 1269 | [ C(RESULT_ACCESS) ] = 0x01b7, | 
|---|
| 1270 | /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */ | 
|---|
| 1271 | [ C(RESULT_MISS)   ] = 0x01b7, | 
|---|
| 1272 | }, | 
|---|
| 1273 | /* | 
|---|
| 1274 | * Use RFO, not WRITEBACK, because a write miss would typically occur | 
|---|
| 1275 | * on RFO. | 
|---|
| 1276 | */ | 
|---|
| 1277 | [ C(OP_WRITE) ] = { | 
|---|
| 1278 | /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */ | 
|---|
| 1279 | [ C(RESULT_ACCESS) ] = 0x01b7, | 
|---|
| 1280 | /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */ | 
|---|
| 1281 | [ C(RESULT_MISS)   ] = 0x01b7, | 
|---|
| 1282 | }, | 
|---|
| 1283 | [ C(OP_PREFETCH) ] = { | 
|---|
| 1284 | /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */ | 
|---|
| 1285 | [ C(RESULT_ACCESS) ] = 0x01b7, | 
|---|
| 1286 | /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */ | 
|---|
| 1287 | [ C(RESULT_MISS)   ] = 0x01b7, | 
|---|
| 1288 | }, | 
|---|
| 1289 | }, | 
|---|
| 1290 | [ C(DTLB) ] = { | 
|---|
| 1291 | [ C(OP_READ) ] = { | 
|---|
| 1292 | [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS       */ | 
|---|
| 1293 | [ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY         */ | 
|---|
| 1294 | }, | 
|---|
| 1295 | [ C(OP_WRITE) ] = { | 
|---|
| 1296 | [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES      */ | 
|---|
| 1297 | [ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS  */ | 
|---|
| 1298 | }, | 
|---|
| 1299 | [ C(OP_PREFETCH) ] = { | 
|---|
| 1300 | [ C(RESULT_ACCESS) ] = 0x0, | 
|---|
| 1301 | [ C(RESULT_MISS)   ] = 0x0, | 
|---|
| 1302 | }, | 
|---|
| 1303 | }, | 
|---|
| 1304 | [ C(ITLB) ] = { | 
|---|
| 1305 | [ C(OP_READ) ] = { | 
|---|
| 1306 | [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P           */ | 
|---|
| 1307 | [ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISSES.ANY              */ | 
|---|
| 1308 | }, | 
|---|
| 1309 | [ C(OP_WRITE) ] = { | 
|---|
| 1310 | [ C(RESULT_ACCESS) ] = -1, | 
|---|
| 1311 | [ C(RESULT_MISS)   ] = -1, | 
|---|
| 1312 | }, | 
|---|
| 1313 | [ C(OP_PREFETCH) ] = { | 
|---|
| 1314 | [ C(RESULT_ACCESS) ] = -1, | 
|---|
| 1315 | [ C(RESULT_MISS)   ] = -1, | 
|---|
| 1316 | }, | 
|---|
| 1317 | }, | 
|---|
| 1318 | [ C(BPU ) ] = { | 
|---|
| 1319 | [ C(OP_READ) ] = { | 
|---|
| 1320 | [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */ | 
|---|
| 1321 | [ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY               */ | 
|---|
| 1322 | }, | 
|---|
| 1323 | [ C(OP_WRITE) ] = { | 
|---|
| 1324 | [ C(RESULT_ACCESS) ] = -1, | 
|---|
| 1325 | [ C(RESULT_MISS)   ] = -1, | 
|---|
| 1326 | }, | 
|---|
| 1327 | [ C(OP_PREFETCH) ] = { | 
|---|
| 1328 | [ C(RESULT_ACCESS) ] = -1, | 
|---|
| 1329 | [ C(RESULT_MISS)   ] = -1, | 
|---|
| 1330 | }, | 
|---|
| 1331 | }, | 
|---|
| 1332 | [ C(NODE) ] = { | 
|---|
| 1333 | [ C(OP_READ) ] = { | 
|---|
| 1334 | [ C(RESULT_ACCESS) ] = 0x01b7, | 
|---|
| 1335 | [ C(RESULT_MISS)   ] = 0x01b7, | 
|---|
| 1336 | }, | 
|---|
| 1337 | [ C(OP_WRITE) ] = { | 
|---|
| 1338 | [ C(RESULT_ACCESS) ] = 0x01b7, | 
|---|
| 1339 | [ C(RESULT_MISS)   ] = 0x01b7, | 
|---|
| 1340 | }, | 
|---|
| 1341 | [ C(OP_PREFETCH) ] = { | 
|---|
| 1342 | [ C(RESULT_ACCESS) ] = 0x01b7, | 
|---|
| 1343 | [ C(RESULT_MISS)   ] = 0x01b7, | 
|---|
| 1344 | }, | 
|---|
| 1345 | }, | 
|---|
| 1346 | }; | 
|---|
| 1347 |  | 
|---|
| 1348 | /* | 
|---|
| 1349 | * Nehalem/Westmere MSR_OFFCORE_RESPONSE bits; | 
|---|
| 1350 | * See IA32 SDM Vol 3B 30.6.1.3 | 
|---|
| 1351 | */ | 
|---|
| 1352 |  | 
|---|
| 1353 | #define NHM_DMND_DATA_RD	(1 << 0) | 
|---|
| 1354 | #define NHM_DMND_RFO		(1 << 1) | 
|---|
| 1355 | #define NHM_DMND_IFETCH		(1 << 2) | 
|---|
| 1356 | #define NHM_DMND_WB		(1 << 3) | 
|---|
| 1357 | #define NHM_PF_DATA_RD		(1 << 4) | 
|---|
| 1358 | #define NHM_PF_DATA_RFO		(1 << 5) | 
|---|
| 1359 | #define NHM_PF_IFETCH		(1 << 6) | 
|---|
| 1360 | #define NHM_OFFCORE_OTHER	(1 << 7) | 
|---|
| 1361 | #define NHM_UNCORE_HIT		(1 << 8) | 
|---|
| 1362 | #define NHM_OTHER_CORE_HIT_SNP	(1 << 9) | 
|---|
| 1363 | #define NHM_OTHER_CORE_HITM	(1 << 10) | 
|---|
| 1364 | /* reserved */ | 
|---|
| 1365 | #define NHM_REMOTE_CACHE_FWD	(1 << 12) | 
|---|
| 1366 | #define NHM_REMOTE_DRAM		(1 << 13) | 
|---|
| 1367 | #define NHM_LOCAL_DRAM		(1 << 14) | 
|---|
| 1368 | #define NHM_NON_DRAM		(1 << 15) | 
|---|
| 1369 |  | 
|---|
| 1370 | #define NHM_LOCAL		(NHM_LOCAL_DRAM|NHM_REMOTE_CACHE_FWD) | 
|---|
| 1371 | #define NHM_REMOTE		(NHM_REMOTE_DRAM) | 
|---|
| 1372 |  | 
|---|
| 1373 | #define NHM_DMND_READ		(NHM_DMND_DATA_RD) | 
|---|
| 1374 | #define NHM_DMND_WRITE		(NHM_DMND_RFO|NHM_DMND_WB) | 
|---|
| 1375 | #define NHM_DMND_PREFETCH	(NHM_PF_DATA_RD|NHM_PF_DATA_RFO) | 
|---|
| 1376 |  | 
|---|
| 1377 | #define NHM_L3_HIT	(NHM_UNCORE_HIT|NHM_OTHER_CORE_HIT_SNP|NHM_OTHER_CORE_HITM) | 
|---|
| 1378 | #define NHM_L3_MISS	(NHM_NON_DRAM|NHM_LOCAL_DRAM|NHM_REMOTE_DRAM|NHM_REMOTE_CACHE_FWD) | 
|---|
| 1379 | #define NHM_L3_ACCESS	(NHM_L3_HIT|NHM_L3_MISS) | 
|---|
| 1380 |  | 
|---|
| 1381 | static __initconst const u64 nehalem_hw_cache_extra_regs | 
|---|
| 1382 | [PERF_COUNT_HW_CACHE_MAX] | 
|---|
| 1383 | [PERF_COUNT_HW_CACHE_OP_MAX] | 
|---|
| 1384 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = | 
|---|
| 1385 | { | 
|---|
| 1386 | [ C(LL  ) ] = { | 
|---|
| 1387 | [ C(OP_READ) ] = { | 
|---|
| 1388 | [ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_L3_ACCESS, | 
|---|
| 1389 | [ C(RESULT_MISS)   ] = NHM_DMND_READ|NHM_L3_MISS, | 
|---|
| 1390 | }, | 
|---|
| 1391 | [ C(OP_WRITE) ] = { | 
|---|
| 1392 | [ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_L3_ACCESS, | 
|---|
| 1393 | [ C(RESULT_MISS)   ] = NHM_DMND_WRITE|NHM_L3_MISS, | 
|---|
| 1394 | }, | 
|---|
| 1395 | [ C(OP_PREFETCH) ] = { | 
|---|
| 1396 | [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_L3_ACCESS, | 
|---|
| 1397 | [ C(RESULT_MISS)   ] = NHM_DMND_PREFETCH|NHM_L3_MISS, | 
|---|
| 1398 | }, | 
|---|
| 1399 | }, | 
|---|
| 1400 | [ C(NODE) ] = { | 
|---|
| 1401 | [ C(OP_READ) ] = { | 
|---|
| 1402 | [ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_LOCAL|NHM_REMOTE, | 
|---|
| 1403 | [ C(RESULT_MISS)   ] = NHM_DMND_READ|NHM_REMOTE, | 
|---|
| 1404 | }, | 
|---|
| 1405 | [ C(OP_WRITE) ] = { | 
|---|
| 1406 | [ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_LOCAL|NHM_REMOTE, | 
|---|
| 1407 | [ C(RESULT_MISS)   ] = NHM_DMND_WRITE|NHM_REMOTE, | 
|---|
| 1408 | }, | 
|---|
| 1409 | [ C(OP_PREFETCH) ] = { | 
|---|
| 1410 | [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_LOCAL|NHM_REMOTE, | 
|---|
| 1411 | [ C(RESULT_MISS)   ] = NHM_DMND_PREFETCH|NHM_REMOTE, | 
|---|
| 1412 | }, | 
|---|
| 1413 | }, | 
|---|
| 1414 | }; | 
|---|
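|  | /* | 
|---|
|  |  * Illustrative sketch, not taken from this file: for the generic LL cache | 
|---|
|  |  * events above, the core encoding is OFFCORE_RESPONSE (0x01b7) and the | 
|---|
|  |  * qualifier bits are supplied through the extra register, roughly: | 
|---|
|  |  * | 
|---|
|  |  *	attr.config  = 0x01b7;			(OFFCORE_RESPONSE_0) | 
|---|
|  |  *	attr.config1 = NHM_DMND_READ|NHM_L3_MISS;	(written to MSR_OFFCORE_RSP_0) | 
|---|
|  |  * | 
|---|
|  |  * The config1-to-MSR plumbing is handled by the generic extra_reg code; the | 
|---|
|  |  * attr field usage shown here is an assumption for illustration only. | 
|---|
|  |  */ | 
|---|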
| 1415 |  | 
|---|
| 1416 | static __initconst const u64 nehalem_hw_cache_event_ids | 
|---|
| 1417 | [PERF_COUNT_HW_CACHE_MAX] | 
|---|
| 1418 | [PERF_COUNT_HW_CACHE_OP_MAX] | 
|---|
| 1419 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = | 
|---|
| 1420 | { | 
|---|
| 1421 | [ C(L1D) ] = { | 
|---|
| 1422 | [ C(OP_READ) ] = { | 
|---|
| 1423 | [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS       */ | 
|---|
| 1424 | [ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPL                     */ | 
|---|
| 1425 | }, | 
|---|
| 1426 | [ C(OP_WRITE) ] = { | 
|---|
| 1427 | [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES      */ | 
|---|
| 1428 | [ C(RESULT_MISS)   ] = 0x0251, /* L1D.M_REPL                   */ | 
|---|
| 1429 | }, | 
|---|
| 1430 | [ C(OP_PREFETCH) ] = { | 
|---|
| 1431 | [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS        */ | 
|---|
| 1432 | [ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS            */ | 
|---|
| 1433 | }, | 
|---|
| 1434 | }, | 
|---|
| 1435 | [ C(L1I ) ] = { | 
|---|
| 1436 | [ C(OP_READ) ] = { | 
|---|
| 1437 | [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS                    */ | 
|---|
| 1438 | [ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                   */ | 
|---|
| 1439 | }, | 
|---|
| 1440 | [ C(OP_WRITE) ] = { | 
|---|
| 1441 | [ C(RESULT_ACCESS) ] = -1, | 
|---|
| 1442 | [ C(RESULT_MISS)   ] = -1, | 
|---|
| 1443 | }, | 
|---|
| 1444 | [ C(OP_PREFETCH) ] = { | 
|---|
| 1445 | [ C(RESULT_ACCESS) ] = 0x0, | 
|---|
| 1446 | [ C(RESULT_MISS)   ] = 0x0, | 
|---|
| 1447 | }, | 
|---|
| 1448 | }, | 
|---|
| 1449 | [ C(LL  ) ] = { | 
|---|
| 1450 | [ C(OP_READ) ] = { | 
|---|
| 1451 | /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */ | 
|---|
| 1452 | [ C(RESULT_ACCESS) ] = 0x01b7, | 
|---|
| 1453 | /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */ | 
|---|
| 1454 | [ C(RESULT_MISS)   ] = 0x01b7, | 
|---|
| 1455 | }, | 
|---|
| 1456 | /* | 
|---|
| 1457 | * Use RFO, not WRITEBACK, because a write miss would typically occur | 
|---|
| 1458 | * on RFO. | 
|---|
| 1459 | */ | 
|---|
| 1460 | [ C(OP_WRITE) ] = { | 
|---|
| 1461 | /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */ | 
|---|
| 1462 | [ C(RESULT_ACCESS) ] = 0x01b7, | 
|---|
| 1463 | /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */ | 
|---|
| 1464 | [ C(RESULT_MISS)   ] = 0x01b7, | 
|---|
| 1465 | }, | 
|---|
| 1466 | [ C(OP_PREFETCH) ] = { | 
|---|
| 1467 | /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */ | 
|---|
| 1468 | [ C(RESULT_ACCESS) ] = 0x01b7, | 
|---|
| 1469 | /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */ | 
|---|
| 1470 | [ C(RESULT_MISS)   ] = 0x01b7, | 
|---|
| 1471 | }, | 
|---|
| 1472 | }, | 
|---|
| 1473 | [ C(DTLB) ] = { | 
|---|
| 1474 | [ C(OP_READ) ] = { | 
|---|
| 1475 | [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI   (alias)  */ | 
|---|
| 1476 | [ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY         */ | 
|---|
| 1477 | }, | 
|---|
| 1478 | [ C(OP_WRITE) ] = { | 
|---|
| 1479 | [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI   (alias)  */ | 
|---|
| 1480 | [ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS  */ | 
|---|
| 1481 | }, | 
|---|
| 1482 | [ C(OP_PREFETCH) ] = { | 
|---|
| 1483 | [ C(RESULT_ACCESS) ] = 0x0, | 
|---|
| 1484 | [ C(RESULT_MISS)   ] = 0x0, | 
|---|
| 1485 | }, | 
|---|
| 1486 | }, | 
|---|
| 1487 | [ C(ITLB) ] = { | 
|---|
| 1488 | [ C(OP_READ) ] = { | 
|---|
| 1489 | [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P           */ | 
|---|
| 1490 | [ C(RESULT_MISS)   ] = 0x20c8, /* ITLB_MISS_RETIRED            */ | 
|---|
| 1491 | }, | 
|---|
| 1492 | [ C(OP_WRITE) ] = { | 
|---|
| 1493 | [ C(RESULT_ACCESS) ] = -1, | 
|---|
| 1494 | [ C(RESULT_MISS)   ] = -1, | 
|---|
| 1495 | }, | 
|---|
| 1496 | [ C(OP_PREFETCH) ] = { | 
|---|
| 1497 | [ C(RESULT_ACCESS) ] = -1, | 
|---|
| 1498 | [ C(RESULT_MISS)   ] = -1, | 
|---|
| 1499 | }, | 
|---|
| 1500 | }, | 
|---|
| 1501 | [ C(BPU ) ] = { | 
|---|
| 1502 | [ C(OP_READ) ] = { | 
|---|
| 1503 | [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */ | 
|---|
| 1504 | [ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY               */ | 
|---|
| 1505 | }, | 
|---|
| 1506 | [ C(OP_WRITE) ] = { | 
|---|
| 1507 | [ C(RESULT_ACCESS) ] = -1, | 
|---|
| 1508 | [ C(RESULT_MISS)   ] = -1, | 
|---|
| 1509 | }, | 
|---|
| 1510 | [ C(OP_PREFETCH) ] = { | 
|---|
| 1511 | [ C(RESULT_ACCESS) ] = -1, | 
|---|
| 1512 | [ C(RESULT_MISS)   ] = -1, | 
|---|
| 1513 | }, | 
|---|
| 1514 | }, | 
|---|
| 1515 | [ C(NODE) ] = { | 
|---|
| 1516 | [ C(OP_READ) ] = { | 
|---|
| 1517 | [ C(RESULT_ACCESS) ] = 0x01b7, | 
|---|
| 1518 | [ C(RESULT_MISS)   ] = 0x01b7, | 
|---|
| 1519 | }, | 
|---|
| 1520 | [ C(OP_WRITE) ] = { | 
|---|
| 1521 | [ C(RESULT_ACCESS) ] = 0x01b7, | 
|---|
| 1522 | [ C(RESULT_MISS)   ] = 0x01b7, | 
|---|
| 1523 | }, | 
|---|
| 1524 | [ C(OP_PREFETCH) ] = { | 
|---|
| 1525 | [ C(RESULT_ACCESS) ] = 0x01b7, | 
|---|
| 1526 | [ C(RESULT_MISS)   ] = 0x01b7, | 
|---|
| 1527 | }, | 
|---|
| 1528 | }, | 
|---|
| 1529 | }; | 
|---|
| 1530 |  | 
|---|
| 1531 | static __initconst const u64 core2_hw_cache_event_ids | 
|---|
| 1532 | [PERF_COUNT_HW_CACHE_MAX] | 
|---|
| 1533 | [PERF_COUNT_HW_CACHE_OP_MAX] | 
|---|
| 1534 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = | 
|---|
| 1535 | { | 
|---|
| 1536 | [ C(L1D) ] = { | 
|---|
| 1537 | [ C(OP_READ) ] = { | 
|---|
| 1538 | [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI          */ | 
|---|
| 1539 | [ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE       */ | 
|---|
| 1540 | }, | 
|---|
| 1541 | [ C(OP_WRITE) ] = { | 
|---|
| 1542 | [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI          */ | 
|---|
| 1543 | [ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE       */ | 
|---|
| 1544 | }, | 
|---|
| 1545 | [ C(OP_PREFETCH) ] = { | 
|---|
| 1546 | [ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS      */ | 
|---|
| 1547 | [ C(RESULT_MISS)   ] = 0, | 
|---|
| 1548 | }, | 
|---|
| 1549 | }, | 
|---|
| 1550 | [ C(L1I ) ] = { | 
|---|
| 1551 | [ C(OP_READ) ] = { | 
|---|
| 1552 | [ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS                  */ | 
|---|
| 1553 | [ C(RESULT_MISS)   ] = 0x0081, /* L1I.MISSES                 */ | 
|---|
| 1554 | }, | 
|---|
| 1555 | [ C(OP_WRITE) ] = { | 
|---|
| 1556 | [ C(RESULT_ACCESS) ] = -1, | 
|---|
| 1557 | [ C(RESULT_MISS)   ] = -1, | 
|---|
| 1558 | }, | 
|---|
| 1559 | [ C(OP_PREFETCH) ] = { | 
|---|
| 1560 | [ C(RESULT_ACCESS) ] = 0, | 
|---|
| 1561 | [ C(RESULT_MISS)   ] = 0, | 
|---|
| 1562 | }, | 
|---|
| 1563 | }, | 
|---|
| 1564 | [ C(LL  ) ] = { | 
|---|
| 1565 | [ C(OP_READ) ] = { | 
|---|
| 1566 | [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI                 */ | 
|---|
| 1567 | [ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE               */ | 
|---|
| 1568 | }, | 
|---|
| 1569 | [ C(OP_WRITE) ] = { | 
|---|
| 1570 | [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI                 */ | 
|---|
| 1571 | [ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE               */ | 
|---|
| 1572 | }, | 
|---|
| 1573 | [ C(OP_PREFETCH) ] = { | 
|---|
| 1574 | [ C(RESULT_ACCESS) ] = 0, | 
|---|
| 1575 | [ C(RESULT_MISS)   ] = 0, | 
|---|
| 1576 | }, | 
|---|
| 1577 | }, | 
|---|
| 1578 | [ C(DTLB) ] = { | 
|---|
| 1579 | [ C(OP_READ) ] = { | 
|---|
| 1580 | [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI  (alias) */ | 
|---|
| 1581 | [ C(RESULT_MISS)   ] = 0x0208, /* DTLB_MISSES.MISS_LD        */ | 
|---|
| 1582 | }, | 
|---|
| 1583 | [ C(OP_WRITE) ] = { | 
|---|
| 1584 | [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI  (alias) */ | 
|---|
| 1585 | [ C(RESULT_MISS)   ] = 0x0808, /* DTLB_MISSES.MISS_ST        */ | 
|---|
| 1586 | }, | 
|---|
| 1587 | [ C(OP_PREFETCH) ] = { | 
|---|
| 1588 | [ C(RESULT_ACCESS) ] = 0, | 
|---|
| 1589 | [ C(RESULT_MISS)   ] = 0, | 
|---|
| 1590 | }, | 
|---|
| 1591 | }, | 
|---|
| 1592 | [ C(ITLB) ] = { | 
|---|
| 1593 | [ C(OP_READ) ] = { | 
|---|
| 1594 | [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P         */ | 
|---|
| 1595 | [ C(RESULT_MISS)   ] = 0x1282, /* ITLBMISSES                 */ | 
|---|
| 1596 | }, | 
|---|
| 1597 | [ C(OP_WRITE) ] = { | 
|---|
| 1598 | [ C(RESULT_ACCESS) ] = -1, | 
|---|
| 1599 | [ C(RESULT_MISS)   ] = -1, | 
|---|
| 1600 | }, | 
|---|
| 1601 | [ C(OP_PREFETCH) ] = { | 
|---|
| 1602 | [ C(RESULT_ACCESS) ] = -1, | 
|---|
| 1603 | [ C(RESULT_MISS)   ] = -1, | 
|---|
| 1604 | }, | 
|---|
| 1605 | }, | 
|---|
| 1606 | [ C(BPU ) ] = { | 
|---|
| 1607 | [ C(OP_READ) ] = { | 
|---|
| 1608 | [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY        */ | 
|---|
| 1609 | [ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED    */ | 
|---|
| 1610 | }, | 
|---|
| 1611 | [ C(OP_WRITE) ] = { | 
|---|
| 1612 | [ C(RESULT_ACCESS) ] = -1, | 
|---|
| 1613 | [ C(RESULT_MISS)   ] = -1, | 
|---|
| 1614 | }, | 
|---|
| 1615 | [ C(OP_PREFETCH) ] = { | 
|---|
| 1616 | [ C(RESULT_ACCESS) ] = -1, | 
|---|
| 1617 | [ C(RESULT_MISS)   ] = -1, | 
|---|
| 1618 | }, | 
|---|
| 1619 | }, | 
|---|
| 1620 | }; | 
|---|
| 1621 |  | 
|---|
| 1622 | static __initconst const u64 atom_hw_cache_event_ids | 
|---|
| 1623 | [PERF_COUNT_HW_CACHE_MAX] | 
|---|
| 1624 | [PERF_COUNT_HW_CACHE_OP_MAX] | 
|---|
| 1625 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = | 
|---|
| 1626 | { | 
|---|
| 1627 | [ C(L1D) ] = { | 
|---|
| 1628 | [ C(OP_READ) ] = { | 
|---|
| 1629 | [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD               */ | 
|---|
| 1630 | [ C(RESULT_MISS)   ] = 0, | 
|---|
| 1631 | }, | 
|---|
| 1632 | [ C(OP_WRITE) ] = { | 
|---|
| 1633 | [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST               */ | 
|---|
| 1634 | [ C(RESULT_MISS)   ] = 0, | 
|---|
| 1635 | }, | 
|---|
| 1636 | [ C(OP_PREFETCH) ] = { | 
|---|
| 1637 | [ C(RESULT_ACCESS) ] = 0x0, | 
|---|
| 1638 | [ C(RESULT_MISS)   ] = 0, | 
|---|
| 1639 | }, | 
|---|
| 1640 | }, | 
|---|
| 1641 | [ C(L1I ) ] = { | 
|---|
| 1642 | [ C(OP_READ) ] = { | 
|---|
| 1643 | [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS                  */ | 
|---|
| 1644 | [ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                 */ | 
|---|
| 1645 | }, | 
|---|
| 1646 | [ C(OP_WRITE) ] = { | 
|---|
| 1647 | [ C(RESULT_ACCESS) ] = -1, | 
|---|
| 1648 | [ C(RESULT_MISS)   ] = -1, | 
|---|
| 1649 | }, | 
|---|
| 1650 | [ C(OP_PREFETCH) ] = { | 
|---|
| 1651 | [ C(RESULT_ACCESS) ] = 0, | 
|---|
| 1652 | [ C(RESULT_MISS)   ] = 0, | 
|---|
| 1653 | }, | 
|---|
| 1654 | }, | 
|---|
| 1655 | [ C(LL  ) ] = { | 
|---|
| 1656 | [ C(OP_READ) ] = { | 
|---|
| 1657 | [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI                 */ | 
|---|
| 1658 | [ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE               */ | 
|---|
| 1659 | }, | 
|---|
| 1660 | [ C(OP_WRITE) ] = { | 
|---|
| 1661 | [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI                 */ | 
|---|
| 1662 | [ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE               */ | 
|---|
| 1663 | }, | 
|---|
| 1664 | [ C(OP_PREFETCH) ] = { | 
|---|
| 1665 | [ C(RESULT_ACCESS) ] = 0, | 
|---|
| 1666 | [ C(RESULT_MISS)   ] = 0, | 
|---|
| 1667 | }, | 
|---|
| 1668 | }, | 
|---|
| 1669 | [ C(DTLB) ] = { | 
|---|
| 1670 | [ C(OP_READ) ] = { | 
|---|
| 1671 | [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI  (alias) */ | 
|---|
| 1672 | [ C(RESULT_MISS)   ] = 0x0508, /* DTLB_MISSES.MISS_LD        */ | 
|---|
| 1673 | }, | 
|---|
| 1674 | [ C(OP_WRITE) ] = { | 
|---|
| 1675 | [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI  (alias) */ | 
|---|
| 1676 | [ C(RESULT_MISS)   ] = 0x0608, /* DTLB_MISSES.MISS_ST        */ | 
|---|
| 1677 | }, | 
|---|
| 1678 | [ C(OP_PREFETCH) ] = { | 
|---|
| 1679 | [ C(RESULT_ACCESS) ] = 0, | 
|---|
| 1680 | [ C(RESULT_MISS)   ] = 0, | 
|---|
| 1681 | }, | 
|---|
| 1682 | }, | 
|---|
| 1683 | [ C(ITLB) ] = { | 
|---|
| 1684 | [ C(OP_READ) ] = { | 
|---|
| 1685 | [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P         */ | 
|---|
| 1686 | [ C(RESULT_MISS)   ] = 0x0282, /* ITLB.MISSES                */ | 
|---|
| 1687 | }, | 
|---|
| 1688 | [ C(OP_WRITE) ] = { | 
|---|
| 1689 | [ C(RESULT_ACCESS) ] = -1, | 
|---|
| 1690 | [ C(RESULT_MISS)   ] = -1, | 
|---|
| 1691 | }, | 
|---|
| 1692 | [ C(OP_PREFETCH) ] = { | 
|---|
| 1693 | [ C(RESULT_ACCESS) ] = -1, | 
|---|
| 1694 | [ C(RESULT_MISS)   ] = -1, | 
|---|
| 1695 | }, | 
|---|
| 1696 | }, | 
|---|
| 1697 | [ C(BPU ) ] = { | 
|---|
| 1698 | [ C(OP_READ) ] = { | 
|---|
| 1699 | [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY        */ | 
|---|
| 1700 | [ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED    */ | 
|---|
| 1701 | }, | 
|---|
| 1702 | [ C(OP_WRITE) ] = { | 
|---|
| 1703 | [ C(RESULT_ACCESS) ] = -1, | 
|---|
| 1704 | [ C(RESULT_MISS)   ] = -1, | 
|---|
| 1705 | }, | 
|---|
| 1706 | [ C(OP_PREFETCH) ] = { | 
|---|
| 1707 | [ C(RESULT_ACCESS) ] = -1, | 
|---|
| 1708 | [ C(RESULT_MISS)   ] = -1, | 
|---|
| 1709 | }, | 
|---|
| 1710 | }, | 
|---|
| 1711 | }; | 
|---|
| 1712 |  | 
|---|
| 1713 | EVENT_ATTR_STR(topdown-total-slots, td_total_slots_slm, "event=0x3c"); | 
|---|
| 1714 | EVENT_ATTR_STR(topdown-total-slots.scale, td_total_slots_scale_slm, "2"); | 
|---|
| 1715 | /* no_alloc_cycles.not_delivered */ | 
|---|
| 1716 | EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles_slm, | 
|---|
| 1717 | "event=0xca,umask=0x50"); | 
|---|
| 1718 | EVENT_ATTR_STR(topdown-fetch-bubbles.scale, td_fetch_bubbles_scale_slm, "2"); | 
|---|
| 1719 | /* uops_retired.all */ | 
|---|
| 1720 | EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued_slm, | 
|---|
| 1721 | "event=0xc2,umask=0x10"); | 
|---|
| 1722 | /* uops_retired.all */ | 
|---|
| 1723 | EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired_slm, | 
|---|
| 1724 | "event=0xc2,umask=0x10"); | 
|---|
| 1725 |  | 
|---|
| 1726 | static struct attribute *slm_events_attrs[] = { | 
|---|
| 1727 | EVENT_PTR(td_total_slots_slm), | 
|---|
| 1728 | EVENT_PTR(td_total_slots_scale_slm), | 
|---|
| 1729 | EVENT_PTR(td_fetch_bubbles_slm), | 
|---|
| 1730 | EVENT_PTR(td_fetch_bubbles_scale_slm), | 
|---|
| 1731 | EVENT_PTR(td_slots_issued_slm), | 
|---|
| 1732 | EVENT_PTR(td_slots_retired_slm), | 
|---|
| 1733 | NULL | 
|---|
| 1734 | }; | 
|---|
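|  | /* | 
|---|
|  |  * Usage sketch, not taken from this file: the attributes above are exported | 
|---|
|  |  * under /sys/bus/event_source/devices/cpu/events/, so the topdown metrics | 
|---|
|  |  * can be requested by name from userspace, e.g.: | 
|---|
|  |  * | 
|---|
|  |  *	perf stat -e topdown-total-slots,topdown-slots-retired -a sleep 1 | 
|---|
|  |  * | 
|---|
|  |  * Availability of these event names depends on the CPU and running kernel. | 
|---|
|  |  */ | 
|---|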
| 1735 |  | 
|---|
| 1736 | static struct extra_reg intel_slm_extra_regs[] __read_mostly = | 
|---|
| 1737 | { | 
|---|
| 1738 | /* must define OFFCORE_RSP_X first, see intel_fixup_er() */ | 
|---|
| 1739 | INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x768005ffffull, RSP_0), | 
|---|
| 1740 | INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x368005ffffull, RSP_1), | 
|---|
| 1741 | EVENT_EXTRA_END | 
|---|
| 1742 | }; | 
|---|
| 1743 |  | 
|---|
| 1744 | #define SLM_DMND_READ		SNB_DMND_DATA_RD | 
|---|
| 1745 | #define SLM_DMND_WRITE		SNB_DMND_RFO | 
|---|
| 1746 | #define SLM_DMND_PREFETCH	(SNB_PF_DATA_RD|SNB_PF_RFO) | 
|---|
| 1747 |  | 
|---|
| 1748 | #define SLM_SNP_ANY		(SNB_SNP_NONE|SNB_SNP_MISS|SNB_NO_FWD|SNB_HITM) | 
|---|
| 1749 | #define SLM_LLC_ACCESS		SNB_RESP_ANY | 
|---|
| 1750 | #define SLM_LLC_MISS		(SLM_SNP_ANY|SNB_NON_DRAM) | 
|---|
| 1751 |  | 
|---|
| 1752 | static __initconst const u64 slm_hw_cache_extra_regs | 
|---|
| 1753 | [PERF_COUNT_HW_CACHE_MAX] | 
|---|
| 1754 | [PERF_COUNT_HW_CACHE_OP_MAX] | 
|---|
| 1755 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = | 
|---|
| 1756 | { | 
|---|
| 1757 | [ C(LL  ) ] = { | 
|---|
| 1758 | [ C(OP_READ) ] = { | 
|---|
| 1759 | [ C(RESULT_ACCESS) ] = SLM_DMND_READ|SLM_LLC_ACCESS, | 
|---|
| 1760 | [ C(RESULT_MISS)   ] = 0, | 
|---|
| 1761 | }, | 
|---|
| 1762 | [ C(OP_WRITE) ] = { | 
|---|
| 1763 | [ C(RESULT_ACCESS) ] = SLM_DMND_WRITE|SLM_LLC_ACCESS, | 
|---|
| 1764 | [ C(RESULT_MISS)   ] = SLM_DMND_WRITE|SLM_LLC_MISS, | 
|---|
| 1765 | }, | 
|---|
| 1766 | [ C(OP_PREFETCH) ] = { | 
|---|
| 1767 | [ C(RESULT_ACCESS) ] = SLM_DMND_PREFETCH|SLM_LLC_ACCESS, | 
|---|
| 1768 | [ C(RESULT_MISS)   ] = SLM_DMND_PREFETCH|SLM_LLC_MISS, | 
|---|
| 1769 | }, | 
|---|
| 1770 | }, | 
|---|
| 1771 | }; | 
|---|
| 1772 |  | 
|---|
| 1773 | static __initconst const u64 slm_hw_cache_event_ids | 
|---|
| 1774 | [PERF_COUNT_HW_CACHE_MAX] | 
|---|
| 1775 | [PERF_COUNT_HW_CACHE_OP_MAX] | 
|---|
| 1776 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = | 
|---|
| 1777 | { | 
|---|
| 1778 | [ C(L1D) ] = { | 
|---|
| 1779 | [ C(OP_READ) ] = { | 
|---|
| 1780 | [ C(RESULT_ACCESS) ] = 0, | 
|---|
| 1781 | [ C(RESULT_MISS)   ] = 0x0104, /* LD_DCU_MISS */ | 
|---|
| 1782 | }, | 
|---|
| 1783 | [ C(OP_WRITE) ] = { | 
|---|
| 1784 | [ C(RESULT_ACCESS) ] = 0, | 
|---|
| 1785 | [ C(RESULT_MISS)   ] = 0, | 
|---|
| 1786 | }, | 
|---|
| 1787 | [ C(OP_PREFETCH) ] = { | 
|---|
| 1788 | [ C(RESULT_ACCESS) ] = 0, | 
|---|
| 1789 | [ C(RESULT_MISS)   ] = 0, | 
|---|
| 1790 | }, | 
|---|
| 1791 | }, | 
|---|
| 1792 | [ C(L1I ) ] = { | 
|---|
| 1793 | [ C(OP_READ) ] = { | 
|---|
| 1794 | [ C(RESULT_ACCESS) ] = 0x0380, /* ICACHE.ACCESSES */ | 
|---|
| 1795 | [ C(RESULT_MISS)   ] = 0x0280, /* ICACHE.MISSES */ | 
|---|
| 1796 | }, | 
|---|
| 1797 | [ C(OP_WRITE) ] = { | 
|---|
| 1798 | [ C(RESULT_ACCESS) ] = -1, | 
|---|
| 1799 | [ C(RESULT_MISS)   ] = -1, | 
|---|
| 1800 | }, | 
|---|
| 1801 | [ C(OP_PREFETCH) ] = { | 
|---|
| 1802 | [ C(RESULT_ACCESS) ] = 0, | 
|---|
| 1803 | [ C(RESULT_MISS)   ] = 0, | 
|---|
| 1804 | }, | 
|---|
| 1805 | }, | 
|---|
| 1806 | [ C(LL  ) ] = { | 
|---|
| 1807 | [ C(OP_READ) ] = { | 
|---|
| 1808 | /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */ | 
|---|
| 1809 | [ C(RESULT_ACCESS) ] = 0x01b7, | 
|---|
| 1810 | [ C(RESULT_MISS)   ] = 0, | 
|---|
| 1811 | }, | 
|---|
| 1812 | [ C(OP_WRITE) ] = { | 
|---|
| 1813 | /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */ | 
|---|
| 1814 | [ C(RESULT_ACCESS) ] = 0x01b7, | 
|---|
| 1815 | /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */ | 
|---|
| 1816 | [ C(RESULT_MISS)   ] = 0x01b7, | 
|---|
| 1817 | }, | 
|---|
| 1818 | [ C(OP_PREFETCH) ] = { | 
|---|
| 1819 | /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */ | 
|---|
| 1820 | [ C(RESULT_ACCESS) ] = 0x01b7, | 
|---|
| 1821 | /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */ | 
|---|
| 1822 | [ C(RESULT_MISS)   ] = 0x01b7, | 
|---|
| 1823 | }, | 
|---|
| 1824 | }, | 
|---|
| 1825 | [ C(DTLB) ] = { | 
|---|
| 1826 | [ C(OP_READ) ] = { | 
|---|
| 1827 | [ C(RESULT_ACCESS) ] = 0, | 
|---|
| 1828 | [ C(RESULT_MISS)   ] = 0x0804, /* LD_DTLB_MISS */ | 
|---|
| 1829 | }, | 
|---|
| 1830 | [ C(OP_WRITE) ] = { | 
|---|
| 1831 | [ C(RESULT_ACCESS) ] = 0, | 
|---|
| 1832 | [ C(RESULT_MISS)   ] = 0, | 
|---|
| 1833 | }, | 
|---|
| 1834 | [ C(OP_PREFETCH) ] = { | 
|---|
| 1835 | [ C(RESULT_ACCESS) ] = 0, | 
|---|
| 1836 | [ C(RESULT_MISS)   ] = 0, | 
|---|
| 1837 | }, | 
|---|
| 1838 | }, | 
|---|
| 1839 | [ C(ITLB) ] = { | 
|---|
| 1840 | [ C(OP_READ) ] = { | 
|---|
| 1841 | [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */ | 
|---|
| 1842 | [ C(RESULT_MISS)   ] = 0x40205, /* PAGE_WALKS.I_SIDE_WALKS */ | 
|---|
| 1843 | }, | 
|---|
| 1844 | [ C(OP_WRITE) ] = { | 
|---|
| 1845 | [ C(RESULT_ACCESS) ] = -1, | 
|---|
| 1846 | [ C(RESULT_MISS)   ] = -1, | 
|---|
| 1847 | }, | 
|---|
| 1848 | [ C(OP_PREFETCH) ] = { | 
|---|
| 1849 | [ C(RESULT_ACCESS) ] = -1, | 
|---|
| 1850 | [ C(RESULT_MISS)   ] = -1, | 
|---|
| 1851 | }, | 
|---|
| 1852 | }, | 
|---|
| 1853 | [ C(BPU ) ] = { | 
|---|
| 1854 | [ C(OP_READ) ] = { | 
|---|
| 1855 | [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */ | 
|---|
| 1856 | [ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */ | 
|---|
| 1857 | }, | 
|---|
| 1858 | [ C(OP_WRITE) ] = { | 
|---|
| 1859 | [ C(RESULT_ACCESS) ] = -1, | 
|---|
| 1860 | [ C(RESULT_MISS)   ] = -1, | 
|---|
| 1861 | }, | 
|---|
| 1862 | [ C(OP_PREFETCH) ] = { | 
|---|
| 1863 | [ C(RESULT_ACCESS) ] = -1, | 
|---|
| 1864 | [ C(RESULT_MISS)   ] = -1, | 
|---|
| 1865 | }, | 
|---|
| 1866 | }, | 
|---|
| 1867 | }; | 
|---|
| 1868 |  | 
|---|
| 1869 | EVENT_ATTR_STR(topdown-total-slots, td_total_slots_glm, "event=0x3c"); | 
|---|
| 1870 | EVENT_ATTR_STR(topdown-total-slots.scale, td_total_slots_scale_glm, "3"); | 
|---|
| 1871 | /* UOPS_NOT_DELIVERED.ANY */ | 
|---|
| 1872 | EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles_glm, "event=0x9c"); | 
|---|
| 1873 | /* ISSUE_SLOTS_NOT_CONSUMED.RECOVERY */ | 
|---|
| 1874 | EVENT_ATTR_STR(topdown-recovery-bubbles, td_recovery_bubbles_glm, "event=0xca,umask=0x02"); | 
|---|
| 1875 | /* UOPS_RETIRED.ANY */ | 
|---|
| 1876 | EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired_glm, "event=0xc2"); | 
|---|
| 1877 | /* UOPS_ISSUED.ANY */ | 
|---|
| 1878 | EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued_glm, "event=0x0e"); | 
|---|
| 1879 |  | 
|---|
| 1880 | static struct attribute *glm_events_attrs[] = { | 
|---|
| 1881 | EVENT_PTR(td_total_slots_glm), | 
|---|
| 1882 | EVENT_PTR(td_total_slots_scale_glm), | 
|---|
| 1883 | EVENT_PTR(td_fetch_bubbles_glm), | 
|---|
| 1884 | EVENT_PTR(td_recovery_bubbles_glm), | 
|---|
| 1885 | EVENT_PTR(td_slots_issued_glm), | 
|---|
| 1886 | EVENT_PTR(td_slots_retired_glm), | 
|---|
| 1887 | NULL | 
|---|
| 1888 | }; | 
|---|
| 1889 |  | 
|---|
| 1890 | static struct extra_reg intel_glm_extra_regs[] __read_mostly = { | 
|---|
| 1891 | /* must define OFFCORE_RSP_X first, see intel_fixup_er() */ | 
|---|
| 1892 | INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x760005ffbfull, RSP_0), | 
|---|
| 1893 | INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x360005ffbfull, RSP_1), | 
|---|
| 1894 | EVENT_EXTRA_END | 
|---|
| 1895 | }; | 
|---|
| 1896 |  | 
|---|
| 1897 | #define GLM_DEMAND_DATA_RD		BIT_ULL(0) | 
|---|
| 1898 | #define GLM_DEMAND_RFO			BIT_ULL(1) | 
|---|
| 1899 | #define GLM_ANY_RESPONSE		BIT_ULL(16) | 
|---|
| 1900 | #define GLM_SNP_NONE_OR_MISS		BIT_ULL(33) | 
|---|
| 1901 | #define GLM_DEMAND_READ			GLM_DEMAND_DATA_RD | 
|---|
| 1902 | #define GLM_DEMAND_WRITE		GLM_DEMAND_RFO | 
|---|
| 1903 | #define GLM_DEMAND_PREFETCH		(SNB_PF_DATA_RD|SNB_PF_RFO) | 
|---|
| 1904 | #define GLM_LLC_ACCESS			GLM_ANY_RESPONSE | 
|---|
| 1905 | #define GLM_SNP_ANY			(GLM_SNP_NONE_OR_MISS|SNB_NO_FWD|SNB_HITM) | 
|---|
| 1906 | #define GLM_LLC_MISS			(GLM_SNP_ANY|SNB_NON_DRAM) | 
|---|
| 1907 |  | 
|---|
| 1908 | static __initconst const u64 glm_hw_cache_event_ids | 
|---|
| 1909 | [PERF_COUNT_HW_CACHE_MAX] | 
|---|
| 1910 | [PERF_COUNT_HW_CACHE_OP_MAX] | 
|---|
| 1911 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = { | 
|---|
| 1912 | [C(L1D)] = { | 
|---|
| 1913 | [C(OP_READ)] = { | 
|---|
| 1914 | [C(RESULT_ACCESS)]	= 0x81d0,	/* MEM_UOPS_RETIRED.ALL_LOADS */ | 
|---|
| 1915 | [C(RESULT_MISS)]	= 0x0, | 
|---|
| 1916 | }, | 
|---|
| 1917 | [C(OP_WRITE)] = { | 
|---|
| 1918 | [C(RESULT_ACCESS)]	= 0x82d0,	/* MEM_UOPS_RETIRED.ALL_STORES */ | 
|---|
| 1919 | [C(RESULT_MISS)]	= 0x0, | 
|---|
| 1920 | }, | 
|---|
| 1921 | [C(OP_PREFETCH)] = { | 
|---|
| 1922 | [C(RESULT_ACCESS)]	= 0x0, | 
|---|
| 1923 | [C(RESULT_MISS)]	= 0x0, | 
|---|
| 1924 | }, | 
|---|
| 1925 | }, | 
|---|
| 1926 | [C(L1I)] = { | 
|---|
| 1927 | [C(OP_READ)] = { | 
|---|
| 1928 | [C(RESULT_ACCESS)]	= 0x0380,	/* ICACHE.ACCESSES */ | 
|---|
| 1929 | [C(RESULT_MISS)]	= 0x0280,	/* ICACHE.MISSES */ | 
|---|
| 1930 | }, | 
|---|
| 1931 | [C(OP_WRITE)] = { | 
|---|
| 1932 | [C(RESULT_ACCESS)]	= -1, | 
|---|
| 1933 | [C(RESULT_MISS)]	= -1, | 
|---|
| 1934 | }, | 
|---|
| 1935 | [C(OP_PREFETCH)] = { | 
|---|
| 1936 | [C(RESULT_ACCESS)]	= 0x0, | 
|---|
| 1937 | [C(RESULT_MISS)]	= 0x0, | 
|---|
| 1938 | }, | 
|---|
| 1939 | }, | 
|---|
| 1940 | [C(LL)] = { | 
|---|
| 1941 | [C(OP_READ)] = { | 
|---|
| 1942 | [C(RESULT_ACCESS)]	= 0x1b7,	/* OFFCORE_RESPONSE */ | 
|---|
| 1943 | [C(RESULT_MISS)]	= 0x1b7,	/* OFFCORE_RESPONSE */ | 
|---|
| 1944 | }, | 
|---|
| 1945 | [C(OP_WRITE)] = { | 
|---|
| 1946 | [C(RESULT_ACCESS)]	= 0x1b7,	/* OFFCORE_RESPONSE */ | 
|---|
| 1947 | [C(RESULT_MISS)]	= 0x1b7,	/* OFFCORE_RESPONSE */ | 
|---|
| 1948 | }, | 
|---|
| 1949 | [C(OP_PREFETCH)] = { | 
|---|
| 1950 | [C(RESULT_ACCESS)]	= 0x1b7,	/* OFFCORE_RESPONSE */ | 
|---|
| 1951 | [C(RESULT_MISS)]	= 0x1b7,	/* OFFCORE_RESPONSE */ | 
|---|
| 1952 | }, | 
|---|
| 1953 | }, | 
|---|
| 1954 | [C(DTLB)] = { | 
|---|
| 1955 | [C(OP_READ)] = { | 
|---|
| 1956 | [C(RESULT_ACCESS)]	= 0x81d0,	/* MEM_UOPS_RETIRED.ALL_LOADS */ | 
|---|
| 1957 | [C(RESULT_MISS)]	= 0x0, | 
|---|
| 1958 | }, | 
|---|
| 1959 | [C(OP_WRITE)] = { | 
|---|
| 1960 | [C(RESULT_ACCESS)]	= 0x82d0,	/* MEM_UOPS_RETIRED.ALL_STORES */ | 
|---|
| 1961 | [C(RESULT_MISS)]	= 0x0, | 
|---|
| 1962 | }, | 
|---|
| 1963 | [C(OP_PREFETCH)] = { | 
|---|
| 1964 | [C(RESULT_ACCESS)]	= 0x0, | 
|---|
| 1965 | [C(RESULT_MISS)]	= 0x0, | 
|---|
| 1966 | }, | 
|---|
| 1967 | }, | 
|---|
| 1968 | [C(ITLB)] = { | 
|---|
| 1969 | [C(OP_READ)] = { | 
|---|
| 1970 | [C(RESULT_ACCESS)]	= 0x00c0,	/* INST_RETIRED.ANY_P */ | 
|---|
| 1971 | [C(RESULT_MISS)]	= 0x0481,	/* ITLB.MISS */ | 
|---|
| 1972 | }, | 
|---|
| 1973 | [C(OP_WRITE)] = { | 
|---|
| 1974 | [C(RESULT_ACCESS)]	= -1, | 
|---|
| 1975 | [C(RESULT_MISS)]	= -1, | 
|---|
| 1976 | }, | 
|---|
| 1977 | [C(OP_PREFETCH)] = { | 
|---|
| 1978 | [C(RESULT_ACCESS)]	= -1, | 
|---|
| 1979 | [C(RESULT_MISS)]	= -1, | 
|---|
| 1980 | }, | 
|---|
| 1981 | }, | 
|---|
| 1982 | [C(BPU)] = { | 
|---|
| 1983 | [C(OP_READ)] = { | 
|---|
| 1984 | [C(RESULT_ACCESS)]	= 0x00c4,	/* BR_INST_RETIRED.ALL_BRANCHES */ | 
|---|
| 1985 | [C(RESULT_MISS)]	= 0x00c5,	/* BR_MISP_RETIRED.ALL_BRANCHES */ | 
|---|
| 1986 | }, | 
|---|
| 1987 | [C(OP_WRITE)] = { | 
|---|
| 1988 | [C(RESULT_ACCESS)]	= -1, | 
|---|
| 1989 | [C(RESULT_MISS)]	= -1, | 
|---|
| 1990 | }, | 
|---|
| 1991 | [C(OP_PREFETCH)] = { | 
|---|
| 1992 | [C(RESULT_ACCESS)]	= -1, | 
|---|
| 1993 | [C(RESULT_MISS)]	= -1, | 
|---|
| 1994 | }, | 
|---|
| 1995 | }, | 
|---|
| 1996 | }; | 
|---|
| 1997 |  | 
|---|
| 1998 | static __initconst const u64 glm_hw_cache_extra_regs | 
|---|
| 1999 | [PERF_COUNT_HW_CACHE_MAX] | 
|---|
| 2000 | [PERF_COUNT_HW_CACHE_OP_MAX] | 
|---|
| 2001 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = { | 
|---|
| 2002 | [C(LL)] = { | 
|---|
| 2003 | [C(OP_READ)] = { | 
|---|
| 2004 | [C(RESULT_ACCESS)]	= GLM_DEMAND_READ| | 
|---|
| 2005 | GLM_LLC_ACCESS, | 
|---|
| 2006 | [C(RESULT_MISS)]	= GLM_DEMAND_READ| | 
|---|
| 2007 | GLM_LLC_MISS, | 
|---|
| 2008 | }, | 
|---|
| 2009 | [C(OP_WRITE)] = { | 
|---|
| 2010 | [C(RESULT_ACCESS)]	= GLM_DEMAND_WRITE| | 
|---|
| 2011 | GLM_LLC_ACCESS, | 
|---|
| 2012 | [C(RESULT_MISS)]	= GLM_DEMAND_WRITE| | 
|---|
| 2013 | GLM_LLC_MISS, | 
|---|
| 2014 | }, | 
|---|
| 2015 | [C(OP_PREFETCH)] = { | 
|---|
| 2016 | [C(RESULT_ACCESS)]	= GLM_DEMAND_PREFETCH| | 
|---|
| 2017 | GLM_LLC_ACCESS, | 
|---|
| 2018 | [C(RESULT_MISS)]	= GLM_DEMAND_PREFETCH| | 
|---|
| 2019 | GLM_LLC_MISS, | 
|---|
| 2020 | }, | 
|---|
| 2021 | }, | 
|---|
| 2022 | }; | 
|---|
| 2023 |  | 
|---|
| 2024 | static __initconst const u64 glp_hw_cache_event_ids | 
|---|
| 2025 | [PERF_COUNT_HW_CACHE_MAX] | 
|---|
| 2026 | [PERF_COUNT_HW_CACHE_OP_MAX] | 
|---|
| 2027 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = { | 
|---|
| 2028 | [C(L1D)] = { | 
|---|
| 2029 | [C(OP_READ)] = { | 
|---|
| 2030 | [C(RESULT_ACCESS)]	= 0x81d0,	/* MEM_UOPS_RETIRED.ALL_LOADS */ | 
|---|
| 2031 | [C(RESULT_MISS)]	= 0x0, | 
|---|
| 2032 | }, | 
|---|
| 2033 | [C(OP_WRITE)] = { | 
|---|
| 2034 | [C(RESULT_ACCESS)]	= 0x82d0,	/* MEM_UOPS_RETIRED.ALL_STORES */ | 
|---|
| 2035 | [C(RESULT_MISS)]	= 0x0, | 
|---|
| 2036 | }, | 
|---|
| 2037 | [C(OP_PREFETCH)] = { | 
|---|
| 2038 | [C(RESULT_ACCESS)]	= 0x0, | 
|---|
| 2039 | [C(RESULT_MISS)]	= 0x0, | 
|---|
| 2040 | }, | 
|---|
| 2041 | }, | 
|---|
| 2042 | [C(L1I)] = { | 
|---|
| 2043 | [C(OP_READ)] = { | 
|---|
| 2044 | [C(RESULT_ACCESS)]	= 0x0380,	/* ICACHE.ACCESSES */ | 
|---|
| 2045 | [C(RESULT_MISS)]	= 0x0280,	/* ICACHE.MISSES */ | 
|---|
| 2046 | }, | 
|---|
| 2047 | [C(OP_WRITE)] = { | 
|---|
| 2048 | [C(RESULT_ACCESS)]	= -1, | 
|---|
| 2049 | [C(RESULT_MISS)]	= -1, | 
|---|
| 2050 | }, | 
|---|
| 2051 | [C(OP_PREFETCH)] = { | 
|---|
| 2052 | [C(RESULT_ACCESS)]	= 0x0, | 
|---|
| 2053 | [C(RESULT_MISS)]	= 0x0, | 
|---|
| 2054 | }, | 
|---|
| 2055 | }, | 
|---|
| 2056 | [C(LL)] = { | 
|---|
| 2057 | [C(OP_READ)] = { | 
|---|
| 2058 | [C(RESULT_ACCESS)]	= 0x1b7,	/* OFFCORE_RESPONSE */ | 
|---|
| 2059 | [C(RESULT_MISS)]	= 0x1b7,	/* OFFCORE_RESPONSE */ | 
|---|
| 2060 | }, | 
|---|
| 2061 | [C(OP_WRITE)] = { | 
|---|
| 2062 | [C(RESULT_ACCESS)]	= 0x1b7,	/* OFFCORE_RESPONSE */ | 
|---|
| 2063 | [C(RESULT_MISS)]	= 0x1b7,	/* OFFCORE_RESPONSE */ | 
|---|
| 2064 | }, | 
|---|
| 2065 | [C(OP_PREFETCH)] = { | 
|---|
| 2066 | [C(RESULT_ACCESS)]	= 0x0, | 
|---|
| 2067 | [C(RESULT_MISS)]	= 0x0, | 
|---|
| 2068 | }, | 
|---|
| 2069 | }, | 
|---|
| 2070 | [C(DTLB)] = { | 
|---|
| 2071 | [C(OP_READ)] = { | 
|---|
| 2072 | [C(RESULT_ACCESS)]	= 0x81d0,	/* MEM_UOPS_RETIRED.ALL_LOADS */ | 
|---|
| 2073 | [C(RESULT_MISS)]	= 0xe08,	/* DTLB_LOAD_MISSES.WALK_COMPLETED */ | 
|---|
| 2074 | }, | 
|---|
| 2075 | [C(OP_WRITE)] = { | 
|---|
| 2076 | [C(RESULT_ACCESS)]	= 0x82d0,	/* MEM_UOPS_RETIRED.ALL_STORES */ | 
|---|
| 2077 | [C(RESULT_MISS)]	= 0xe49,	/* DTLB_STORE_MISSES.WALK_COMPLETED */ | 
|---|
| 2078 | }, | 
|---|
| 2079 | [C(OP_PREFETCH)] = { | 
|---|
| 2080 | [C(RESULT_ACCESS)]	= 0x0, | 
|---|
| 2081 | [C(RESULT_MISS)]	= 0x0, | 
|---|
| 2082 | }, | 
|---|
| 2083 | }, | 
|---|
| 2084 | [C(ITLB)] = { | 
|---|
| 2085 | [C(OP_READ)] = { | 
|---|
| 2086 | [C(RESULT_ACCESS)]	= 0x00c0,	/* INST_RETIRED.ANY_P */ | 
|---|
| 2087 | [C(RESULT_MISS)]	= 0x0481,	/* ITLB.MISS */ | 
|---|
| 2088 | }, | 
|---|
| 2089 | [C(OP_WRITE)] = { | 
|---|
| 2090 | [C(RESULT_ACCESS)]	= -1, | 
|---|
| 2091 | [C(RESULT_MISS)]	= -1, | 
|---|
| 2092 | }, | 
|---|
| 2093 | [C(OP_PREFETCH)] = { | 
|---|
| 2094 | [C(RESULT_ACCESS)]	= -1, | 
|---|
| 2095 | [C(RESULT_MISS)]	= -1, | 
|---|
| 2096 | }, | 
|---|
| 2097 | }, | 
|---|
| 2098 | [C(BPU)] = { | 
|---|
| 2099 | [C(OP_READ)] = { | 
|---|
| 2100 | [C(RESULT_ACCESS)]	= 0x00c4,	/* BR_INST_RETIRED.ALL_BRANCHES */ | 
|---|
| 2101 | [C(RESULT_MISS)]	= 0x00c5,	/* BR_MISP_RETIRED.ALL_BRANCHES */ | 
|---|
| 2102 | }, | 
|---|
| 2103 | [C(OP_WRITE)] = { | 
|---|
| 2104 | [C(RESULT_ACCESS)]	= -1, | 
|---|
| 2105 | [C(RESULT_MISS)]	= -1, | 
|---|
| 2106 | }, | 
|---|
| 2107 | [C(OP_PREFETCH)] = { | 
|---|
| 2108 | [C(RESULT_ACCESS)]	= -1, | 
|---|
| 2109 | [C(RESULT_MISS)]	= -1, | 
|---|
| 2110 | }, | 
|---|
| 2111 | }, | 
|---|
| 2112 | }; | 
|---|
| 2113 |  | 
|---|
| 2114 | static __initconst const u64 glp_hw_cache_extra_regs | 
|---|
| 2115 | [PERF_COUNT_HW_CACHE_MAX] | 
|---|
| 2116 | [PERF_COUNT_HW_CACHE_OP_MAX] | 
|---|
| 2117 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = { | 
|---|
| 2118 | [C(LL)] = { | 
|---|
| 2119 | [C(OP_READ)] = { | 
|---|
| 2120 | [C(RESULT_ACCESS)]	= GLM_DEMAND_READ| | 
|---|
| 2121 | GLM_LLC_ACCESS, | 
|---|
| 2122 | [C(RESULT_MISS)]	= GLM_DEMAND_READ| | 
|---|
| 2123 | GLM_LLC_MISS, | 
|---|
| 2124 | }, | 
|---|
| 2125 | [C(OP_WRITE)] = { | 
|---|
| 2126 | [C(RESULT_ACCESS)]	= GLM_DEMAND_WRITE| | 
|---|
| 2127 | GLM_LLC_ACCESS, | 
|---|
| 2128 | [C(RESULT_MISS)]	= GLM_DEMAND_WRITE| | 
|---|
| 2129 | GLM_LLC_MISS, | 
|---|
| 2130 | }, | 
|---|
| 2131 | [C(OP_PREFETCH)] = { | 
|---|
| 2132 | [C(RESULT_ACCESS)]	= 0x0, | 
|---|
| 2133 | [C(RESULT_MISS)]	= 0x0, | 
|---|
| 2134 | }, | 
|---|
| 2135 | }, | 
|---|
| 2136 | }; | 
|---|
| 2137 |  | 
|---|
| 2138 | #define TNT_LOCAL_DRAM			BIT_ULL(26) | 
|---|
| 2139 | #define TNT_DEMAND_READ			GLM_DEMAND_DATA_RD | 
|---|
| 2140 | #define TNT_DEMAND_WRITE		GLM_DEMAND_RFO | 
|---|
| 2141 | #define TNT_LLC_ACCESS			GLM_ANY_RESPONSE | 
|---|
| 2142 | #define TNT_SNP_ANY			(SNB_SNP_NOT_NEEDED|SNB_SNP_MISS| \ | 
|---|
| 2143 | SNB_NO_FWD|SNB_SNP_FWD|SNB_HITM) | 
|---|
| 2144 | #define TNT_LLC_MISS			(TNT_SNP_ANY|SNB_NON_DRAM|TNT_LOCAL_DRAM) | 
|---|
| 2145 |  | 
|---|
| 2146 | static __initconst const u64 tnt_hw_cache_extra_regs | 
|---|
| 2147 | [PERF_COUNT_HW_CACHE_MAX] | 
|---|
| 2148 | [PERF_COUNT_HW_CACHE_OP_MAX] | 
|---|
| 2149 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = { | 
|---|
| 2150 | [C(LL)] = { | 
|---|
| 2151 | [C(OP_READ)] = { | 
|---|
| 2152 | [C(RESULT_ACCESS)]	= TNT_DEMAND_READ| | 
|---|
| 2153 | TNT_LLC_ACCESS, | 
|---|
| 2154 | [C(RESULT_MISS)]	= TNT_DEMAND_READ| | 
|---|
| 2155 | TNT_LLC_MISS, | 
|---|
| 2156 | }, | 
|---|
| 2157 | [C(OP_WRITE)] = { | 
|---|
| 2158 | [C(RESULT_ACCESS)]	= TNT_DEMAND_WRITE| | 
|---|
| 2159 | TNT_LLC_ACCESS, | 
|---|
| 2160 | [C(RESULT_MISS)]	= TNT_DEMAND_WRITE| | 
|---|
| 2161 | TNT_LLC_MISS, | 
|---|
| 2162 | }, | 
|---|
| 2163 | [C(OP_PREFETCH)] = { | 
|---|
| 2164 | [C(RESULT_ACCESS)]	= 0x0, | 
|---|
| 2165 | [C(RESULT_MISS)]	= 0x0, | 
|---|
| 2166 | }, | 
|---|
| 2167 | }, | 
|---|
| 2168 | }; | 
|---|
| 2169 |  | 
|---|
| 2170 | EVENT_ATTR_STR(topdown-fe-bound,       td_fe_bound_tnt, "event=0x71,umask=0x0"); | 
|---|
| 2171 | EVENT_ATTR_STR(topdown-retiring,       td_retiring_tnt, "event=0xc2,umask=0x0"); | 
|---|
| 2172 | EVENT_ATTR_STR(topdown-bad-spec,       td_bad_spec_tnt, "event=0x73,umask=0x6"); | 
|---|
| 2173 | EVENT_ATTR_STR(topdown-be-bound,       td_be_bound_tnt, "event=0x74,umask=0x0"); | 
|---|
| 2174 |  | 
|---|
| 2175 | static struct attribute *tnt_events_attrs[] = { | 
|---|
| 2176 | EVENT_PTR(td_fe_bound_tnt), | 
|---|
| 2177 | EVENT_PTR(td_retiring_tnt), | 
|---|
| 2178 | EVENT_PTR(td_bad_spec_tnt), | 
|---|
| 2179 | EVENT_PTR(td_be_bound_tnt), | 
|---|
| 2180 | NULL, | 
|---|
| 2181 | }; | 
|---|
| 2182 |  | 
|---|
| 2183 | static struct extra_reg intel_tnt_extra_regs[] __read_mostly = { | 
|---|
| 2184 | /* must define OFFCORE_RSP_X first, see intel_fixup_er() */ | 
|---|
| 2185 | INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x800ff0ffffff9fffull, RSP_0), | 
|---|
| 2186 | INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0xff0ffffff9fffull, RSP_1), | 
|---|
| 2187 | EVENT_EXTRA_END | 
|---|
| 2188 | }; | 
|---|
| 2189 |  | 
|---|
| 2190 | EVENT_ATTR_STR(mem-loads,	mem_ld_grt, "event=0xd0,umask=0x5,ldlat=3"); | 
|---|
| 2191 | EVENT_ATTR_STR(mem-stores,	mem_st_grt, "event=0xd0,umask=0x6"); | 
|---|
| 2192 |  | 
|---|
| 2193 | static struct attribute *grt_mem_attrs[] = { | 
|---|
| 2194 | EVENT_PTR(mem_ld_grt), | 
|---|
| 2195 | EVENT_PTR(mem_st_grt), | 
|---|
| 2196 | NULL | 
|---|
| 2197 | }; | 
|---|
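|  | /* | 
|---|
|  |  * Usage sketch, not taken from this file: the mem-loads/mem-stores aliases | 
|---|
|  |  * above are what memory profiling tooling typically resolves, e.g.: | 
|---|
|  |  * | 
|---|
|  |  *	perf mem record -a sleep 1 | 
|---|
|  |  *	perf record -e cpu/mem-loads,ldlat=30/P -a sleep 1 | 
|---|
|  |  * | 
|---|
|  |  * The ldlat=30 threshold is only an example value, not taken from this file. | 
|---|
|  |  */ | 
|---|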
| 2198 |  | 
|---|
| 2199 | static struct extra_reg intel_grt_extra_regs[] __read_mostly = { | 
|---|
| 2200 | /* must define OFFCORE_RSP_X first, see intel_fixup_er() */ | 
|---|
| 2201 | INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0), | 
|---|
| 2202 | INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1), | 
|---|
| 2203 | INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x5d0), | 
|---|
| 2204 | EVENT_EXTRA_END | 
|---|
| 2205 | }; | 
|---|
| 2206 |  | 
|---|
| 2207 | EVENT_ATTR_STR(topdown-retiring,       td_retiring_cmt, "event=0x72,umask=0x0"); | 
|---|
| 2208 | EVENT_ATTR_STR(topdown-bad-spec,       td_bad_spec_cmt, "event=0x73,umask=0x0"); | 
|---|
| 2209 |  | 
|---|
| 2210 | static struct attribute *cmt_events_attrs[] = { | 
|---|
| 2211 | EVENT_PTR(td_fe_bound_tnt), | 
|---|
| 2212 | EVENT_PTR(td_retiring_cmt), | 
|---|
| 2213 | EVENT_PTR(td_bad_spec_cmt), | 
|---|
| 2214 | EVENT_PTR(td_be_bound_tnt), | 
|---|
| 2215 | NULL | 
|---|
| 2216 | }; | 
|---|
| 2217 |  | 
|---|
| 2218 | static struct extra_reg intel_cmt_extra_regs[] __read_mostly = { | 
|---|
| 2219 | /* must define OFFCORE_RSP_X first, see intel_fixup_er() */ | 
|---|
| 2220 | INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x800ff3ffffffffffull, RSP_0), | 
|---|
| 2221 | INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0xff3ffffffffffull, RSP_1), | 
|---|
| 2222 | INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x5d0), | 
|---|
| 2223 | INTEL_UEVENT_EXTRA_REG(0x0127, MSR_SNOOP_RSP_0, 0xffffffffffffffffull, SNOOP_0), | 
|---|
| 2224 | INTEL_UEVENT_EXTRA_REG(0x0227, MSR_SNOOP_RSP_1, 0xffffffffffffffffull, SNOOP_1), | 
|---|
| 2225 | EVENT_EXTRA_END | 
|---|
| 2226 | }; | 
|---|
| 2227 |  | 
|---|
| 2228 | EVENT_ATTR_STR(topdown-fe-bound,       td_fe_bound_skt, "event=0x9c,umask=0x01"); | 
|---|
| 2229 | EVENT_ATTR_STR(topdown-retiring,       td_retiring_skt, "event=0xc2,umask=0x02"); | 
|---|
| 2230 | EVENT_ATTR_STR(topdown-be-bound,       td_be_bound_skt, "event=0xa4,umask=0x02"); | 
|---|
| 2231 |  | 
|---|
| 2232 | static struct attribute *skt_events_attrs[] = { | 
|---|
| 2233 | EVENT_PTR(td_fe_bound_skt), | 
|---|
| 2234 | EVENT_PTR(td_retiring_skt), | 
|---|
| 2235 | EVENT_PTR(td_bad_spec_cmt), | 
|---|
| 2236 | EVENT_PTR(td_be_bound_skt), | 
|---|
| 2237 | NULL, | 
|---|
| 2238 | }; | 
|---|
| 2239 |  | 
|---|
| 2240 | #define KNL_OT_L2_HITE		BIT_ULL(19) /* Other Tile L2 Hit */ | 
|---|
| 2241 | #define KNL_OT_L2_HITF		BIT_ULL(20) /* Other Tile L2 Hit */ | 
|---|
| 2242 | #define KNL_MCDRAM_LOCAL	BIT_ULL(21) | 
|---|
| 2243 | #define KNL_MCDRAM_FAR		BIT_ULL(22) | 
|---|
| 2244 | #define KNL_DDR_LOCAL		BIT_ULL(23) | 
|---|
| 2245 | #define KNL_DDR_FAR		BIT_ULL(24) | 
|---|
| 2246 | #define KNL_DRAM_ANY		(KNL_MCDRAM_LOCAL | KNL_MCDRAM_FAR | \ | 
|---|
| 2247 | KNL_DDR_LOCAL | KNL_DDR_FAR) | 
|---|
| 2248 | #define KNL_L2_READ		SLM_DMND_READ | 
|---|
| 2249 | #define KNL_L2_WRITE		SLM_DMND_WRITE | 
|---|
| 2250 | #define KNL_L2_PREFETCH		SLM_DMND_PREFETCH | 
|---|
| 2251 | #define KNL_L2_ACCESS		SLM_LLC_ACCESS | 
|---|
| 2252 | #define KNL_L2_MISS		(KNL_OT_L2_HITE | KNL_OT_L2_HITF | \ | 
|---|
| 2253 | KNL_DRAM_ANY | SNB_SNP_ANY | \ | 
|---|
| 2254 | SNB_NON_DRAM) | 
|---|
| 2255 |  | 
|---|
| 2256 | static __initconst const u64 knl_hw_cache_extra_regs | 
|---|
| 2257 | [PERF_COUNT_HW_CACHE_MAX] | 
|---|
| 2258 | [PERF_COUNT_HW_CACHE_OP_MAX] | 
|---|
| 2259 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = { | 
|---|
| 2260 | [C(LL)] = { | 
|---|
| 2261 | [C(OP_READ)] = { | 
|---|
| 2262 | [C(RESULT_ACCESS)] = KNL_L2_READ | KNL_L2_ACCESS, | 
|---|
| 2263 | [C(RESULT_MISS)]   = 0, | 
|---|
| 2264 | }, | 
|---|
| 2265 | [C(OP_WRITE)] = { | 
|---|
| 2266 | [C(RESULT_ACCESS)] = KNL_L2_WRITE | KNL_L2_ACCESS, | 
|---|
| 2267 | [C(RESULT_MISS)]   = KNL_L2_WRITE | KNL_L2_MISS, | 
|---|
| 2268 | }, | 
|---|
| 2269 | [C(OP_PREFETCH)] = { | 
|---|
| 2270 | [C(RESULT_ACCESS)] = KNL_L2_PREFETCH | KNL_L2_ACCESS, | 
|---|
| 2271 | [C(RESULT_MISS)]   = KNL_L2_PREFETCH | KNL_L2_MISS, | 
|---|
| 2272 | }, | 
|---|
| 2273 | }, | 
|---|
| 2274 | }; | 
|---|
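|  | /* | 
|---|
|  |  * Note for illustration, not taken from this file: Knights Landing has no | 
|---|
|  |  * shared L3, so the generic "LL" events above map onto the tile L2 via | 
|---|
|  |  * OFFCORE_RESPONSE. For example, the OP_PREFETCH/RESULT_MISS entry expands to | 
|---|
|  |  * | 
|---|
|  |  *	KNL_L2_PREFETCH | KNL_L2_MISS | 
|---|
|  |  *	  = SLM_DMND_PREFETCH|KNL_OT_L2_HITE|KNL_OT_L2_HITF|KNL_DRAM_ANY|SNB_SNP_ANY|SNB_NON_DRAM | 
|---|
|  |  * | 
|---|
|  |  * i.e. roughly any prefetch that was serviced outside the local tile's L2. | 
|---|
|  |  */ | 
|---|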
| 2275 |  | 
|---|
| 2276 | /* | 
|---|
| 2277 | * Used from PMIs where the LBRs are already disabled. | 
|---|
| 2278 | * | 
|---|
| 2279 | * This function may be called consecutively. It must leave the PMU in the | 
|---|
| 2280 | * disabled state across consecutive calls. | 
|---|
| 2281 | * | 
|---|
| 2282 | * During consecutive calls, the same disable value will be written to related | 
|---|
| 2283 | * registers, so the PMU state remains unchanged. | 
|---|
| 2284 | * | 
|---|
| 2285 | * intel_bts events don't coexist with intel PMU's BTS events because of | 
|---|
| 2286 | * x86_add_exclusive(x86_lbr_exclusive_lbr); there's no need to keep them | 
|---|
| 2287 | * disabled around intel PMU's event batching etc, only inside the PMI handler. | 
|---|
| 2288 | * | 
|---|
| 2289 | * Avoid PEBS_ENABLE MSR access in PMIs. | 
|---|
| 2290 | * GLOBAL_CTRL has already been cleared, so none of the counters count | 
|---|
| 2291 | * anymore, regardless of whether PEBS is enabled. | 
|---|
| 2292 | * The PEBS status usually does not change in a PMI, so it's unnecessary to | 
|---|
| 2293 | * access the PEBS_ENABLE MSR in disable_all()/enable_all(). | 
|---|
| 2294 | * However, there are some cases which may change PEBS status, e.g. PMI | 
|---|
| 2295 | * throttle. The PEBS_ENABLE should be updated where the status changes. | 
|---|
| 2296 | */ | 
|---|
| 2297 | static __always_inline void __intel_pmu_disable_all(bool bts) | 
|---|
| 2298 | { | 
|---|
| 2299 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); | 
|---|
| 2300 |  | 
|---|
| 2301 | wrmsrq(MSR_CORE_PERF_GLOBAL_CTRL, 0); | 
|---|
| 2302 |  | 
|---|
| 2303 | if (bts && test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) | 
|---|
| 2304 | intel_pmu_disable_bts(); | 
|---|
| 2305 | } | 
|---|
| 2306 |  | 
|---|
| 2307 | static __always_inline void intel_pmu_disable_all(void) | 
|---|
| 2308 | { | 
|---|
| 2309 | __intel_pmu_disable_all(true); | 
|---|
| 2310 | static_call_cond(x86_pmu_pebs_disable_all)(); | 
|---|
| 2311 | intel_pmu_lbr_disable_all(); | 
|---|
| 2312 | } | 
|---|
| 2313 |  | 
|---|
| 2314 | static void __intel_pmu_enable_all(int added, bool pmi) | 
|---|
| 2315 | { | 
|---|
| 2316 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); | 
|---|
| 2317 | u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl); | 
|---|
| 2318 |  | 
|---|
| 2319 | intel_pmu_lbr_enable_all(pmi); | 
|---|
| 2320 |  | 
|---|
| 2321 | if (cpuc->fixed_ctrl_val != cpuc->active_fixed_ctrl_val) { | 
|---|
| 2322 | wrmsrq(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, cpuc->fixed_ctrl_val); | 
|---|
| 2323 | cpuc->active_fixed_ctrl_val = cpuc->fixed_ctrl_val; | 
|---|
| 2324 | } | 
|---|
| 2325 |  | 
|---|
| 2326 | wrmsrq(MSR_CORE_PERF_GLOBAL_CTRL, | 
|---|
| 2327 | intel_ctrl & ~cpuc->intel_ctrl_guest_mask); | 
|---|
| 2328 |  | 
|---|
| 2329 | if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) { | 
|---|
| 2330 | struct perf_event *event = | 
|---|
| 2331 | cpuc->events[INTEL_PMC_IDX_FIXED_BTS]; | 
|---|
| 2332 |  | 
|---|
| 2333 | if (WARN_ON_ONCE(!event)) | 
|---|
| 2334 | return; | 
|---|
| 2335 |  | 
|---|
| 2336 | intel_pmu_enable_bts(event->hw.config); | 
|---|
| 2337 | } | 
|---|
| 2338 | } | 
|---|
| 2339 |  | 
|---|
| 2340 | static void intel_pmu_enable_all(int added) | 
|---|
| 2341 | { | 
|---|
| 2342 | static_call_cond(x86_pmu_pebs_enable_all)(); | 
|---|
| 2343 | __intel_pmu_enable_all(added, false); | 
|---|
| 2344 | } | 
|---|
| 2345 |  | 
|---|
| 2346 | static noinline int | 
|---|
| 2347 | __intel_pmu_snapshot_branch_stack(struct perf_branch_entry *entries, | 
|---|
| 2348 | unsigned int cnt, unsigned long flags) | 
|---|
| 2349 | { | 
|---|
| 2350 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); | 
|---|
| 2351 |  | 
|---|
| 2352 | intel_pmu_lbr_read(); | 
|---|
| 2353 | cnt = min_t(unsigned int, cnt, x86_pmu.lbr_nr); | 
|---|
| 2354 |  | 
|---|
| 2355 | memcpy(entries, cpuc->lbr_entries, sizeof(struct perf_branch_entry) * cnt); | 
|---|
| 2356 | intel_pmu_enable_all(0); | 
|---|
| 2357 | local_irq_restore(flags); | 
|---|
| 2358 | return cnt; | 
|---|
| 2359 | } | 
|---|
| 2360 |  | 
|---|
| 2361 | static int | 
|---|
| 2362 | intel_pmu_snapshot_branch_stack(struct perf_branch_entry *entries, unsigned int cnt) | 
|---|
| 2363 | { | 
|---|
| 2364 | unsigned long flags; | 
|---|
| 2365 |  | 
|---|
| 2366 | /* must not have branches... */ | 
|---|
| 2367 | local_irq_save(flags); | 
|---|
| 2368 | __intel_pmu_disable_all(false); /* we don't care about BTS */ | 
|---|
| 2369 | __intel_pmu_lbr_disable(); | 
|---|
| 2370 | /*            ... until here */ | 
|---|
| 2371 | return __intel_pmu_snapshot_branch_stack(entries, cnt, flags); | 
|---|
| 2372 | } | 
|---|
| 2373 |  | 
|---|
| 2374 | static int | 
|---|
| 2375 | intel_pmu_snapshot_arch_branch_stack(struct perf_branch_entry *entries, unsigned int cnt) | 
|---|
| 2376 | { | 
|---|
| 2377 | unsigned long flags; | 
|---|
| 2378 |  | 
|---|
| 2379 | /* must not have branches... */ | 
|---|
| 2380 | local_irq_save(flags); | 
|---|
| 2381 | __intel_pmu_disable_all(false); /* we don't care about BTS */ | 
|---|
| 2382 | __intel_pmu_arch_lbr_disable(); | 
|---|
| 2383 | /*            ... until here */ | 
|---|
| 2384 | return __intel_pmu_snapshot_branch_stack(entries, cnt, flags); | 
|---|
| 2385 | } | 
|---|
| 2386 |  | 
|---|
| 2387 | /* | 
|---|
| 2388 | * Workaround for: | 
|---|
| 2389 | *   Intel Errata AAK100 (model 26) | 
|---|
| 2390 | *   Intel Errata AAP53  (model 30) | 
|---|
| 2391 | *   Intel Errata BD53   (model 44) | 
|---|
| 2392 | * | 
|---|
| 2393 | * The official story: | 
|---|
| 2394 | *   These chips need to be 'reset' when adding counters by programming the | 
|---|
| 2395 | *   magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either | 
|---|
| 2396 | *   in sequence on the same PMC or on different PMCs. | 
|---|
| 2397 | * | 
|---|
| 2398 | * In practice it appears some of these events do in fact count, and | 
|---|
| 2399 | * we need to program all 4 events. | 
|---|
| 2400 | */ | 
|---|
| 2401 | static void intel_pmu_nhm_workaround(void) | 
|---|
| 2402 | { | 
|---|
| 2403 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); | 
|---|
| 2404 | static const unsigned long nhm_magic[4] = { | 
|---|
| 2405 | 0x4300B5, | 
|---|
| 2406 | 0x4300D2, | 
|---|
| 2407 | 0x4300B1, | 
|---|
| 2408 | 0x4300B1 | 
|---|
| 2409 | }; | 
|---|
| 2410 | struct perf_event *event; | 
|---|
| 2411 | int i; | 
|---|
| 2412 |  | 
|---|
| 2413 | /* | 
|---|
| 2414 | * The errata require the following steps: | 
|---|
| 2415 | * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL; | 
|---|
| 2416 | * 2) Configure 4 PERFEVTSELx with the magic events and clear | 
|---|
| 2417 | *    the corresponding PMCx; | 
|---|
| 2418 | * 3) Set bit0~bit3 of MSR_CORE_PERF_GLOBAL_CTRL; | 
|---|
| 2419 | * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL; | 
|---|
| 2420 | * 5) Clear 4 pairs of PERFEVTSELx and PMCx; | 
|---|
| 2421 | */ | 
|---|
| 2422 |  | 
|---|
| 2423 | /* | 
|---|
| 2424 | * The real steps we choose are a little different from above. | 
|---|
| 2425 | * A) To reduce MSR operations, we don't run step 1) as they | 
|---|
| 2426 | *    are already cleared before this function is called; | 
|---|
| 2427 | * B) Call x86_perf_event_update to save PMCx before configuring | 
|---|
| 2428 | *    PERFEVTSELx with magic number; | 
|---|
| 2429 | * C) For step 5), we only clear PERFEVTSELx when it is not | 
|---|
| 2430 | *    currently in use; | 
|---|
| 2431 | * D) Call x86_perf_event_set_period to restore PMCx; | 
|---|
| 2432 | */ | 
|---|
| 2433 |  | 
|---|
| 2434 | /* We always operate on 4 pairs of perf counters */ | 
|---|
| 2435 | for (i = 0; i < 4; i++) { | 
|---|
| 2436 | event = cpuc->events[i]; | 
|---|
| 2437 | if (event) | 
|---|
| 2438 | static_call(x86_pmu_update)(event); | 
|---|
| 2439 | } | 
|---|
| 2440 |  | 
|---|
| 2441 | for (i = 0; i < 4; i++) { | 
|---|
| 2442 | wrmsrq(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]); | 
|---|
| 2443 | wrmsrq(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0); | 
|---|
| 2444 | } | 
|---|
| 2445 |  | 
|---|
| 2446 | wrmsrq(MSR_CORE_PERF_GLOBAL_CTRL, 0xf); | 
|---|
| 2447 | wrmsrq(MSR_CORE_PERF_GLOBAL_CTRL, 0x0); | 
|---|
| 2448 |  | 
|---|
| 2449 | for (i = 0; i < 4; i++) { | 
|---|
| 2450 | event = cpuc->events[i]; | 
|---|
| 2451 |  | 
|---|
| 2452 | if (event) { | 
|---|
| 2453 | static_call(x86_pmu_set_period)(event); | 
|---|
| 2454 | __x86_pmu_enable_event(&event->hw, | 
|---|
| 2455 | ARCH_PERFMON_EVENTSEL_ENABLE); | 
|---|
| 2456 | } else | 
|---|
| 2457 | wrmsrq(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0); | 
|---|
| 2458 | } | 
|---|
| 2459 | } | 
|---|
| 2460 |  | 
|---|
| 2461 | static void intel_pmu_nhm_enable_all(int added) | 
|---|
| 2462 | { | 
|---|
| 2463 | if (added) | 
|---|
| 2464 | intel_pmu_nhm_workaround(); | 
|---|
| 2465 | intel_pmu_enable_all(added); | 
|---|
| 2466 | } | 
|---|
| 2467 |  | 
|---|
| 2468 | static void intel_set_tfa(struct cpu_hw_events *cpuc, bool on) | 
|---|
| 2469 | { | 
|---|
| 2470 | u64 val = on ? MSR_TFA_RTM_FORCE_ABORT : 0; | 
|---|
| 2471 |  | 
|---|
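|  | /* | 
|---|
|  | * Skip the (relatively expensive) WRMSR when the shadow copy | 
|---|
|  | * already holds the requested value. | 
|---|
|  | */ | 
|---|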
| 2472 | if (cpuc->tfa_shadow != val) { | 
|---|
| 2473 | cpuc->tfa_shadow = val; | 
|---|
| 2474 | wrmsrq(MSR_TSX_FORCE_ABORT, val); | 
|---|
| 2475 | } | 
|---|
| 2476 | } | 
|---|
| 2477 |  | 
|---|
| 2478 | static void intel_tfa_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr) | 
|---|
| 2479 | { | 
|---|
| 2480 | /* | 
|---|
| 2481 | * We're going to use PMC3, make sure TFA is set before we touch it. | 
|---|
| 2482 | */ | 
|---|
| 2483 | if (cntr == 3) | 
|---|
| 2484 | intel_set_tfa(cpuc, true); | 
|---|
| 2485 | } | 
|---|
| 2486 |  | 
|---|
| 2487 | static void intel_tfa_pmu_enable_all(int added) | 
|---|
| 2488 | { | 
|---|
| 2489 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); | 
|---|
| 2490 |  | 
|---|
| 2491 | /* | 
|---|
| 2492 | * If we find PMC3 is no longer used when we enable the PMU, we can | 
|---|
| 2493 | * clear TFA. | 
|---|
| 2494 | */ | 
|---|
| 2495 | if (!test_bit(3, cpuc->active_mask)) | 
|---|
| 2496 | intel_set_tfa(cpuc, false); | 
|---|
| 2497 |  | 
|---|
| 2498 | intel_pmu_enable_all(added); | 
|---|
| 2499 | } | 
|---|
| 2500 |  | 
|---|
| 2501 | static inline u64 intel_pmu_get_status(void) | 
|---|
| 2502 | { | 
|---|
| 2503 | u64 status; | 
|---|
| 2504 |  | 
|---|
| 2505 | rdmsrq(MSR_CORE_PERF_GLOBAL_STATUS, status); | 
|---|
| 2506 |  | 
|---|
| 2507 | return status; | 
|---|
| 2508 | } | 
|---|
| 2509 |  | 
|---|
| 2510 | static inline void intel_pmu_ack_status(u64 ack) | 
|---|
| 2511 | { | 
|---|
| 2512 | wrmsrq(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack); | 
|---|
| 2513 | } | 
|---|
| 2514 |  | 
|---|
| 2515 | static inline bool event_is_checkpointed(struct perf_event *event) | 
|---|
| 2516 | { | 
|---|
| 2517 | return unlikely(event->hw.config & HSW_IN_TX_CHECKPOINTED) != 0; | 
|---|
| 2518 | } | 
|---|
| 2519 |  | 
|---|
| 2520 | static inline void intel_set_masks(struct perf_event *event, int idx) | 
|---|
| 2521 | { | 
|---|
| 2522 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); | 
|---|
| 2523 |  | 
|---|
| 2524 | if (event->attr.exclude_host) | 
|---|
| 2525 | __set_bit(idx, (unsigned long *)&cpuc->intel_ctrl_guest_mask); | 
|---|
| 2526 | if (event->attr.exclude_guest) | 
|---|
| 2527 | __set_bit(idx, (unsigned long *)&cpuc->intel_ctrl_host_mask); | 
|---|
| 2528 | if (event_is_checkpointed(event)) | 
|---|
| 2529 | __set_bit(idx, (unsigned long *)&cpuc->intel_cp_status); | 
|---|
| 2530 | } | 
|---|
| 2531 |  | 
|---|
| 2532 | static inline void intel_clear_masks(struct perf_event *event, int idx) | 
|---|
| 2533 | { | 
|---|
| 2534 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); | 
|---|
| 2535 |  | 
|---|
| 2536 | __clear_bit(idx, (unsigned long *)&cpuc->intel_ctrl_guest_mask); | 
|---|
| 2537 | __clear_bit(idx, (unsigned long *)&cpuc->intel_ctrl_host_mask); | 
|---|
| 2538 | __clear_bit(idx, (unsigned long *)&cpuc->intel_cp_status); | 
|---|
| 2539 | } | 
|---|
| 2540 |  | 
|---|
| 2541 | static void intel_pmu_disable_fixed(struct perf_event *event) | 
|---|
| 2542 | { | 
|---|
| 2543 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); | 
|---|
| 2544 | struct hw_perf_event *hwc = &event->hw; | 
|---|
| 2545 | int idx = hwc->idx; | 
|---|
| 2546 | u64 mask; | 
|---|
| 2547 |  | 
|---|
| 2548 | if (is_topdown_idx(idx)) { | 
|---|
| 2549 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); | 
|---|
| 2550 |  | 
|---|
| 2551 | /* | 
|---|
| 2552 | * When there are other active TopDown events, | 
|---|
| 2553 | * don't disable the fixed counter 3. | 
|---|
| 2554 | */ | 
|---|
| 2555 | if (*(u64 *)cpuc->active_mask & INTEL_PMC_OTHER_TOPDOWN_BITS(idx)) | 
|---|
| 2556 | return; | 
|---|
| 2557 | idx = INTEL_PMC_IDX_FIXED_SLOTS; | 
|---|
| 2558 | } | 
|---|
| 2559 |  | 
|---|
| 2560 | intel_clear_masks(event, idx); | 
|---|
| 2561 |  | 
|---|
| 2562 | mask = intel_fixed_bits_by_idx(idx - INTEL_PMC_IDX_FIXED, INTEL_FIXED_BITS_MASK); | 
|---|
| 2563 | cpuc->fixed_ctrl_val &= ~mask; | 
|---|
| 2564 | } | 
|---|
| 2565 |  | 
|---|
| 2566 | static void intel_pmu_disable_event(struct perf_event *event) | 
|---|
| 2567 | { | 
|---|
| 2568 | struct hw_perf_event *hwc = &event->hw; | 
|---|
| 2569 | int idx = hwc->idx; | 
|---|
| 2570 |  | 
|---|
| 2571 | switch (idx) { | 
|---|
| 2572 | case 0 ... INTEL_PMC_IDX_FIXED - 1: | 
|---|
| 2573 | intel_clear_masks(event, idx); | 
|---|
| 2574 | x86_pmu_disable_event(event); | 
|---|
| 2575 | break; | 
|---|
| 2576 | case INTEL_PMC_IDX_FIXED ... INTEL_PMC_IDX_FIXED_BTS - 1: | 
|---|
| 2577 | case INTEL_PMC_IDX_METRIC_BASE ... INTEL_PMC_IDX_METRIC_END: | 
|---|
| 2578 | intel_pmu_disable_fixed(event); | 
|---|
| 2579 | break; | 
|---|
| 2580 | case INTEL_PMC_IDX_FIXED_BTS: | 
|---|
| 2581 | intel_pmu_disable_bts(); | 
|---|
| 2582 | intel_pmu_drain_bts_buffer(); | 
|---|
| 2583 | return; | 
|---|
| 2584 | case INTEL_PMC_IDX_FIXED_VLBR: | 
|---|
| 2585 | intel_clear_masks(event, idx); | 
|---|
| 2586 | break; | 
|---|
| 2587 | default: | 
|---|
| 2588 | intel_clear_masks(event, idx); | 
|---|
| 2589 | pr_warn( "Failed to disable the event with invalid index %d\n", | 
|---|
| 2590 | idx); | 
|---|
| 2591 | return; | 
|---|
| 2592 | } | 
|---|
| 2593 |  | 
|---|
| 2594 | /* | 
|---|
| 2595 | * Needs to be called after x86_pmu_disable_event, | 
|---|
| 2596 | * so we don't trigger the event without PEBS bit set. | 
|---|
| 2597 | */ | 
|---|
| 2598 | if (unlikely(event->attr.precise_ip)) | 
|---|
| 2599 | static_call(x86_pmu_pebs_disable)(event); | 
|---|
| 2600 | } | 
|---|
| 2601 |  | 
|---|
| 2602 | static void intel_pmu_assign_event(struct perf_event *event, int idx) | 
|---|
| 2603 | { | 
|---|
| 2604 | if (is_pebs_pt(event)) | 
|---|
| 2605 | perf_report_aux_output_id(event, idx); | 
|---|
| 2606 | } | 
|---|
| 2607 |  | 
|---|
| 2608 | static __always_inline bool intel_pmu_needs_branch_stack(struct perf_event *event) | 
|---|
| 2609 | { | 
|---|
| 2610 | return event->hw.flags & PERF_X86_EVENT_NEEDS_BRANCH_STACK; | 
|---|
| 2611 | } | 
|---|
| 2612 |  | 
|---|
| 2613 | static void intel_pmu_del_event(struct perf_event *event) | 
|---|
| 2614 | { | 
|---|
| 2615 | if (intel_pmu_needs_branch_stack(event)) | 
|---|
| 2616 | intel_pmu_lbr_del(event); | 
|---|
| 2617 | if (event->attr.precise_ip) | 
|---|
| 2618 | intel_pmu_pebs_del(event); | 
|---|
| 2619 | if (is_pebs_counter_event_group(event) || | 
|---|
| 2620 | is_acr_event_group(event)) | 
|---|
| 2621 | this_cpu_ptr(&cpu_hw_events)->n_late_setup--; | 
|---|
| 2622 | } | 
|---|
| 2623 |  | 
|---|
| 2624 | static int icl_set_topdown_event_period(struct perf_event *event) | 
|---|
| 2625 | { | 
|---|
| 2626 | struct hw_perf_event *hwc = &event->hw; | 
|---|
| 2627 | s64 left = local64_read(&hwc->period_left); | 
|---|
| 2628 |  | 
|---|
| 2629 | /* | 
|---|
| 2630 | * The values in PERF_METRICS MSR are derived from fixed counter 3. | 
|---|
| 2631 | * Software should start both registers, PERF_METRICS and fixed | 
|---|
| 2632 | * counter 3, from zero. | 
|---|
| 2633 | * Clear PERF_METRICS and fixed counter 3 at initialization. | 
|---|
| 2634 | * After that, both MSRs will be cleared for each read. | 
|---|
| 2635 | * There is no need to clear them again. | 
|---|
| 2636 | */ | 
|---|
| 2637 | if (left == x86_pmu.max_period) { | 
|---|
| 2638 | wrmsrq(MSR_CORE_PERF_FIXED_CTR3, 0); | 
|---|
| 2639 | wrmsrq(MSR_PERF_METRICS, 0); | 
|---|
| 2640 | hwc->saved_slots = 0; | 
|---|
| 2641 | hwc->saved_metric = 0; | 
|---|
| 2642 | } | 
|---|
| 2643 |  | 
|---|
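|  | /* | 
|---|
|  | * Restore the slots and metric values saved at the last context | 
|---|
|  | * switch (see update_saved_topdown_regs()) when the slots leader | 
|---|
|  | * is scheduled back in. | 
|---|
|  | */ | 
|---|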
| 2644 | if ((hwc->saved_slots) && is_slots_event(event)) { | 
|---|
| 2645 | wrmsrq(MSR_CORE_PERF_FIXED_CTR3, hwc->saved_slots); | 
|---|
| 2646 | wrmsrq(MSR_PERF_METRICS, hwc->saved_metric); | 
|---|
| 2647 | } | 
|---|
| 2648 |  | 
|---|
| 2649 | perf_event_update_userpage(event); | 
|---|
| 2650 |  | 
|---|
| 2651 | return 0; | 
|---|
| 2652 | } | 
|---|
| 2653 |  | 
|---|
| 2654 | DEFINE_STATIC_CALL(intel_pmu_set_topdown_event_period, x86_perf_event_set_period); | 
|---|
| 2655 |  | 
|---|
| 2656 | static inline u64 icl_get_metrics_event_value(u64 metric, u64 slots, int idx) | 
|---|
| 2657 | { | 
|---|
| 2658 | u32 val; | 
|---|
| 2659 |  | 
|---|
| 2660 | /* | 
|---|
| 2661 | * The metric is reported as an 8bit integer fraction | 
|---|
| 2662 | * summing up to 0xff. | 
|---|
| 2663 | * slots-in-metric = (Metric / 0xff) * slots | 
|---|
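|  | * | 
|---|
|  | * e.g. with slots = 1000 and a metric byte of 0x80 (illustrative | 
|---|
|  | * numbers only), slots-in-metric = 1000 * 0x80 / 0xff ~= 501. | 
|---|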
| 2664 | */ | 
|---|
| 2665 | val = (metric >> ((idx - INTEL_PMC_IDX_METRIC_BASE) * 8)) & 0xff; | 
|---|
| 2666 | return mul_u64_u32_div(slots, val, 0xff); | 
|---|
| 2667 | } | 
|---|
| 2668 |  | 
|---|
| 2669 | static u64 icl_get_topdown_value(struct perf_event *event, | 
|---|
| 2670 | u64 slots, u64 metrics) | 
|---|
| 2671 | { | 
|---|
| 2672 | int idx = event->hw.idx; | 
|---|
| 2673 | u64 delta; | 
|---|
| 2674 |  | 
|---|
| 2675 | if (is_metric_idx(idx)) | 
|---|
| 2676 | delta = icl_get_metrics_event_value(metrics, slots, idx); | 
|---|
| 2677 | else | 
|---|
| 2678 | delta = slots; | 
|---|
| 2679 |  | 
|---|
| 2680 | return delta; | 
|---|
| 2681 | } | 
|---|
| 2682 |  | 
|---|
| 2683 | static void __icl_update_topdown_event(struct perf_event *event, | 
|---|
| 2684 | u64 slots, u64 metrics, | 
|---|
| 2685 | u64 last_slots, u64 last_metrics) | 
|---|
| 2686 | { | 
|---|
| 2687 | u64 delta, last = 0; | 
|---|
| 2688 |  | 
|---|
| 2689 | delta = icl_get_topdown_value(event, slots, metrics); | 
|---|
| 2690 | if (last_slots) | 
|---|
| 2691 | last = icl_get_topdown_value(event, last_slots, last_metrics); | 
|---|
| 2692 |  | 
|---|
| 2693 | /* | 
|---|
| 2694 | * The 8-bit integer fraction of a metric may not be accurate, | 
|---|
| 2695 | * especially when the change is very small. | 
|---|
| 2696 | * For example, if only a few bad_spec events happen, the fraction | 
|---|
| 2697 | * may be reduced from 1 to 0. If so, the bad_spec event value | 
|---|
| 2698 | * will be 0, which is definitely less than the last value. | 
|---|
| 2699 | * Avoid updating event->count in this case. | 
|---|
| 2700 | */ | 
|---|
| 2701 | if (delta > last) { | 
|---|
| 2702 | delta -= last; | 
|---|
| 2703 | local64_add(delta, &event->count); | 
|---|
| 2704 | } | 
|---|
| 2705 | } | 
|---|
| 2706 |  | 
|---|
| 2707 | static void update_saved_topdown_regs(struct perf_event *event, u64 slots, | 
|---|
| 2708 | u64 metrics, int metric_end) | 
|---|
| 2709 | { | 
|---|
| 2710 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); | 
|---|
| 2711 | struct perf_event *other; | 
|---|
| 2712 | int idx; | 
|---|
| 2713 |  | 
|---|
| 2714 | event->hw.saved_slots = slots; | 
|---|
| 2715 | event->hw.saved_metric = metrics; | 
|---|
| 2716 |  | 
|---|
| 2717 | for_each_set_bit(idx, cpuc->active_mask, metric_end + 1) { | 
|---|
| 2718 | if (!is_topdown_idx(idx)) | 
|---|
| 2719 | continue; | 
|---|
| 2720 | other = cpuc->events[idx]; | 
|---|
| 2721 | other->hw.saved_slots = slots; | 
|---|
| 2722 | other->hw.saved_metric = metrics; | 
|---|
| 2723 | } | 
|---|
| 2724 | } | 
|---|
| 2725 |  | 
|---|
| 2726 | /* | 
|---|
| 2727 | * Update all active Topdown events. | 
|---|
| 2728 | * | 
|---|
| 2729 | * The PERF_METRICS and fixed counter 3 are read separately. The values may be | 
|---|
| 2730 | * modified by an NMI. The PMU has to be disabled before calling this function. | 
|---|
| 2731 | */ | 
|---|
| 2732 |  | 
|---|
| 2733 | static u64 intel_update_topdown_event(struct perf_event *event, int metric_end, u64 *val) | 
|---|
| 2734 | { | 
|---|
| 2735 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); | 
|---|
| 2736 | struct perf_event *other; | 
|---|
| 2737 | u64 slots, metrics; | 
|---|
| 2738 | bool reset = true; | 
|---|
| 2739 | int idx; | 
|---|
| 2740 |  | 
|---|
| 2741 | if (!val) { | 
|---|
| 2742 | /* read Fixed counter 3 */ | 
|---|
| 2743 | slots = rdpmc(3 | INTEL_PMC_FIXED_RDPMC_BASE); | 
|---|
| 2744 | if (!slots) | 
|---|
| 2745 | return 0; | 
|---|
| 2746 |  | 
|---|
| 2747 | /* read PERF_METRICS */ | 
|---|
| 2748 | metrics = rdpmc(INTEL_PMC_FIXED_RDPMC_METRICS); | 
|---|
| 2749 | } else { | 
|---|
| 2750 | slots = val[0]; | 
|---|
| 2751 | metrics = val[1]; | 
|---|
| 2752 | /* | 
|---|
| 2753 | * Don't reset the PERF_METRICS and Fixed counter 3 | 
|---|
| 2754 | * for each PEBS record read. Utilize the RDPMC metrics | 
|---|
| 2755 | * clear mode. | 
|---|
| 2756 | */ | 
|---|
| 2757 | reset = false; | 
|---|
| 2758 | } | 
|---|
| 2759 |  | 
|---|
| 2760 | for_each_set_bit(idx, cpuc->active_mask, metric_end + 1) { | 
|---|
| 2761 | if (!is_topdown_idx(idx)) | 
|---|
| 2762 | continue; | 
|---|
| 2763 | other = cpuc->events[idx]; | 
|---|
| 2764 | __icl_update_topdown_event(other, slots, metrics, | 
|---|
| 2765 | event ? event->hw.saved_slots : 0, | 
|---|
| 2766 | event ? event->hw.saved_metric : 0); | 
|---|
| 2767 | } | 
|---|
| 2768 |  | 
|---|
| 2769 | /* | 
|---|
| 2770 | * Check and update this event, which may have been cleared | 
|---|
| 2771 | * in active_mask e.g. x86_pmu_stop() | 
|---|
| 2772 | */ | 
|---|
| 2773 | if (event && !test_bit(event->hw.idx, cpuc->active_mask)) { | 
|---|
| 2774 | __icl_update_topdown_event(event, slots, metrics, | 
|---|
| 2775 | event->hw.saved_slots, | 
|---|
| 2776 | event->hw.saved_metric); | 
|---|
| 2777 |  | 
|---|
| 2778 | /* | 
|---|
| 2779 | * In x86_pmu_stop(), the event is cleared in active_mask first, | 
|---|
| 2780 | * then the delta is drained, which indicates a context switch | 
|---|
| 2781 | * for counting. | 
|---|
| 2782 | * Save the metric and slots values for the context switch. | 
|---|
| 2783 | * There is no need to reset PERF_METRICS and fixed counter 3, | 
|---|
| 2784 | * because the values will be restored on the next schedule-in. | 
|---|
| 2785 | */ | 
|---|
| 2786 | update_saved_topdown_regs(event, slots, metrics, metric_end); | 
|---|
| 2787 | reset = false; | 
|---|
| 2788 | } | 
|---|
| 2789 |  | 
|---|
| 2790 | if (reset) { | 
|---|
| 2791 | /* The fixed counter 3 has to be written before the PERF_METRICS. */ | 
|---|
| 2792 | wrmsrq(MSR_CORE_PERF_FIXED_CTR3, 0); | 
|---|
| 2793 | wrmsrq(MSR_PERF_METRICS, 0); | 
|---|
| 2794 | if (event) | 
|---|
| 2795 | update_saved_topdown_regs(event, 0, 0, metric_end); | 
|---|
| 2796 | } | 
|---|
| 2797 |  | 
|---|
| 2798 | return slots; | 
|---|
| 2799 | } | 
|---|
| 2800 |  | 
|---|
| 2801 | static u64 icl_update_topdown_event(struct perf_event *event, u64 *val) | 
|---|
| 2802 | { | 
|---|
| 2803 | return intel_update_topdown_event(event, INTEL_PMC_IDX_METRIC_BASE + | 
|---|
| 2804 | x86_pmu.num_topdown_events - 1, | 
|---|
| 2805 | val); | 
|---|
| 2806 | } | 
|---|
| 2807 |  | 
|---|
| 2808 | DEFINE_STATIC_CALL(intel_pmu_update_topdown_event, intel_pmu_topdown_event_update); | 
|---|
| 2809 |  | 
|---|
| 2810 | static void intel_pmu_read_event(struct perf_event *event) | 
|---|
| 2811 | { | 
|---|
| 2812 | if (event->hw.flags & (PERF_X86_EVENT_AUTO_RELOAD | PERF_X86_EVENT_TOPDOWN) || | 
|---|
| 2813 | is_pebs_counter_event_group(event)) { | 
|---|
| 2814 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); | 
|---|
| 2815 | bool pmu_enabled = cpuc->enabled; | 
|---|
| 2816 |  | 
|---|
| 2817 | /* Only need to call update_topdown_event() once for group read. */ | 
|---|
| 2818 | if (is_metric_event(event) && (cpuc->txn_flags & PERF_PMU_TXN_READ)) | 
|---|
| 2819 | return; | 
|---|
| 2820 |  | 
|---|
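|  | /* | 
|---|
|  | * Briefly disable the PMU around the read so that neither the | 
|---|
|  | * topdown MSRs nor the PEBS buffer change underneath us (see the | 
|---|
|  | * comment above intel_update_topdown_event()). | 
|---|
|  | */ | 
|---|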
| 2821 | cpuc->enabled = 0; | 
|---|
| 2822 | if (pmu_enabled) | 
|---|
| 2823 | intel_pmu_disable_all(); | 
|---|
| 2824 |  | 
|---|
| 2825 | /* | 
|---|
| 2826 | * If PEBS counter snapshotting is enabled, | 
|---|
| 2827 | * the topdown event is available in the PEBS records. | 
|---|
| 2828 | */ | 
|---|
| 2829 | if (is_topdown_count(event) && !is_pebs_counter_event_group(event)) | 
|---|
| 2830 | static_call(intel_pmu_update_topdown_event)(event, NULL); | 
|---|
| 2831 | else | 
|---|
| 2832 | intel_pmu_drain_pebs_buffer(); | 
|---|
| 2833 |  | 
|---|
| 2834 | cpuc->enabled = pmu_enabled; | 
|---|
| 2835 | if (pmu_enabled) | 
|---|
| 2836 | intel_pmu_enable_all(0); | 
|---|
| 2837 |  | 
|---|
| 2838 | return; | 
|---|
| 2839 | } | 
|---|
| 2840 |  | 
|---|
| 2841 | x86_perf_event_update(event); | 
|---|
| 2842 | } | 
|---|
| 2843 |  | 
|---|
| 2844 | static void intel_pmu_enable_fixed(struct perf_event *event) | 
|---|
| 2845 | { | 
|---|
| 2846 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); | 
|---|
| 2847 | struct hw_perf_event *hwc = &event->hw; | 
|---|
| 2848 | int idx = hwc->idx; | 
|---|
| 2849 | u64 bits = 0; | 
|---|
| 2850 |  | 
|---|
| 2851 | if (is_topdown_idx(idx)) { | 
|---|
| 2852 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); | 
|---|
| 2853 | /* | 
|---|
| 2854 | * When there are other active TopDown events, | 
|---|
| 2855 | * don't enable the fixed counter 3 again. | 
|---|
| 2856 | */ | 
|---|
| 2857 | if (*(u64 *)cpuc->active_mask & INTEL_PMC_OTHER_TOPDOWN_BITS(idx)) | 
|---|
| 2858 | return; | 
|---|
| 2859 |  | 
|---|
| 2860 | idx = INTEL_PMC_IDX_FIXED_SLOTS; | 
|---|
| 2861 |  | 
|---|
| 2862 | if (event->attr.config1 & INTEL_TD_CFG_METRIC_CLEAR) | 
|---|
| 2863 | bits |= INTEL_FIXED_3_METRICS_CLEAR; | 
|---|
| 2864 | } | 
|---|
| 2865 |  | 
|---|
| 2866 | intel_set_masks(event, idx); | 
|---|
| 2867 |  | 
|---|
| 2868 | /* | 
|---|
| 2869 | * Enable IRQ generation (0x8), if not PEBS, | 
|---|
| 2870 | * and enable ring-3 counting (0x2) and ring-0 counting (0x1) | 
|---|
| 2871 | * if requested: | 
|---|
| 2872 | */ | 
|---|
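|  | /* | 
|---|
|  | * e.g. counting both rings with a PMI yields bits == 0xb, which | 
|---|
|  | * intel_fixed_bits_by_idx() below shifts into this counter's 4-bit | 
|---|
|  | * field of the fixed-counter control value. | 
|---|
|  | */ | 
|---|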
| 2873 | if (!event->attr.precise_ip) | 
|---|
| 2874 | bits |= INTEL_FIXED_0_ENABLE_PMI; | 
|---|
| 2875 | if (hwc->config & ARCH_PERFMON_EVENTSEL_USR) | 
|---|
| 2876 | bits |= INTEL_FIXED_0_USER; | 
|---|
| 2877 | if (hwc->config & ARCH_PERFMON_EVENTSEL_OS) | 
|---|
| 2878 | bits |= INTEL_FIXED_0_KERNEL; | 
|---|
| 2879 |  | 
|---|
| 2880 | /* | 
|---|
| 2881 | * ANY bit is supported in v3 and up | 
|---|
| 2882 | */ | 
|---|
| 2883 | if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY) | 
|---|
| 2884 | bits |= INTEL_FIXED_0_ANYTHREAD; | 
|---|
| 2885 |  | 
|---|
| 2886 | idx -= INTEL_PMC_IDX_FIXED; | 
|---|
| 2887 | bits = intel_fixed_bits_by_idx(idx, bits); | 
|---|
| 2888 | if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip) | 
|---|
| 2889 | bits |= intel_fixed_bits_by_idx(idx, ICL_FIXED_0_ADAPTIVE); | 
|---|
| 2890 |  | 
|---|
| 2891 | cpuc->fixed_ctrl_val &= ~intel_fixed_bits_by_idx(idx, INTEL_FIXED_BITS_MASK); | 
|---|
| 2892 | cpuc->fixed_ctrl_val |= bits; | 
|---|
| 2893 | } | 
|---|
| 2894 |  | 
|---|
| 2895 | static void intel_pmu_config_acr(int idx, u64 mask, u32 reload) | 
|---|
| 2896 | { | 
|---|
| 2897 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); | 
|---|
| 2898 | int msr_b, msr_c; | 
|---|
| 2899 | int msr_offset; | 
|---|
| 2900 |  | 
|---|
| 2901 | if (!mask && !cpuc->acr_cfg_b[idx]) | 
|---|
| 2902 | return; | 
|---|
| 2903 |  | 
|---|
| 2904 | if (idx < INTEL_PMC_IDX_FIXED) { | 
|---|
| 2905 | msr_b = MSR_IA32_PMC_V6_GP0_CFG_B; | 
|---|
| 2906 | msr_c = MSR_IA32_PMC_V6_GP0_CFG_C; | 
|---|
| 2907 | msr_offset = x86_pmu.addr_offset(idx, false); | 
|---|
| 2908 | } else { | 
|---|
| 2909 | msr_b = MSR_IA32_PMC_V6_FX0_CFG_B; | 
|---|
| 2910 | msr_c = MSR_IA32_PMC_V6_FX0_CFG_C; | 
|---|
| 2911 | msr_offset = x86_pmu.addr_offset(idx - INTEL_PMC_IDX_FIXED, false); | 
|---|
| 2912 | } | 
|---|
| 2913 |  | 
|---|
| 2914 | if (cpuc->acr_cfg_b[idx] != mask) { | 
|---|
| 2915 | wrmsrl(msr_b + msr_offset, mask); | 
|---|
| 2916 | cpuc->acr_cfg_b[idx] = mask; | 
|---|
| 2917 | } | 
|---|
| 2918 | /* Only need to update the reload value when there is a valid config value. */ | 
|---|
| 2919 | if (mask && cpuc->acr_cfg_c[idx] != reload) { | 
|---|
| 2920 | wrmsrl(msr_c + msr_offset, reload); | 
|---|
| 2921 | cpuc->acr_cfg_c[idx] = reload; | 
|---|
| 2922 | } | 
|---|
| 2923 | } | 
|---|
| 2924 |  | 
|---|
| 2925 | static void intel_pmu_enable_acr(struct perf_event *event) | 
|---|
| 2926 | { | 
|---|
| 2927 | struct hw_perf_event *hwc = &event->hw; | 
|---|
| 2928 |  | 
|---|
| 2929 | if (!is_acr_event_group(event) || !event->attr.config2) { | 
|---|
| 2930 | /* | 
|---|
| 2931 | * Disabling the event doesn't clear the ACR CFG registers, | 
|---|
| 2932 | * so check and clear them here. | 
|---|
| 2933 | */ | 
|---|
| 2934 | intel_pmu_config_acr(hwc->idx, 0, 0); | 
|---|
| 2935 | return; | 
|---|
| 2936 | } | 
|---|
| 2937 |  | 
|---|
| 2938 | intel_pmu_config_acr(hwc->idx, hwc->config1, -hwc->sample_period); | 
|---|
| 2939 | } | 
|---|
| 2940 |  | 
|---|
| 2941 | DEFINE_STATIC_CALL_NULL(intel_pmu_enable_acr_event, intel_pmu_enable_acr); | 
|---|
| 2942 |  | 
|---|
| 2943 | static void intel_pmu_enable_event(struct perf_event *event) | 
|---|
| 2944 | { | 
|---|
| 2945 | u64 enable_mask = ARCH_PERFMON_EVENTSEL_ENABLE; | 
|---|
| 2946 | struct hw_perf_event *hwc = &event->hw; | 
|---|
| 2947 | int idx = hwc->idx; | 
|---|
| 2948 |  | 
|---|
| 2949 | if (unlikely(event->attr.precise_ip)) | 
|---|
| 2950 | static_call(x86_pmu_pebs_enable)(event); | 
|---|
| 2951 |  | 
|---|
| 2952 | switch (idx) { | 
|---|
| 2953 | case 0 ... INTEL_PMC_IDX_FIXED - 1: | 
|---|
| 2954 | if (branch_sample_counters(event)) | 
|---|
| 2955 | enable_mask |= ARCH_PERFMON_EVENTSEL_BR_CNTR; | 
|---|
| 2956 | intel_set_masks(event, idx); | 
|---|
| 2957 | static_call_cond(intel_pmu_enable_acr_event)(event); | 
|---|
| 2958 | __x86_pmu_enable_event(hwc, enable_mask); | 
|---|
| 2959 | break; | 
|---|
| 2960 | case INTEL_PMC_IDX_FIXED ... INTEL_PMC_IDX_FIXED_BTS - 1: | 
|---|
| 2961 | static_call_cond(intel_pmu_enable_acr_event)(event); | 
|---|
| 2962 | fallthrough; | 
|---|
| 2963 | case INTEL_PMC_IDX_METRIC_BASE ... INTEL_PMC_IDX_METRIC_END: | 
|---|
| 2964 | intel_pmu_enable_fixed(event); | 
|---|
| 2965 | break; | 
|---|
| 2966 | case INTEL_PMC_IDX_FIXED_BTS: | 
|---|
| 2967 | if (!__this_cpu_read(cpu_hw_events.enabled)) | 
|---|
| 2968 | return; | 
|---|
| 2969 | intel_pmu_enable_bts(hwc->config); | 
|---|
| 2970 | break; | 
|---|
| 2971 | case INTEL_PMC_IDX_FIXED_VLBR: | 
|---|
| 2972 | intel_set_masks(event, idx); | 
|---|
| 2973 | break; | 
|---|
| 2974 | default: | 
|---|
| 2975 | pr_warn( "Failed to enable the event with invalid index %d\n", | 
|---|
| 2976 | idx); | 
|---|
| 2977 | } | 
|---|
| 2978 | } | 
|---|
| 2979 |  | 
|---|
| 2980 | static void intel_pmu_acr_late_setup(struct cpu_hw_events *cpuc) | 
|---|
| 2981 | { | 
|---|
| 2982 | struct perf_event *event, *leader; | 
|---|
| 2983 | int i, j, idx; | 
|---|
| 2984 |  | 
|---|
| 2985 | for (i = 0; i < cpuc->n_events; i++) { | 
|---|
| 2986 | leader = cpuc->event_list[i]; | 
|---|
| 2987 | if (!is_acr_event_group(leader)) | 
|---|
| 2988 | continue; | 
|---|
| 2989 |  | 
|---|
| 2990 | /* The ACR events must be contiguous. */ | 
|---|
| 2991 | for (j = i; j < cpuc->n_events; j++) { | 
|---|
| 2992 | event = cpuc->event_list[j]; | 
|---|
| 2993 | if (event->group_leader != leader->group_leader) | 
|---|
| 2994 | break; | 
|---|
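|  | /* | 
|---|
|  | * attr.config2 holds group-relative event indices; translate them | 
|---|
|  | * into the hardware counters actually assigned to those events and | 
|---|
|  | * accumulate the result in hw.config1 for intel_pmu_enable_acr(). | 
|---|
|  | */ | 
|---|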
| 2995 | for_each_set_bit(idx, (unsigned long *)&event->attr.config2, X86_PMC_IDX_MAX) { | 
|---|
| 2996 | if (i + idx >= cpuc->n_events || | 
|---|
| 2997 | !is_acr_event_group(cpuc->event_list[i + idx])) | 
|---|
| 2998 | return; | 
|---|
| 2999 | __set_bit(cpuc->assign[i + idx], (unsigned long *)&event->hw.config1); | 
|---|
| 3000 | } | 
|---|
| 3001 | } | 
|---|
| 3002 | i = j - 1; | 
|---|
| 3003 | } | 
|---|
| 3004 | } | 
|---|
| 3005 |  | 
|---|
| 3006 | void intel_pmu_late_setup(void) | 
|---|
| 3007 | { | 
|---|
| 3008 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); | 
|---|
| 3009 |  | 
|---|
| 3010 | if (!cpuc->n_late_setup) | 
|---|
| 3011 | return; | 
|---|
| 3012 |  | 
|---|
| 3013 | intel_pmu_pebs_late_setup(cpuc); | 
|---|
| 3014 | intel_pmu_acr_late_setup(cpuc); | 
|---|
| 3015 | } | 
|---|
| 3016 |  | 
|---|
| 3017 | static void intel_pmu_add_event(struct perf_event *event) | 
|---|
| 3018 | { | 
|---|
| 3019 | if (event->attr.precise_ip) | 
|---|
| 3020 | intel_pmu_pebs_add(event); | 
|---|
| 3021 | if (intel_pmu_needs_branch_stack(event)) | 
|---|
| 3022 | intel_pmu_lbr_add(event); | 
|---|
| 3023 | if (is_pebs_counter_event_group(event) || | 
|---|
| 3024 | is_acr_event_group(event)) | 
|---|
| 3025 | this_cpu_ptr(&cpu_hw_events)->n_late_setup++; | 
|---|
| 3026 | } | 
|---|
| 3027 |  | 
|---|
| 3028 | /* | 
|---|
| 3029 | * Save and restart an expired event. Called by NMI contexts, | 
|---|
| 3030 | * so it has to be careful about preempting normal event ops: | 
|---|
| 3031 | */ | 
|---|
| 3032 | int intel_pmu_save_and_restart(struct perf_event *event) | 
|---|
| 3033 | { | 
|---|
| 3034 | static_call(x86_pmu_update)(event); | 
|---|
| 3035 | /* | 
|---|
| 3036 | * For a checkpointed counter always reset back to 0.  This | 
|---|
| 3037 | * avoids a situation where the counter overflows, aborts the | 
|---|
| 3038 | * transaction and is then set back to shortly before the | 
|---|
| 3039 | * overflow, and overflows and aborts again. | 
|---|
| 3040 | */ | 
|---|
| 3041 | if (unlikely(event_is_checkpointed(event))) { | 
|---|
| 3042 | /* No race with NMIs because the counter should not be armed */ | 
|---|
| 3043 | wrmsrq(event->hw.event_base, 0); | 
|---|
| 3044 | local64_set(&event->hw.prev_count, 0); | 
|---|
| 3045 | } | 
|---|
| 3046 | return static_call(x86_pmu_set_period)(event); | 
|---|
| 3047 | } | 
|---|
| 3048 |  | 
|---|
| 3049 | static int intel_pmu_set_period(struct perf_event *event) | 
|---|
| 3050 | { | 
|---|
| 3051 | if (unlikely(is_topdown_count(event))) | 
|---|
| 3052 | return static_call(intel_pmu_set_topdown_event_period)(event); | 
|---|
| 3053 |  | 
|---|
| 3054 | return x86_perf_event_set_period(event); | 
|---|
| 3055 | } | 
|---|
| 3056 |  | 
|---|
| 3057 | static u64 intel_pmu_update(struct perf_event *event) | 
|---|
| 3058 | { | 
|---|
| 3059 | if (unlikely(is_topdown_count(event))) | 
|---|
| 3060 | return static_call(intel_pmu_update_topdown_event)(event, NULL); | 
|---|
| 3061 |  | 
|---|
| 3062 | return x86_perf_event_update(event); | 
|---|
| 3063 | } | 
|---|
| 3064 |  | 
|---|
| 3065 | static void intel_pmu_reset(void) | 
|---|
| 3066 | { | 
|---|
| 3067 | struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds); | 
|---|
| 3068 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); | 
|---|
| 3069 | unsigned long *cntr_mask = hybrid(cpuc->pmu, cntr_mask); | 
|---|
| 3070 | unsigned long *fixed_cntr_mask = hybrid(cpuc->pmu, fixed_cntr_mask); | 
|---|
| 3071 | unsigned long flags; | 
|---|
| 3072 | int idx; | 
|---|
| 3073 |  | 
|---|
| 3074 | if (!*(u64 *)cntr_mask) | 
|---|
| 3075 | return; | 
|---|
| 3076 |  | 
|---|
| 3077 | local_irq_save(flags); | 
|---|
| 3078 |  | 
|---|
| 3079 | pr_info( "clearing PMU state on CPU#%d\n", smp_processor_id()); | 
|---|
| 3080 |  | 
|---|
| 3081 | for_each_set_bit(idx, cntr_mask, INTEL_PMC_MAX_GENERIC) { | 
|---|
| 3082 | wrmsrq_safe(x86_pmu_config_addr(idx), 0ull); | 
|---|
| 3083 | wrmsrq_safe(x86_pmu_event_addr(idx),  0ull); | 
|---|
| 3084 | } | 
|---|
| 3085 | for_each_set_bit(idx, fixed_cntr_mask, INTEL_PMC_MAX_FIXED) { | 
|---|
| 3086 | if (fixed_counter_disabled(idx, cpuc->pmu)) | 
|---|
| 3087 | continue; | 
|---|
| 3088 | wrmsrq_safe(x86_pmu_fixed_ctr_addr(idx), 0ull); | 
|---|
| 3089 | } | 
|---|
| 3090 |  | 
|---|
| 3091 | if (ds) | 
|---|
| 3092 | ds->bts_index = ds->bts_buffer_base; | 
|---|
| 3093 |  | 
|---|
| 3094 | /* Ack all overflows and disable fixed counters */ | 
|---|
| 3095 | if (x86_pmu.version >= 2) { | 
|---|
| 3096 | intel_pmu_ack_status(intel_pmu_get_status()); | 
|---|
| 3097 | wrmsrq(MSR_CORE_PERF_GLOBAL_CTRL, 0); | 
|---|
| 3098 | } | 
|---|
| 3099 |  | 
|---|
| 3100 | /* Reset LBRs and LBR freezing */ | 
|---|
| 3101 | if (x86_pmu.lbr_nr) { | 
|---|
| 3102 | update_debugctlmsr(get_debugctlmsr() & | 
|---|
| 3103 | ~(DEBUGCTLMSR_FREEZE_LBRS_ON_PMI|DEBUGCTLMSR_LBR)); | 
|---|
| 3104 | } | 
|---|
| 3105 |  | 
|---|
| 3106 | local_irq_restore(flags); | 
|---|
| 3107 | } | 
|---|
| 3108 |  | 
|---|
| 3109 | /* | 
|---|
| 3110 | * We may be running with guest PEBS events created by KVM, and the | 
|---|
| 3111 | * PEBS records are logged into the guest's DS and invisible to host. | 
|---|
| 3112 | * | 
|---|
| 3113 | * In the case of guest PEBS overflow, we only trigger a fake event | 
|---|
| 3114 | * to emulate the PEBS overflow PMI for guest PEBS counters in KVM. | 
|---|
| 3115 | * The guest will then vm-entry and check the guest DS area to read | 
|---|
| 3116 | * the guest PEBS records. | 
|---|
| 3117 | * | 
|---|
| 3118 | * The contents and other behavior of the guest event do not matter. | 
|---|
| 3119 | */ | 
|---|
| 3120 | static void x86_pmu_handle_guest_pebs(struct pt_regs *regs, | 
|---|
| 3121 | struct perf_sample_data *data) | 
|---|
| 3122 | { | 
|---|
| 3123 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); | 
|---|
| 3124 | u64 guest_pebs_idxs = cpuc->pebs_enabled & ~cpuc->intel_ctrl_host_mask; | 
|---|
| 3125 | struct perf_event *event = NULL; | 
|---|
| 3126 | int bit; | 
|---|
| 3127 |  | 
|---|
| 3128 | if (!unlikely(perf_guest_state())) | 
|---|
| 3129 | return; | 
|---|
| 3130 |  | 
|---|
| 3131 | if (!x86_pmu.pebs_ept || !x86_pmu.pebs_active || | 
|---|
| 3132 | !guest_pebs_idxs) | 
|---|
| 3133 | return; | 
|---|
| 3134 |  | 
|---|
| 3135 | for_each_set_bit(bit, (unsigned long *)&guest_pebs_idxs, X86_PMC_IDX_MAX) { | 
|---|
| 3136 | event = cpuc->events[bit]; | 
|---|
| 3137 | if (!event->attr.precise_ip) | 
|---|
| 3138 | continue; | 
|---|
| 3139 |  | 
|---|
| 3140 | perf_sample_data_init(data, 0, event->hw.last_period); | 
|---|
| 3141 | perf_event_overflow(event, data, regs); | 
|---|
| 3142 |  | 
|---|
| 3143 | /* Injecting one fake event is enough. */ | 
|---|
| 3144 | break; | 
|---|
| 3145 | } | 
|---|
| 3146 | } | 
|---|
| 3147 |  | 
|---|
| 3148 | static int handle_pmi_common(struct pt_regs *regs, u64 status) | 
|---|
| 3149 | { | 
|---|
| 3150 | struct perf_sample_data data; | 
|---|
| 3151 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); | 
|---|
| 3152 | int bit; | 
|---|
| 3153 | int handled = 0; | 
|---|
| 3154 |  | 
|---|
| 3155 | inc_irq_stat(apic_perf_irqs); | 
|---|
| 3156 |  | 
|---|
| 3157 | /* | 
|---|
| 3158 | * Ignore a range of extra bits in status that do not indicate | 
|---|
| 3159 | * overflow by themselves. | 
|---|
| 3160 | */ | 
|---|
| 3161 | status &= ~(GLOBAL_STATUS_COND_CHG | | 
|---|
| 3162 | GLOBAL_STATUS_ASIF | | 
|---|
| 3163 | GLOBAL_STATUS_LBRS_FROZEN); | 
|---|
| 3164 | if (!status) | 
|---|
| 3165 | return 0; | 
|---|
| 3166 | /* | 
|---|
| 3167 | * In case multiple PEBS events are sampled at the same time, | 
|---|
| 3168 | * it is possible to have GLOBAL_STATUS bit 62 set indicating | 
|---|
| 3169 | * PEBS buffer overflow and also seeing at most 3 PEBS counters | 
|---|
| 3170 | * having their bits set in the status register. This is a sign | 
|---|
| 3171 | * that there was at least one PEBS record pending at the time | 
|---|
| 3172 | * of the PMU interrupt. PEBS counters must only be processed | 
|---|
| 3173 | * via the drain_pebs() calls and not via the regular sample | 
|---|
| 3174 | * processing loop coming later in the function, otherwise | 
|---|
| 3175 | * phony regular samples may be generated in the sampling buffer | 
|---|
| 3176 | * not marked with the EXACT tag. Another possibility is to have | 
|---|
| 3177 | * one PEBS event and at least one non-PEBS event which overflows | 
|---|
| 3178 | * while PEBS is armed. In this case, bit 62 of GLOBAL_STATUS will | 
|---|
| 3179 | * not be set, yet the overflow status bit for the PEBS counter will | 
|---|
| 3180 | * be set on Skylake. | 
|---|
| 3181 | * | 
|---|
| 3182 | * To avoid this problem, we systematically ignore the PEBS-enabled | 
|---|
| 3183 | * counters from the GLOBAL_STATUS mask and we always process PEBS | 
|---|
| 3184 | * events via drain_pebs(). | 
|---|
| 3185 | */ | 
|---|
| 3186 | status &= ~(cpuc->pebs_enabled & x86_pmu.pebs_capable); | 
|---|
| 3187 |  | 
|---|
| 3188 | /* | 
|---|
| 3189 | * PEBS overflow sets bit 62 in the global status register | 
|---|
| 3190 | */ | 
|---|
| 3191 | if (__test_and_clear_bit(GLOBAL_STATUS_BUFFER_OVF_BIT, (unsigned long *)&status)) { | 
|---|
| 3192 | u64 pebs_enabled = cpuc->pebs_enabled; | 
|---|
| 3193 |  | 
|---|
| 3194 | handled++; | 
|---|
| 3195 | x86_pmu_handle_guest_pebs(regs, &data); | 
|---|
| 3196 | static_call(x86_pmu_drain_pebs)(regs, &data); | 
|---|
| 3197 |  | 
|---|
| 3198 | /* | 
|---|
| 3199 | * The PMI throttle may be triggered, which stops the PEBS event. | 
|---|
| 3200 | * Although cpuc->pebs_enabled is updated accordingly, | 
|---|
| 3201 | * MSR_IA32_PEBS_ENABLE is not, because cpuc->enabled has been | 
|---|
| 3202 | * forced to 0 in the PMI. | 
|---|
| 3203 | * Update the MSR if pebs_enabled has changed. | 
|---|
| 3204 | */ | 
|---|
| 3205 | if (pebs_enabled != cpuc->pebs_enabled) | 
|---|
| 3206 | wrmsrq(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled); | 
|---|
| 3207 |  | 
|---|
| 3208 | /* | 
|---|
| 3209 | * The PEBS handler above (PEBS counter snapshotting) has already | 
|---|
| 3210 | * updated the fixed counter 3 and perf metrics counts if they are | 
|---|
| 3211 | * in a counter group, so there is no need to update them again. | 
|---|
| 3212 | */ | 
|---|
| 3213 | if (cpuc->events[INTEL_PMC_IDX_FIXED_SLOTS] && | 
|---|
| 3214 | is_pebs_counter_event_group(cpuc->events[INTEL_PMC_IDX_FIXED_SLOTS])) | 
|---|
| 3215 | status &= ~GLOBAL_STATUS_PERF_METRICS_OVF_BIT; | 
|---|
| 3216 | } | 
|---|
| 3217 |  | 
|---|
| 3218 | /* | 
|---|
| 3219 | * Intel PT | 
|---|
| 3220 | */ | 
|---|
| 3221 | if (__test_and_clear_bit(GLOBAL_STATUS_TRACE_TOPAPMI_BIT, (unsigned long *)&status)) { | 
|---|
| 3222 | handled++; | 
|---|
| 3223 | if (!perf_guest_handle_intel_pt_intr()) | 
|---|
| 3224 | intel_pt_interrupt(); | 
|---|
| 3225 | } | 
|---|
| 3226 |  | 
|---|
| 3227 | /* | 
|---|
| 3228 | * Intel Perf metrics | 
|---|
| 3229 | */ | 
|---|
| 3230 | if (__test_and_clear_bit(GLOBAL_STATUS_PERF_METRICS_OVF_BIT, (unsigned long *)&status)) { | 
|---|
| 3231 | handled++; | 
|---|
| 3232 | static_call(intel_pmu_update_topdown_event)(NULL, NULL); | 
|---|
| 3233 | } | 
|---|
| 3234 |  | 
|---|
| 3235 | status &= hybrid(cpuc->pmu, intel_ctrl); | 
|---|
| 3236 |  | 
|---|
| 3237 | /* | 
|---|
| 3238 | * Checkpointed counters can lead to 'spurious' PMIs because the | 
|---|
| 3239 | * rollback caused by the PMI will have cleared the overflow status | 
|---|
| 3240 | * bit. Therefore always force probe these counters. | 
|---|
| 3241 | */ | 
|---|
| 3242 | status |= cpuc->intel_cp_status; | 
|---|
| 3243 |  | 
|---|
| 3244 | for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) { | 
|---|
| 3245 | struct perf_event *event = cpuc->events[bit]; | 
|---|
| 3246 | u64 last_period; | 
|---|
| 3247 |  | 
|---|
| 3248 | handled++; | 
|---|
| 3249 |  | 
|---|
| 3250 | if (!test_bit(bit, cpuc->active_mask)) | 
|---|
| 3251 | continue; | 
|---|
| 3252 |  | 
|---|
| 3253 | /* | 
|---|
| 3254 | * There may be unprocessed PEBS records in the PEBS buffer, | 
|---|
| 3255 | * which still stores the previous values. | 
|---|
| 3256 | * Process those records first before handling the latest value. | 
|---|
| 3257 | * For example, | 
|---|
| 3258 | * A is a regular counter | 
|---|
| 3259 | * B is a PEBS event which reads A | 
|---|
| 3260 | * C is a PEBS event | 
|---|
| 3261 | * | 
|---|
| 3262 | * The following can happen: | 
|---|
| 3263 | * B-assist			A=1 | 
|---|
| 3264 | * C				A=2 | 
|---|
| 3265 | * B-assist			A=3 | 
|---|
| 3266 | * A-overflow-PMI		A=4 | 
|---|
| 3267 | * C-assist-PMI (PEBS buffer)	A=5 | 
|---|
| 3268 | * | 
|---|
| 3269 | * The PEBS buffer has to be drained before handling the A-PMI | 
|---|
| 3270 | */ | 
|---|
| 3271 | if (is_pebs_counter_event_group(event)) | 
|---|
| 3272 | x86_pmu.drain_pebs(regs, &data); | 
|---|
| 3273 |  | 
|---|
| 3274 | last_period = event->hw.last_period; | 
|---|
| 3275 |  | 
|---|
| 3276 | if (!intel_pmu_save_and_restart(event)) | 
|---|
| 3277 | continue; | 
|---|
| 3278 |  | 
|---|
| 3279 | perf_sample_data_init(&data, 0, last_period); | 
|---|
| 3280 |  | 
|---|
| 3281 | if (has_branch_stack(event)) | 
|---|
| 3282 | intel_pmu_lbr_save_brstack(&data, cpuc, event); | 
|---|
| 3283 |  | 
|---|
| 3284 | perf_event_overflow(event, &data, regs); | 
|---|
| 3285 | } | 
|---|
| 3286 |  | 
|---|
| 3287 | return handled; | 
|---|
| 3288 | } | 
|---|
| 3289 |  | 
|---|
| 3290 | /* | 
|---|
| 3291 | * This handler is triggered by the local APIC, so the APIC IRQ handling | 
|---|
| 3292 | * rules apply: | 
|---|
| 3293 | */ | 
|---|
| 3294 | static int intel_pmu_handle_irq(struct pt_regs *regs) | 
|---|
| 3295 | { | 
|---|
| 3296 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); | 
|---|
| 3297 | bool late_ack = hybrid_bit(cpuc->pmu, late_ack); | 
|---|
| 3298 | bool mid_ack = hybrid_bit(cpuc->pmu, mid_ack); | 
|---|
| 3299 | int loops; | 
|---|
| 3300 | u64 status; | 
|---|
| 3301 | int handled; | 
|---|
| 3302 | int pmu_enabled; | 
|---|
| 3303 |  | 
|---|
| 3304 | /* | 
|---|
| 3305 | * Save the PMU state. | 
|---|
| 3306 | * It needs to be restored when leaving the handler. | 
|---|
| 3307 | */ | 
|---|
| 3308 | pmu_enabled = cpuc->enabled; | 
|---|
| 3309 | /* | 
|---|
| 3310 | * In general, the early ACK is only applied on old platforms. | 
|---|
| 3311 | * For big cores starting from Haswell, the late ACK should be | 
|---|
| 3312 | * applied. | 
|---|
| 3313 | * For small cores after Tremont, we have to do the ACK right | 
|---|
| 3314 | * before re-enabling counters, which is in the middle of the | 
|---|
| 3315 | * NMI handler. | 
|---|
| 3316 | */ | 
|---|
| 3317 | if (!late_ack && !mid_ack) | 
|---|
| 3318 | apic_write(APIC_LVTPC, APIC_DM_NMI); | 
|---|
| 3319 | intel_bts_disable_local(); | 
|---|
| 3320 | cpuc->enabled = 0; | 
|---|
| 3321 | __intel_pmu_disable_all(true); | 
|---|
| 3322 | handled = intel_pmu_drain_bts_buffer(); | 
|---|
| 3323 | handled += intel_bts_interrupt(); | 
|---|
| 3324 | status = intel_pmu_get_status(); | 
|---|
| 3325 | if (!status) | 
|---|
| 3326 | goto done; | 
|---|
| 3327 |  | 
|---|
| 3328 | loops = 0; | 
|---|
| 3329 | again: | 
|---|
| 3330 | intel_pmu_lbr_read(); | 
|---|
| 3331 | intel_pmu_ack_status(status); | 
|---|
| 3332 | if (++loops > 100) { | 
|---|
| 3333 | static bool warned; | 
|---|
| 3334 |  | 
|---|
| 3335 | if (!warned) { | 
|---|
| 3336 | WARN(1, "perfevents: irq loop stuck!\n"); | 
|---|
| 3337 | perf_event_print_debug(); | 
|---|
| 3338 | warned = true; | 
|---|
| 3339 | } | 
|---|
| 3340 | intel_pmu_reset(); | 
|---|
| 3341 | goto done; | 
|---|
| 3342 | } | 
|---|
| 3343 |  | 
|---|
| 3344 | handled += handle_pmi_common(regs, status); | 
|---|
| 3345 |  | 
|---|
| 3346 | /* | 
|---|
| 3347 | * Repeat if there is more work to be done: | 
|---|
| 3348 | */ | 
|---|
| 3349 | status = intel_pmu_get_status(); | 
|---|
| 3350 | if (status) | 
|---|
| 3351 | goto again; | 
|---|
| 3352 |  | 
|---|
| 3353 | done: | 
|---|
| 3354 | if (mid_ack) | 
|---|
| 3355 | apic_write(APIC_LVTPC, APIC_DM_NMI); | 
|---|
| 3356 | /* Only restore PMU state when it's active. See x86_pmu_disable(). */ | 
|---|
| 3357 | cpuc->enabled = pmu_enabled; | 
|---|
| 3358 | if (pmu_enabled) | 
|---|
| 3359 | __intel_pmu_enable_all(0, true); | 
|---|
| 3360 | intel_bts_enable_local(); | 
|---|
| 3361 |  | 
|---|
| 3362 | /* | 
|---|
| 3363 | * Only unmask the NMI after the overflow counters | 
|---|
| 3364 | * have been reset. This avoids spurious NMIs on | 
|---|
| 3365 | * Haswell CPUs. | 
|---|
| 3366 | */ | 
|---|
| 3367 | if (late_ack) | 
|---|
| 3368 | apic_write(APIC_LVTPC, APIC_DM_NMI); | 
|---|
| 3369 | return handled; | 
|---|
| 3370 | } | 
|---|
| 3371 |  | 
|---|
| 3372 | static struct event_constraint * | 
|---|
| 3373 | intel_bts_constraints(struct perf_event *event) | 
|---|
| 3374 | { | 
|---|
| 3375 | if (unlikely(intel_pmu_has_bts(event))) | 
|---|
| 3376 | return &bts_constraint; | 
|---|
| 3377 |  | 
|---|
| 3378 | return NULL; | 
|---|
| 3379 | } | 
|---|
| 3380 |  | 
|---|
| 3381 | /* | 
|---|
| 3382 | * Note: matches a fake event, like Fixed2. | 
|---|
| 3383 | */ | 
|---|
| 3384 | static struct event_constraint * | 
|---|
| 3385 | intel_vlbr_constraints(struct perf_event *event) | 
|---|
| 3386 | { | 
|---|
| 3387 | struct event_constraint *c = &vlbr_constraint; | 
|---|
| 3388 |  | 
|---|
| 3389 | if (unlikely(constraint_match(c, event->hw.config))) { | 
|---|
| 3390 | event->hw.flags |= c->flags; | 
|---|
| 3391 | return c; | 
|---|
| 3392 | } | 
|---|
| 3393 |  | 
|---|
| 3394 | return NULL; | 
|---|
| 3395 | } | 
|---|
| 3396 |  | 
|---|
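|  | /* | 
|---|
|  | * If the requested offcore response MSR is already claimed with a | 
|---|
|  | * different config, the caller may retry with the sibling MSR; only | 
|---|
|  | * allow the switch when the config fits the alternate register's | 
|---|
|  | * valid_mask. | 
|---|
|  | */ | 
|---|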
| 3397 | static int intel_alt_er(struct cpu_hw_events *cpuc, | 
|---|
| 3398 | int idx, u64 config) | 
|---|
| 3399 | { | 
|---|
| 3400 | struct extra_reg *extra_regs = hybrid(cpuc->pmu, extra_regs); | 
|---|
| 3401 | int alt_idx = idx; | 
|---|
| 3402 |  | 
|---|
| 3403 | if (!(x86_pmu.flags & PMU_FL_HAS_RSP_1)) | 
|---|
| 3404 | return idx; | 
|---|
| 3405 |  | 
|---|
| 3406 | if (idx == EXTRA_REG_RSP_0) | 
|---|
| 3407 | alt_idx = EXTRA_REG_RSP_1; | 
|---|
| 3408 |  | 
|---|
| 3409 | if (idx == EXTRA_REG_RSP_1) | 
|---|
| 3410 | alt_idx = EXTRA_REG_RSP_0; | 
|---|
| 3411 |  | 
|---|
| 3412 | if (config & ~extra_regs[alt_idx].valid_mask) | 
|---|
| 3413 | return idx; | 
|---|
| 3414 |  | 
|---|
| 3415 | return alt_idx; | 
|---|
| 3416 | } | 
|---|
| 3417 |  | 
|---|
| 3418 | static void intel_fixup_er(struct perf_event *event, int idx) | 
|---|
| 3419 | { | 
|---|
| 3420 | struct extra_reg *extra_regs = hybrid(event->pmu, extra_regs); | 
|---|
| 3421 | event->hw.extra_reg.idx = idx; | 
|---|
| 3422 |  | 
|---|
| 3423 | if (idx == EXTRA_REG_RSP_0) { | 
|---|
| 3424 | event->hw.config &= ~INTEL_ARCH_EVENT_MASK; | 
|---|
| 3425 | event->hw.config |= extra_regs[EXTRA_REG_RSP_0].event; | 
|---|
| 3426 | event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0; | 
|---|
| 3427 | } else if (idx == EXTRA_REG_RSP_1) { | 
|---|
| 3428 | event->hw.config &= ~INTEL_ARCH_EVENT_MASK; | 
|---|
| 3429 | event->hw.config |= extra_regs[EXTRA_REG_RSP_1].event; | 
|---|
| 3430 | event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1; | 
|---|
| 3431 | } | 
|---|
| 3432 | } | 
|---|
| 3433 |  | 
|---|
| 3434 | /* | 
|---|
| 3435 | * manage allocation of shared extra msr for certain events | 
|---|
| 3436 | * | 
|---|
| 3437 | * sharing can be: | 
|---|
| 3438 | * per-cpu: to be shared between the various events on a single PMU | 
|---|
| 3439 | * per-core: per-cpu + shared by HT threads | 
|---|
| 3440 | */ | 
|---|
| 3441 | static struct event_constraint * | 
|---|
| 3442 | __intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc, | 
|---|
| 3443 | struct perf_event *event, | 
|---|
| 3444 | struct hw_perf_event_extra *reg) | 
|---|
| 3445 | { | 
|---|
| 3446 | struct event_constraint *c = &emptyconstraint; | 
|---|
| 3447 | struct er_account *era; | 
|---|
| 3448 | unsigned long flags; | 
|---|
| 3449 | int idx = reg->idx; | 
|---|
| 3450 |  | 
|---|
| 3451 | /* | 
|---|
| 3452 | * reg->alloc can be set due to existing state, so for fake cpuc we | 
|---|
| 3453 | * need to ignore this, otherwise we might fail to allocate proper fake | 
|---|
| 3454 | * state for this extra reg constraint. Also see the comment below. | 
|---|
| 3455 | */ | 
|---|
| 3456 | if (reg->alloc && !cpuc->is_fake) | 
|---|
| 3457 | return NULL; /* call x86_get_event_constraint() */ | 
|---|
| 3458 |  | 
|---|
| 3459 | again: | 
|---|
| 3460 | era = &cpuc->shared_regs->regs[idx]; | 
|---|
| 3461 | /* | 
|---|
| 3462 | * we use spin_lock_irqsave() to avoid lockdep issues when | 
|---|
| 3463 | * passing a fake cpuc | 
|---|
| 3464 | */ | 
|---|
| 3465 | raw_spin_lock_irqsave(&era->lock, flags); | 
|---|
| 3466 |  | 
|---|
| 3467 | if (!atomic_read(&era->ref) || era->config == reg->config) { | 
|---|
| 3468 |  | 
|---|
| 3469 | /* | 
|---|
| 3470 | * If it's a fake cpuc -- as per validate_{group,event}() we | 
|---|
| 3471 | * shouldn't touch event state and we can avoid doing so | 
|---|
| 3472 | * since both will only call get_event_constraints() once | 
|---|
| 3473 | * on each event, this avoids the need for reg->alloc. | 
|---|
| 3474 | * | 
|---|
| 3475 | * Not doing the ER fixup will only result in era->reg being | 
|---|
| 3476 | * wrong, but since we won't actually try and program hardware | 
|---|
| 3477 | * this isn't a problem either. | 
|---|
| 3478 | */ | 
|---|
| 3479 | if (!cpuc->is_fake) { | 
|---|
| 3480 | if (idx != reg->idx) | 
|---|
| 3481 | intel_fixup_er(event, idx); | 
|---|
| 3482 |  | 
|---|
| 3483 | /* | 
|---|
| 3484 | * x86_schedule_events() can call get_event_constraints() | 
|---|
| 3485 | * multiple times on events in the case of incremental | 
|---|
| 3486 | * scheduling(). reg->alloc ensures we only do the ER | 
|---|
| 3487 | * allocation once. | 
|---|
| 3488 | */ | 
|---|
| 3489 | reg->alloc = 1; | 
|---|
| 3490 | } | 
|---|
| 3491 |  | 
|---|
| 3492 | /* lock in msr value */ | 
|---|
| 3493 | era->config = reg->config; | 
|---|
| 3494 | era->reg = reg->reg; | 
|---|
| 3495 |  | 
|---|
| 3496 | /* one more user */ | 
|---|
| 3497 | atomic_inc(&era->ref); | 
|---|
| 3498 |  | 
|---|
| 3499 | /* | 
|---|
| 3500 | * need to call x86_get_event_constraint() | 
|---|
| 3501 | * to check if associated event has constraints | 
|---|
| 3502 | */ | 
|---|
| 3503 | c = NULL; | 
|---|
| 3504 | } else { | 
|---|
| 3505 | idx = intel_alt_er(cpuc, idx, reg->config); | 
|---|
| 3506 | if (idx != reg->idx) { | 
|---|
| 3507 | raw_spin_unlock_irqrestore(&era->lock, flags); | 
|---|
| 3508 | goto again; | 
|---|
| 3509 | } | 
|---|
| 3510 | } | 
|---|
| 3511 | raw_spin_unlock_irqrestore(&era->lock, flags); | 
|---|
| 3512 |  | 
|---|
| 3513 | return c; | 
|---|
| 3514 | } | 
|---|
| 3515 |  | 
|---|
| 3516 | static void | 
|---|
| 3517 | __intel_shared_reg_put_constraints(struct cpu_hw_events *cpuc, | 
|---|
| 3518 | struct hw_perf_event_extra *reg) | 
|---|
| 3519 | { | 
|---|
| 3520 | struct er_account *era; | 
|---|
| 3521 |  | 
|---|
| 3522 | /* | 
|---|
| 3523 | * Only put constraint if extra reg was actually allocated. Also takes | 
|---|
| 3524 | * care of events which do not use an extra shared reg. | 
|---|
| 3525 | * | 
|---|
| 3526 | * Also, if this is a fake cpuc we shouldn't touch any event state | 
|---|
| 3527 | * (reg->alloc) and we don't care about leaving inconsistent cpuc state | 
|---|
| 3528 | * either since it'll be thrown out. | 
|---|
| 3529 | */ | 
|---|
| 3530 | if (!reg->alloc || cpuc->is_fake) | 
|---|
| 3531 | return; | 
|---|
| 3532 |  | 
|---|
| 3533 | era = &cpuc->shared_regs->regs[reg->idx]; | 
|---|
| 3534 |  | 
|---|
| 3535 | /* one fewer user */ | 
|---|
| 3536 | atomic_dec(&era->ref); | 
|---|
| 3537 |  | 
|---|
| 3538 | /* allocate again next time */ | 
|---|
| 3539 | reg->alloc = 0; | 
|---|
| 3540 | } | 
|---|
| 3541 |  | 
|---|
| 3542 | static struct event_constraint * | 
|---|
| 3543 | intel_shared_regs_constraints(struct cpu_hw_events *cpuc, | 
|---|
| 3544 | struct perf_event *event) | 
|---|
| 3545 | { | 
|---|
| 3546 | struct event_constraint *c = NULL, *d; | 
|---|
| 3547 | struct hw_perf_event_extra *xreg, *breg; | 
|---|
| 3548 |  | 
|---|
| 3549 | xreg = &event->hw.extra_reg; | 
|---|
| 3550 | if (xreg->idx != EXTRA_REG_NONE) { | 
|---|
| 3551 | c = __intel_shared_reg_get_constraints(cpuc, event, xreg); | 
|---|
| 3552 | if (c == &emptyconstraint) | 
|---|
| 3553 | return c; | 
|---|
| 3554 | } | 
|---|
| 3555 | breg = &event->hw.branch_reg; | 
|---|
| 3556 | if (breg->idx != EXTRA_REG_NONE) { | 
|---|
| 3557 | d = __intel_shared_reg_get_constraints(cpuc, event, breg); | 
|---|
| 3558 | if (d == &emptyconstraint) { | 
|---|
| 3559 | __intel_shared_reg_put_constraints(cpuc, xreg); | 
|---|
| 3560 | c = d; | 
|---|
| 3561 | } | 
|---|
| 3562 | } | 
|---|
| 3563 | return c; | 
|---|
| 3564 | } | 
|---|
| 3565 |  | 
|---|
| 3566 | struct event_constraint * | 
|---|
| 3567 | x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx, | 
|---|
| 3568 | struct perf_event *event) | 
|---|
| 3569 | { | 
|---|
| 3570 | struct event_constraint *event_constraints = hybrid(cpuc->pmu, event_constraints); | 
|---|
| 3571 | struct event_constraint *c; | 
|---|
| 3572 |  | 
|---|
| 3573 | if (event_constraints) { | 
|---|
| 3574 | for_each_event_constraint(c, event_constraints) { | 
|---|
| 3575 | if (constraint_match(c, event->hw.config)) { | 
|---|
| 3576 | event->hw.flags |= c->flags; | 
|---|
| 3577 | return c; | 
|---|
| 3578 | } | 
|---|
| 3579 | } | 
|---|
| 3580 | } | 
|---|
| 3581 |  | 
|---|
| 3582 | return &hybrid_var(cpuc->pmu, unconstrained); | 
|---|
| 3583 | } | 
|---|
| 3584 |  | 
|---|
| 3585 | static struct event_constraint * | 
|---|
| 3586 | __intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx, | 
|---|
| 3587 | struct perf_event *event) | 
|---|
| 3588 | { | 
|---|
| 3589 | struct event_constraint *c; | 
|---|
| 3590 |  | 
|---|
| 3591 | c = intel_vlbr_constraints(event); | 
|---|
| 3592 | if (c) | 
|---|
| 3593 | return c; | 
|---|
| 3594 |  | 
|---|
| 3595 | c = intel_bts_constraints(event); | 
|---|
| 3596 | if (c) | 
|---|
| 3597 | return c; | 
|---|
| 3598 |  | 
|---|
| 3599 | c = intel_shared_regs_constraints(cpuc, event); | 
|---|
| 3600 | if (c) | 
|---|
| 3601 | return c; | 
|---|
| 3602 |  | 
|---|
| 3603 | c = intel_pebs_constraints(event); | 
|---|
| 3604 | if (c) | 
|---|
| 3605 | return c; | 
|---|
| 3606 |  | 
|---|
| 3607 | return x86_get_event_constraints(cpuc, idx, event); | 
|---|
| 3608 | } | 
|---|
| 3609 |  | 
|---|
| 3610 | static void | 
|---|
| 3611 | intel_start_scheduling(struct cpu_hw_events *cpuc) | 
|---|
| 3612 | { | 
|---|
| 3613 | struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs; | 
|---|
| 3614 | struct intel_excl_states *xl; | 
|---|
| 3615 | int tid = cpuc->excl_thread_id; | 
|---|
| 3616 |  | 
|---|
| 3617 | /* | 
|---|
| 3618 | * nothing needed if in group validation mode | 
|---|
| 3619 | */ | 
|---|
| 3620 | if (cpuc->is_fake || !is_ht_workaround_enabled()) | 
|---|
| 3621 | return; | 
|---|
| 3622 |  | 
|---|
| 3623 | /* | 
|---|
| 3624 | * no exclusion needed | 
|---|
| 3625 | */ | 
|---|
| 3626 | if (WARN_ON_ONCE(!excl_cntrs)) | 
|---|
| 3627 | return; | 
|---|
| 3628 |  | 
|---|
| 3629 | xl = &excl_cntrs->states[tid]; | 
|---|
| 3630 |  | 
|---|
| 3631 | xl->sched_started = true; | 
|---|
| 3632 | /* | 
|---|
| 3633 | * Lock the shared state until we are done scheduling, | 
|---|
| 3634 | * i.e. until intel_stop_scheduling(); this makes | 
|---|
| 3635 | * scheduling appear as a transaction. | 
|---|
| 3636 | */ | 
|---|
| 3637 | raw_spin_lock(&excl_cntrs->lock); | 
|---|
| 3638 | } | 
|---|
| 3639 |  | 
|---|
| 3640 | static void intel_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr) | 
|---|
| 3641 | { | 
|---|
| 3642 | struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs; | 
|---|
| 3643 | struct event_constraint *c = cpuc->event_constraint[idx]; | 
|---|
| 3644 | struct intel_excl_states *xl; | 
|---|
| 3645 | int tid = cpuc->excl_thread_id; | 
|---|
| 3646 |  | 
|---|
| 3647 | if (cpuc->is_fake || !is_ht_workaround_enabled()) | 
|---|
| 3648 | return; | 
|---|
| 3649 |  | 
|---|
| 3650 | if (WARN_ON_ONCE(!excl_cntrs)) | 
|---|
| 3651 | return; | 
|---|
| 3652 |  | 
|---|
| 3653 | if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) | 
|---|
| 3654 | return; | 
|---|
| 3655 |  | 
|---|
| 3656 | xl = &excl_cntrs->states[tid]; | 
|---|
| 3657 |  | 
|---|
| 3658 | lockdep_assert_held(&excl_cntrs->lock); | 
|---|
| 3659 |  | 
|---|
| 3660 | if (c->flags & PERF_X86_EVENT_EXCL) | 
|---|
| 3661 | xl->state[cntr] = INTEL_EXCL_EXCLUSIVE; | 
|---|
| 3662 | else | 
|---|
| 3663 | xl->state[cntr] = INTEL_EXCL_SHARED; | 
|---|
| 3664 | } | 
|---|
| 3665 |  | 
|---|
| 3666 | static void | 
|---|
| 3667 | intel_stop_scheduling(struct cpu_hw_events *cpuc) | 
|---|
| 3668 | { | 
|---|
| 3669 | struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs; | 
|---|
| 3670 | struct intel_excl_states *xl; | 
|---|
| 3671 | int tid = cpuc->excl_thread_id; | 
|---|
| 3672 |  | 
|---|
| 3673 | /* | 
|---|
| 3674 | * nothing needed if in group validation mode | 
|---|
| 3675 | */ | 
|---|
| 3676 | if (cpuc->is_fake || !is_ht_workaround_enabled()) | 
|---|
| 3677 | return; | 
|---|
| 3678 | /* | 
|---|
| 3679 | * no exclusion needed | 
|---|
| 3680 | */ | 
|---|
| 3681 | if (WARN_ON_ONCE(!excl_cntrs)) | 
|---|
| 3682 | return; | 
|---|
| 3683 |  | 
|---|
| 3684 | xl = &excl_cntrs->states[tid]; | 
|---|
| 3685 |  | 
|---|
| 3686 | xl->sched_started = false; | 
|---|
| 3687 | /* | 
|---|
| 3688 | * release shared state lock (acquired in intel_start_scheduling()) | 
|---|
| 3689 | */ | 
|---|
| 3690 | raw_spin_unlock(&excl_cntrs->lock); | 
|---|
| 3691 | } | 
|---|
| 3692 |  | 
|---|
| 3693 | static struct event_constraint * | 
|---|
| 3694 | dyn_constraint(struct cpu_hw_events *cpuc, struct event_constraint *c, int idx) | 
|---|
| 3695 | { | 
|---|
| 3696 | WARN_ON_ONCE(!cpuc->constraint_list); | 
|---|
| 3697 |  | 
|---|
| 3698 | if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) { | 
|---|
| 3699 | struct event_constraint *cx; | 
|---|
| 3700 |  | 
|---|
| 3701 | /* | 
|---|
| 3702 | * grab pre-allocated constraint entry | 
|---|
| 3703 | */ | 
|---|
| 3704 | cx = &cpuc->constraint_list[idx]; | 
|---|
| 3705 |  | 
|---|
| 3706 | /* | 
|---|
| 3707 | * initialize dynamic constraint | 
|---|
| 3708 | * with static constraint | 
|---|
| 3709 | */ | 
|---|
| 3710 | *cx = *c; | 
|---|
| 3711 |  | 
|---|
| 3712 | /* | 
|---|
| 3713 | * mark constraint as dynamic | 
|---|
| 3714 | */ | 
|---|
| 3715 | cx->flags |= PERF_X86_EVENT_DYNAMIC; | 
|---|
| 3716 | c = cx; | 
|---|
| 3717 | } | 
|---|
| 3718 |  | 
|---|
| 3719 | return c; | 
|---|
| 3720 | } | 
|---|
| 3721 |  | 
|---|
| 3722 | static struct event_constraint * | 
|---|
| 3723 | intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event, | 
|---|
| 3724 | int idx, struct event_constraint *c) | 
|---|
| 3725 | { | 
|---|
| 3726 | struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs; | 
|---|
| 3727 | struct intel_excl_states *xlo; | 
|---|
| 3728 | int tid = cpuc->excl_thread_id; | 
|---|
| 3729 | int is_excl, i, w; | 
|---|
| 3730 |  | 
|---|
| 3731 | /* | 
|---|
| 3732 | * validating a group does not require | 
|---|
| 3733 | * enforcing cross-thread exclusion | 
|---|
| 3734 | */ | 
|---|
| 3735 | if (cpuc->is_fake || !is_ht_workaround_enabled()) | 
|---|
| 3736 | return c; | 
|---|
| 3737 |  | 
|---|
| 3738 | /* | 
|---|
| 3739 | * no exclusion needed | 
|---|
| 3740 | */ | 
|---|
| 3741 | if (WARN_ON_ONCE(!excl_cntrs)) | 
|---|
| 3742 | return c; | 
|---|
| 3743 |  | 
|---|
| 3744 | /* | 
|---|
| 3745 | * because we modify the constraint, we need | 
|---|
| 3746 | * to make a copy. Static constraints come | 
|---|
| 3747 | * from static const tables. | 
|---|
| 3748 | * | 
|---|
| 3749 | * only needed when constraint has not yet | 
|---|
| 3750 | * been cloned (marked dynamic) | 
|---|
| 3751 | */ | 
|---|
| 3752 | c = dyn_constraint(cpuc, c, idx); | 
|---|
| 3753 |  | 
|---|
| 3754 | /* | 
|---|
| 3755 | * From here on, the constraint is dynamic. | 
|---|
| 3756 | * Either it was just allocated above, or it | 
|---|
| 3757 | * was allocated during an earlier invocation | 
|---|
| 3758 | * of this function | 
|---|
| 3759 | */ | 
|---|
| 3760 |  | 
|---|
| 3761 | /* | 
|---|
| 3762 | * state of sibling HT | 
|---|
| 3763 | */ | 
|---|
| 3764 | xlo = &excl_cntrs->states[tid ^ 1]; | 
|---|
| 3765 |  | 
|---|
| 3766 | /* | 
|---|
| 3767 | * event requires exclusive counter access | 
|---|
| 3768 | * across HT threads | 
|---|
| 3769 | */ | 
|---|
| 3770 | is_excl = c->flags & PERF_X86_EVENT_EXCL; | 
|---|
| 3771 | if (is_excl && !(event->hw.flags & PERF_X86_EVENT_EXCL_ACCT)) { | 
|---|
| 3772 | event->hw.flags |= PERF_X86_EVENT_EXCL_ACCT; | 
|---|
| 3773 | if (!cpuc->n_excl++) | 
|---|
| 3774 | WRITE_ONCE(excl_cntrs->has_exclusive[tid], 1); | 
|---|
| 3775 | } | 
|---|
| 3776 |  | 
|---|
| 3777 | /* | 
|---|
| 3778 | * Modify static constraint with current dynamic | 
|---|
| 3779 | * state of thread | 
|---|
| 3780 | * | 
|---|
| 3781 | * EXCLUSIVE: sibling counter measuring exclusive event | 
|---|
| 3782 | * SHARED   : sibling counter measuring non-exclusive event | 
|---|
| 3783 | * UNUSED   : sibling counter unused | 
|---|
| 3784 | */ | 
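|---|
|  | /* | 
|---|
|  | * A minimal worked example of the pruning loop below (illustrative, | 
|---|
|  | * not from the original source): if the sibling thread has counter 0 | 
|---|
|  | * in EXCLUSIVE state and counter 1 in SHARED state, a non-exclusive | 
|---|
|  | * event starting with idxmsk 0xf is left with counters 1-3, while an | 
|---|
|  | * exclusive event is left with counters 2-3 only. | 
|---|
|  | */ | 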
|---|
| 3785 | w = c->weight; | 
|---|
| 3786 | for_each_set_bit(i, c->idxmsk, X86_PMC_IDX_MAX) { | 
|---|
| 3787 | /* | 
|---|
| 3788 | * exclusive event in sibling counter | 
|---|
| 3789 | * our corresponding counter cannot be used | 
|---|
| 3790 | * regardless of our event | 
|---|
| 3791 | */ | 
|---|
| 3792 | if (xlo->state[i] == INTEL_EXCL_EXCLUSIVE) { | 
|---|
| 3793 | __clear_bit(i, c->idxmsk); | 
|---|
| 3794 | w--; | 
|---|
| 3795 | continue; | 
|---|
| 3796 | } | 
|---|
| 3797 | /* | 
|---|
| 3798 | * if measuring an exclusive event, sibling | 
|---|
| 3799 | * measuring non-exclusive, then counter cannot | 
|---|
| 3800 | * be used | 
|---|
| 3801 | */ | 
|---|
| 3802 | if (is_excl && xlo->state[i] == INTEL_EXCL_SHARED) { | 
|---|
| 3803 | __clear_bit(i, c->idxmsk); | 
|---|
| 3804 | w--; | 
|---|
| 3805 | continue; | 
|---|
| 3806 | } | 
|---|
| 3807 | } | 
|---|
| 3808 |  | 
|---|
| 3809 | /* | 
|---|
| 3810 | * if we return an empty mask, then switch | 
|---|
| 3811 | * back to static empty constraint to avoid | 
|---|
| 3812 | * the cost of freeing later on | 
|---|
| 3813 | */ | 
|---|
| 3814 | if (!w) | 
|---|
| 3815 | c = &emptyconstraint; | 
|---|
| 3816 |  | 
|---|
| 3817 | c->weight = w; | 
|---|
| 3818 |  | 
|---|
| 3819 | return c; | 
|---|
| 3820 | } | 
|---|
| 3821 |  | 
|---|
| 3822 | static struct event_constraint * | 
|---|
| 3823 | intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx, | 
|---|
| 3824 | struct perf_event *event) | 
|---|
| 3825 | { | 
|---|
| 3826 | struct event_constraint *c1, *c2; | 
|---|
| 3827 |  | 
|---|
| 3828 | c1 = cpuc->event_constraint[idx]; | 
|---|
| 3829 |  | 
|---|
| 3830 | /* | 
|---|
| 3831 | * first time only | 
|---|
| 3832 | * - static constraint: no change across incremental scheduling calls | 
|---|
| 3833 | * - dynamic constraint: handled by intel_get_excl_constraints() | 
|---|
| 3834 | */ | 
|---|
| 3835 | c2 = __intel_get_event_constraints(cpuc, idx, event); | 
|---|
| 3836 | if (c1) { | 
|---|
| 3837 | WARN_ON_ONCE(!(c1->flags & PERF_X86_EVENT_DYNAMIC)); | 
|---|
| 3838 | bitmap_copy(c1->idxmsk, c2->idxmsk, X86_PMC_IDX_MAX); | 
|---|
| 3839 | c1->weight = c2->weight; | 
|---|
| 3840 | c2 = c1; | 
|---|
| 3841 | } | 
|---|
| 3842 |  | 
|---|
| 3843 | if (cpuc->excl_cntrs) | 
|---|
| 3844 | return intel_get_excl_constraints(cpuc, event, idx, c2); | 
|---|
| 3845 |  | 
|---|
| 3846 | if (event->hw.dyn_constraint != ~0ULL) { | 
|---|
| 3847 | c2 = dyn_constraint(cpuc, c2, idx); | 
|---|
| 3848 | c2->idxmsk64 &= event->hw.dyn_constraint; | 
|---|
| 3849 | c2->weight = hweight64(c2->idxmsk64); | 
|---|
| 3850 | } | 
|---|
| 3851 |  | 
|---|
| 3852 | return c2; | 
|---|
| 3853 | } | 
|---|
| 3854 |  | 
|---|
| 3855 | static void intel_put_excl_constraints(struct cpu_hw_events *cpuc, | 
|---|
| 3856 | struct perf_event *event) | 
|---|
| 3857 | { | 
|---|
| 3858 | struct hw_perf_event *hwc = &event->hw; | 
|---|
| 3859 | struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs; | 
|---|
| 3860 | int tid = cpuc->excl_thread_id; | 
|---|
| 3861 | struct intel_excl_states *xl; | 
|---|
| 3862 |  | 
|---|
| 3863 | /* | 
|---|
| 3864 | * nothing needed if in group validation mode | 
|---|
| 3865 | */ | 
|---|
| 3866 | if (cpuc->is_fake) | 
|---|
| 3867 | return; | 
|---|
| 3868 |  | 
|---|
| 3869 | if (WARN_ON_ONCE(!excl_cntrs)) | 
|---|
| 3870 | return; | 
|---|
| 3871 |  | 
|---|
| 3872 | if (hwc->flags & PERF_X86_EVENT_EXCL_ACCT) { | 
|---|
| 3873 | hwc->flags &= ~PERF_X86_EVENT_EXCL_ACCT; | 
|---|
| 3874 | if (!--cpuc->n_excl) | 
|---|
| 3875 | WRITE_ONCE(excl_cntrs->has_exclusive[tid], 0); | 
|---|
| 3876 | } | 
|---|
| 3877 |  | 
|---|
| 3878 | /* | 
|---|
| 3879 | * If event was actually assigned, then mark the counter state as | 
|---|
| 3880 | * unused now. | 
|---|
| 3881 | */ | 
|---|
| 3882 | if (hwc->idx >= 0) { | 
|---|
| 3883 | xl = &excl_cntrs->states[tid]; | 
|---|
| 3884 |  | 
|---|
| 3885 | /* | 
|---|
| 3886 | * put_constraint may be called from x86_schedule_events() | 
|---|
| 3887 | * which already has the lock held so here make locking | 
|---|
| 3888 | * conditional. | 
|---|
| 3889 | */ | 
|---|
| 3890 | if (!xl->sched_started) | 
|---|
| 3891 | raw_spin_lock(&excl_cntrs->lock); | 
|---|
| 3892 |  | 
|---|
| 3893 | xl->state[hwc->idx] = INTEL_EXCL_UNUSED; | 
|---|
| 3894 |  | 
|---|
| 3895 | if (!xl->sched_started) | 
|---|
| 3896 | raw_spin_unlock(&excl_cntrs->lock); | 
|---|
| 3897 | } | 
|---|
| 3898 | } | 
|---|
| 3899 |  | 
|---|
| 3900 | static void | 
|---|
| 3901 | intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc, | 
|---|
| 3902 | struct perf_event *event) | 
|---|
| 3903 | { | 
|---|
| 3904 | struct hw_perf_event_extra *reg; | 
|---|
| 3905 |  | 
|---|
| 3906 | reg = &event->hw.extra_reg; | 
|---|
| 3907 | if (reg->idx != EXTRA_REG_NONE) | 
|---|
| 3908 | __intel_shared_reg_put_constraints(cpuc, reg); | 
|---|
| 3909 |  | 
|---|
| 3910 | reg = &event->hw.branch_reg; | 
|---|
| 3911 | if (reg->idx != EXTRA_REG_NONE) | 
|---|
| 3912 | __intel_shared_reg_put_constraints(cpuc, reg); | 
|---|
| 3913 | } | 
|---|
| 3914 |  | 
|---|
| 3915 | static void intel_put_event_constraints(struct cpu_hw_events *cpuc, | 
|---|
| 3916 | struct perf_event *event) | 
|---|
| 3917 | { | 
|---|
| 3918 | intel_put_shared_regs_event_constraints(cpuc, event); | 
|---|
| 3919 |  | 
|---|
| 3920 | /* | 
|---|
| 3921 | * if the PMU has exclusive counter restrictions, then | 
|---|
| 3922 | * all events are subject to them and must call the | 
|---|
| 3923 | * put_excl_constraints() routine | 
|---|
| 3924 | */ | 
|---|
| 3925 | if (cpuc->excl_cntrs) | 
|---|
| 3926 | intel_put_excl_constraints(cpuc, event); | 
|---|
| 3927 | } | 
|---|
| 3928 |  | 
|---|
| 3929 | static void intel_pebs_aliases_core2(struct perf_event *event) | 
|---|
| 3930 | { | 
|---|
| 3931 | if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) { | 
|---|
| 3932 | /* | 
|---|
| 3933 | * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P | 
|---|
| 3934 | * (0x003c) so that we can use it with PEBS. | 
|---|
| 3935 | * | 
|---|
| 3936 | * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't | 
|---|
| 3937 | * PEBS capable. However we can use INST_RETIRED.ANY_P | 
|---|
| 3938 | * (0x00c0), which is a PEBS capable event, to get the same | 
|---|
| 3939 | * count. | 
|---|
| 3940 | * | 
|---|
| 3941 | * INST_RETIRED.ANY_P counts the number of cycles that retire | 
|---|
| 3942 | * CNTMASK instructions. By setting CNTMASK to a value (16) | 
|---|
| 3943 | * larger than the maximum number of instructions that can be | 
|---|
| 3944 | * retired per cycle (4) and then inverting the condition, we | 
|---|
| 3945 | * count all cycles that retire 16 or fewer instructions, which | 
|---|
| 3946 | * is every cycle. | 
|---|
| 3947 | * | 
|---|
| 3948 | * Thereby we gain a PEBS capable cycle counter. | 
|---|
| 3949 | */ | 
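|---|
|  | /* | 
|---|
|  | * Worked encoding (a sketch, assuming the architectural PERFEVTSEL | 
|---|
|  | * layout: event select in bits 0-7, umask in bits 8-15, INV at bit 23, | 
|---|
|  | * CMASK in bits 24-31): the X86_CONFIG() just below expands to the | 
|---|
|  | * raw config 0x108000c0. | 
|---|
|  | */ | 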
|---|
| 3950 | u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16); | 
|---|
| 3951 |  | 
|---|
| 3952 | alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK); | 
|---|
| 3953 | event->hw.config = alt_config; | 
|---|
| 3954 | } | 
|---|
| 3955 | } | 
|---|
| 3956 |  | 
|---|
| 3957 | static void intel_pebs_aliases_snb(struct perf_event *event) | 
|---|
| 3958 | { | 
|---|
| 3959 | if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) { | 
|---|
| 3960 | /* | 
|---|
| 3961 | * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P | 
|---|
| 3962 | * (0x003c) so that we can use it with PEBS. | 
|---|
| 3963 | * | 
|---|
| 3964 | * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't | 
|---|
| 3965 | * PEBS capable. However we can use UOPS_RETIRED.ALL | 
|---|
| 3966 | * (0x01c2), which is a PEBS capable event, to get the same | 
|---|
| 3967 | * count. | 
|---|
| 3968 | * | 
|---|
| 3969 | * UOPS_RETIRED.ALL counts the number of cycles that retire | 
|---|
| 3970 | * CNTMASK micro-ops. By setting CNTMASK to a value (16) | 
|---|
| 3971 | * larger than the maximum number of micro-ops that can be | 
|---|
| 3972 | * retired per cycle (4) and then inverting the condition, we | 
|---|
| 3973 | * count all cycles that retire 16 or fewer micro-ops, which | 
|---|
| 3974 | * is every cycle. | 
|---|
| 3975 | * | 
|---|
| 3976 | * Thereby we gain a PEBS capable cycle counter. | 
|---|
| 3977 | */ | 
|---|
| 3978 | u64 alt_config = X86_CONFIG(.event=0xc2, .umask=0x01, .inv=1, .cmask=16); | 
|---|
| 3979 |  | 
|---|
| 3980 | alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK); | 
|---|
| 3981 | event->hw.config = alt_config; | 
|---|
| 3982 | } | 
|---|
| 3983 | } | 
|---|
| 3984 |  | 
|---|
| 3985 | static void intel_pebs_aliases_precdist(struct perf_event *event) | 
|---|
| 3986 | { | 
|---|
| 3987 | if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) { | 
|---|
| 3988 | /* | 
|---|
| 3989 | * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P | 
|---|
| 3990 | * (0x003c) so that we can use it with PEBS. | 
|---|
| 3991 | * | 
|---|
| 3992 | * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't | 
|---|
| 3993 | * PEBS capable. However we can use INST_RETIRED.PREC_DIST | 
|---|
| 3994 | * (0x01c0), which is a PEBS capable event, to get the same | 
|---|
| 3995 | * count. | 
|---|
| 3996 | * | 
|---|
| 3997 | * The PREC_DIST event has special support to minimize sample | 
|---|
| 3998 | * shadowing effects. One drawback is that it can only be | 
|---|
| 3999 | * programmed on counter 1, but that seems like an | 
|---|
| 4000 | * acceptable trade off. | 
|---|
| 4001 | */ | 
|---|
| 4002 | u64 alt_config = X86_CONFIG(.event=0xc0, .umask=0x01, .inv=1, .cmask=16); | 
|---|
| 4003 |  | 
|---|
| 4004 | alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK); | 
|---|
| 4005 | event->hw.config = alt_config; | 
|---|
| 4006 | } | 
|---|
| 4007 | } | 
|---|
| 4008 |  | 
|---|
| 4009 | static void intel_pebs_aliases_ivb(struct perf_event *event) | 
|---|
| 4010 | { | 
|---|
| 4011 | if (event->attr.precise_ip < 3) | 
|---|
| 4012 | return intel_pebs_aliases_snb(event); | 
|---|
| 4013 | return intel_pebs_aliases_precdist(event); | 
|---|
| 4014 | } | 
|---|
| 4015 |  | 
|---|
| 4016 | static void intel_pebs_aliases_skl(struct perf_event *event) | 
|---|
| 4017 | { | 
|---|
| 4018 | if (event->attr.precise_ip < 3) | 
|---|
| 4019 | return intel_pebs_aliases_core2(event); | 
|---|
| 4020 | return intel_pebs_aliases_precdist(event); | 
|---|
| 4021 | } | 
|---|
| 4022 |  | 
|---|
| 4023 | static unsigned long intel_pmu_large_pebs_flags(struct perf_event *event) | 
|---|
| 4024 | { | 
|---|
| 4025 | unsigned long flags = x86_pmu.large_pebs_flags; | 
|---|
| 4026 |  | 
|---|
| 4027 | if (event->attr.use_clockid) | 
|---|
| 4028 | flags &= ~PERF_SAMPLE_TIME; | 
|---|
| 4029 | if (!event->attr.exclude_kernel) | 
|---|
| 4030 | flags &= ~PERF_SAMPLE_REGS_USER; | 
|---|
| 4031 | if (event->attr.sample_regs_user & ~PEBS_GP_REGS) | 
|---|
| 4032 | flags &= ~(PERF_SAMPLE_REGS_USER | PERF_SAMPLE_REGS_INTR); | 
|---|
| 4033 | return flags; | 
|---|
| 4034 | } | 
|---|
| 4035 |  | 
|---|
| 4036 | static int intel_pmu_bts_config(struct perf_event *event) | 
|---|
| 4037 | { | 
|---|
| 4038 | struct perf_event_attr *attr = &event->attr; | 
|---|
| 4039 |  | 
|---|
| 4040 | if (unlikely(intel_pmu_has_bts(event))) { | 
|---|
| 4041 | /* BTS is not supported by this architecture. */ | 
|---|
| 4042 | if (!x86_pmu.bts_active) | 
|---|
| 4043 | return -EOPNOTSUPP; | 
|---|
| 4044 |  | 
|---|
| 4045 | /* BTS is currently only allowed for user-mode. */ | 
|---|
| 4046 | if (!attr->exclude_kernel) | 
|---|
| 4047 | return -EOPNOTSUPP; | 
|---|
| 4048 |  | 
|---|
| 4049 | /* BTS is not allowed for precise events. */ | 
|---|
| 4050 | if (attr->precise_ip) | 
|---|
| 4051 | return -EOPNOTSUPP; | 
|---|
| 4052 |  | 
|---|
| 4053 | /* disallow bts if conflicting events are present */ | 
|---|
| 4054 | if (x86_add_exclusive(x86_lbr_exclusive_lbr)) | 
|---|
| 4055 | return -EBUSY; | 
|---|
| 4056 |  | 
|---|
| 4057 | event->destroy = hw_perf_lbr_event_destroy; | 
|---|
| 4058 | } | 
|---|
| 4059 |  | 
|---|
| 4060 | return 0; | 
|---|
| 4061 | } | 
|---|
| 4062 |  | 
|---|
| 4063 | static int core_pmu_hw_config(struct perf_event *event) | 
|---|
| 4064 | { | 
|---|
| 4065 | int ret = x86_pmu_hw_config(event); | 
|---|
| 4066 |  | 
|---|
| 4067 | if (ret) | 
|---|
| 4068 | return ret; | 
|---|
| 4069 |  | 
|---|
| 4070 | return intel_pmu_bts_config(event); | 
|---|
| 4071 | } | 
|---|
| 4072 |  | 
|---|
| 4073 | #define INTEL_TD_METRIC_AVAILABLE_MAX	(INTEL_TD_METRIC_RETIRING + \ | 
|---|
| 4074 | ((x86_pmu.num_topdown_events - 1) << 8)) | 
|---|
| 4075 |  | 
|---|
| 4076 | static bool is_available_metric_event(struct perf_event *event) | 
|---|
| 4077 | { | 
|---|
| 4078 | return is_metric_event(event) && | 
|---|
| 4079 | event->attr.config <= INTEL_TD_METRIC_AVAILABLE_MAX; | 
|---|
| 4080 | } | 
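|---|
|  | /* | 
|---|
|  | * For instance, assuming INTEL_TD_METRIC_RETIRING is the 0x8000 | 
|---|
|  | * pseudo-encoding and the metrics are spaced 0x100 apart, a PMU with | 
|---|
|  | * num_topdown_events = 4 accepts the pseudo-encodings 0x8000-0x8300 | 
|---|
|  | * in the helper above. | 
|---|
|  | */ | 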
|---|
| 4081 |  | 
|---|
| 4082 | static inline bool is_mem_loads_event(struct perf_event *event) | 
|---|
| 4083 | { | 
|---|
| 4084 | return (event->attr.config & INTEL_ARCH_EVENT_MASK) == X86_CONFIG(.event=0xcd, .umask=0x01); | 
|---|
| 4085 | } | 
|---|
| 4086 |  | 
|---|
| 4087 | static inline bool is_mem_loads_aux_event(struct perf_event *event) | 
|---|
| 4088 | { | 
|---|
| 4089 | return (event->attr.config & INTEL_ARCH_EVENT_MASK) == X86_CONFIG(.event=0x03, .umask=0x82); | 
|---|
| 4090 | } | 
|---|
| 4091 |  | 
|---|
| 4092 | static inline bool require_mem_loads_aux_event(struct perf_event *event) | 
|---|
| 4093 | { | 
|---|
| 4094 | if (!(x86_pmu.flags & PMU_FL_MEM_LOADS_AUX)) | 
|---|
| 4095 | return false; | 
|---|
| 4096 |  | 
|---|
| 4097 | if (is_hybrid()) | 
|---|
| 4098 | return hybrid_pmu(event->pmu)->pmu_type == hybrid_big; | 
|---|
| 4099 |  | 
|---|
| 4100 | return true; | 
|---|
| 4101 | } | 
|---|
| 4102 |  | 
|---|
| 4103 | static inline bool intel_pmu_has_cap(struct perf_event *event, int idx) | 
|---|
| 4104 | { | 
|---|
| 4105 | union perf_capabilities *intel_cap = &hybrid(event->pmu, intel_cap); | 
|---|
| 4106 |  | 
|---|
| 4107 | return test_bit(idx, (unsigned long *)&intel_cap->capabilities); | 
|---|
| 4108 | } | 
|---|
| 4109 |  | 
|---|
| 4110 | static u64 intel_pmu_freq_start_period(struct perf_event *event) | 
|---|
| 4111 | { | 
|---|
| 4112 | int type = event->attr.type; | 
|---|
| 4113 | u64 config, factor; | 
|---|
| 4114 | s64 start; | 
|---|
| 4115 |  | 
|---|
| 4116 | /* | 
|---|
| 4117 | * The 127 is the lowest possible recommended SAV (sample after value) | 
|---|
| 4118 | * for a 4000 freq (default freq), according to the event list JSON file. | 
|---|
| 4119 | * Also, assume the workload is idle 50% of the time. | 
|---|
| 4120 | */ | 
|---|
| 4121 | factor = 64 * 4000; | 
|---|
| 4122 | if (type != PERF_TYPE_HARDWARE && type != PERF_TYPE_HW_CACHE) | 
|---|
| 4123 | goto end; | 
|---|
| 4124 |  | 
|---|
| 4125 | /* | 
|---|
| 4126 | * The estimation of the start period in the freq mode is | 
|---|
| 4127 | * based on the below assumption. | 
|---|
| 4128 | * | 
|---|
| 4129 | * For a cycles or an instructions event, assume a 1GHz | 
|---|
| 4130 | * underlying platform and 1 IPC. The workload is idle 50% of the time. | 
|---|
| 4131 | * The start period = 1,000,000,000 * 1 / freq / 2. | 
|---|
| 4132 | *		    = 500,000,000 / freq | 
|---|
| 4133 | * | 
|---|
| 4134 | * Usually, the branch-related events occur less often than the | 
|---|
| 4135 | * instructions event. According to the Intel event list JSON | 
|---|
| 4136 | * file, the SAV (sample after value) of a branch-related event | 
|---|
| 4137 | * is usually 1/4 of an instruction event. | 
|---|
| 4138 | * The start period of branch-related events = 125,000,000 / freq. | 
|---|
| 4139 | * | 
|---|
| 4140 | * The cache-related events occur even less often. The SAV is usually | 
|---|
| 4141 | * 1/20 of an instruction event. | 
|---|
| 4142 | * The start period of cache-related events = 25,000,000 / freq. | 
|---|
| 4143 | */ | 
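|---|
|  | /* | 
|---|
|  | * For example, at the default freq of 4000 the computation below gives | 
|---|
|  | * a cycles or instructions event a start period of | 
|---|
|  | * 500,000,000 / 4000 - 1 = 124,999, and a branch-related event | 
|---|
|  | * 125,000,000 / 4000 - 1 = 31,249. | 
|---|
|  | */ | 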
|---|
| 4144 | config = event->attr.config & PERF_HW_EVENT_MASK; | 
|---|
| 4145 | if (type == PERF_TYPE_HARDWARE) { | 
|---|
| 4146 | switch (config) { | 
|---|
| 4147 | case PERF_COUNT_HW_CPU_CYCLES: | 
|---|
| 4148 | case PERF_COUNT_HW_INSTRUCTIONS: | 
|---|
| 4149 | case PERF_COUNT_HW_BUS_CYCLES: | 
|---|
| 4150 | case PERF_COUNT_HW_STALLED_CYCLES_FRONTEND: | 
|---|
| 4151 | case PERF_COUNT_HW_STALLED_CYCLES_BACKEND: | 
|---|
| 4152 | case PERF_COUNT_HW_REF_CPU_CYCLES: | 
|---|
| 4153 | factor = 500000000; | 
|---|
| 4154 | break; | 
|---|
| 4155 | case PERF_COUNT_HW_BRANCH_INSTRUCTIONS: | 
|---|
| 4156 | case PERF_COUNT_HW_BRANCH_MISSES: | 
|---|
| 4157 | factor = 125000000; | 
|---|
| 4158 | break; | 
|---|
| 4159 | case PERF_COUNT_HW_CACHE_REFERENCES: | 
|---|
| 4160 | case PERF_COUNT_HW_CACHE_MISSES: | 
|---|
| 4161 | factor = 25000000; | 
|---|
| 4162 | break; | 
|---|
| 4163 | default: | 
|---|
| 4164 | goto end; | 
|---|
| 4165 | } | 
|---|
| 4166 | } | 
|---|
| 4167 |  | 
|---|
| 4168 | if (type == PERF_TYPE_HW_CACHE) | 
|---|
| 4169 | factor = 25000000; | 
|---|
| 4170 | end: | 
|---|
| 4171 | /* | 
|---|
| 4172 | * Usually, a prime or a number with less factors (close to prime) | 
|---|
| 4173 | * is chosen as an SAV, which makes it less likely that the sampling | 
|---|
| 4174 | * period synchronizes with some periodic event in the workload. | 
|---|
| 4175 | * Subtract 1 so that, at least for the default freq, values near | 
|---|
| 4176 | * powers of two are avoided. | 
|---|
| 4177 | */ | 
|---|
| 4178 | start = DIV_ROUND_UP_ULL(factor, event->attr.sample_freq) - 1; | 
|---|
| 4179 |  | 
|---|
| 4180 | if (start > x86_pmu.max_period) | 
|---|
| 4181 | start = x86_pmu.max_period; | 
|---|
| 4182 |  | 
|---|
| 4183 | if (x86_pmu.limit_period) | 
|---|
| 4184 | x86_pmu.limit_period(event, &start); | 
|---|
| 4185 |  | 
|---|
| 4186 | return start; | 
|---|
| 4187 | } | 
|---|
| 4188 |  | 
|---|
| 4189 | static inline bool intel_pmu_has_acr(struct pmu *pmu) | 
|---|
| 4190 | { | 
|---|
| 4191 | return !!hybrid(pmu, acr_cause_mask64); | 
|---|
| 4192 | } | 
|---|
| 4193 |  | 
|---|
| 4194 | static bool intel_pmu_is_acr_group(struct perf_event *event) | 
|---|
| 4195 | { | 
|---|
| 4196 | /* The group leader has the ACR flag set */ | 
|---|
| 4197 | if (is_acr_event_group(event)) | 
|---|
| 4198 | return true; | 
|---|
| 4199 |  | 
|---|
| 4200 | /* The acr_mask is set */ | 
|---|
| 4201 | if (event->attr.config2) | 
|---|
| 4202 | return true; | 
|---|
| 4203 |  | 
|---|
| 4204 | return false; | 
|---|
| 4205 | } | 
|---|
| 4206 |  | 
|---|
| 4207 | static inline void intel_pmu_set_acr_cntr_constr(struct perf_event *event, | 
|---|
| 4208 | u64 *cause_mask, int *num) | 
|---|
| 4209 | { | 
|---|
| 4210 | event->hw.dyn_constraint &= hybrid(event->pmu, acr_cntr_mask64); | 
|---|
| 4211 | *cause_mask |= event->attr.config2; | 
|---|
| 4212 | *num += 1; | 
|---|
| 4213 | } | 
|---|
| 4214 |  | 
|---|
| 4215 | static inline void intel_pmu_set_acr_caused_constr(struct perf_event *event, | 
|---|
| 4216 | int idx, u64 cause_mask) | 
|---|
| 4217 | { | 
|---|
| 4218 | if (test_bit(idx, (unsigned long *)&cause_mask)) | 
|---|
| 4219 | event->hw.dyn_constraint &= hybrid(event->pmu, acr_cause_mask64); | 
|---|
| 4220 | } | 
|---|
| 4221 |  | 
|---|
| 4222 | static int intel_pmu_hw_config(struct perf_event *event) | 
|---|
| 4223 | { | 
|---|
| 4224 | int ret = x86_pmu_hw_config(event); | 
|---|
| 4225 |  | 
|---|
| 4226 | if (ret) | 
|---|
| 4227 | return ret; | 
|---|
| 4228 |  | 
|---|
| 4229 | ret = intel_pmu_bts_config(event); | 
|---|
| 4230 | if (ret) | 
|---|
| 4231 | return ret; | 
|---|
| 4232 |  | 
|---|
| 4233 | if (event->attr.freq && event->attr.sample_freq) { | 
|---|
| 4234 | event->hw.sample_period = intel_pmu_freq_start_period(event); | 
|---|
| 4235 | event->hw.last_period = event->hw.sample_period; | 
|---|
| 4236 | local64_set(&event->hw.period_left, event->hw.sample_period); | 
|---|
| 4237 | } | 
|---|
| 4238 |  | 
|---|
| 4239 | if (event->attr.precise_ip) { | 
|---|
| 4240 | if ((event->attr.config & INTEL_ARCH_EVENT_MASK) == INTEL_FIXED_VLBR_EVENT) | 
|---|
| 4241 | return -EINVAL; | 
|---|
| 4242 |  | 
|---|
| 4243 | if (!(event->attr.freq || (event->attr.wakeup_events && !event->attr.watermark))) { | 
|---|
| 4244 | event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD; | 
|---|
| 4245 | if (!(event->attr.sample_type & ~intel_pmu_large_pebs_flags(event)) && | 
|---|
| 4246 | !has_aux_action(event)) { | 
|---|
| 4247 | event->hw.flags |= PERF_X86_EVENT_LARGE_PEBS; | 
|---|
| 4248 | event->attach_state |= PERF_ATTACH_SCHED_CB; | 
|---|
| 4249 | } | 
|---|
| 4250 | } | 
|---|
| 4251 | if (x86_pmu.pebs_aliases) | 
|---|
| 4252 | x86_pmu.pebs_aliases(event); | 
|---|
| 4253 | } | 
|---|
| 4254 |  | 
|---|
| 4255 | if (needs_branch_stack(event)) { | 
|---|
| 4256 | /* Avoid branch stack setup for counting events in SAMPLE READ */ | 
|---|
| 4257 | if (is_sampling_event(event) || | 
|---|
| 4258 | !(event->attr.sample_type & PERF_SAMPLE_READ)) | 
|---|
| 4259 | event->hw.flags |= PERF_X86_EVENT_NEEDS_BRANCH_STACK; | 
|---|
| 4260 | } | 
|---|
| 4261 |  | 
|---|
| 4262 | if (branch_sample_counters(event)) { | 
|---|
| 4263 | struct perf_event *leader, *sibling; | 
|---|
| 4264 | int num = 0; | 
|---|
| 4265 |  | 
|---|
| 4266 | if (!(x86_pmu.flags & PMU_FL_BR_CNTR) || | 
|---|
| 4267 | (event->attr.config & ~INTEL_ARCH_EVENT_MASK)) | 
|---|
| 4268 | return -EINVAL; | 
|---|
| 4269 |  | 
|---|
| 4270 | /* | 
|---|
| 4271 | * The branch counter logging is not supported in the call stack | 
|---|
| 4272 | * mode yet, since we cannot simply flush the LBR during e.g., | 
|---|
| 4273 | * multiplexing. Also, there is no obvious usage with the call | 
|---|
| 4274 | * stack mode. Simply forbid it for now. | 
|---|
| 4275 | * | 
|---|
| 4276 | * If any events in the group enable the branch counter logging | 
|---|
| 4277 | * feature, the group is treated as a branch counter logging | 
|---|
| 4278 | * group, which requires the extra space to store the counters. | 
|---|
| 4279 | */ | 
|---|
| 4280 | leader = event->group_leader; | 
|---|
| 4281 | if (branch_sample_call_stack(leader)) | 
|---|
| 4282 | return -EINVAL; | 
|---|
| 4283 | if (branch_sample_counters(leader)) { | 
|---|
| 4284 | num++; | 
|---|
| 4285 | leader->hw.dyn_constraint &= x86_pmu.lbr_counters; | 
|---|
| 4286 | } | 
|---|
| 4287 | leader->hw.flags |= PERF_X86_EVENT_BRANCH_COUNTERS; | 
|---|
| 4288 |  | 
|---|
| 4289 | for_each_sibling_event(sibling, leader) { | 
|---|
| 4290 | if (branch_sample_call_stack(sibling)) | 
|---|
| 4291 | return -EINVAL; | 
|---|
| 4292 | if (branch_sample_counters(sibling)) { | 
|---|
| 4293 | num++; | 
|---|
| 4294 | sibling->hw.dyn_constraint &= x86_pmu.lbr_counters; | 
|---|
| 4295 | } | 
|---|
| 4296 | } | 
|---|
| 4297 |  | 
|---|
| 4298 | if (num > fls(x86_pmu.lbr_counters)) | 
|---|
| 4299 | return -EINVAL; | 
|---|
| 4300 | /* | 
|---|
| 4301 | * Only applying the PERF_SAMPLE_BRANCH_COUNTERS doesn't | 
|---|
| 4302 | * require any branch stack setup. | 
|---|
| 4303 | * Clear the bit to avoid unnecessary branch stack setup. | 
|---|
| 4304 | */ | 
|---|
| 4305 | if (0 == (event->attr.branch_sample_type & | 
|---|
| 4306 | ~(PERF_SAMPLE_BRANCH_PLM_ALL | | 
|---|
| 4307 | PERF_SAMPLE_BRANCH_COUNTERS))) | 
|---|
| 4308 | event->hw.flags  &= ~PERF_X86_EVENT_NEEDS_BRANCH_STACK; | 
|---|
| 4309 |  | 
|---|
| 4310 | /* | 
|---|
| 4311 | * Force the leader to be a LBR event. So LBRs can be reset | 
|---|
| 4312 | * with the leader event. See intel_pmu_lbr_del() for details. | 
|---|
| 4313 | */ | 
|---|
| 4314 | if (!intel_pmu_needs_branch_stack(leader)) | 
|---|
| 4315 | return -EINVAL; | 
|---|
| 4316 | } | 
|---|
| 4317 |  | 
|---|
| 4318 | if (intel_pmu_needs_branch_stack(event)) { | 
|---|
| 4319 | ret = intel_pmu_setup_lbr_filter(event); | 
|---|
| 4320 | if (ret) | 
|---|
| 4321 | return ret; | 
|---|
| 4322 | event->attach_state |= PERF_ATTACH_SCHED_CB; | 
|---|
| 4323 |  | 
|---|
| 4324 | /* | 
|---|
| 4325 | * BTS is set up earlier in this path, so don't account twice | 
|---|
| 4326 | */ | 
|---|
| 4327 | if (!unlikely(intel_pmu_has_bts(event))) { | 
|---|
| 4328 | /* disallow lbr if conflicting events are present */ | 
|---|
| 4329 | if (x86_add_exclusive(x86_lbr_exclusive_lbr)) | 
|---|
| 4330 | return -EBUSY; | 
|---|
| 4331 |  | 
|---|
| 4332 | event->destroy = hw_perf_lbr_event_destroy; | 
|---|
| 4333 | } | 
|---|
| 4334 | } | 
|---|
| 4335 |  | 
|---|
| 4336 | if (event->attr.aux_output) { | 
|---|
| 4337 | if (!event->attr.precise_ip) | 
|---|
| 4338 | return -EINVAL; | 
|---|
| 4339 |  | 
|---|
| 4340 | event->hw.flags |= PERF_X86_EVENT_PEBS_VIA_PT; | 
|---|
| 4341 | } | 
|---|
| 4342 |  | 
|---|
| 4343 | if ((event->attr.sample_type & PERF_SAMPLE_READ) && | 
|---|
| 4344 | (x86_pmu.intel_cap.pebs_format >= 6) && | 
|---|
| 4345 | x86_pmu.intel_cap.pebs_baseline && | 
|---|
| 4346 | is_sampling_event(event) && | 
|---|
| 4347 | event->attr.precise_ip) | 
|---|
| 4348 | event->group_leader->hw.flags |= PERF_X86_EVENT_PEBS_CNTR; | 
|---|
| 4349 |  | 
|---|
| 4350 | if (intel_pmu_has_acr(event->pmu) && intel_pmu_is_acr_group(event)) { | 
|---|
| 4351 | struct perf_event *sibling, *leader = event->group_leader; | 
|---|
| 4352 | struct pmu *pmu = event->pmu; | 
|---|
| 4353 | bool has_sw_event = false; | 
|---|
| 4354 | int num = 0, idx = 0; | 
|---|
| 4355 | u64 cause_mask = 0; | 
|---|
| 4356 |  | 
|---|
| 4357 | /* Perf metrics events are not supported */ | 
|---|
| 4358 | if (is_metric_event(event)) | 
|---|
| 4359 | return -EINVAL; | 
|---|
| 4360 |  | 
|---|
| 4361 | /* Freq mode is not supported */ | 
|---|
| 4362 | if (event->attr.freq) | 
|---|
| 4363 | return -EINVAL; | 
|---|
| 4364 |  | 
|---|
| 4365 | /* PDist is not supported */ | 
|---|
| 4366 | if (event->attr.config2 && event->attr.precise_ip > 2) | 
|---|
| 4367 | return -EINVAL; | 
|---|
| 4368 |  | 
|---|
| 4369 | /* The reload value cannot exceed the max period */ | 
|---|
| 4370 | if (event->attr.sample_period > x86_pmu.max_period) | 
|---|
| 4371 | return -EINVAL; | 
|---|
| 4372 | /* | 
|---|
| 4373 | * The counter-constraints of each event cannot be finalized | 
|---|
| 4374 | * unless the whole group is scanned. However, it's hard | 
|---|
| 4375 | * to know whether the event is the last one of the group. | 
|---|
| 4376 | * Recalculate the counter-constraints for each event when | 
|---|
| 4377 | * adding a new event. | 
|---|
| 4378 | * | 
|---|
| 4379 | * The group is traversed twice, which may be optimized later. | 
|---|
| 4380 | * In the first round, | 
|---|
| 4381 | * - Find all events which do reload when other events | 
|---|
| 4382 | *   overflow and set the corresponding counter-constraints | 
|---|
| 4383 | * - Add all events that can cause other events to reload | 
|---|
| 4384 | *   to the cause_mask | 
|---|
| 4385 | * - Error out if the number of events exceeds the HW limit | 
|---|
| 4386 | * - The ACR events must be contiguous. | 
|---|
| 4387 | *   Error out if there are non-X86 events between ACR events. | 
|---|
| 4388 | *   This is not a HW limit, but a SW limit. | 
|---|
| 4389 | *   With this assumption, intel_pmu_acr_late_setup() can | 
|---|
| 4390 | *   easily convert the event idx to counter idx without | 
|---|
| 4391 | *   traversing the whole event list. | 
|---|
| 4392 | */ | 
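|---|
|  | /* | 
|---|
|  | * A hypothetical reading of the scan (illustrative, not from the | 
|---|
|  | * original source): in a two-event group where the sibling sets | 
|---|
|  | * config2 = 0x1, the sibling reloads when the leader overflows; bit 0 | 
|---|
|  | * ends up in cause_mask, so the second round applies the acr_cause | 
|---|
|  | * constraint to the leader (group index 0). | 
|---|
|  | */ | 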
|---|
| 4393 | if (!is_x86_event(leader)) | 
|---|
| 4394 | return -EINVAL; | 
|---|
| 4395 |  | 
|---|
| 4396 | if (leader->attr.config2) | 
|---|
| 4397 | intel_pmu_set_acr_cntr_constr(leader, &cause_mask, &num); | 
|---|
| 4398 |  | 
|---|
| 4399 | if (leader->nr_siblings) { | 
|---|
| 4400 | for_each_sibling_event(sibling, leader) { | 
|---|
| 4401 | if (!is_x86_event(sibling)) { | 
|---|
| 4402 | has_sw_event = true; | 
|---|
| 4403 | continue; | 
|---|
| 4404 | } | 
|---|
| 4405 | if (!sibling->attr.config2) | 
|---|
| 4406 | continue; | 
|---|
| 4407 | if (has_sw_event) | 
|---|
| 4408 | return -EINVAL; | 
|---|
| 4409 | intel_pmu_set_acr_cntr_constr(sibling, &cause_mask, &num); | 
|---|
| 4410 | } | 
|---|
| 4411 | } | 
|---|
| 4412 | if (leader != event && event->attr.config2) { | 
|---|
| 4413 | if (has_sw_event) | 
|---|
| 4414 | return -EINVAL; | 
|---|
| 4415 | intel_pmu_set_acr_cntr_constr(event, &cause_mask, &num); | 
|---|
| 4416 | } | 
|---|
| 4417 |  | 
|---|
| 4418 | if (hweight64(cause_mask) > hweight64(hybrid(pmu, acr_cause_mask64)) || | 
|---|
| 4419 | num > hweight64(hybrid(event->pmu, acr_cntr_mask64))) | 
|---|
| 4420 | return -EINVAL; | 
|---|
| 4421 | /* | 
|---|
| 4422 | * In the second round, apply the counter-constraints for | 
|---|
| 4423 | * the events that can cause other events to reload. | 
|---|
| 4424 | */ | 
|---|
| 4425 | intel_pmu_set_acr_caused_constr(leader, idx++, cause_mask); | 
|---|
| 4426 |  | 
|---|
| 4427 | if (leader->nr_siblings) { | 
|---|
| 4428 | for_each_sibling_event(sibling, leader) | 
|---|
| 4429 | intel_pmu_set_acr_caused_constr(sibling, idx++, cause_mask); | 
|---|
| 4430 | } | 
|---|
| 4431 |  | 
|---|
| 4432 | if (leader != event) | 
|---|
| 4433 | intel_pmu_set_acr_caused_constr(event, idx, cause_mask); | 
|---|
| 4434 |  | 
|---|
| 4435 | leader->hw.flags |= PERF_X86_EVENT_ACR; | 
|---|
| 4436 | } | 
|---|
| 4437 |  | 
|---|
| 4438 | if ((event->attr.type == PERF_TYPE_HARDWARE) || | 
|---|
| 4439 | (event->attr.type == PERF_TYPE_HW_CACHE)) | 
|---|
| 4440 | return 0; | 
|---|
| 4441 |  | 
|---|
| 4442 | /* | 
|---|
| 4443 | * Config Topdown slots and metric events | 
|---|
| 4444 | * | 
|---|
| 4445 | * The slots event on Fixed Counter 3 can support sampling, | 
|---|
| 4446 | * which will be handled normally in x86_perf_event_update(). | 
|---|
| 4447 | * | 
|---|
| 4448 | * Metric events don't support sampling and require being paired | 
|---|
| 4449 | * with a slots event as group leader. When the slots event | 
|---|
| 4450 | * is used in a metrics group, it too cannot support sampling. | 
|---|
| 4451 | */ | 
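|---|
|  | /* | 
|---|
|  | * In practice this means a grouping where the slots event leads, | 
|---|
|  | * e.g. the perf tool's '{slots,topdown-retiring}' (tool-side event | 
|---|
|  | * names assumed here for illustration; they are not defined in this | 
|---|
|  | * file). | 
|---|
|  | */ | 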
|---|
| 4452 | if (intel_pmu_has_cap(event, PERF_CAP_METRICS_IDX) && is_topdown_event(event)) { | 
|---|
| 4453 | /* The metrics_clear can only be set for the slots event */ | 
|---|
| 4454 | if (event->attr.config1 && | 
|---|
| 4455 | (!is_slots_event(event) || (event->attr.config1 & ~INTEL_TD_CFG_METRIC_CLEAR))) | 
|---|
| 4456 | return -EINVAL; | 
|---|
| 4457 |  | 
|---|
| 4458 | if (event->attr.config2) | 
|---|
| 4459 | return -EINVAL; | 
|---|
| 4460 |  | 
|---|
| 4461 | /* | 
|---|
| 4462 | * The TopDown metrics events and slots event don't | 
|---|
| 4463 | * support any filters. | 
|---|
| 4464 | */ | 
|---|
| 4465 | if (event->attr.config & X86_ALL_EVENT_FLAGS) | 
|---|
| 4466 | return -EINVAL; | 
|---|
| 4467 |  | 
|---|
| 4468 | if (is_available_metric_event(event)) { | 
|---|
| 4469 | struct perf_event *leader = event->group_leader; | 
|---|
| 4470 |  | 
|---|
| 4471 | /* The metric events don't support sampling. */ | 
|---|
| 4472 | if (is_sampling_event(event)) | 
|---|
| 4473 | return -EINVAL; | 
|---|
| 4474 |  | 
|---|
| 4475 | /* The metric events require a slots group leader. */ | 
|---|
| 4476 | if (!is_slots_event(leader)) | 
|---|
| 4477 | return -EINVAL; | 
|---|
| 4478 |  | 
|---|
| 4479 | /* | 
|---|
| 4480 | * The leader/SLOTS must not be a sampling event for | 
|---|
| 4481 | * metric use; hardware requires it starts at 0 when used | 
|---|
| 4482 | * in conjunction with MSR_PERF_METRICS. | 
|---|
| 4483 | */ | 
|---|
| 4484 | if (is_sampling_event(leader)) | 
|---|
| 4485 | return -EINVAL; | 
|---|
| 4486 |  | 
|---|
| 4487 | event->event_caps |= PERF_EV_CAP_SIBLING; | 
|---|
| 4488 | /* | 
|---|
| 4489 | * Only once we have a METRICs sibling do we | 
|---|
| 4490 | * need TopDown magic. | 
|---|
| 4491 | */ | 
|---|
| 4492 | leader->hw.flags |= PERF_X86_EVENT_TOPDOWN; | 
|---|
| 4493 | event->hw.flags  |= PERF_X86_EVENT_TOPDOWN; | 
|---|
| 4494 | } | 
|---|
| 4495 | } | 
|---|
| 4496 |  | 
|---|
| 4497 | /* | 
|---|
| 4498 | * The load latency event X86_CONFIG(.event=0xcd, .umask=0x01) on SPR | 
|---|
| 4499 | * doesn't function quite right. As a work-around it needs to always be | 
|---|
| 4500 | * co-scheduled with an auxiliary event X86_CONFIG(.event=0x03, .umask=0x82). | 
|---|
| 4501 | * The actual count of this second event is irrelevant; it just needs | 
|---|
| 4502 | * to be active to make the first event function correctly. | 
|---|
| 4503 | * | 
|---|
| 4504 | * In a group, the auxiliary event must be in front of the load latency | 
|---|
| 4505 | * event. This rule simplifies the implementation of the check, | 
|---|
| 4506 | * because perf does not have the complete group at this point. | 
|---|
| 4507 | */ | 
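|---|
|  | /* | 
|---|
|  | * The perf tool typically satisfies this with a group such as | 
|---|
|  | * '{mem-loads-aux,mem-loads}' (tool-side event names, assumed here | 
|---|
|  | * for illustration), with the auxiliary event listed first. | 
|---|
|  | */ | 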
|---|
| 4508 | if (require_mem_loads_aux_event(event) && | 
|---|
| 4509 | (event->attr.sample_type & PERF_SAMPLE_DATA_SRC) && | 
|---|
| 4510 | is_mem_loads_event(event)) { | 
|---|
| 4511 | struct perf_event *leader = event->group_leader; | 
|---|
| 4512 | struct perf_event *sibling = NULL; | 
|---|
| 4513 |  | 
|---|
| 4514 | /* | 
|---|
| 4515 | * When this memload event is also the first event (no group | 
|---|
| 4516 | * exists yet), then there is no aux event before it. | 
|---|
| 4517 | */ | 
|---|
| 4518 | if (leader == event) | 
|---|
| 4519 | return -ENODATA; | 
|---|
| 4520 |  | 
|---|
| 4521 | if (!is_mem_loads_aux_event(leader)) { | 
|---|
| 4522 | for_each_sibling_event(sibling, leader) { | 
|---|
| 4523 | if (is_mem_loads_aux_event(sibling)) | 
|---|
| 4524 | break; | 
|---|
| 4525 | } | 
|---|
| 4526 | if (list_entry_is_head(sibling, &leader->sibling_list, sibling_list)) | 
|---|
| 4527 | return -ENODATA; | 
|---|
| 4528 | } | 
|---|
| 4529 | } | 
|---|
| 4530 |  | 
|---|
| 4531 | if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY)) | 
|---|
| 4532 | return 0; | 
|---|
| 4533 |  | 
|---|
| 4534 | if (x86_pmu.version < 3) | 
|---|
| 4535 | return -EINVAL; | 
|---|
| 4536 |  | 
|---|
| 4537 | ret = perf_allow_cpu(); | 
|---|
| 4538 | if (ret) | 
|---|
| 4539 | return ret; | 
|---|
| 4540 |  | 
|---|
| 4541 | event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY; | 
|---|
| 4542 |  | 
|---|
| 4543 | return 0; | 
|---|
| 4544 | } | 
|---|
| 4545 |  | 
|---|
| 4546 | /* | 
|---|
| 4547 | * Currently, the only caller of this function is the atomic_switch_perf_msrs(). | 
|---|
| 4548 | * The host perf context helps to prepare the values of the real hardware for | 
|---|
| 4549 | * a set of msrs that need to be switched atomically in a vmx transaction. | 
|---|
| 4550 | * | 
|---|
| 4551 | * For example, the pseudocode needed to add a new msr should look like: | 
|---|
| 4552 | * | 
|---|
| 4553 | * arr[(*nr)++] = (struct perf_guest_switch_msr){ | 
|---|
| 4554 | *	.msr = the hardware msr address, | 
|---|
| 4555 | *	.host = the value the hardware has when it doesn't run a guest, | 
|---|
| 4556 | *	.guest = the value the hardware has when it runs a guest, | 
|---|
| 4557 | * }; | 
|---|
| 4558 | * | 
|---|
| 4559 | * These values have nothing to do with the emulated values the guest sees | 
|---|
| 4560 | * when it uses {RD,WR}MSR, which should be handled by the KVM context, | 
|---|
| 4561 | * specifically in the intel_pmu_{get,set}_msr(). | 
|---|
| 4562 | */ | 
|---|
| 4563 | static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr, void *data) | 
|---|
| 4564 | { | 
|---|
| 4565 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); | 
|---|
| 4566 | struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs; | 
|---|
| 4567 | struct kvm_pmu *kvm_pmu = (struct kvm_pmu *)data; | 
|---|
| 4568 | u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl); | 
|---|
| 4569 | u64 pebs_mask = cpuc->pebs_enabled & x86_pmu.pebs_capable; | 
|---|
| 4570 | int global_ctrl, pebs_enable; | 
|---|
| 4571 |  | 
|---|
| 4572 | /* | 
|---|
| 4573 | * In addition to obeying exclude_guest/exclude_host, remove bits being | 
|---|
| 4574 | * used for PEBS when running a guest, because PEBS writes to virtual | 
|---|
| 4575 | * addresses (not physical addresses). | 
|---|
| 4576 | */ | 
|---|
| 4577 | *nr = 0; | 
|---|
| 4578 | global_ctrl = (*nr)++; | 
|---|
| 4579 | arr[global_ctrl] = (struct perf_guest_switch_msr){ | 
|---|
| 4580 | .msr = MSR_CORE_PERF_GLOBAL_CTRL, | 
|---|
| 4581 | .host = intel_ctrl & ~cpuc->intel_ctrl_guest_mask, | 
|---|
| 4582 | .guest = intel_ctrl & ~cpuc->intel_ctrl_host_mask & ~pebs_mask, | 
|---|
| 4583 | }; | 
|---|
| 4584 |  | 
|---|
| 4585 | if (!x86_pmu.ds_pebs) | 
|---|
| 4586 | return arr; | 
|---|
| 4587 |  | 
|---|
| 4588 | /* | 
|---|
| 4589 | * If PMU counter has PEBS enabled it is not enough to | 
|---|
| 4590 | * disable counter on a guest entry since PEBS memory | 
|---|
| 4591 | * write can overshoot guest entry and corrupt guest | 
|---|
| 4592 | * memory. Disabling PEBS solves the problem. | 
|---|
| 4593 | * | 
|---|
| 4594 | * Don't do this if the CPU already enforces it. | 
|---|
| 4595 | */ | 
|---|
| 4596 | if (x86_pmu.pebs_no_isolation) { | 
|---|
| 4597 | arr[(*nr)++] = (struct perf_guest_switch_msr){ | 
|---|
| 4598 | .msr = MSR_IA32_PEBS_ENABLE, | 
|---|
| 4599 | .host = cpuc->pebs_enabled, | 
|---|
| 4600 | .guest = 0, | 
|---|
| 4601 | }; | 
|---|
| 4602 | return arr; | 
|---|
| 4603 | } | 
|---|
| 4604 |  | 
|---|
| 4605 | if (!kvm_pmu || !x86_pmu.pebs_ept) | 
|---|
| 4606 | return arr; | 
|---|
| 4607 |  | 
|---|
| 4608 | arr[(*nr)++] = (struct perf_guest_switch_msr){ | 
|---|
| 4609 | .msr = MSR_IA32_DS_AREA, | 
|---|
| 4610 | .host = (unsigned long)cpuc->ds, | 
|---|
| 4611 | .guest = kvm_pmu->ds_area, | 
|---|
| 4612 | }; | 
|---|
| 4613 |  | 
|---|
| 4614 | if (x86_pmu.intel_cap.pebs_baseline) { | 
|---|
| 4615 | arr[(*nr)++] = (struct perf_guest_switch_msr){ | 
|---|
| 4616 | .msr = MSR_PEBS_DATA_CFG, | 
|---|
| 4617 | .host = cpuc->active_pebs_data_cfg, | 
|---|
| 4618 | .guest = kvm_pmu->pebs_data_cfg, | 
|---|
| 4619 | }; | 
|---|
| 4620 | } | 
|---|
| 4621 |  | 
|---|
| 4622 | pebs_enable = (*nr)++; | 
|---|
| 4623 | arr[pebs_enable] = (struct perf_guest_switch_msr){ | 
|---|
| 4624 | .msr = MSR_IA32_PEBS_ENABLE, | 
|---|
| 4625 | .host = cpuc->pebs_enabled & ~cpuc->intel_ctrl_guest_mask, | 
|---|
| 4626 | .guest = pebs_mask & ~cpuc->intel_ctrl_host_mask & kvm_pmu->pebs_enable, | 
|---|
| 4627 | }; | 
|---|
| 4628 |  | 
|---|
| 4629 | if (arr[pebs_enable].host) { | 
|---|
| 4630 | /* Disable guest PEBS if host PEBS is enabled. */ | 
|---|
| 4631 | arr[pebs_enable].guest = 0; | 
|---|
| 4632 | } else { | 
|---|
| 4633 | /* Disable guest PEBS thoroughly for cross-mapped PEBS counters. */ | 
|---|
| 4634 | arr[pebs_enable].guest &= ~kvm_pmu->host_cross_mapped_mask; | 
|---|
| 4635 | arr[global_ctrl].guest &= ~kvm_pmu->host_cross_mapped_mask; | 
|---|
| 4636 | /* Set hw GLOBAL_CTRL bits for PEBS counter when it runs for guest */ | 
|---|
| 4637 | arr[global_ctrl].guest |= arr[pebs_enable].guest; | 
|---|
| 4638 | } | 
|---|
| 4639 |  | 
|---|
| 4640 | return arr; | 
|---|
| 4641 | } | 
|---|
| 4642 |  | 
|---|
| 4643 | static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr, void *data) | 
|---|
| 4644 | { | 
|---|
| 4645 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); | 
|---|
| 4646 | struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs; | 
|---|
| 4647 | int idx; | 
|---|
| 4648 |  | 
|---|
| 4649 | for_each_set_bit(idx, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) { | 
|---|
| 4650 | struct perf_event *event = cpuc->events[idx]; | 
|---|
| 4651 |  | 
|---|
| 4652 | arr[idx].msr = x86_pmu_config_addr(idx); | 
|---|
| 4653 | arr[idx].host = arr[idx].guest = 0; | 
|---|
| 4654 |  | 
|---|
| 4655 | if (!test_bit(idx, cpuc->active_mask)) | 
|---|
| 4656 | continue; | 
|---|
| 4657 |  | 
|---|
| 4658 | arr[idx].host = arr[idx].guest = | 
|---|
| 4659 | event->hw.config | ARCH_PERFMON_EVENTSEL_ENABLE; | 
|---|
| 4660 |  | 
|---|
| 4661 | if (event->attr.exclude_host) | 
|---|
| 4662 | arr[idx].host &= ~ARCH_PERFMON_EVENTSEL_ENABLE; | 
|---|
| 4663 | else if (event->attr.exclude_guest) | 
|---|
| 4664 | arr[idx].guest &= ~ARCH_PERFMON_EVENTSEL_ENABLE; | 
|---|
| 4665 | } | 
|---|
| 4666 |  | 
|---|
| 4667 | *nr = x86_pmu_max_num_counters(cpuc->pmu); | 
|---|
| 4668 | return arr; | 
|---|
| 4669 | } | 
|---|
| 4670 |  | 
|---|
| 4671 | static void core_pmu_enable_event(struct perf_event *event) | 
|---|
| 4672 | { | 
|---|
| 4673 | if (!event->attr.exclude_host) | 
|---|
| 4674 | x86_pmu_enable_event(event); | 
|---|
| 4675 | } | 
|---|
| 4676 |  | 
|---|
| 4677 | static void core_pmu_enable_all(int added) | 
|---|
| 4678 | { | 
|---|
| 4679 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); | 
|---|
| 4680 | int idx; | 
|---|
| 4681 |  | 
|---|
| 4682 | for_each_set_bit(idx, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) { | 
|---|
| 4683 | struct hw_perf_event *hwc = &cpuc->events[idx]->hw; | 
|---|
| 4684 |  | 
|---|
| 4685 | if (!test_bit(idx, cpuc->active_mask) || | 
|---|
| 4686 | cpuc->events[idx]->attr.exclude_host) | 
|---|
| 4687 | continue; | 
|---|
| 4688 |  | 
|---|
| 4689 | __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE); | 
|---|
| 4690 | } | 
|---|
| 4691 | } | 
|---|
| 4692 |  | 
|---|
| 4693 | static int hsw_hw_config(struct perf_event *event) | 
|---|
| 4694 | { | 
|---|
| 4695 | int ret = intel_pmu_hw_config(event); | 
|---|
| 4696 |  | 
|---|
| 4697 | if (ret) | 
|---|
| 4698 | return ret; | 
|---|
| 4699 | if (!boot_cpu_has(X86_FEATURE_RTM) && !boot_cpu_has(X86_FEATURE_HLE)) | 
|---|
| 4700 | return 0; | 
|---|
| 4701 | event->hw.config |= event->attr.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED); | 
|---|
| 4702 |  | 
|---|
| 4703 | /* | 
|---|
| 4704 | * IN_TX/IN_TX-CP filters are not supported by the Haswell PMU with | 
|---|
| 4705 | * PEBS or in ANY thread mode. Since the results are nonsensical, forbid | 
|---|
| 4706 | * this combination. | 
|---|
| 4707 | */ | 
|---|
| 4708 | if ((event->hw.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)) && | 
|---|
| 4709 | ((event->hw.config & ARCH_PERFMON_EVENTSEL_ANY) || | 
|---|
| 4710 | event->attr.precise_ip > 0)) | 
|---|
| 4711 | return -EOPNOTSUPP; | 
|---|
| 4712 |  | 
|---|
| 4713 | if (event_is_checkpointed(event)) { | 
|---|
| 4714 | /* | 
|---|
| 4715 | * Sampling of checkpointed events can cause situations where | 
|---|
| 4716 | * the CPU constantly aborts because of an overflow, which is | 
|---|
| 4717 | * then checkpointed back and ignored. Forbid checkpointing | 
|---|
| 4718 | * for sampling. | 
|---|
| 4719 | * | 
|---|
| 4720 | * But still allow a long sampling period, so that perf stat | 
|---|
| 4721 | * from KVM works. | 
|---|
| 4722 | */ | 
|---|
| 4723 | if (event->attr.sample_period > 0 && | 
|---|
| 4724 | event->attr.sample_period < 0x7fffffff) | 
|---|
| 4725 | return -EOPNOTSUPP; | 
|---|
| 4726 | } | 
|---|
| 4727 | return 0; | 
|---|
| 4728 | } | 
|---|
| 4729 |  | 
|---|
| 4730 | static struct event_constraint counter0_constraint = | 
|---|
| 4731 | INTEL_ALL_EVENT_CONSTRAINT(0, 0x1); | 
|---|
| 4732 |  | 
|---|
| 4733 | static struct event_constraint counter1_constraint = | 
|---|
| 4734 | INTEL_ALL_EVENT_CONSTRAINT(0, 0x2); | 
|---|
| 4735 |  | 
|---|
| 4736 | static struct event_constraint counter0_1_constraint = | 
|---|
| 4737 | INTEL_ALL_EVENT_CONSTRAINT(0, 0x3); | 
|---|
| 4738 |  | 
|---|
| 4739 | static struct event_constraint counter2_constraint = | 
|---|
| 4740 | EVENT_CONSTRAINT(0, 0x4, 0); | 
|---|
| 4741 |  | 
|---|
| 4742 | static struct event_constraint fixed0_constraint = | 
|---|
| 4743 | FIXED_EVENT_CONSTRAINT(0x00c0, 0); | 
|---|
| 4744 |  | 
|---|
| 4745 | static struct event_constraint fixed0_counter0_constraint = | 
|---|
| 4746 | INTEL_ALL_EVENT_CONSTRAINT(0, 0x100000001ULL); | 
|---|
| 4747 |  | 
|---|
| 4748 | static struct event_constraint fixed0_counter0_1_constraint = | 
|---|
| 4749 | INTEL_ALL_EVENT_CONSTRAINT(0, 0x100000003ULL); | 
|---|
| 4750 |  | 
|---|
| 4751 | static struct event_constraint counters_1_7_constraint = | 
|---|
| 4752 | INTEL_ALL_EVENT_CONSTRAINT(0, 0xfeULL); | 
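|---|
|  | /* | 
|---|
|  | * The idxmsk values above are counter bitmaps: bits 0-31 select GP | 
|---|
|  | * counters and bit 32 is fixed counter 0 (INTEL_PMC_IDX_FIXED), so | 
|---|
|  | * e.g. 0x100000003ULL means {fixed 0, GP 0, GP 1} and 0xfe means | 
|---|
|  | * GP counters 1-7. | 
|---|
|  | */ | 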
|---|
| 4753 |  | 
|---|
| 4754 | static struct event_constraint * | 
|---|
| 4755 | hsw_get_event_constraints(struct cpu_hw_events *cpuc, int idx, | 
|---|
| 4756 | struct perf_event *event) | 
|---|
| 4757 | { | 
|---|
| 4758 | struct event_constraint *c; | 
|---|
| 4759 |  | 
|---|
| 4760 | c = intel_get_event_constraints(cpuc, idx, event); | 
|---|
| 4761 |  | 
|---|
| 4762 | /* Handle special quirk on in_tx_checkpointed only in counter 2 */ | 
|---|
| 4763 | if (event->hw.config & HSW_IN_TX_CHECKPOINTED) { | 
|---|
| 4764 | if (c->idxmsk64 & (1U << 2)) | 
|---|
| 4765 | return &counter2_constraint; | 
|---|
| 4766 | return &emptyconstraint; | 
|---|
| 4767 | } | 
|---|
| 4768 |  | 
|---|
| 4769 | return c; | 
|---|
| 4770 | } | 
|---|
| 4771 |  | 
|---|
| 4772 | static struct event_constraint * | 
|---|
| 4773 | icl_get_event_constraints(struct cpu_hw_events *cpuc, int idx, | 
|---|
| 4774 | struct perf_event *event) | 
|---|
| 4775 | { | 
|---|
| 4776 | /* | 
|---|
| 4777 | * Fixed counter 0 has less skid. | 
|---|
| 4778 | * Force instruction:ppp in Fixed counter 0 | 
|---|
| 4779 | */ | 
|---|
| 4780 | if ((event->attr.precise_ip == 3) && | 
|---|
| 4781 | constraint_match(&fixed0_constraint, event->hw.config)) | 
|---|
| 4782 | return &fixed0_constraint; | 
|---|
| 4783 |  | 
|---|
| 4784 | return hsw_get_event_constraints(cpuc, idx, event); | 
|---|
| 4785 | } | 
|---|
| 4786 |  | 
|---|
| 4787 | static struct event_constraint * | 
|---|
| 4788 | glc_get_event_constraints(struct cpu_hw_events *cpuc, int idx, | 
|---|
| 4789 | struct perf_event *event) | 
|---|
| 4790 | { | 
|---|
| 4791 | struct event_constraint *c; | 
|---|
| 4792 |  | 
|---|
| 4793 | c = icl_get_event_constraints(cpuc, idx, event); | 
|---|
| 4794 |  | 
|---|
| 4795 | /* | 
|---|
| 4796 | * The :ppp indicates the Precise Distribution (PDist) facility, which | 
|---|
| 4797 | * is only supported on the GP counter 0. If a :ppp event is not | 
|---|
| 4798 | * available on the GP counter 0, error out. | 
|---|
| 4799 | * Exception: Instruction PDIR is only available on the fixed counter 0. | 
|---|
| 4800 | */ | 
|---|
| 4801 | if ((event->attr.precise_ip == 3) && | 
|---|
| 4802 | !constraint_match(&fixed0_constraint, event->hw.config)) { | 
|---|
| 4803 | if (c->idxmsk64 & BIT_ULL(0)) | 
|---|
| 4804 | return &counter0_constraint; | 
|---|
| 4805 |  | 
|---|
| 4806 | return &emptyconstraint; | 
|---|
| 4807 | } | 
|---|
| 4808 |  | 
|---|
| 4809 | return c; | 
|---|
| 4810 | } | 
|---|
| 4811 |  | 
|---|
| 4812 | static struct event_constraint * | 
|---|
| 4813 | glp_get_event_constraints(struct cpu_hw_events *cpuc, int idx, | 
|---|
| 4814 | struct perf_event *event) | 
|---|
| 4815 | { | 
|---|
| 4816 | struct event_constraint *c; | 
|---|
| 4817 |  | 
|---|
| 4818 | /* :ppp means to do reduced skid PEBS which is PMC0 only. */ | 
|---|
| 4819 | if (event->attr.precise_ip == 3) | 
|---|
| 4820 | return &counter0_constraint; | 
|---|
| 4821 |  | 
|---|
| 4822 | c = intel_get_event_constraints(cpuc, idx, event); | 
|---|
| 4823 |  | 
|---|
| 4824 | return c; | 
|---|
| 4825 | } | 
|---|
| 4826 |  | 
|---|
| 4827 | static struct event_constraint * | 
|---|
| 4828 | tnt_get_event_constraints(struct cpu_hw_events *cpuc, int idx, | 
|---|
| 4829 | struct perf_event *event) | 
|---|
| 4830 | { | 
|---|
| 4831 | struct event_constraint *c; | 
|---|
| 4832 |  | 
|---|
| 4833 | c = intel_get_event_constraints(cpuc, idx, event); | 
|---|
| 4834 |  | 
|---|
| 4835 | /* | 
|---|
| 4836 | * :ppp means to do reduced skid PEBS, | 
|---|
| 4837 | * which is available on PMC0 and fixed counter 0. | 
|---|
| 4838 | */ | 
|---|
| 4839 | if (event->attr.precise_ip == 3) { | 
|---|
| 4840 | /* Force instruction:ppp on PMC0 and Fixed counter 0 */ | 
|---|
| 4841 | if (constraint_match(&fixed0_constraint, event->hw.config)) | 
|---|
| 4842 | return &fixed0_counter0_constraint; | 
|---|
| 4843 |  | 
|---|
| 4844 | return &counter0_constraint; | 
|---|
| 4845 | } | 
|---|
| 4846 |  | 
|---|
| 4847 | return c; | 
|---|
| 4848 | } | 
|---|
| 4849 |  | 
|---|
| 4850 | static bool allow_tsx_force_abort = true; | 
|---|
| 4851 |  | 
|---|
| 4852 | static struct event_constraint * | 
|---|
| 4853 | tfa_get_event_constraints(struct cpu_hw_events *cpuc, int idx, | 
|---|
| 4854 | struct perf_event *event) | 
|---|
| 4855 | { | 
|---|
| 4856 | struct event_constraint *c = hsw_get_event_constraints(cpuc, idx, event); | 
|---|
| 4857 |  | 
|---|
| 4858 | /* | 
|---|
| 4859 | * Without TFA we must not use PMC3. | 
|---|
| 4860 | */ | 
|---|
| 4861 | if (!allow_tsx_force_abort && test_bit(3, c->idxmsk)) { | 
|---|
| 4862 | c = dyn_constraint(cpuc, c, idx); | 
|---|
| 4863 | c->idxmsk64 &= ~(1ULL << 3); | 
|---|
| 4864 | c->weight--; | 
|---|
| 4865 | } | 
|---|
| 4866 |  | 
|---|
| 4867 | return c; | 
|---|
| 4868 | } | 
|---|
| 4869 |  | 
|---|
| 4870 | static struct event_constraint * | 
|---|
| 4871 | adl_get_event_constraints(struct cpu_hw_events *cpuc, int idx, | 
|---|
| 4872 | struct perf_event *event) | 
|---|
| 4873 | { | 
|---|
| 4874 | struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu); | 
|---|
| 4875 |  | 
|---|
| 4876 | if (pmu->pmu_type == hybrid_big) | 
|---|
| 4877 | return glc_get_event_constraints(cpuc, idx, event); | 
|---|
| 4878 | else if (pmu->pmu_type == hybrid_small) | 
|---|
| 4879 | return tnt_get_event_constraints(cpuc, idx, event); | 
|---|
| 4880 |  | 
|---|
| 4881 | WARN_ON(1); | 
|---|
| 4882 | return &emptyconstraint; | 
|---|
| 4883 | } | 
|---|
| 4884 |  | 
|---|
| 4885 | static struct event_constraint * | 
|---|
| 4886 | cmt_get_event_constraints(struct cpu_hw_events *cpuc, int idx, | 
|---|
| 4887 | struct perf_event *event) | 
|---|
| 4888 | { | 
|---|
| 4889 | struct event_constraint *c; | 
|---|
| 4890 |  | 
|---|
| 4891 | c = intel_get_event_constraints(cpuc, idx, event); | 
|---|
| 4892 |  | 
|---|
| 4893 | /* | 
|---|
| 4894 | * The :ppp indicates the Precise Distribution (PDist) facility, which | 
|---|
| 4895 | * is only supported on the GP counter 0 & 1 and Fixed counter 0. | 
|---|
| 4896 | * If a :ppp event is not available on the above eligible counters, | 
|---|
| 4897 | * error out. | 
|---|
| 4898 | */ | 
|---|
| 4899 | if (event->attr.precise_ip == 3) { | 
|---|
| 4900 | /* Force instruction:ppp on PMC0, 1 and Fixed counter 0 */ | 
|---|
| 4901 | if (constraint_match(&fixed0_constraint, event->hw.config)) { | 
|---|
| 4902 | /* The fixed counter 0 doesn't support LBR event logging. */ | 
|---|
| 4903 | if (branch_sample_counters(event)) | 
|---|
| 4904 | return &counter0_1_constraint; | 
|---|
| 4905 | else | 
|---|
| 4906 | return &fixed0_counter0_1_constraint; | 
|---|
| 4907 | } | 
|---|
| 4908 |  | 
|---|
| 4909 | switch (c->idxmsk64 & 0x3ull) { | 
|---|
| 4910 | case 0x1: | 
|---|
| 4911 | return &counter0_constraint; | 
|---|
| 4912 | case 0x2: | 
|---|
| 4913 | return &counter1_constraint; | 
|---|
| 4914 | case 0x3: | 
|---|
| 4915 | return &counter0_1_constraint; | 
|---|
| 4916 | } | 
|---|
| 4917 | return &emptyconstraint; | 
|---|
| 4918 | } | 
|---|
| 4919 |  | 
|---|
| 4920 | return c; | 
|---|
| 4921 | } | 
|---|
| 4922 |  | 
|---|
| 4923 | static struct event_constraint * | 
|---|
| 4924 | rwc_get_event_constraints(struct cpu_hw_events *cpuc, int idx, | 
|---|
| 4925 | struct perf_event *event) | 
|---|
| 4926 | { | 
|---|
| 4927 | struct event_constraint *c; | 
|---|
| 4928 |  | 
|---|
| 4929 | c = glc_get_event_constraints(cpuc, idx, event); | 
|---|
| 4930 |  | 
|---|
| 4931 | /* The Retire Latency is not supported by the fixed counter 0. */ | 
|---|
| 4932 | if (event->attr.precise_ip && | 
|---|
| 4933 | (event->attr.sample_type & PERF_SAMPLE_WEIGHT_TYPE) && | 
|---|
| 4934 | constraint_match(&fixed0_constraint, event->hw.config)) { | 
|---|
| 4935 | /* | 
|---|
| 4936 | * The Instruction PDIR is only available | 
|---|
| 4937 | * on the fixed counter 0. Error out for this case. | 
|---|
| 4938 | */ | 
|---|
| 4939 | if (event->attr.precise_ip == 3) | 
|---|
| 4940 | return &emptyconstraint; | 
|---|
| 4941 | return &counters_1_7_constraint; | 
|---|
| 4942 | } | 
|---|
| 4943 |  | 
|---|
| 4944 | return c; | 
|---|
| 4945 | } | 
|---|
| 4946 |  | 
|---|
| 4947 | static struct event_constraint * | 
|---|
| 4948 | mtl_get_event_constraints(struct cpu_hw_events *cpuc, int idx, | 
|---|
| 4949 | struct perf_event *event) | 
|---|
| 4950 | { | 
|---|
| 4951 | struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu); | 
|---|
| 4952 |  | 
|---|
| 4953 | if (pmu->pmu_type == hybrid_big) | 
|---|
| 4954 | return rwc_get_event_constraints(cpuc, idx, event); | 
|---|
| 4955 | if (pmu->pmu_type == hybrid_small) | 
|---|
| 4956 | return cmt_get_event_constraints(cpuc, idx, event); | 
|---|
| 4957 |  | 
|---|
| 4958 | WARN_ON(1); | 
|---|
| 4959 | return &emptyconstraint; | 
|---|
| 4960 | } | 
|---|
| 4961 |  | 
|---|
| 4962 | static int adl_hw_config(struct perf_event *event) | 
|---|
| 4963 | { | 
|---|
| 4964 | struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu); | 
|---|
| 4965 |  | 
|---|
| 4966 | if (pmu->pmu_type == hybrid_big) | 
|---|
| 4967 | return hsw_hw_config(event); | 
|---|
| 4968 | else if (pmu->pmu_type == hybrid_small) | 
|---|
| 4969 | return intel_pmu_hw_config(event); | 
|---|
| 4970 |  | 
|---|
| 4971 | WARN_ON(1); | 
|---|
| 4972 | return -EOPNOTSUPP; | 
|---|
| 4973 | } | 
|---|
| 4974 |  | 
|---|
| 4975 | static enum intel_cpu_type adl_get_hybrid_cpu_type(void) | 
|---|
| 4976 | { | 
|---|
| 4977 | return INTEL_CPU_TYPE_CORE; | 
|---|
| 4978 | } | 
|---|
| 4979 |  | 
|---|
| 4980 | static inline bool erratum_hsw11(struct perf_event *event) | 
|---|
| 4981 | { | 
|---|
| 4982 | return (event->hw.config & INTEL_ARCH_EVENT_MASK) == | 
|---|
| 4983 | X86_CONFIG(.event=0xc0, .umask=0x01); | 
|---|
| 4984 | } | 
|---|
| 4985 |  | 
|---|
| 4986 | static struct event_constraint * | 
|---|
| 4987 | arl_h_get_event_constraints(struct cpu_hw_events *cpuc, int idx, | 
|---|
| 4988 | struct perf_event *event) | 
|---|
| 4989 | { | 
|---|
| 4990 | struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu); | 
|---|
| 4991 |  | 
|---|
| 4992 | if (pmu->pmu_type == hybrid_tiny) | 
|---|
| 4993 | return cmt_get_event_constraints(cpuc, idx, event); | 
|---|
| 4994 |  | 
|---|
| 4995 | return mtl_get_event_constraints(cpuc, idx, event); | 
|---|
| 4996 | } | 
|---|
| 4997 |  | 
|---|
| 4998 | static int arl_h_hw_config(struct perf_event *event) | 
|---|
| 4999 | { | 
|---|
| 5000 | struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu); | 
|---|
| 5001 |  | 
|---|
| 5002 | if (pmu->pmu_type == hybrid_tiny) | 
|---|
| 5003 | return intel_pmu_hw_config(event); | 
|---|
| 5004 |  | 
|---|
| 5005 | return adl_hw_config(event); | 
|---|
| 5006 | } | 
|---|
| 5007 |  | 
|---|
| 5008 | /* | 
|---|
| 5009 | * HSW11 requires a period larger than 100, the same constraint as BDM11. | 
|---|
| 5010 | * A minimum period of 128 is enforced as well for INST_RETIRED.ALL. | 
|---|
| 5011 | * | 
|---|
| 5012 | * The message 'interrupt took too long' can be observed on any counter which | 
|---|
| 5013 | * was armed with a period < 32 when two events expired in the same NMI. | 
|---|
| 5014 | * A minimum period of 32 is enforced for the rest of the events. | 
|---|
| 5015 | */ | 
|---|
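|  | /* | 
|---|
|  | * For example, a requested period of 100 on INST_RETIRED.ALL (event 0xc0, | 
|---|
|  | * umask 0x01, see erratum_hsw11()) is bumped to 128, while a period of 10 | 
|---|
|  | * on any other event is bumped to 32. | 
|---|
|  | */ | 
|---|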
| 5016 | static void hsw_limit_period(struct perf_event *event, s64 *left) | 
|---|
| 5017 | { | 
|---|
| 5018 | *left = max(*left, erratum_hsw11(event) ? 128 : 32); | 
|---|
| 5019 | } | 
|---|
| 5020 |  | 
|---|
| 5021 | /* | 
|---|
| 5022 | * Broadwell: | 
|---|
| 5023 | * | 
|---|
| 5024 | * The INST_RETIRED.ALL period always needs to have lowest 6 bits cleared | 
|---|
| 5025 | * (BDM55) and it must not use a period smaller than 100 (BDM11). We combine | 
|---|
| 5026 | * the two to enforce a minimum period of 128 (the smallest value that has bits | 
|---|
| 5027 | * 0-5 cleared and >= 100). | 
|---|
| 5028 | * | 
|---|
| 5029 | * Because of how the code in x86_perf_event_set_period() works, the truncation | 
|---|
| 5030 | * of the lower 6 bits is 'harmless' as we'll occasionally add a longer period | 
|---|
| 5031 | * to make up for the 'lost' events due to carrying the 'error' in period_left. | 
|---|
| 5032 | * | 
|---|
| 5033 | * Therefore the effective (average) period matches the requested period, | 
|---|
| 5034 | * despite coarser hardware granularity. | 
|---|
| 5035 | */ | 
|---|
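|  | /* | 
|---|
|  | * Worked example: a requested INST_RETIRED.ALL period of 100 becomes 128, | 
|---|
|  | * and 200 becomes 192 (200 & ~0x3f); events not hit by the erratum are | 
|---|
|  | * left untouched. | 
|---|
|  | */ | 
|---|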
| 5036 | static void bdw_limit_period(struct perf_event *event, s64 *left) | 
|---|
| 5037 | { | 
|---|
| 5038 | if (erratum_hsw11(event)) { | 
|---|
| 5039 | if (*left < 128) | 
|---|
| 5040 | *left = 128; | 
|---|
| 5041 | *left &= ~0x3fULL; | 
|---|
| 5042 | } | 
|---|
| 5043 | } | 
|---|
| 5044 |  | 
|---|
| 5045 | static void nhm_limit_period(struct perf_event *event, s64 *left) | 
|---|
| 5046 | { | 
|---|
| 5047 | *left = max(*left, 32LL); | 
|---|
| 5048 | } | 
|---|
| 5049 |  | 
|---|
| 5050 | static void glc_limit_period(struct perf_event *event, s64 *left) | 
|---|
| 5051 | { | 
|---|
| 5052 | if (event->attr.precise_ip == 3) | 
|---|
| 5053 | *left = max(*left, 128LL); | 
|---|
| 5054 | } | 
|---|
| 5055 |  | 
|---|
| 5056 | PMU_FORMAT_ATTR(event, "config:0-7"); | 
|---|
| 5057 | PMU_FORMAT_ATTR(umask, "config:8-15"); | 
|---|
| 5058 | PMU_FORMAT_ATTR(edge, "config:18"); | 
|---|
| 5059 | PMU_FORMAT_ATTR(pc, "config:19"); | 
|---|
| 5060 | PMU_FORMAT_ATTR(any, "config:21"); /* v3 + */ | 
|---|
| 5061 | PMU_FORMAT_ATTR(inv, "config:23"); | 
|---|
| 5062 | PMU_FORMAT_ATTR(cmask, "config:24-31"); | 
|---|
| 5063 | PMU_FORMAT_ATTR(in_tx, "config:32"); | 
|---|
| 5064 | PMU_FORMAT_ATTR(in_tx_cp, "config:33"); | 
|---|
| 5065 | PMU_FORMAT_ATTR(eq, "config:36"); /* v6 + */ | 
|---|
| 5066 |  | 
|---|
| 5067 | PMU_FORMAT_ATTR(metrics_clear, "config1:0"); /* PERF_CAPABILITIES.RDPMC_METRICS_CLEAR */ | 
|---|
| 5068 |  | 
|---|
| 5069 | static ssize_t umask2_show(struct device *dev, | 
|---|
| 5070 | struct device_attribute *attr, | 
|---|
| 5071 | char *page) | 
|---|
| 5072 | { | 
|---|
| 5073 | u64 mask = hybrid(dev_get_drvdata(dev), config_mask) & ARCH_PERFMON_EVENTSEL_UMASK2; | 
|---|
| 5074 |  | 
|---|
| 5075 | if (mask == ARCH_PERFMON_EVENTSEL_UMASK2) | 
|---|
| 5076 | return sprintf(page, "config:8-15,40-47\n"); | 
|---|
| 5077 |  | 
|---|
| 5078 | /* Roll back to the old format if umask2 is not supported. */ | 
|---|
| 5079 | return sprintf(page, "config:8-15\n"); | 
|---|
| 5080 | } | 
|---|
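|  | /* | 
|---|
|  | * With UMASK2 enumerated, user space sees "config:8-15,40-47", i.e. a 16-bit | 
|---|
|  | * umask whose upper byte lives in EVENTSEL bits 40-47; otherwise only the | 
|---|
|  | * classic 8-bit umask in bits 8-15 is exposed. | 
|---|
|  | */ | 
|---|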
| 5081 |  | 
|---|
| 5082 | static struct device_attribute format_attr_umask2  = | 
|---|
| 5083 | __ATTR(umask, 0444, umask2_show, NULL); | 
|---|
| 5084 |  | 
|---|
| 5085 | static struct attribute *format_evtsel_ext_attrs[] = { | 
|---|
| 5086 | &format_attr_umask2.attr, | 
|---|
| 5087 | &format_attr_eq.attr, | 
|---|
| 5088 | &format_attr_metrics_clear.attr, | 
|---|
| 5089 | NULL | 
|---|
| 5090 | }; | 
|---|
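|  | /* evtsel_ext_is_visible() depends on this order: 0 = umask2, 1 = eq, 2 = metrics_clear. */ | 
|---|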
| 5091 |  | 
|---|
| 5092 | static umode_t | 
|---|
| 5093 | evtsel_ext_is_visible(struct kobject *kobj, struct attribute *attr, int i) | 
|---|
| 5094 | { | 
|---|
| 5095 | struct device *dev = kobj_to_dev(kobj); | 
|---|
| 5096 | u64 mask; | 
|---|
| 5097 |  | 
|---|
| 5098 | /* | 
|---|
| 5099 | * The umask and umask2 have different formats but share the | 
|---|
| 5100 | * same attr name. In update mode, the previous value of the | 
|---|
| 5101 | * umask is unconditionally removed before is_visible. If | 
|---|
| 5102 | * umask2 format is not enumerated, it's impossible to roll | 
|---|
| 5103 | * back to the old format. | 
|---|
| 5104 | * Do the check in umask2_show() rather than in is_visible(). | 
|---|
| 5105 | */ | 
|---|
| 5106 | if (i == 0) | 
|---|
| 5107 | return attr->mode; | 
|---|
| 5108 |  | 
|---|
| 5109 | mask = hybrid(dev_get_drvdata(dev), config_mask); | 
|---|
| 5110 | if (i == 1) | 
|---|
| 5111 | return (mask & ARCH_PERFMON_EVENTSEL_EQ) ? attr->mode : 0; | 
|---|
| 5112 |  | 
|---|
| 5113 | /* PERF_CAPABILITIES.RDPMC_METRICS_CLEAR */ | 
|---|
| 5114 | if (i == 2) { | 
|---|
| 5115 | union perf_capabilities intel_cap = hybrid(dev_get_drvdata(dev), intel_cap); | 
|---|
| 5116 |  | 
|---|
| 5117 | return intel_cap.rdpmc_metrics_clear ? attr->mode : 0; | 
|---|
| 5118 | } | 
|---|
| 5119 |  | 
|---|
| 5120 | return 0; | 
|---|
| 5121 | } | 
|---|
| 5122 |  | 
|---|
| 5123 | static struct attribute *intel_arch_formats_attr[] = { | 
|---|
| 5124 | &format_attr_event.attr, | 
|---|
| 5125 | &format_attr_umask.attr, | 
|---|
| 5126 | &format_attr_edge.attr, | 
|---|
| 5127 | &format_attr_pc.attr, | 
|---|
| 5128 | &format_attr_inv.attr, | 
|---|
| 5129 | &format_attr_cmask.attr, | 
|---|
| 5130 | NULL, | 
|---|
| 5131 | }; | 
|---|
| 5132 |  | 
|---|
| 5133 | ssize_t intel_event_sysfs_show(char *page, u64 config) | 
|---|
| 5134 | { | 
|---|
| 5135 | u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT); | 
|---|
| 5136 |  | 
|---|
| 5137 | return x86_event_sysfs_show(page, config, event); | 
|---|
| 5138 | } | 
|---|
| 5139 |  | 
|---|
| 5140 | static struct intel_shared_regs *allocate_shared_regs(int cpu) | 
|---|
| 5141 | { | 
|---|
| 5142 | struct intel_shared_regs *regs; | 
|---|
| 5143 | int i; | 
|---|
| 5144 |  | 
|---|
| 5145 | regs = kzalloc_node(sizeof(struct intel_shared_regs), | 
|---|
| 5146 | GFP_KERNEL, cpu_to_node(cpu)); | 
|---|
| 5147 | if (regs) { | 
|---|
| 5148 | /* | 
|---|
| 5149 | * initialize the locks to keep lockdep happy | 
|---|
| 5150 | */ | 
|---|
| 5151 | for (i = 0; i < EXTRA_REG_MAX; i++) | 
|---|
| 5152 | raw_spin_lock_init(®s->regs[i].lock); | 
|---|
| 5153 |  | 
|---|
| 5154 | regs->core_id = -1; | 
|---|
| 5155 | } | 
|---|
| 5156 | return regs; | 
|---|
| 5157 | } | 
|---|
| 5158 |  | 
|---|
| 5159 | static struct intel_excl_cntrs *allocate_excl_cntrs(int cpu) | 
|---|
| 5160 | { | 
|---|
| 5161 | struct intel_excl_cntrs *c; | 
|---|
| 5162 |  | 
|---|
| 5163 | c = kzalloc_node(sizeof(struct intel_excl_cntrs), | 
|---|
| 5164 | GFP_KERNEL, cpu_to_node(cpu)); | 
|---|
| 5165 | if (c) { | 
|---|
| 5166 | raw_spin_lock_init(&c->lock); | 
|---|
| 5167 | c->core_id = -1; | 
|---|
| 5168 | } | 
|---|
| 5169 | return c; | 
|---|
| 5170 | } | 
|---|
| 5171 |  | 
|---|
| 5172 |  | 
|---|
| 5173 | int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu) | 
|---|
| 5174 | { | 
|---|
| 5175 | cpuc->pebs_record_size = x86_pmu.pebs_record_size; | 
|---|
| 5176 |  | 
|---|
| 5177 | if (is_hybrid() || x86_pmu.extra_regs || x86_pmu.lbr_sel_map) { | 
|---|
| 5178 | cpuc->shared_regs = allocate_shared_regs(cpu); | 
|---|
| 5179 | if (!cpuc->shared_regs) | 
|---|
| 5180 | goto err; | 
|---|
| 5181 | } | 
|---|
| 5182 |  | 
|---|
| 5183 | if (x86_pmu.flags & (PMU_FL_EXCL_CNTRS | PMU_FL_TFA | PMU_FL_DYN_CONSTRAINT)) { | 
|---|
| 5184 | size_t sz = X86_PMC_IDX_MAX * sizeof(struct event_constraint); | 
|---|
| 5185 |  | 
|---|
| 5186 | cpuc->constraint_list = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu)); | 
|---|
| 5187 | if (!cpuc->constraint_list) | 
|---|
| 5188 | goto err_shared_regs; | 
|---|
| 5189 | } | 
|---|
| 5190 |  | 
|---|
| 5191 | if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) { | 
|---|
| 5192 | cpuc->excl_cntrs = allocate_excl_cntrs(cpu); | 
|---|
| 5193 | if (!cpuc->excl_cntrs) | 
|---|
| 5194 | goto err_constraint_list; | 
|---|
| 5195 |  | 
|---|
| 5196 | cpuc->excl_thread_id = 0; | 
|---|
| 5197 | } | 
|---|
| 5198 |  | 
|---|
| 5199 | return 0; | 
|---|
| 5200 |  | 
|---|
| 5201 | err_constraint_list: | 
|---|
| 5202 | kfree(cpuc->constraint_list); | 
|---|
| 5203 | cpuc->constraint_list = NULL; | 
|---|
| 5204 |  | 
|---|
| 5205 | err_shared_regs: | 
|---|
| 5206 | kfree(cpuc->shared_regs); | 
|---|
| 5207 | cpuc->shared_regs = NULL; | 
|---|
| 5208 |  | 
|---|
| 5209 | err: | 
|---|
| 5210 | return -ENOMEM; | 
|---|
| 5211 | } | 
|---|
| 5212 |  | 
|---|
| 5213 | static int intel_pmu_cpu_prepare(int cpu) | 
|---|
| 5214 | { | 
|---|
| 5215 | return intel_cpuc_prepare(&per_cpu(cpu_hw_events, cpu), cpu); | 
|---|
| 5216 | } | 
|---|
| 5217 |  | 
|---|
| 5218 | static void flip_smm_bit(void *data) | 
|---|
| 5219 | { | 
|---|
| 5220 | unsigned long set = *(unsigned long *)data; | 
|---|
| 5221 |  | 
|---|
| 5222 | if (set > 0) { | 
|---|
| 5223 | msr_set_bit(MSR_IA32_DEBUGCTLMSR, | 
|---|
| 5224 | DEBUGCTLMSR_FREEZE_IN_SMM_BIT); | 
|---|
| 5225 | } else { | 
|---|
| 5226 | msr_clear_bit(MSR_IA32_DEBUGCTLMSR, | 
|---|
| 5227 | DEBUGCTLMSR_FREEZE_IN_SMM_BIT); | 
|---|
| 5228 | } | 
|---|
| 5229 | } | 
|---|
| 5230 |  | 
|---|
| 5231 | static void intel_pmu_check_counters_mask(u64 *cntr_mask, | 
|---|
| 5232 | u64 *fixed_cntr_mask, | 
|---|
| 5233 | u64 *intel_ctrl) | 
|---|
| 5234 | { | 
|---|
| 5235 | unsigned int bit; | 
|---|
| 5236 |  | 
|---|
| 5237 | bit = fls64(*cntr_mask); | 
|---|
| 5238 | if (bit > INTEL_PMC_MAX_GENERIC) { | 
|---|
| 5239 | WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!", | 
|---|
| 5240 | bit, INTEL_PMC_MAX_GENERIC); | 
|---|
| 5241 | *cntr_mask &= GENMASK_ULL(INTEL_PMC_MAX_GENERIC - 1, 0); | 
|---|
| 5242 | } | 
|---|
| 5243 | *intel_ctrl = *cntr_mask; | 
|---|
| 5244 |  | 
|---|
| 5245 | bit = fls64(*fixed_cntr_mask); | 
|---|
| 5246 | if (bit > INTEL_PMC_MAX_FIXED) { | 
|---|
| 5247 | WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!", | 
|---|
| 5248 | bit, INTEL_PMC_MAX_FIXED); | 
|---|
| 5249 | *fixed_cntr_mask &= GENMASK_ULL(INTEL_PMC_MAX_FIXED - 1, 0); | 
|---|
| 5250 | } | 
|---|
| 5251 |  | 
|---|
| 5252 | *intel_ctrl |= *fixed_cntr_mask << INTEL_PMC_IDX_FIXED; | 
|---|
| 5253 | } | 
|---|
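|  | /* | 
|---|
|  | * Example: with 8 GP counters (*cntr_mask == 0xff) and 4 fixed counters | 
|---|
|  | * (*fixed_cntr_mask == 0xf), *intel_ctrl ends up as 0xf000000ff, since the | 
|---|
|  | * fixed-counter enable bits start at INTEL_PMC_IDX_FIXED (32). | 
|---|
|  | */ | 
|---|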
| 5254 |  | 
|---|
| 5255 | static void intel_pmu_check_event_constraints(struct event_constraint *event_constraints, | 
|---|
| 5256 | u64 cntr_mask, | 
|---|
| 5257 | u64 fixed_cntr_mask, | 
|---|
| 5258 | u64 intel_ctrl); | 
|---|
| 5259 |  | 
|---|
| 5260 | static void intel_pmu_check_extra_regs(struct extra_reg *); | 
|---|
| 5261 |  | 
|---|
| 5262 | static inline bool intel_pmu_broken_perf_cap(void) | 
|---|
| 5263 | { | 
|---|
| 5264 | /* The Perf Metric (Bit 15) is always cleared */ | 
|---|
| 5265 | if (boot_cpu_data.x86_vfm == INTEL_METEORLAKE || | 
|---|
| 5266 | boot_cpu_data.x86_vfm == INTEL_METEORLAKE_L) | 
|---|
| 5267 | return true; | 
|---|
| 5268 |  | 
|---|
| 5269 | return false; | 
|---|
| 5270 | } | 
|---|
| 5271 |  | 
|---|
| 5272 | static void update_pmu_cap(struct pmu *pmu) | 
|---|
| 5273 | { | 
|---|
| 5274 | unsigned int cntr, fixed_cntr, ecx, edx; | 
|---|
| 5275 | union cpuid35_eax eax; | 
|---|
| 5276 | union cpuid35_ebx ebx; | 
|---|
| 5277 |  | 
|---|
| 5278 | cpuid(ARCH_PERFMON_EXT_LEAF, &eax.full, &ebx.full, &ecx, &edx); | 
|---|
| 5279 |  | 
|---|
| 5280 | if (ebx.split.umask2) | 
|---|
| 5281 | hybrid(pmu, config_mask) |= ARCH_PERFMON_EVENTSEL_UMASK2; | 
|---|
| 5282 | if (ebx.split.eq) | 
|---|
| 5283 | hybrid(pmu, config_mask) |= ARCH_PERFMON_EVENTSEL_EQ; | 
|---|
| 5284 |  | 
|---|
| 5285 | if (eax.split.cntr_subleaf) { | 
|---|
| 5286 | cpuid_count(ARCH_PERFMON_EXT_LEAF, ARCH_PERFMON_NUM_COUNTER_LEAF, | 
|---|
| 5287 | &cntr, &fixed_cntr, &ecx, &edx); | 
|---|
| 5288 | hybrid(pmu, cntr_mask64) = cntr; | 
|---|
| 5289 | hybrid(pmu, fixed_cntr_mask64) = fixed_cntr; | 
|---|
| 5290 | } | 
|---|
| 5291 |  | 
|---|
| 5292 | if (eax.split.acr_subleaf) { | 
|---|
| 5293 | cpuid_count(ARCH_PERFMON_EXT_LEAF, ARCH_PERFMON_ACR_LEAF, | 
|---|
| 5294 | &cntr, &fixed_cntr, &ecx, &edx); | 
|---|
| 5295 | /* The mask of the counters which can be reloaded */ | 
|---|
| 5296 | hybrid(pmu, acr_cntr_mask64) = cntr | ((u64)fixed_cntr << INTEL_PMC_IDX_FIXED); | 
|---|
| 5297 |  | 
|---|
| 5298 | /* The mask of the counters which can cause a reload of reloadable counters */ | 
|---|
| 5299 | hybrid(pmu, acr_cause_mask64) = ecx | ((u64)edx << INTEL_PMC_IDX_FIXED); | 
|---|
| 5300 | } | 
|---|
| 5301 |  | 
|---|
| 5302 | if (!intel_pmu_broken_perf_cap()) { | 
|---|
| 5303 | /* Perf Metric (Bit 15) and PEBS via PT (Bit 16) are hybrid enumeration */ | 
|---|
| 5304 | rdmsrq(MSR_IA32_PERF_CAPABILITIES, hybrid(pmu, intel_cap).capabilities); | 
|---|
| 5305 | } | 
|---|
| 5306 | } | 
|---|
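|  | /* | 
|---|
|  | * Illustrative example (values are hypothetical): if the ACR sub-leaf reports | 
|---|
|  | * GP counters 0-3 as reloadable (cntr == 0xf) plus fixed counter 0 | 
|---|
|  | * (fixed_cntr == 0x1), acr_cntr_mask64 becomes 0x10000000f. | 
|---|
|  | */ | 
|---|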
| 5307 |  | 
|---|
| 5308 | static void intel_pmu_check_hybrid_pmus(struct x86_hybrid_pmu *pmu) | 
|---|
| 5309 | { | 
|---|
| 5310 | intel_pmu_check_counters_mask(&pmu->cntr_mask64, &pmu->fixed_cntr_mask64, | 
|---|
| 5311 | &pmu->intel_ctrl); | 
|---|
| 5312 | pmu->pebs_events_mask = intel_pmu_pebs_mask(pmu->cntr_mask64); | 
|---|
| 5313 | pmu->unconstrained = (struct event_constraint) | 
|---|
| 5314 | __EVENT_CONSTRAINT(0, pmu->cntr_mask64, | 
|---|
| 5315 | 0, x86_pmu_num_counters(&pmu->pmu), 0, 0); | 
|---|
| 5316 |  | 
|---|
| 5317 | if (pmu->intel_cap.perf_metrics) | 
|---|
| 5318 | pmu->intel_ctrl |= GLOBAL_CTRL_EN_PERF_METRICS; | 
|---|
| 5319 | else | 
|---|
| 5320 | pmu->intel_ctrl &= ~GLOBAL_CTRL_EN_PERF_METRICS; | 
|---|
| 5321 |  | 
|---|
| 5322 | intel_pmu_check_event_constraints(pmu->event_constraints, | 
|---|
| 5323 | pmu->cntr_mask64, | 
|---|
| 5324 | pmu->fixed_cntr_mask64, | 
|---|
| 5325 | pmu->intel_ctrl); | 
|---|
| 5326 |  | 
|---|
| 5327 | intel_pmu_check_extra_regs(pmu->extra_regs); | 
|---|
| 5328 | } | 
|---|
| 5329 |  | 
|---|
| 5330 | static struct x86_hybrid_pmu *find_hybrid_pmu_for_cpu(void) | 
|---|
| 5331 | { | 
|---|
| 5332 | struct cpuinfo_x86 *c = &cpu_data(smp_processor_id()); | 
|---|
| 5333 | enum intel_cpu_type cpu_type = c->topo.intel_type; | 
|---|
| 5334 | int i; | 
|---|
| 5335 |  | 
|---|
| 5336 | /* | 
|---|
| 5337 | * This is running on a CPU model that is known to have hybrid | 
|---|
| 5338 | * configurations. But the CPU told us it is not hybrid, shame | 
|---|
| 5339 | * on it. There should be a fixup function provided for these | 
|---|
| 5340 | * troublesome CPUs (->get_hybrid_cpu_type). | 
|---|
| 5341 | */ | 
|---|
| 5342 | if (cpu_type == INTEL_CPU_TYPE_UNKNOWN) { | 
|---|
| 5343 | if (x86_pmu.get_hybrid_cpu_type) | 
|---|
| 5344 | cpu_type = x86_pmu.get_hybrid_cpu_type(); | 
|---|
| 5345 | else | 
|---|
| 5346 | return NULL; | 
|---|
| 5347 | } | 
|---|
| 5348 |  | 
|---|
| 5349 | /* | 
|---|
| 5350 | * This essentially just maps between the 'hybrid_cpu_type' | 
|---|
| 5351 | * and 'hybrid_pmu_type' enums except for ARL-H processor | 
|---|
| 5352 | * which needs to compare atom uarch native id since ARL-H | 
|---|
| 5353 | * contains two different atom uarchs. | 
|---|
| 5354 | */ | 
|---|
| 5355 | for (i = 0; i < x86_pmu.num_hybrid_pmus; i++) { | 
|---|
| 5356 | enum hybrid_pmu_type pmu_type = x86_pmu.hybrid_pmu[i].pmu_type; | 
|---|
| 5357 | u32 native_id; | 
|---|
| 5358 |  | 
|---|
| 5359 | if (cpu_type == INTEL_CPU_TYPE_CORE && pmu_type == hybrid_big) | 
|---|
| 5360 | return &x86_pmu.hybrid_pmu[i]; | 
|---|
| 5361 | if (cpu_type == INTEL_CPU_TYPE_ATOM) { | 
|---|
| 5362 | if (x86_pmu.num_hybrid_pmus == 2 && pmu_type == hybrid_small) | 
|---|
| 5363 | return &x86_pmu.hybrid_pmu[i]; | 
|---|
| 5364 |  | 
|---|
| 5365 | native_id = c->topo.intel_native_model_id; | 
|---|
| 5366 | if (native_id == INTEL_ATOM_SKT_NATIVE_ID && pmu_type == hybrid_small) | 
|---|
| 5367 | return &x86_pmu.hybrid_pmu[i]; | 
|---|
| 5368 | if (native_id == INTEL_ATOM_CMT_NATIVE_ID && pmu_type == hybrid_tiny) | 
|---|
| 5369 | return &x86_pmu.hybrid_pmu[i]; | 
|---|
| 5370 | } | 
|---|
| 5371 | } | 
|---|
| 5372 |  | 
|---|
| 5373 | return NULL; | 
|---|
| 5374 | } | 
|---|
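|  | /* | 
|---|
|  | * On ARL-H, for instance, an atom core reporting the Skymont native id | 
|---|
|  | * (INTEL_ATOM_SKT_NATIVE_ID) lands on the hybrid_small PMU, while a Crestmont | 
|---|
|  | * core (INTEL_ATOM_CMT_NATIVE_ID) lands on hybrid_tiny. | 
|---|
|  | */ | 
|---|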
| 5375 |  | 
|---|
| 5376 | static bool init_hybrid_pmu(int cpu) | 
|---|
| 5377 | { | 
|---|
| 5378 | struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); | 
|---|
| 5379 | struct x86_hybrid_pmu *pmu = find_hybrid_pmu_for_cpu(); | 
|---|
| 5380 |  | 
|---|
| 5381 | if (WARN_ON_ONCE(!pmu || (pmu->pmu.type == -1))) { | 
|---|
| 5382 | cpuc->pmu = NULL; | 
|---|
| 5383 | return false; | 
|---|
| 5384 | } | 
|---|
| 5385 |  | 
|---|
| 5386 | /* Only check and dump the PMU information for the first CPU */ | 
|---|
| 5387 | if (!cpumask_empty(&pmu->supported_cpus)) | 
|---|
| 5388 | goto end; | 
|---|
| 5389 |  | 
|---|
| 5390 | if (this_cpu_has(X86_FEATURE_ARCH_PERFMON_EXT)) | 
|---|
| 5391 | update_pmu_cap(&pmu->pmu); | 
|---|
| 5392 |  | 
|---|
| 5393 | intel_pmu_check_hybrid_pmus(pmu); | 
|---|
| 5394 |  | 
|---|
| 5395 | if (!check_hw_exists(&pmu->pmu, pmu->cntr_mask, pmu->fixed_cntr_mask)) | 
|---|
| 5396 | return false; | 
|---|
| 5397 |  | 
|---|
| 5398 | pr_info("%s PMU driver: ", pmu->name); | 
|---|
| 5399 |  | 
|---|
| 5400 | pr_cont("\n"); | 
|---|
| 5401 |  | 
|---|
| 5402 | x86_pmu_show_pmu_cap(&pmu->pmu); | 
|---|
| 5403 |  | 
|---|
| 5404 | end: | 
|---|
| 5405 | cpumask_set_cpu(cpu, &pmu->supported_cpus); | 
|---|
| 5406 | cpuc->pmu = &pmu->pmu; | 
|---|
| 5407 |  | 
|---|
| 5408 | return true; | 
|---|
| 5409 | } | 
|---|
| 5410 |  | 
|---|
| 5411 | static void intel_pmu_cpu_starting(int cpu) | 
|---|
| 5412 | { | 
|---|
| 5413 | struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); | 
|---|
| 5414 | int core_id = topology_core_id(cpu); | 
|---|
| 5415 | int i; | 
|---|
| 5416 |  | 
|---|
| 5417 | if (is_hybrid() && !init_hybrid_pmu(cpu)) | 
|---|
| 5418 | return; | 
|---|
| 5419 |  | 
|---|
| 5420 | init_debug_store_on_cpu(cpu); | 
|---|
| 5421 | /* | 
|---|
| 5422 | * Deal with CPUs that don't clear their LBRs on power-up, and that may | 
|---|
| 5423 | * even boot with LBRs enabled. | 
|---|
| 5424 | */ | 
|---|
| 5425 | if (!static_cpu_has(X86_FEATURE_ARCH_LBR) && x86_pmu.lbr_nr) | 
|---|
| 5426 | msr_clear_bit(MSR_IA32_DEBUGCTLMSR, DEBUGCTLMSR_LBR_BIT); | 
|---|
| 5427 | intel_pmu_lbr_reset(); | 
|---|
| 5428 |  | 
|---|
| 5429 | cpuc->lbr_sel = NULL; | 
|---|
| 5430 |  | 
|---|
| 5431 | if (x86_pmu.flags & PMU_FL_TFA) { | 
|---|
| 5432 | WARN_ON_ONCE(cpuc->tfa_shadow); | 
|---|
| 5433 | cpuc->tfa_shadow = ~0ULL; | 
|---|
| 5434 | intel_set_tfa(cpuc, false); | 
|---|
| 5435 | } | 
|---|
| 5436 |  | 
|---|
| 5437 | if (x86_pmu.version > 1) | 
|---|
| 5438 | flip_smm_bit(&x86_pmu.attr_freeze_on_smi); | 
|---|
| 5439 |  | 
|---|
| 5440 | /* | 
|---|
| 5441 | * Disable perf metrics if any added CPU doesn't support it. | 
|---|
| 5442 | * | 
|---|
| 5443 | * Turn off the check for a hybrid architecture, because the | 
|---|
| 5444 | * architecture MSR, MSR_IA32_PERF_CAPABILITIES, only indicates | 
|---|
| 5445 | * the architectural features. Perf metrics is a model-specific | 
|---|
| 5446 | * feature for now. The corresponding bit should always be 0 on | 
|---|
| 5447 | * a hybrid platform, e.g., Alder Lake. | 
|---|
| 5448 | */ | 
|---|
| 5449 | if (!is_hybrid() && x86_pmu.intel_cap.perf_metrics) { | 
|---|
| 5450 | union perf_capabilities perf_cap; | 
|---|
| 5451 |  | 
|---|
| 5452 | rdmsrq(MSR_IA32_PERF_CAPABILITIES, perf_cap.capabilities); | 
|---|
| 5453 | if (!perf_cap.perf_metrics) { | 
|---|
| 5454 | x86_pmu.intel_cap.perf_metrics = 0; | 
|---|
| 5455 | x86_pmu.intel_ctrl &= ~GLOBAL_CTRL_EN_PERF_METRICS; | 
|---|
| 5456 | } | 
|---|
| 5457 | } | 
|---|
| 5458 |  | 
|---|
| 5459 | if (!cpuc->shared_regs) | 
|---|
| 5460 | return; | 
|---|
| 5461 |  | 
|---|
| 5462 | if (!(x86_pmu.flags & PMU_FL_NO_HT_SHARING)) { | 
|---|
| 5463 | for_each_cpu(i, topology_sibling_cpumask(cpu)) { | 
|---|
| 5464 | struct intel_shared_regs *pc; | 
|---|
| 5465 |  | 
|---|
| 5466 | pc = per_cpu(cpu_hw_events, i).shared_regs; | 
|---|
| 5467 | if (pc && pc->core_id == core_id) { | 
|---|
| 5468 | cpuc->kfree_on_online[0] = cpuc->shared_regs; | 
|---|
| 5469 | cpuc->shared_regs = pc; | 
|---|
| 5470 | break; | 
|---|
| 5471 | } | 
|---|
| 5472 | } | 
|---|
| 5473 | cpuc->shared_regs->core_id = core_id; | 
|---|
| 5474 | cpuc->shared_regs->refcnt++; | 
|---|
| 5475 | } | 
|---|
| 5476 |  | 
|---|
| 5477 | if (x86_pmu.lbr_sel_map) | 
|---|
| 5478 | cpuc->lbr_sel = &cpuc->shared_regs->regs[EXTRA_REG_LBR]; | 
|---|
| 5479 |  | 
|---|
| 5480 | if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) { | 
|---|
| 5481 | for_each_cpu(i, topology_sibling_cpumask(cpu)) { | 
|---|
| 5482 | struct cpu_hw_events *sibling; | 
|---|
| 5483 | struct intel_excl_cntrs *c; | 
|---|
| 5484 |  | 
|---|
| 5485 | sibling = &per_cpu(cpu_hw_events, i); | 
|---|
| 5486 | c = sibling->excl_cntrs; | 
|---|
| 5487 | if (c && c->core_id == core_id) { | 
|---|
| 5488 | cpuc->kfree_on_online[1] = cpuc->excl_cntrs; | 
|---|
| 5489 | cpuc->excl_cntrs = c; | 
|---|
| 5490 | if (!sibling->excl_thread_id) | 
|---|
| 5491 | cpuc->excl_thread_id = 1; | 
|---|
| 5492 | break; | 
|---|
| 5493 | } | 
|---|
| 5494 | } | 
|---|
| 5495 | cpuc->excl_cntrs->core_id = core_id; | 
|---|
| 5496 | cpuc->excl_cntrs->refcnt++; | 
|---|
| 5497 | } | 
|---|
| 5498 | } | 
|---|
| 5499 |  | 
|---|
| 5500 | static void free_excl_cntrs(struct cpu_hw_events *cpuc) | 
|---|
| 5501 | { | 
|---|
| 5502 | struct intel_excl_cntrs *c; | 
|---|
| 5503 |  | 
|---|
| 5504 | c = cpuc->excl_cntrs; | 
|---|
| 5505 | if (c) { | 
|---|
| 5506 | if (c->core_id == -1 || --c->refcnt == 0) | 
|---|
| 5507 | kfree(c); | 
|---|
| 5508 | cpuc->excl_cntrs = NULL; | 
|---|
| 5509 | } | 
|---|
| 5510 |  | 
|---|
| 5511 | kfree(cpuc->constraint_list); | 
|---|
| 5512 | cpuc->constraint_list = NULL; | 
|---|
| 5513 | } | 
|---|
| 5514 |  | 
|---|
| 5515 | static void intel_pmu_cpu_dying(int cpu) | 
|---|
| 5516 | { | 
|---|
| 5517 | fini_debug_store_on_cpu(cpu); | 
|---|
| 5518 | } | 
|---|
| 5519 |  | 
|---|
| 5520 | void intel_cpuc_finish(struct cpu_hw_events *cpuc) | 
|---|
| 5521 | { | 
|---|
| 5522 | struct intel_shared_regs *pc; | 
|---|
| 5523 |  | 
|---|
| 5524 | pc = cpuc->shared_regs; | 
|---|
| 5525 | if (pc) { | 
|---|
| 5526 | if (pc->core_id == -1 || --pc->refcnt == 0) | 
|---|
| 5527 | kfree(pc); | 
|---|
| 5528 | cpuc->shared_regs = NULL; | 
|---|
| 5529 | } | 
|---|
| 5530 |  | 
|---|
| 5531 | free_excl_cntrs(cpuc); | 
|---|
| 5532 | } | 
|---|
| 5533 |  | 
|---|
| 5534 | static void intel_pmu_cpu_dead(int cpu) | 
|---|
| 5535 | { | 
|---|
| 5536 | struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); | 
|---|
| 5537 |  | 
|---|
| 5538 | intel_cpuc_finish(cpuc); | 
|---|
| 5539 |  | 
|---|
| 5540 | if (is_hybrid() && cpuc->pmu) | 
|---|
| 5541 | cpumask_clear_cpu(cpu, &hybrid_pmu(cpuc->pmu)->supported_cpus); | 
|---|
| 5542 | } | 
|---|
| 5543 |  | 
|---|
| 5544 | static void intel_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx, | 
|---|
| 5545 | struct task_struct *task, bool sched_in) | 
|---|
| 5546 | { | 
|---|
| 5547 | intel_pmu_pebs_sched_task(pmu_ctx, sched_in); | 
|---|
| 5548 | intel_pmu_lbr_sched_task(pmu_ctx, task, sched_in); | 
|---|
| 5549 | } | 
|---|
| 5550 |  | 
|---|
| 5551 | static int intel_pmu_check_period(struct perf_event *event, u64 value) | 
|---|
| 5552 | { | 
|---|
| 5553 | return intel_pmu_has_bts_period(event, value) ? -EINVAL : 0; | 
|---|
| 5554 | } | 
|---|
| 5555 |  | 
|---|
| 5556 | static void intel_aux_output_init(void) | 
|---|
| 5557 | { | 
|---|
| 5558 | /* Refer also intel_pmu_aux_output_match() */ | 
|---|
| 5559 | if (x86_pmu.intel_cap.pebs_output_pt_available) | 
|---|
| 5560 | x86_pmu.assign = intel_pmu_assign_event; | 
|---|
| 5561 | } | 
|---|
| 5562 |  | 
|---|
| 5563 | static int intel_pmu_aux_output_match(struct perf_event *event) | 
|---|
| 5564 | { | 
|---|
| 5565 | /* intel_pmu_assign_event() is needed, refer intel_aux_output_init() */ | 
|---|
| 5566 | if (!x86_pmu.intel_cap.pebs_output_pt_available) | 
|---|
| 5567 | return 0; | 
|---|
| 5568 |  | 
|---|
| 5569 | return is_intel_pt_event(event); | 
|---|
| 5570 | } | 
|---|
| 5571 |  | 
|---|
| 5572 | static void intel_pmu_filter(struct pmu *pmu, int cpu, bool *ret) | 
|---|
| 5573 | { | 
|---|
| 5574 | struct x86_hybrid_pmu *hpmu = hybrid_pmu(pmu); | 
|---|
| 5575 |  | 
|---|
| 5576 | *ret = !cpumask_test_cpu(cpu, &hpmu->supported_cpus); | 
|---|
| 5577 | } | 
|---|
| 5578 |  | 
|---|
| 5579 | PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63"); | 
|---|
| 5580 |  | 
|---|
| 5581 | PMU_FORMAT_ATTR(ldlat, "config1:0-15"); | 
|---|
| 5582 |  | 
|---|
| 5583 | PMU_FORMAT_ATTR(frontend, "config1:0-23"); | 
|---|
| 5584 |  | 
|---|
| 5585 | PMU_FORMAT_ATTR(snoop_rsp, "config1:0-63"); | 
|---|
| 5586 |  | 
|---|
| 5587 | static struct attribute *intel_arch3_formats_attr[] = { | 
|---|
| 5588 | &format_attr_event.attr, | 
|---|
| 5589 | &format_attr_umask.attr, | 
|---|
| 5590 | &format_attr_edge.attr, | 
|---|
| 5591 | &format_attr_pc.attr, | 
|---|
| 5592 | &format_attr_any.attr, | 
|---|
| 5593 | &format_attr_inv.attr, | 
|---|
| 5594 | &format_attr_cmask.attr, | 
|---|
| 5595 | NULL, | 
|---|
| 5596 | }; | 
|---|
| 5597 |  | 
|---|
| 5598 | static struct attribute *hsw_format_attr[] = { | 
|---|
| 5599 | &format_attr_in_tx.attr, | 
|---|
| 5600 | &format_attr_in_tx_cp.attr, | 
|---|
| 5601 | &format_attr_offcore_rsp.attr, | 
|---|
| 5602 | &format_attr_ldlat.attr, | 
|---|
| 5603 | NULL | 
|---|
| 5604 | }; | 
|---|
| 5605 |  | 
|---|
| 5606 | static struct attribute *nhm_format_attr[] = { | 
|---|
| 5607 | &format_attr_offcore_rsp.attr, | 
|---|
| 5608 | &format_attr_ldlat.attr, | 
|---|
| 5609 | NULL | 
|---|
| 5610 | }; | 
|---|
| 5611 |  | 
|---|
| 5612 | static struct attribute *slm_format_attr[] = { | 
|---|
| 5613 | &format_attr_offcore_rsp.attr, | 
|---|
| 5614 | NULL | 
|---|
| 5615 | }; | 
|---|
| 5616 |  | 
|---|
| 5617 | static struct attribute *cmt_format_attr[] = { | 
|---|
| 5618 | &format_attr_offcore_rsp.attr, | 
|---|
| 5619 | &format_attr_ldlat.attr, | 
|---|
| 5620 | &format_attr_snoop_rsp.attr, | 
|---|
| 5621 | NULL | 
|---|
| 5622 | }; | 
|---|
| 5623 |  | 
|---|
| 5624 | static struct attribute *skl_format_attr[] = { | 
|---|
| 5625 | &format_attr_frontend.attr, | 
|---|
| 5626 | NULL, | 
|---|
| 5627 | }; | 
|---|
| 5628 |  | 
|---|
| 5629 | static __initconst const struct x86_pmu core_pmu = { | 
|---|
| 5630 | .name			= "core", | 
|---|
| 5631 | .handle_irq		= x86_pmu_handle_irq, | 
|---|
| 5632 | .disable_all		= x86_pmu_disable_all, | 
|---|
| 5633 | .enable_all		= core_pmu_enable_all, | 
|---|
| 5634 | .enable			= core_pmu_enable_event, | 
|---|
| 5635 | .disable		= x86_pmu_disable_event, | 
|---|
| 5636 | .hw_config		= core_pmu_hw_config, | 
|---|
| 5637 | .schedule_events	= x86_schedule_events, | 
|---|
| 5638 | .eventsel		= MSR_ARCH_PERFMON_EVENTSEL0, | 
|---|
| 5639 | .perfctr		= MSR_ARCH_PERFMON_PERFCTR0, | 
|---|
| 5640 | .fixedctr		= MSR_ARCH_PERFMON_FIXED_CTR0, | 
|---|
| 5641 | .event_map		= intel_pmu_event_map, | 
|---|
| 5642 | .max_events		= ARRAY_SIZE(intel_perfmon_event_map), | 
|---|
| 5643 | .apic			= 1, | 
|---|
| 5644 | .large_pebs_flags	= LARGE_PEBS_FLAGS, | 
|---|
| 5645 |  | 
|---|
| 5646 | /* | 
|---|
| 5647 | * Intel PMCs cannot be accessed sanely above 32-bit width, | 
|---|
| 5648 | * so we install an artificial 1<<31 period regardless of | 
|---|
| 5649 | * the generic event period: | 
|---|
| 5650 | */ | 
|---|
| 5651 | .max_period		= (1ULL<<31) - 1, | 
|---|
| 5652 | .get_event_constraints	= intel_get_event_constraints, | 
|---|
| 5653 | .put_event_constraints	= intel_put_event_constraints, | 
|---|
| 5654 | .event_constraints	= intel_core_event_constraints, | 
|---|
| 5655 | .guest_get_msrs		= core_guest_get_msrs, | 
|---|
| 5656 | .format_attrs		= intel_arch_formats_attr, | 
|---|
| 5657 | .events_sysfs_show	= intel_event_sysfs_show, | 
|---|
| 5658 |  | 
|---|
| 5659 | /* | 
|---|
| 5660 | * Virtual (or funny metal) CPU can define x86_pmu.extra_regs | 
|---|
| 5661 | * together with PMU version 1 and thus be using core_pmu with | 
|---|
| 5662 | * shared_regs. We need following callbacks here to allocate | 
|---|
| 5663 | * it properly. | 
|---|
| 5664 | */ | 
|---|
| 5665 | .cpu_prepare		= intel_pmu_cpu_prepare, | 
|---|
| 5666 | .cpu_starting		= intel_pmu_cpu_starting, | 
|---|
| 5667 | .cpu_dying		= intel_pmu_cpu_dying, | 
|---|
| 5668 | .cpu_dead		= intel_pmu_cpu_dead, | 
|---|
| 5669 |  | 
|---|
| 5670 | .check_period		= intel_pmu_check_period, | 
|---|
| 5671 |  | 
|---|
| 5672 | .lbr_reset		= intel_pmu_lbr_reset_64, | 
|---|
| 5673 | .lbr_read		= intel_pmu_lbr_read_64, | 
|---|
| 5674 | .lbr_save		= intel_pmu_lbr_save, | 
|---|
| 5675 | .lbr_restore		= intel_pmu_lbr_restore, | 
|---|
| 5676 | }; | 
|---|
| 5677 |  | 
|---|
| 5678 | static __initconst const struct x86_pmu intel_pmu = { | 
|---|
| 5679 | .name			= "Intel", | 
|---|
| 5680 | .handle_irq		= intel_pmu_handle_irq, | 
|---|
| 5681 | .disable_all		= intel_pmu_disable_all, | 
|---|
| 5682 | .enable_all		= intel_pmu_enable_all, | 
|---|
| 5683 | .enable			= intel_pmu_enable_event, | 
|---|
| 5684 | .disable		= intel_pmu_disable_event, | 
|---|
| 5685 | .add			= intel_pmu_add_event, | 
|---|
| 5686 | .del			= intel_pmu_del_event, | 
|---|
| 5687 | .read			= intel_pmu_read_event, | 
|---|
| 5688 | .set_period		= intel_pmu_set_period, | 
|---|
| 5689 | .update			= intel_pmu_update, | 
|---|
| 5690 | .hw_config		= intel_pmu_hw_config, | 
|---|
| 5691 | .schedule_events	= x86_schedule_events, | 
|---|
| 5692 | .eventsel		= MSR_ARCH_PERFMON_EVENTSEL0, | 
|---|
| 5693 | .perfctr		= MSR_ARCH_PERFMON_PERFCTR0, | 
|---|
| 5694 | .fixedctr		= MSR_ARCH_PERFMON_FIXED_CTR0, | 
|---|
| 5695 | .event_map		= intel_pmu_event_map, | 
|---|
| 5696 | .max_events		= ARRAY_SIZE(intel_perfmon_event_map), | 
|---|
| 5697 | .apic			= 1, | 
|---|
| 5698 | .large_pebs_flags	= LARGE_PEBS_FLAGS, | 
|---|
| 5699 | /* | 
|---|
| 5700 | * Intel PMCs cannot be accessed sanely above 32 bit width, | 
|---|
| 5701 | * so we install an artificial 1<<31 period regardless of | 
|---|
| 5702 | * the generic event period: | 
|---|
| 5703 | */ | 
|---|
| 5704 | .max_period		= (1ULL << 31) - 1, | 
|---|
| 5705 | .get_event_constraints	= intel_get_event_constraints, | 
|---|
| 5706 | .put_event_constraints	= intel_put_event_constraints, | 
|---|
| 5707 | .pebs_aliases		= intel_pebs_aliases_core2, | 
|---|
| 5708 |  | 
|---|
| 5709 | .format_attrs		= intel_arch3_formats_attr, | 
|---|
| 5710 | .events_sysfs_show	= intel_event_sysfs_show, | 
|---|
| 5711 |  | 
|---|
| 5712 | .cpu_prepare		= intel_pmu_cpu_prepare, | 
|---|
| 5713 | .cpu_starting		= intel_pmu_cpu_starting, | 
|---|
| 5714 | .cpu_dying		= intel_pmu_cpu_dying, | 
|---|
| 5715 | .cpu_dead		= intel_pmu_cpu_dead, | 
|---|
| 5716 |  | 
|---|
| 5717 | .guest_get_msrs		= intel_guest_get_msrs, | 
|---|
| 5718 | .sched_task		= intel_pmu_sched_task, | 
|---|
| 5719 |  | 
|---|
| 5720 | .check_period		= intel_pmu_check_period, | 
|---|
| 5721 |  | 
|---|
| 5722 | .aux_output_match	= intel_pmu_aux_output_match, | 
|---|
| 5723 |  | 
|---|
| 5724 | .lbr_reset		= intel_pmu_lbr_reset_64, | 
|---|
| 5725 | .lbr_read		= intel_pmu_lbr_read_64, | 
|---|
| 5726 | .lbr_save		= intel_pmu_lbr_save, | 
|---|
| 5727 | .lbr_restore		= intel_pmu_lbr_restore, | 
|---|
| 5728 |  | 
|---|
| 5729 | /* | 
|---|
| 5730 | * SMM has access to all 4 rings and while traditionally SMM code only | 
|---|
| 5731 | * ran in CPL0, 2021-era firmware is starting to make use of CPL3 in SMM. | 
|---|
| 5732 | * | 
|---|
| 5733 | * Since the EVENTSEL.{USR,OS} CPL filtering makes no distinction | 
|---|
| 5734 | * between SMM or not, this results in what should be pure userspace | 
|---|
| 5735 | * counters including SMM data. | 
|---|
| 5736 | * | 
|---|
| 5737 | * This is a clear privilege issue, therefore globally disable | 
|---|
| 5738 | * counting SMM by default. | 
|---|
| 5739 | */ | 
|---|
| 5740 | .attr_freeze_on_smi	= 1, | 
|---|
| 5741 | }; | 
|---|
| 5742 |  | 
|---|
| 5743 | static __init void intel_clovertown_quirk(void) | 
|---|
| 5744 | { | 
|---|
| 5745 | /* | 
|---|
| 5746 | * PEBS is unreliable due to: | 
|---|
| 5747 | * | 
|---|
| 5748 | *   AJ67  - PEBS may experience CPL leaks | 
|---|
| 5749 | *   AJ68  - PEBS PMI may be delayed by one event | 
|---|
| 5750 | *   AJ69  - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12] | 
|---|
| 5751 | *   AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS | 
|---|
| 5752 | * | 
|---|
| 5753 | * AJ67 could be worked around by restricting the OS/USR flags. | 
|---|
| 5754 | * AJ69 could be worked around by setting PMU_FREEZE_ON_PMI. | 
|---|
| 5755 | * | 
|---|
| 5756 | * AJ106 could possibly be worked around by not allowing LBR | 
|---|
| 5757 | *       usage from PEBS, including the fixup. | 
|---|
| 5758 | * AJ68  could possibly be worked around by always programming | 
|---|
| 5759 | *	 a pebs_event_reset[0] value and coping with the lost events. | 
|---|
| 5760 | * | 
|---|
| 5761 | * But taken together it might just make sense to not enable PEBS on | 
|---|
| 5762 | * these chips. | 
|---|
| 5763 | */ | 
|---|
| 5764 | pr_warn("PEBS disabled due to CPU errata\n"); | 
|---|
| 5765 | x86_pmu.ds_pebs = 0; | 
|---|
| 5766 | x86_pmu.pebs_constraints = NULL; | 
|---|
| 5767 | } | 
|---|
| 5768 |  | 
|---|
| 5769 | static const struct x86_cpu_id isolation_ucodes[] = { | 
|---|
| 5770 | X86_MATCH_VFM_STEPS(INTEL_HASWELL,	 3,  3, 0x0000001f), | 
|---|
| 5771 | X86_MATCH_VFM_STEPS(INTEL_HASWELL_L,	 1,  1, 0x0000001e), | 
|---|
| 5772 | X86_MATCH_VFM_STEPS(INTEL_HASWELL_G,	 1,  1, 0x00000015), | 
|---|
| 5773 | X86_MATCH_VFM_STEPS(INTEL_HASWELL_X,	 2,  2, 0x00000037), | 
|---|
| 5774 | X86_MATCH_VFM_STEPS(INTEL_HASWELL_X,	 4,  4, 0x0000000a), | 
|---|
| 5775 | X86_MATCH_VFM_STEPS(INTEL_BROADWELL,	 4,  4, 0x00000023), | 
|---|
| 5776 | X86_MATCH_VFM_STEPS(INTEL_BROADWELL_G,	 1,  1, 0x00000014), | 
|---|
| 5777 | X86_MATCH_VFM_STEPS(INTEL_BROADWELL_D,	 2,  2, 0x00000010), | 
|---|
| 5778 | X86_MATCH_VFM_STEPS(INTEL_BROADWELL_D,	 3,  3, 0x07000009), | 
|---|
| 5779 | X86_MATCH_VFM_STEPS(INTEL_BROADWELL_D,	 4,  4, 0x0f000009), | 
|---|
| 5780 | X86_MATCH_VFM_STEPS(INTEL_BROADWELL_D,	 5,  5, 0x0e000002), | 
|---|
| 5781 | X86_MATCH_VFM_STEPS(INTEL_BROADWELL_X,	 1,  1, 0x0b000014), | 
|---|
| 5782 | X86_MATCH_VFM_STEPS(INTEL_SKYLAKE_X,	 3,  3, 0x00000021), | 
|---|
| 5783 | X86_MATCH_VFM_STEPS(INTEL_SKYLAKE_X,	 4,  7, 0x00000000), | 
|---|
| 5784 | X86_MATCH_VFM_STEPS(INTEL_SKYLAKE_X,	11, 11, 0x00000000), | 
|---|
| 5785 | X86_MATCH_VFM_STEPS(INTEL_SKYLAKE_L,	 3,  3, 0x0000007c), | 
|---|
| 5786 | X86_MATCH_VFM_STEPS(INTEL_SKYLAKE,	 3,  3, 0x0000007c), | 
|---|
| 5787 | X86_MATCH_VFM_STEPS(INTEL_KABYLAKE,	 9, 13, 0x0000004e), | 
|---|
| 5788 | X86_MATCH_VFM_STEPS(INTEL_KABYLAKE_L,	 9, 12, 0x0000004e), | 
|---|
| 5789 | {} | 
|---|
| 5790 | }; | 
|---|
| 5791 |  | 
|---|
| 5792 | static void intel_check_pebs_isolation(void) | 
|---|
| 5793 | { | 
|---|
| 5794 | x86_pmu.pebs_no_isolation = !x86_match_min_microcode_rev(isolation_ucodes); | 
|---|
| 5795 | } | 
|---|
| 5796 |  | 
|---|
| 5797 | static __init void intel_pebs_isolation_quirk(void) | 
|---|
| 5798 | { | 
|---|
| 5799 | WARN_ON_ONCE(x86_pmu.check_microcode); | 
|---|
| 5800 | x86_pmu.check_microcode = intel_check_pebs_isolation; | 
|---|
| 5801 | intel_check_pebs_isolation(); | 
|---|
| 5802 | } | 
|---|
| 5803 |  | 
|---|
| 5804 | static const struct x86_cpu_id pebs_ucodes[] = { | 
|---|
| 5805 | X86_MATCH_VFM_STEPS(INTEL_SANDYBRIDGE,	7, 7, 0x00000028), | 
|---|
| 5806 | X86_MATCH_VFM_STEPS(INTEL_SANDYBRIDGE_X,	6, 6, 0x00000618), | 
|---|
| 5807 | X86_MATCH_VFM_STEPS(INTEL_SANDYBRIDGE_X,	7, 7, 0x0000070c), | 
|---|
| 5808 | {} | 
|---|
| 5809 | }; | 
|---|
| 5810 |  | 
|---|
| 5811 | static bool intel_snb_pebs_broken(void) | 
|---|
| 5812 | { | 
|---|
| 5813 | return !x86_match_min_microcode_rev(pebs_ucodes); | 
|---|
| 5814 | } | 
|---|
| 5815 |  | 
|---|
| 5816 | static void intel_snb_check_microcode(void) | 
|---|
| 5817 | { | 
|---|
| 5818 | if (intel_snb_pebs_broken() == x86_pmu.pebs_broken) | 
|---|
| 5819 | return; | 
|---|
| 5820 |  | 
|---|
| 5821 | /* | 
|---|
| 5822 | * Serialized by the microcode lock. | 
|---|
| 5823 | */ | 
|---|
| 5824 | if (x86_pmu.pebs_broken) { | 
|---|
| 5825 | pr_info("PEBS enabled due to microcode update\n"); | 
|---|
| 5826 | x86_pmu.pebs_broken = 0; | 
|---|
| 5827 | } else { | 
|---|
| 5828 | pr_info("PEBS disabled due to CPU errata, please upgrade microcode\n"); | 
|---|
| 5829 | x86_pmu.pebs_broken = 1; | 
|---|
| 5830 | } | 
|---|
| 5831 | } | 
|---|
| 5832 |  | 
|---|
| 5833 | static bool is_lbr_from(unsigned long msr) | 
|---|
| 5834 | { | 
|---|
| 5835 | unsigned long lbr_from_nr = x86_pmu.lbr_from + x86_pmu.lbr_nr; | 
|---|
| 5836 |  | 
|---|
| 5837 | return x86_pmu.lbr_from <= msr && msr < lbr_from_nr; | 
|---|
| 5838 | } | 
|---|
| 5839 |  | 
|---|
| 5840 | /* | 
|---|
| 5841 | * Under certain circumstances, accessing certain MSRs may cause a #GP. | 
|---|
| 5842 | * This function tests whether the input MSR can be safely accessed. | 
|---|
| 5843 | */ | 
|---|
| 5844 | static bool check_msr(unsigned long msr, u64 mask) | 
|---|
| 5845 | { | 
|---|
| 5846 | u64 val_old, val_new, val_tmp; | 
|---|
| 5847 |  | 
|---|
| 5848 | /* | 
|---|
| 5849 | * Disable the check for real HW, so we don't | 
|---|
| 5850 | * mess with potentially enabled registers: | 
|---|
| 5851 | */ | 
|---|
| 5852 | if (!boot_cpu_has(X86_FEATURE_HYPERVISOR)) | 
|---|
| 5853 | return true; | 
|---|
| 5854 |  | 
|---|
| 5855 | /* | 
|---|
| 5856 | * Read the current value, change it and read it back to see if it | 
|---|
| 5857 | * matches, this is needed to detect certain hardware emulators | 
|---|
| 5858 | * (qemu/kvm) that don't trap on the MSR access and always return 0s. | 
|---|
| 5859 | */ | 
|---|
| 5860 | if (rdmsrq_safe(msr, &val_old)) | 
|---|
| 5861 | return false; | 
|---|
| 5862 |  | 
|---|
| 5863 | /* | 
|---|
| 5864 | * Only change the bits which can be updated by wrmsrq. | 
|---|
| 5865 | */ | 
|---|
| 5866 | val_tmp = val_old ^ mask; | 
|---|
| 5867 |  | 
|---|
| 5868 | if (is_lbr_from(msr)) | 
|---|
| 5869 | val_tmp = lbr_from_signext_quirk_wr(val_tmp); | 
|---|
| 5870 |  | 
|---|
| 5871 | if (wrmsrq_safe(msr, val_tmp) || | 
|---|
| 5872 | rdmsrq_safe(msr, &val_new)) | 
|---|
| 5873 | return false; | 
|---|
| 5874 |  | 
|---|
| 5875 | /* | 
|---|
| 5876 | * Quirk only affects validation in wrmsr(), so wrmsrq()'s value | 
|---|
| 5877 | * should equal rdmsrq()'s even with the quirk. | 
|---|
| 5878 | */ | 
|---|
| 5879 | if (val_new != val_tmp) | 
|---|
| 5880 | return false; | 
|---|
| 5881 |  | 
|---|
| 5882 | if (is_lbr_from(msr)) | 
|---|
| 5883 | val_old = lbr_from_signext_quirk_wr(val_old); | 
|---|
| 5884 |  | 
|---|
| 5885 | /* At this point it is certain that the MSR can be safely accessed. | 
|---|
| 5886 | * Restore the old value and return. | 
|---|
| 5887 | */ | 
|---|
| 5888 | wrmsrq(msr, val_old); | 
|---|
| 5889 |  | 
|---|
| 5890 | return true; | 
|---|
| 5891 | } | 
|---|
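|  | /* | 
|---|
|  | * Example probe: for an LBR_FROM MSR with mask 0x3, read the old value, write | 
|---|
|  | * old ^ 0x3 and read it back; an emulated MSR that silently returns 0 fails | 
|---|
|  | * the comparison and is treated as unusable. | 
|---|
|  | */ | 
|---|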
| 5892 |  | 
|---|
| 5893 | static __init void intel_sandybridge_quirk(void) | 
|---|
| 5894 | { | 
|---|
| 5895 | x86_pmu.check_microcode = intel_snb_check_microcode; | 
|---|
| 5896 | cpus_read_lock(); | 
|---|
| 5897 | intel_snb_check_microcode(); | 
|---|
| 5898 | cpus_read_unlock(); | 
|---|
| 5899 | } | 
|---|
| 5900 |  | 
|---|
| 5901 | static const struct { int id; char *name; } intel_arch_events_map[] __initconst = { | 
|---|
| 5902 | { PERF_COUNT_HW_CPU_CYCLES, "cpu cycles"}, | 
|---|
| 5903 | { PERF_COUNT_HW_INSTRUCTIONS, "instructions"}, | 
|---|
| 5904 | { PERF_COUNT_HW_BUS_CYCLES, "bus cycles"}, | 
|---|
| 5905 | { PERF_COUNT_HW_CACHE_REFERENCES, "cache references"}, | 
|---|
| 5906 | { PERF_COUNT_HW_CACHE_MISSES, "cache misses"}, | 
|---|
| 5907 | { PERF_COUNT_HW_BRANCH_INSTRUCTIONS, "branch instructions"}, | 
|---|
| 5908 | { PERF_COUNT_HW_BRANCH_MISSES, "branch misses"}, | 
|---|
| 5909 | }; | 
|---|
| 5910 |  | 
|---|
| 5911 | static __init void intel_arch_events_quirk(void) | 
|---|
| 5912 | { | 
|---|
| 5913 | int bit; | 
|---|
| 5914 |  | 
|---|
| 5915 | /* disable events reported as not present by CPUID */ | 
|---|
| 5916 | for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(intel_arch_events_map)) { | 
|---|
| 5917 | intel_perfmon_event_map[intel_arch_events_map[bit].id] = 0; | 
|---|
| 5918 | pr_warn("CPUID marked event: \'%s\' unavailable\n", | 
|---|
| 5919 | intel_arch_events_map[bit].name); | 
|---|
| 5920 | } | 
|---|
| 5921 | } | 
|---|
| 5922 |  | 
|---|
| 5923 | static __init void intel_nehalem_quirk(void) | 
|---|
| 5924 | { | 
|---|
| 5925 | union cpuid10_ebx ebx; | 
|---|
| 5926 |  | 
|---|
| 5927 | ebx.full = x86_pmu.events_maskl; | 
|---|
| 5928 | if (ebx.split.no_branch_misses_retired) { | 
|---|
| 5929 | /* | 
|---|
| 5930 | * Erratum AAJ80 detected, we work it around by using | 
|---|
| 5931 | * the BR_MISP_EXEC.ANY event. This will over-count | 
|---|
| 5932 | * branch-misses, but it's still much better than the | 
|---|
| 5933 | * architectural event which is often completely bogus: | 
|---|
| 5934 | */ | 
|---|
| 5935 | intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89; | 
|---|
| 5936 | ebx.split.no_branch_misses_retired = 0; | 
|---|
| 5937 | x86_pmu.events_maskl = ebx.full; | 
|---|
| 5938 | pr_info("CPU erratum AAJ80 worked around\n"); | 
|---|
| 5939 | } | 
|---|
| 5940 | } | 
|---|
| 5941 |  | 
|---|
| 5942 | /* | 
|---|
| 5943 | * enable software workaround for errata: | 
|---|
| 5944 | * SNB: BJ122 | 
|---|
| 5945 | * IVB: BV98 | 
|---|
| 5946 | * HSW: HSD29 | 
|---|
| 5947 | * | 
|---|
| 5948 | * Only needed when HT is enabled. However detecting | 
|---|
| 5949 | * if HT is enabled is difficult (model specific). So instead, | 
|---|
| 5950 | * we enable the workaround in the early boot, and verify if | 
|---|
| 5951 | * it is needed in a later initcall phase once we have valid | 
|---|
| 5952 | * topology information to check if HT is actually enabled | 
|---|
| 5953 | */ | 
|---|
| 5954 | static __init void intel_ht_bug(void) | 
|---|
| 5955 | { | 
|---|
| 5956 | x86_pmu.flags |= PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED; | 
|---|
| 5957 |  | 
|---|
| 5958 | x86_pmu.start_scheduling = intel_start_scheduling; | 
|---|
| 5959 | x86_pmu.commit_scheduling = intel_commit_scheduling; | 
|---|
| 5960 | x86_pmu.stop_scheduling = intel_stop_scheduling; | 
|---|
| 5961 | } | 
|---|
| 5962 |  | 
|---|
| 5963 | EVENT_ATTR_STR(mem-loads,	mem_ld_hsw, "event=0xcd,umask=0x1,ldlat=3"); | 
|---|
| 5964 | EVENT_ATTR_STR(mem-stores,	mem_st_hsw, "event=0xd0,umask=0x82") | 
|---|
| 5965 |  | 
|---|
| 5966 | /* Haswell special events */ | 
|---|
| 5967 | EVENT_ATTR_STR(tx-start,	tx_start, "event=0xc9,umask=0x1"); | 
|---|
| 5968 | EVENT_ATTR_STR(tx-commit,	tx_commit, "event=0xc9,umask=0x2"); | 
|---|
| 5969 | EVENT_ATTR_STR(tx-abort,	tx_abort, "event=0xc9,umask=0x4"); | 
|---|
| 5970 | EVENT_ATTR_STR(tx-capacity,	tx_capacity, "event=0x54,umask=0x2"); | 
|---|
| 5971 | EVENT_ATTR_STR(tx-conflict,	tx_conflict, "event=0x54,umask=0x1"); | 
|---|
| 5972 | EVENT_ATTR_STR(el-start,	el_start, "event=0xc8,umask=0x1"); | 
|---|
| 5973 | EVENT_ATTR_STR(el-commit,	el_commit, "event=0xc8,umask=0x2"); | 
|---|
| 5974 | EVENT_ATTR_STR(el-abort,	el_abort, "event=0xc8,umask=0x4"); | 
|---|
| 5975 | EVENT_ATTR_STR(el-capacity,	el_capacity, "event=0x54,umask=0x2"); | 
|---|
| 5976 | EVENT_ATTR_STR(el-conflict,	el_conflict, "event=0x54,umask=0x1"); | 
|---|
| 5977 | EVENT_ATTR_STR(cycles-t,	cycles_t, "event=0x3c,in_tx=1"); | 
|---|
| 5978 | EVENT_ATTR_STR(cycles-ct,	cycles_ct, "event=0x3c,in_tx=1,in_tx_cp=1"); | 
|---|
| 5979 |  | 
|---|
| 5980 | static struct attribute *hsw_events_attrs[] = { | 
|---|
| 5981 | EVENT_PTR(td_slots_issued), | 
|---|
| 5982 | EVENT_PTR(td_slots_retired), | 
|---|
| 5983 | EVENT_PTR(td_fetch_bubbles), | 
|---|
| 5984 | EVENT_PTR(td_total_slots), | 
|---|
| 5985 | EVENT_PTR(td_total_slots_scale), | 
|---|
| 5986 | EVENT_PTR(td_recovery_bubbles), | 
|---|
| 5987 | EVENT_PTR(td_recovery_bubbles_scale), | 
|---|
| 5988 | NULL | 
|---|
| 5989 | }; | 
|---|
| 5990 |  | 
|---|
| 5991 | static struct attribute *hsw_mem_events_attrs[] = { | 
|---|
| 5992 | EVENT_PTR(mem_ld_hsw), | 
|---|
| 5993 | EVENT_PTR(mem_st_hsw), | 
|---|
| 5994 | NULL, | 
|---|
| 5995 | }; | 
|---|
| 5996 |  | 
|---|
| 5997 | static struct attribute *hsw_tsx_events_attrs[] = { | 
|---|
| 5998 | EVENT_PTR(tx_start), | 
|---|
| 5999 | EVENT_PTR(tx_commit), | 
|---|
| 6000 | EVENT_PTR(tx_abort), | 
|---|
| 6001 | EVENT_PTR(tx_capacity), | 
|---|
| 6002 | EVENT_PTR(tx_conflict), | 
|---|
| 6003 | EVENT_PTR(el_start), | 
|---|
| 6004 | EVENT_PTR(el_commit), | 
|---|
| 6005 | EVENT_PTR(el_abort), | 
|---|
| 6006 | EVENT_PTR(el_capacity), | 
|---|
| 6007 | EVENT_PTR(el_conflict), | 
|---|
| 6008 | EVENT_PTR(cycles_t), | 
|---|
| 6009 | EVENT_PTR(cycles_ct), | 
|---|
| 6010 | NULL | 
|---|
| 6011 | }; | 
|---|
| 6012 |  | 
|---|
| 6013 | EVENT_ATTR_STR(tx-capacity-read,  tx_capacity_read, "event=0x54,umask=0x80"); | 
|---|
| 6014 | EVENT_ATTR_STR(tx-capacity-write, tx_capacity_write, "event=0x54,umask=0x2"); | 
|---|
| 6015 | EVENT_ATTR_STR(el-capacity-read,  el_capacity_read, "event=0x54,umask=0x80"); | 
|---|
| 6016 | EVENT_ATTR_STR(el-capacity-write, el_capacity_write, "event=0x54,umask=0x2"); | 
|---|
| 6017 |  | 
|---|
| 6018 | static struct attribute *icl_events_attrs[] = { | 
|---|
| 6019 | EVENT_PTR(mem_ld_hsw), | 
|---|
| 6020 | EVENT_PTR(mem_st_hsw), | 
|---|
| 6021 | NULL, | 
|---|
| 6022 | }; | 
|---|
| 6023 |  | 
|---|
| 6024 | static struct attribute *icl_td_events_attrs[] = { | 
|---|
| 6025 | EVENT_PTR(slots), | 
|---|
| 6026 | EVENT_PTR(td_retiring), | 
|---|
| 6027 | EVENT_PTR(td_bad_spec), | 
|---|
| 6028 | EVENT_PTR(td_fe_bound), | 
|---|
| 6029 | EVENT_PTR(td_be_bound), | 
|---|
| 6030 | NULL, | 
|---|
| 6031 | }; | 
|---|
| 6032 |  | 
|---|
| 6033 | static struct attribute *icl_tsx_events_attrs[] = { | 
|---|
| 6034 | EVENT_PTR(tx_start), | 
|---|
| 6035 | EVENT_PTR(tx_abort), | 
|---|
| 6036 | EVENT_PTR(tx_commit), | 
|---|
| 6037 | EVENT_PTR(tx_capacity_read), | 
|---|
| 6038 | EVENT_PTR(tx_capacity_write), | 
|---|
| 6039 | EVENT_PTR(tx_conflict), | 
|---|
| 6040 | EVENT_PTR(el_start), | 
|---|
| 6041 | EVENT_PTR(el_abort), | 
|---|
| 6042 | EVENT_PTR(el_commit), | 
|---|
| 6043 | EVENT_PTR(el_capacity_read), | 
|---|
| 6044 | EVENT_PTR(el_capacity_write), | 
|---|
| 6045 | EVENT_PTR(el_conflict), | 
|---|
| 6046 | EVENT_PTR(cycles_t), | 
|---|
| 6047 | EVENT_PTR(cycles_ct), | 
|---|
| 6048 | NULL, | 
|---|
| 6049 | }; | 
|---|
| 6050 |  | 
|---|
| 6051 |  | 
|---|
| 6052 | EVENT_ATTR_STR(mem-stores,	mem_st_spr, "event=0xcd,umask=0x2"); | 
|---|
| 6053 | EVENT_ATTR_STR(mem-loads-aux,	mem_ld_aux, "event=0x03,umask=0x82"); | 
|---|
| 6054 |  | 
|---|
| 6055 | static struct attribute *glc_events_attrs[] = { | 
|---|
| 6056 | EVENT_PTR(mem_ld_hsw), | 
|---|
| 6057 | EVENT_PTR(mem_st_spr), | 
|---|
| 6058 | EVENT_PTR(mem_ld_aux), | 
|---|
| 6059 | NULL, | 
|---|
| 6060 | }; | 
|---|
| 6061 |  | 
|---|
| 6062 | static struct attribute *glc_td_events_attrs[] = { | 
|---|
| 6063 | EVENT_PTR(slots), | 
|---|
| 6064 | EVENT_PTR(td_retiring), | 
|---|
| 6065 | EVENT_PTR(td_bad_spec), | 
|---|
| 6066 | EVENT_PTR(td_fe_bound), | 
|---|
| 6067 | EVENT_PTR(td_be_bound), | 
|---|
| 6068 | EVENT_PTR(td_heavy_ops), | 
|---|
| 6069 | EVENT_PTR(td_br_mispredict), | 
|---|
| 6070 | EVENT_PTR(td_fetch_lat), | 
|---|
| 6071 | EVENT_PTR(td_mem_bound), | 
|---|
| 6072 | NULL, | 
|---|
| 6073 | }; | 
|---|
| 6074 |  | 
|---|
| 6075 | static struct attribute *glc_tsx_events_attrs[] = { | 
|---|
| 6076 | EVENT_PTR(tx_start), | 
|---|
| 6077 | EVENT_PTR(tx_abort), | 
|---|
| 6078 | EVENT_PTR(tx_commit), | 
|---|
| 6079 | EVENT_PTR(tx_capacity_read), | 
|---|
| 6080 | EVENT_PTR(tx_capacity_write), | 
|---|
| 6081 | EVENT_PTR(tx_conflict), | 
|---|
| 6082 | EVENT_PTR(cycles_t), | 
|---|
| 6083 | EVENT_PTR(cycles_ct), | 
|---|
| 6084 | NULL, | 
|---|
| 6085 | }; | 
|---|
| 6086 |  | 
|---|
| 6087 | static ssize_t freeze_on_smi_show(struct device *cdev, | 
|---|
| 6088 | struct device_attribute *attr, | 
|---|
| 6089 | char *buf) | 
|---|
| 6090 | { | 
|---|
| 6091 | return sprintf(buf, "%lu\n", x86_pmu.attr_freeze_on_smi); | 
|---|
| 6092 | } | 
|---|
| 6093 |  | 
|---|
| 6094 | static DEFINE_MUTEX(freeze_on_smi_mutex); | 
|---|
| 6095 |  | 
|---|
| 6096 | static ssize_t freeze_on_smi_store(struct device *cdev, | 
|---|
| 6097 | struct device_attribute *attr, | 
|---|
| 6098 | const char *buf, size_t count) | 
|---|
| 6099 | { | 
|---|
| 6100 | unsigned long val; | 
|---|
| 6101 | ssize_t ret; | 
|---|
| 6102 |  | 
|---|
| 6103 | ret = kstrtoul(buf, 0, &val); | 
|---|
| 6104 | if (ret) | 
|---|
| 6105 | return ret; | 
|---|
| 6106 |  | 
|---|
| 6107 | if (val > 1) | 
|---|
| 6108 | return -EINVAL; | 
|---|
| 6109 |  | 
|---|
| 6110 | mutex_lock(&freeze_on_smi_mutex); | 
|---|
| 6111 |  | 
|---|
| 6112 | if (x86_pmu.attr_freeze_on_smi == val) | 
|---|
| 6113 | goto done; | 
|---|
| 6114 |  | 
|---|
| 6115 | x86_pmu.attr_freeze_on_smi = val; | 
|---|
| 6116 |  | 
|---|
| 6117 | cpus_read_lock(); | 
|---|
| 6118 | on_each_cpu(flip_smm_bit, &val, 1); | 
|---|
| 6119 | cpus_read_unlock(); | 
|---|
| 6120 | done: | 
|---|
| 6121 | mutex_unlock(&freeze_on_smi_mutex); | 
|---|
| 6122 |  | 
|---|
| 6123 | return count; | 
|---|
| 6124 | } | 
|---|
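|  | /* | 
|---|
|  | * Typically driven from user space, e.g. (path shown for the non-hybrid "cpu" | 
|---|
|  | * PMU and given as an illustration only): | 
|---|
|  | *   echo 1 > /sys/bus/event_source/devices/cpu/freeze_on_smi | 
|---|
|  | * The write broadcasts flip_smm_bit() to every online CPU. | 
|---|
|  | */ | 
|---|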
| 6125 |  | 
|---|
| 6126 | static void update_tfa_sched(void *ignored) | 
|---|
| 6127 | { | 
|---|
| 6128 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); | 
|---|
| 6129 |  | 
|---|
| 6130 | /* | 
|---|
| 6131 | * check if PMC3 is used | 
|---|
| 6132 | * and, if so, force a schedule-out for all event types in all contexts | 
|---|
| 6133 | */ | 
|---|
| 6134 | if (test_bit(3, cpuc->active_mask)) | 
|---|
| 6135 | perf_pmu_resched(x86_get_pmu(smp_processor_id())); | 
|---|
| 6136 | } | 
|---|
| 6137 |  | 
|---|
| 6138 | static ssize_t show_sysctl_tfa(struct device *cdev, | 
|---|
| 6139 | struct device_attribute *attr, | 
|---|
| 6140 | char *buf) | 
|---|
| 6141 | { | 
|---|
| 6142 | return snprintf(buf, 40, "%d\n", allow_tsx_force_abort); | 
|---|
| 6143 | } | 
|---|
| 6144 |  | 
|---|
| 6145 | static ssize_t set_sysctl_tfa(struct device *cdev, | 
|---|
| 6146 | struct device_attribute *attr, | 
|---|
| 6147 | const char *buf, size_t count) | 
|---|
| 6148 | { | 
|---|
| 6149 | bool val; | 
|---|
| 6150 | ssize_t ret; | 
|---|
| 6151 |  | 
|---|
| 6152 | ret = kstrtobool(buf, &val); | 
|---|
| 6153 | if (ret) | 
|---|
| 6154 | return ret; | 
|---|
| 6155 |  | 
|---|
| 6156 | /* no change */ | 
|---|
| 6157 | if (val == allow_tsx_force_abort) | 
|---|
| 6158 | return count; | 
|---|
| 6159 |  | 
|---|
| 6160 | allow_tsx_force_abort = val; | 
|---|
| 6161 |  | 
|---|
| 6162 | cpus_read_lock(); | 
|---|
| 6163 | on_each_cpu(update_tfa_sched, NULL, 1); | 
|---|
| 6164 | cpus_read_unlock(); | 
|---|
| 6165 |  | 
|---|
| 6166 | return count; | 
|---|
| 6167 | } | 
|---|
| 6168 |  | 
|---|
| 6169 |  | 
|---|
| 6170 | static DEVICE_ATTR_RW(freeze_on_smi); | 
|---|
| 6171 |  | 
|---|
| 6172 | static ssize_t branches_show(struct device *cdev, | 
|---|
| 6173 | struct device_attribute *attr, | 
|---|
| 6174 | char *buf) | 
|---|
| 6175 | { | 
|---|
| 6176 | return snprintf(buf, PAGE_SIZE, "%d\n", x86_pmu.lbr_nr); | 
|---|
| 6177 | } | 
|---|
| 6178 |  | 
|---|
| 6179 | static DEVICE_ATTR_RO(branches); | 
|---|
| 6180 |  | 
|---|
| 6181 | static ssize_t branch_counter_nr_show(struct device *cdev, | 
|---|
| 6182 | struct device_attribute *attr, | 
|---|
| 6183 | char *buf) | 
|---|
| 6184 | { | 
|---|
| 6185 | return snprintf(buf, PAGE_SIZE, "%d\n", fls(x86_pmu.lbr_counters)); | 
|---|
| 6186 | } | 
|---|
| 6187 |  | 
|---|
| 6188 | static DEVICE_ATTR_RO(branch_counter_nr); | 
|---|
| 6189 |  | 
|---|
| 6190 | static ssize_t branch_counter_width_show(struct device *cdev, | 
|---|
| 6191 | struct device_attribute *attr, | 
|---|
| 6192 | char *buf) | 
|---|
| 6193 | { | 
|---|
| 6194 | return snprintf(buf, PAGE_SIZE, "%d\n", LBR_INFO_BR_CNTR_BITS); | 
|---|
| 6195 | } | 
|---|
| 6196 |  | 
|---|
| 6197 | static DEVICE_ATTR_RO(branch_counter_width); | 
|---|
| 6198 |  | 
|---|
| 6199 | static struct attribute *lbr_attrs[] = { | 
|---|
| 6200 | &dev_attr_branches.attr, | 
|---|
| 6201 | &dev_attr_branch_counter_nr.attr, | 
|---|
| 6202 | &dev_attr_branch_counter_width.attr, | 
|---|
| 6203 | NULL | 
|---|
| 6204 | }; | 
|---|
| 6205 |  | 
|---|
| 6206 | static umode_t | 
|---|
| 6207 | lbr_is_visible(struct kobject *kobj, struct attribute *attr, int i) | 
|---|
| 6208 | { | 
|---|
| 6209 | /* branches */ | 
|---|
| 6210 | if (i == 0) | 
|---|
| 6211 | return x86_pmu.lbr_nr ? attr->mode : 0; | 
|---|
| 6212 |  | 
|---|
| 6213 | return (x86_pmu.flags & PMU_FL_BR_CNTR) ? attr->mode : 0; | 
|---|
| 6214 | } | 
|---|
| 6215 |  | 
|---|
| 6216 | static char pmu_name_str[30]; | 
|---|
| 6217 |  | 
|---|
| 6218 | static DEVICE_STRING_ATTR_RO(pmu_name, 0444, pmu_name_str); | 
|---|
| 6219 |  | 
|---|
| 6220 | static struct attribute *intel_pmu_caps_attrs[] = { | 
|---|
| 6221 | &dev_attr_pmu_name.attr.attr, | 
|---|
| 6222 | NULL | 
|---|
| 6223 | }; | 
|---|
| 6224 |  | 
|---|
| 6225 | static DEVICE_ATTR(allow_tsx_force_abort, 0644, | 
|---|
| 6226 | show_sysctl_tfa, | 
|---|
| 6227 | set_sysctl_tfa); | 
|---|
| 6228 |  | 
|---|
| 6229 | static struct attribute *intel_pmu_attrs[] = { | 
|---|
| 6230 | &dev_attr_freeze_on_smi.attr, | 
|---|
| 6231 | &dev_attr_allow_tsx_force_abort.attr, | 
|---|
| 6232 | NULL, | 
|---|
| 6233 | }; | 
|---|
| 6234 |  | 
|---|
| 6235 | static umode_t | 
|---|
| 6236 | default_is_visible(struct kobject *kobj, struct attribute *attr, int i) | 
|---|
| 6237 | { | 
|---|
| 6238 | if (attr == &dev_attr_allow_tsx_force_abort.attr) | 
|---|
| 6239 | return x86_pmu.flags & PMU_FL_TFA ? attr->mode : 0; | 
|---|
| 6240 |  | 
|---|
| 6241 | return attr->mode; | 
|---|
| 6242 | } | 
|---|
| 6243 |  | 
|---|
| 6244 | static umode_t | 
|---|
| 6245 | tsx_is_visible(struct kobject *kobj, struct attribute *attr, int i) | 
|---|
| 6246 | { | 
|---|
| 6247 | return boot_cpu_has(X86_FEATURE_RTM) ? attr->mode : 0; | 
|---|
| 6248 | } | 
|---|
| 6249 |  | 
|---|
| 6250 | static umode_t | 
|---|
| 6251 | pebs_is_visible(struct kobject *kobj, struct attribute *attr, int i) | 
|---|
| 6252 | { | 
|---|
| 6253 | return x86_pmu.ds_pebs ? attr->mode : 0; | 
|---|
| 6254 | } | 
|---|
| 6255 |  | 
|---|
| 6256 | static umode_t | 
|---|
| 6257 | mem_is_visible(struct kobject *kobj, struct attribute *attr, int i) | 
|---|
| 6258 | { | 
|---|
| 6259 | if (attr == &event_attr_mem_ld_aux.attr.attr) | 
|---|
| 6260 | return x86_pmu.flags & PMU_FL_MEM_LOADS_AUX ? attr->mode : 0; | 
|---|
| 6261 |  | 
|---|
| 6262 | return pebs_is_visible(kobj, attr, i); | 
|---|
| 6263 | } | 
|---|
| 6264 |  | 
|---|
| 6265 | static umode_t | 
|---|
| 6266 | exra_is_visible(struct kobject *kobj, struct attribute *attr, int i) | 
|---|
| 6267 | { | 
|---|
| 6268 | return x86_pmu.version >= 2 ? attr->mode : 0; | 
|---|
| 6269 | } | 
|---|
| 6270 |  | 
|---|
| 6271 | static umode_t | 
|---|
| 6272 | td_is_visible(struct kobject *kobj, struct attribute *attr, int i) | 
|---|
| 6273 | { | 
|---|
| 6274 | /* | 
|---|
| 6275 | * Hide the perf metrics topdown events | 
|---|
| 6276 | * if the feature is not enumerated. | 
|---|
| 6277 | */ | 
|---|
| 6278 | if (x86_pmu.num_topdown_events) | 
|---|
| 6279 | return x86_pmu.intel_cap.perf_metrics ? attr->mode : 0; | 
|---|
| 6280 |  | 
|---|
| 6281 | return attr->mode; | 
|---|
| 6282 | } | 
|---|
| 6283 |  | 
|---|
| 6284 | PMU_FORMAT_ATTR(acr_mask, "config2:0-63"); | 
|---|
| 6285 |  | 
|---|
| 6286 | static struct attribute *format_acr_attrs[] = { | 
|---|
| 6287 | &format_attr_acr_mask.attr, | 
|---|
| 6288 | NULL | 
|---|
| 6289 | }; | 
|---|
| 6290 |  | 
|---|
| 6291 | static umode_t | 
|---|
| 6292 | acr_is_visible(struct kobject *kobj, struct attribute *attr, int i) | 
|---|
| 6293 | { | 
|---|
| 6294 | struct device *dev = kobj_to_dev(kobj); | 
|---|
| 6295 |  | 
|---|
| 6296 | return intel_pmu_has_acr(dev_get_drvdata(dev)) ? attr->mode : 0; |
|---|
| 6297 | } | 
|---|
| 6298 |  | 
|---|
| 6299 | static struct attribute_group group_events_td  = { | 
|---|
| 6300 | .name = "events", | 
|---|
| 6301 | .is_visible = td_is_visible, | 
|---|
| 6302 | }; | 
|---|
| 6303 |  | 
|---|
| 6304 | static struct attribute_group group_events_mem = { | 
|---|
| 6305 | .name       = "events", | 
|---|
| 6306 | .is_visible = mem_is_visible, | 
|---|
| 6307 | }; | 
|---|
| 6308 |  | 
|---|
| 6309 | static struct attribute_group group_events_tsx = { | 
|---|
| 6310 | .name       = "events", | 
|---|
| 6311 | .is_visible = tsx_is_visible, | 
|---|
| 6312 | }; | 
|---|
| 6313 |  | 
|---|
| 6314 | static struct attribute_group group_caps_gen = { | 
|---|
| 6315 | .name  = "caps", | 
|---|
| 6316 | .attrs = intel_pmu_caps_attrs, | 
|---|
| 6317 | }; | 
|---|
| 6318 |  | 
|---|
| 6319 | static struct attribute_group group_caps_lbr = { | 
|---|
| 6320 | .name       = "caps", | 
|---|
| 6321 | .attrs	    = lbr_attrs, | 
|---|
| 6322 | .is_visible = lbr_is_visible, | 
|---|
| 6323 | }; | 
|---|
| 6324 |  | 
|---|
| 6325 | static struct attribute_group group_format_extra = { |
|---|
| 6326 | .name       = "format", | 
|---|
| 6327 | .is_visible = exra_is_visible, | 
|---|
| 6328 | }; | 
|---|
| 6329 |  | 
|---|
| 6330 | static struct attribute_group group_format_extra_skl = { |
|---|
| 6331 | .name       = "format", | 
|---|
| 6332 | .is_visible = exra_is_visible, | 
|---|
| 6333 | }; | 
|---|
| 6334 |  | 
|---|
| 6335 | static struct attribute_group group_format_evtsel_ext = { | 
|---|
| 6336 | .name       = "format", | 
|---|
| 6337 | .attrs      = format_evtsel_ext_attrs, | 
|---|
| 6338 | .is_visible = evtsel_ext_is_visible, | 
|---|
| 6339 | }; | 
|---|
| 6340 |  | 
|---|
| 6341 | static struct attribute_group group_format_acr = { | 
|---|
| 6342 | .name       = "format", | 
|---|
| 6343 | .attrs      = format_acr_attrs, | 
|---|
| 6344 | .is_visible = acr_is_visible, | 
|---|
| 6345 | }; | 
|---|
| 6346 |  | 
|---|
| 6347 | static struct attribute_group group_default = { | 
|---|
| 6348 | .attrs      = intel_pmu_attrs, | 
|---|
| 6349 | .is_visible = default_is_visible, | 
|---|
| 6350 | }; | 
|---|
| 6351 |  | 
|---|
| 6352 | static const struct attribute_group *attr_update[] = { | 
|---|
| 6353 | &group_events_td, | 
|---|
| 6354 | &group_events_mem, | 
|---|
| 6355 | &group_events_tsx, | 
|---|
| 6356 | &group_caps_gen, | 
|---|
| 6357 | &group_caps_lbr, | 
|---|
| 6358 | &group_format_extra, | 
|---|
| 6359 | &group_format_extra_skl, | 
|---|
| 6360 | &group_format_evtsel_ext, | 
|---|
| 6361 | &group_format_acr, | 
|---|
| 6362 | &group_default, | 
|---|
| 6363 | NULL, | 
|---|
| 6364 | }; | 
|---|
| 6365 |  | 
|---|
| 6366 | EVENT_ATTR_STR_HYBRID(slots,                 slots_adl, "event=0x00,umask=0x4",                       hybrid_big); | 
|---|
| 6367 | EVENT_ATTR_STR_HYBRID(topdown-retiring,      td_retiring_adl, "event=0xc2,umask=0x0;event=0x00,umask=0x80", hybrid_big_small); | 
|---|
| 6368 | EVENT_ATTR_STR_HYBRID(topdown-bad-spec,      td_bad_spec_adl, "event=0x73,umask=0x0;event=0x00,umask=0x81", hybrid_big_small); | 
|---|
| 6369 | EVENT_ATTR_STR_HYBRID(topdown-fe-bound,      td_fe_bound_adl, "event=0x71,umask=0x0;event=0x00,umask=0x82", hybrid_big_small); | 
|---|
| 6370 | EVENT_ATTR_STR_HYBRID(topdown-be-bound,      td_be_bound_adl, "event=0x74,umask=0x0;event=0x00,umask=0x83", hybrid_big_small); | 
|---|
| 6371 | EVENT_ATTR_STR_HYBRID(topdown-heavy-ops,     td_heavy_ops_adl, "event=0x00,umask=0x84",                      hybrid_big); | 
|---|
| 6372 | EVENT_ATTR_STR_HYBRID(topdown-br-mispredict, td_br_mis_adl, "event=0x00,umask=0x85",                      hybrid_big); | 
|---|
| 6373 | EVENT_ATTR_STR_HYBRID(topdown-fetch-lat,     td_fetch_lat_adl, "event=0x00,umask=0x86",                      hybrid_big); | 
|---|
| 6374 | EVENT_ATTR_STR_HYBRID(topdown-mem-bound,     td_mem_bound_adl, "event=0x00,umask=0x87",                      hybrid_big); | 
|---|
| 6375 |  | 
|---|
| 6376 | static struct attribute *adl_hybrid_events_attrs[] = { | 
|---|
| 6377 | EVENT_PTR(slots_adl), | 
|---|
| 6378 | EVENT_PTR(td_retiring_adl), | 
|---|
| 6379 | EVENT_PTR(td_bad_spec_adl), | 
|---|
| 6380 | EVENT_PTR(td_fe_bound_adl), | 
|---|
| 6381 | EVENT_PTR(td_be_bound_adl), | 
|---|
| 6382 | EVENT_PTR(td_heavy_ops_adl), | 
|---|
| 6383 | EVENT_PTR(td_br_mis_adl), | 
|---|
| 6384 | EVENT_PTR(td_fetch_lat_adl), | 
|---|
| 6385 | EVENT_PTR(td_mem_bound_adl), | 
|---|
| 6386 | NULL, | 
|---|
| 6387 | }; | 
|---|
| 6388 |  | 
|---|
| 6389 | EVENT_ATTR_STR_HYBRID(topdown-retiring,      td_retiring_lnl, "event=0xc2,umask=0x02;event=0x00,umask=0x80", hybrid_big_small); | 
|---|
| 6390 | EVENT_ATTR_STR_HYBRID(topdown-fe-bound,      td_fe_bound_lnl, "event=0x9c,umask=0x01;event=0x00,umask=0x82", hybrid_big_small); | 
|---|
| 6391 | EVENT_ATTR_STR_HYBRID(topdown-be-bound,      td_be_bound_lnl, "event=0xa4,umask=0x02;event=0x00,umask=0x83", hybrid_big_small); | 
|---|
| 6392 |  | 
|---|
| 6393 | static struct attribute *lnl_hybrid_events_attrs[] = { | 
|---|
| 6394 | EVENT_PTR(slots_adl), | 
|---|
| 6395 | EVENT_PTR(td_retiring_lnl), | 
|---|
| 6396 | EVENT_PTR(td_bad_spec_adl), | 
|---|
| 6397 | EVENT_PTR(td_fe_bound_lnl), | 
|---|
| 6398 | EVENT_PTR(td_be_bound_lnl), | 
|---|
| 6399 | EVENT_PTR(td_heavy_ops_adl), | 
|---|
| 6400 | EVENT_PTR(td_br_mis_adl), | 
|---|
| 6401 | EVENT_PTR(td_fetch_lat_adl), | 
|---|
| 6402 | EVENT_PTR(td_mem_bound_adl), | 
|---|
| 6403 | NULL | 
|---|
| 6404 | }; | 
|---|
| 6405 |  | 
|---|
| 6406 | /* The event string must be in PMU IDX order. */ | 
|---|
| 6407 | EVENT_ATTR_STR_HYBRID(topdown-retiring, | 
|---|
| 6408 | td_retiring_arl_h, | 
|---|
| 6409 | "event=0xc2,umask=0x02;event=0x00,umask=0x80;event=0xc2,umask=0x0", | 
|---|
| 6410 | hybrid_big_small_tiny); | 
|---|
| 6411 | EVENT_ATTR_STR_HYBRID(topdown-bad-spec, | 
|---|
| 6412 | td_bad_spec_arl_h, | 
|---|
| 6413 | "event=0x73,umask=0x0;event=0x00,umask=0x81;event=0x73,umask=0x0", | 
|---|
| 6414 | hybrid_big_small_tiny); | 
|---|
| 6415 | EVENT_ATTR_STR_HYBRID(topdown-fe-bound, | 
|---|
| 6416 | td_fe_bound_arl_h, | 
|---|
| 6417 | "event=0x9c,umask=0x01;event=0x00,umask=0x82;event=0x71,umask=0x0", | 
|---|
| 6418 | hybrid_big_small_tiny); | 
|---|
| 6419 | EVENT_ATTR_STR_HYBRID(topdown-be-bound, | 
|---|
| 6420 | td_be_bound_arl_h, | 
|---|
| 6421 | "event=0xa4,umask=0x02;event=0x00,umask=0x83;event=0x74,umask=0x0", | 
|---|
| 6422 | hybrid_big_small_tiny); | 
|---|
| 6423 |  | 
|---|
| 6424 | static struct attribute *arl_h_hybrid_events_attrs[] = { | 
|---|
| 6425 | EVENT_PTR(slots_adl), | 
|---|
| 6426 | EVENT_PTR(td_retiring_arl_h), | 
|---|
| 6427 | EVENT_PTR(td_bad_spec_arl_h), | 
|---|
| 6428 | EVENT_PTR(td_fe_bound_arl_h), | 
|---|
| 6429 | EVENT_PTR(td_be_bound_arl_h), | 
|---|
| 6430 | EVENT_PTR(td_heavy_ops_adl), | 
|---|
| 6431 | EVENT_PTR(td_br_mis_adl), | 
|---|
| 6432 | EVENT_PTR(td_fetch_lat_adl), | 
|---|
| 6433 | EVENT_PTR(td_mem_bound_adl), | 
|---|
| 6434 | NULL, | 
|---|
| 6435 | }; | 
|---|
| 6436 |  | 
|---|
| 6437 | /* Must be in IDX order */ | 
|---|
| 6438 | EVENT_ATTR_STR_HYBRID(mem-loads,     mem_ld_adl, "event=0xd0,umask=0x5,ldlat=3;event=0xcd,umask=0x1,ldlat=3", hybrid_big_small); | 
|---|
| 6439 | EVENT_ATTR_STR_HYBRID(mem-stores,    mem_st_adl, "event=0xd0,umask=0x6;event=0xcd,umask=0x2",                 hybrid_big_small); | 
|---|
| 6440 | EVENT_ATTR_STR_HYBRID(mem-loads-aux, mem_ld_aux_adl, "event=0x03,umask=0x82",                                     hybrid_big); | 
|---|
| 6441 |  | 
|---|
| 6442 | static struct attribute *adl_hybrid_mem_attrs[] = { | 
|---|
| 6443 | EVENT_PTR(mem_ld_adl), | 
|---|
| 6444 | EVENT_PTR(mem_st_adl), | 
|---|
| 6445 | EVENT_PTR(mem_ld_aux_adl), | 
|---|
| 6446 | NULL, | 
|---|
| 6447 | }; | 
|---|
| 6448 |  | 
|---|
| 6449 | static struct attribute *mtl_hybrid_mem_attrs[] = { | 
|---|
| 6450 | EVENT_PTR(mem_ld_adl), | 
|---|
| 6451 | EVENT_PTR(mem_st_adl), | 
|---|
| 6452 | NULL | 
|---|
| 6453 | }; | 
|---|
| 6454 |  | 
|---|
| 6455 | EVENT_ATTR_STR_HYBRID(mem-loads, | 
|---|
| 6456 | mem_ld_arl_h, | 
|---|
| 6457 | "event=0xd0,umask=0x5,ldlat=3;event=0xcd,umask=0x1,ldlat=3;event=0xd0,umask=0x5,ldlat=3", | 
|---|
| 6458 | hybrid_big_small_tiny); | 
|---|
| 6459 | EVENT_ATTR_STR_HYBRID(mem-stores, | 
|---|
| 6460 | mem_st_arl_h, | 
|---|
| 6461 | "event=0xd0,umask=0x6;event=0xcd,umask=0x2;event=0xd0,umask=0x6", | 
|---|
| 6462 | hybrid_big_small_tiny); | 
|---|
| 6463 |  | 
|---|
| 6464 | static struct attribute *arl_h_hybrid_mem_attrs[] = { | 
|---|
| 6465 | EVENT_PTR(mem_ld_arl_h), | 
|---|
| 6466 | EVENT_PTR(mem_st_arl_h), | 
|---|
| 6467 | NULL, | 
|---|
| 6468 | }; | 
|---|
| 6469 |  | 
|---|
| 6470 | EVENT_ATTR_STR_HYBRID(tx-start,          tx_start_adl, "event=0xc9,umask=0x1",          hybrid_big); | 
|---|
| 6471 | EVENT_ATTR_STR_HYBRID(tx-commit,         tx_commit_adl, "event=0xc9,umask=0x2",          hybrid_big); | 
|---|
| 6472 | EVENT_ATTR_STR_HYBRID(tx-abort,          tx_abort_adl, "event=0xc9,umask=0x4",          hybrid_big); | 
|---|
| 6473 | EVENT_ATTR_STR_HYBRID(tx-conflict,       tx_conflict_adl, "event=0x54,umask=0x1",          hybrid_big); | 
|---|
| 6474 | EVENT_ATTR_STR_HYBRID(cycles-t,          cycles_t_adl, "event=0x3c,in_tx=1",            hybrid_big); | 
|---|
| 6475 | EVENT_ATTR_STR_HYBRID(cycles-ct,         cycles_ct_adl, "event=0x3c,in_tx=1,in_tx_cp=1", hybrid_big); | 
|---|
| 6476 | EVENT_ATTR_STR_HYBRID(tx-capacity-read,  tx_capacity_read_adl, "event=0x54,umask=0x80",         hybrid_big); | 
|---|
| 6477 | EVENT_ATTR_STR_HYBRID(tx-capacity-write, tx_capacity_write_adl, "event=0x54,umask=0x2",          hybrid_big); | 
|---|
| 6478 |  | 
|---|
| 6479 | static struct attribute *adl_hybrid_tsx_attrs[] = { | 
|---|
| 6480 | EVENT_PTR(tx_start_adl), | 
|---|
| 6481 | EVENT_PTR(tx_abort_adl), | 
|---|
| 6482 | EVENT_PTR(tx_commit_adl), | 
|---|
| 6483 | EVENT_PTR(tx_capacity_read_adl), | 
|---|
| 6484 | EVENT_PTR(tx_capacity_write_adl), | 
|---|
| 6485 | EVENT_PTR(tx_conflict_adl), | 
|---|
| 6486 | EVENT_PTR(cycles_t_adl), | 
|---|
| 6487 | EVENT_PTR(cycles_ct_adl), | 
|---|
| 6488 | NULL, | 
|---|
| 6489 | }; | 
|---|
| 6490 |  | 
|---|
| 6491 | FORMAT_ATTR_HYBRID(in_tx,       hybrid_big); | 
|---|
| 6492 | FORMAT_ATTR_HYBRID(in_tx_cp,    hybrid_big); | 
|---|
| 6493 | FORMAT_ATTR_HYBRID(offcore_rsp, hybrid_big_small_tiny); | 
|---|
| 6494 | FORMAT_ATTR_HYBRID(ldlat,       hybrid_big_small_tiny); | 
|---|
| 6495 | FORMAT_ATTR_HYBRID(frontend,    hybrid_big); | 
|---|
| 6496 |  | 
|---|
| 6497 | #define ADL_HYBRID_RTM_FORMAT_ATTR	\ | 
|---|
| 6498 | FORMAT_HYBRID_PTR(in_tx),	\ | 
|---|
| 6499 | FORMAT_HYBRID_PTR(in_tx_cp) | 
|---|
| 6500 |  | 
|---|
| 6501 | #define ADL_HYBRID_FORMAT_ATTR		\ | 
|---|
| 6502 | FORMAT_HYBRID_PTR(offcore_rsp),	\ | 
|---|
| 6503 | FORMAT_HYBRID_PTR(ldlat),	\ | 
|---|
| 6504 | FORMAT_HYBRID_PTR(frontend) | 
|---|
| 6505 |  | 
|---|
| 6506 | static struct attribute *adl_hybrid_extra_attr_rtm[] = { |
|---|
| 6507 | ADL_HYBRID_RTM_FORMAT_ATTR, | 
|---|
| 6508 | ADL_HYBRID_FORMAT_ATTR, | 
|---|
| 6509 | NULL | 
|---|
| 6510 | }; | 
|---|
| 6511 |  | 
|---|
| 6512 | static struct attribute *adl_hybrid_extra_attr[] = { |
|---|
| 6513 | ADL_HYBRID_FORMAT_ATTR, | 
|---|
| 6514 | NULL | 
|---|
| 6515 | }; | 
|---|
| 6516 |  | 
|---|
| 6517 | FORMAT_ATTR_HYBRID(snoop_rsp,	hybrid_small_tiny); | 
|---|
| 6518 |  | 
|---|
| 6519 | static struct attribute *mtl_hybrid_extra_attr_rtm[] = { |
|---|
| 6520 | ADL_HYBRID_RTM_FORMAT_ATTR, | 
|---|
| 6521 | ADL_HYBRID_FORMAT_ATTR, | 
|---|
| 6522 | FORMAT_HYBRID_PTR(snoop_rsp), | 
|---|
| 6523 | NULL | 
|---|
| 6524 | }; | 
|---|
| 6525 |  | 
|---|
| 6526 | static struct attribute *mtl_hybrid_extra_attr[] = { |
|---|
| 6527 | ADL_HYBRID_FORMAT_ATTR, | 
|---|
| 6528 | FORMAT_HYBRID_PTR(snoop_rsp), | 
|---|
| 6529 | NULL | 
|---|
| 6530 | }; | 
|---|
| 6531 |  | 
|---|
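| | /* Match a hybrid attribute's PMU-type mask against the PMU that owns this sysfs device. */ |
|---|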
| 6532 | static bool is_attr_for_this_pmu(struct kobject *kobj, struct attribute *attr) | 
|---|
| 6533 | { | 
|---|
| 6534 | struct device *dev = kobj_to_dev(kobj); | 
|---|
| 6535 | struct x86_hybrid_pmu *pmu = | 
|---|
| 6536 | container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu); | 
|---|
| 6537 | struct perf_pmu_events_hybrid_attr *pmu_attr = | 
|---|
| 6538 | container_of(attr, struct perf_pmu_events_hybrid_attr, attr.attr); | 
|---|
| 6539 |  | 
|---|
| 6540 | return pmu->pmu_type & pmu_attr->pmu_type; | 
|---|
| 6541 | } | 
|---|
| 6542 |  | 
|---|
| 6543 | static umode_t hybrid_events_is_visible(struct kobject *kobj, | 
|---|
| 6544 | struct attribute *attr, int i) | 
|---|
| 6545 | { | 
|---|
| 6546 | return is_attr_for_this_pmu(kobj, attr) ? attr->mode : 0; | 
|---|
| 6547 | } | 
|---|
| 6548 |  | 
|---|
| 6549 | static inline int hybrid_find_supported_cpu(struct x86_hybrid_pmu *pmu) | 
|---|
| 6550 | { | 
|---|
| 6551 | int cpu = cpumask_first(&pmu->supported_cpus); |
|---|
| 6552 |  | 
|---|
| 6553 | return (cpu >= nr_cpu_ids) ? -1 : cpu; | 
|---|
| 6554 | } | 
|---|
| 6555 |  | 
|---|
| 6556 | static umode_t hybrid_tsx_is_visible(struct kobject *kobj, | 
|---|
| 6557 | struct attribute *attr, int i) | 
|---|
| 6558 | { | 
|---|
| 6559 | struct device *dev = kobj_to_dev(kobj); | 
|---|
| 6560 | struct x86_hybrid_pmu *pmu = | 
|---|
| 6561 | container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu); | 
|---|
| 6562 | int cpu = hybrid_find_supported_cpu(pmu); | 
|---|
| 6563 |  | 
|---|
| 6564 | return (cpu >= 0) && is_attr_for_this_pmu(kobj, attr) && cpu_has(&cpu_data(cpu), X86_FEATURE_RTM) ? attr->mode : 0; | 
|---|
| 6565 | } | 
|---|
| 6566 |  | 
|---|
| 6567 | static umode_t hybrid_format_is_visible(struct kobject *kobj, | 
|---|
| 6568 | struct attribute *attr, int i) | 
|---|
| 6569 | { | 
|---|
| 6570 | struct device *dev = kobj_to_dev(kobj); | 
|---|
| 6571 | struct x86_hybrid_pmu *pmu = | 
|---|
| 6572 | container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu); | 
|---|
| 6573 | struct perf_pmu_format_hybrid_attr *pmu_attr = | 
|---|
| 6574 | container_of(attr, struct perf_pmu_format_hybrid_attr, attr.attr); | 
|---|
| 6575 | int cpu = hybrid_find_supported_cpu(pmu); | 
|---|
| 6576 |  | 
|---|
| 6577 | return (cpu >= 0) && (pmu->pmu_type & pmu_attr->pmu_type) ? attr->mode : 0; | 
|---|
| 6578 | } | 
|---|
| 6579 |  | 
|---|
| 6580 | static umode_t hybrid_td_is_visible(struct kobject *kobj, | 
|---|
| 6581 | struct attribute *attr, int i) | 
|---|
| 6582 | { | 
|---|
| 6583 | struct device *dev = kobj_to_dev(kobj); | 
|---|
| 6584 | struct x86_hybrid_pmu *pmu = | 
|---|
| 6585 | container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu); | 
|---|
| 6586 |  | 
|---|
| 6587 | if (!is_attr_for_this_pmu(kobj, attr)) | 
|---|
| 6588 | return 0; | 
|---|
| 6589 |  | 
|---|
| 6590 |  | 
|---|
| 6591 | /* Only the big core supports perf metrics */ | 
|---|
| 6592 | if (pmu->pmu_type == hybrid_big) | 
|---|
| 6593 | return pmu->intel_cap.perf_metrics ? attr->mode : 0; | 
|---|
| 6594 |  | 
|---|
| 6595 | return attr->mode; | 
|---|
| 6596 | } | 
|---|
| 6597 |  | 
|---|
| 6598 | static struct attribute_group hybrid_group_events_td  = { | 
|---|
| 6599 | .name		= "events", | 
|---|
| 6600 | .is_visible	= hybrid_td_is_visible, | 
|---|
| 6601 | }; | 
|---|
| 6602 |  | 
|---|
| 6603 | static struct attribute_group hybrid_group_events_mem = { | 
|---|
| 6604 | .name		= "events", | 
|---|
| 6605 | .is_visible	= hybrid_events_is_visible, | 
|---|
| 6606 | }; | 
|---|
| 6607 |  | 
|---|
| 6608 | static struct attribute_group hybrid_group_events_tsx = { | 
|---|
| 6609 | .name		= "events", | 
|---|
| 6610 | .is_visible	= hybrid_tsx_is_visible, | 
|---|
| 6611 | }; | 
|---|
| 6612 |  | 
|---|
| 6613 | static struct attribute_group hybrid_group_format_extra = { |
|---|
| 6614 | .name		= "format", | 
|---|
| 6615 | .is_visible	= hybrid_format_is_visible, | 
|---|
| 6616 | }; | 
|---|
| 6617 |  | 
|---|
| 6618 | static ssize_t intel_hybrid_get_attr_cpus(struct device *dev, | 
|---|
| 6619 | struct device_attribute *attr, | 
|---|
| 6620 | char *buf) | 
|---|
| 6621 | { | 
|---|
| 6622 | struct x86_hybrid_pmu *pmu = | 
|---|
| 6623 | container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu); | 
|---|
| 6624 |  | 
|---|
| 6625 | return cpumap_print_to_pagebuf(true, buf, &pmu->supported_cpus); |
|---|
| 6626 | } | 
|---|
| 6627 |  | 
|---|
| 6628 | static DEVICE_ATTR(cpus, S_IRUGO, intel_hybrid_get_attr_cpus, NULL); | 
|---|
| 6629 | static struct attribute *intel_hybrid_cpus_attrs[] = { | 
|---|
| 6630 | &dev_attr_cpus.attr, | 
|---|
| 6631 | NULL, | 
|---|
| 6632 | }; | 
|---|
| 6633 |  | 
|---|
| 6634 | static struct attribute_group hybrid_group_cpus = { | 
|---|
| 6635 | .attrs		= intel_hybrid_cpus_attrs, | 
|---|
| 6636 | }; | 
|---|
| 6637 |  | 
|---|
| 6638 | static const struct attribute_group *hybrid_attr_update[] = { | 
|---|
| 6639 | &hybrid_group_events_td, | 
|---|
| 6640 | &hybrid_group_events_mem, | 
|---|
| 6641 | &hybrid_group_events_tsx, | 
|---|
| 6642 | &group_caps_gen, | 
|---|
| 6643 | &group_caps_lbr, | 
|---|
| 6644 | &hybrid_group_format_extra, | 
|---|
| 6645 | &group_format_evtsel_ext, | 
|---|
| 6646 | &group_format_acr, | 
|---|
| 6647 | &group_default, | 
|---|
| 6648 | &hybrid_group_cpus, | 
|---|
| 6649 | NULL, | 
|---|
| 6650 | }; | 
|---|
| 6651 |  | 
|---|
| 6652 | static struct attribute *empty_attrs; | 
|---|
| 6653 |  | 
|---|
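| | /* Clamp each constraint's counter mask to the counters enumerated by CPUID and recompute its weight. */ |
|---|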
| 6654 | static void intel_pmu_check_event_constraints(struct event_constraint *event_constraints, | 
|---|
| 6655 | u64 cntr_mask, | 
|---|
| 6656 | u64 fixed_cntr_mask, | 
|---|
| 6657 | u64 intel_ctrl) | 
|---|
| 6658 | { | 
|---|
| 6659 | struct event_constraint *c; | 
|---|
| 6660 |  | 
|---|
| 6661 | if (!event_constraints) | 
|---|
| 6662 | return; | 
|---|
| 6663 |  | 
|---|
| 6664 | /* | 
|---|
| 6665 | * event on fixed counter2 (REF_CYCLES) only works on this | 
|---|
| 6666 | * counter, so do not extend mask to generic counters | 
|---|
| 6667 | */ | 
|---|
| 6668 | for_each_event_constraint(c, event_constraints) { | 
|---|
| 6669 | /* | 
|---|
| 6670 | * Don't extend the topdown slots and metrics | 
|---|
| 6671 | * events to the generic counters. | 
|---|
| 6672 | */ | 
|---|
| 6673 | if (c->idxmsk64 & INTEL_PMC_MSK_TOPDOWN) { | 
|---|
| 6674 | /* | 
|---|
| 6675 | * Disable topdown slots and metrics events, | 
|---|
| 6676 | * if slots event is not in CPUID. | 
|---|
| 6677 | */ | 
|---|
| 6678 | if (!(INTEL_PMC_MSK_FIXED_SLOTS & intel_ctrl)) | 
|---|
| 6679 | c->idxmsk64 = 0; | 
|---|
| 6680 | c->weight = hweight64(c->idxmsk64); | 
|---|
| 6681 | continue; | 
|---|
| 6682 | } | 
|---|
| 6683 |  | 
|---|
| 6684 | if (c->cmask == FIXED_EVENT_FLAGS) { | 
|---|
| 6685 | /* Disabled fixed counters which are not in CPUID */ | 
|---|
| 6686 | c->idxmsk64 &= intel_ctrl; | 
|---|
| 6687 |  | 
|---|
| 6688 | /* | 
|---|
| 6689 | * Don't extend the pseudo-encoding to the | 
|---|
| 6690 | * generic counters | 
|---|
| 6691 | */ | 
|---|
| 6692 | if (!use_fixed_pseudo_encoding(c->code)) |
|---|
| 6693 | c->idxmsk64 |= cntr_mask; | 
|---|
| 6694 | } | 
|---|
| 6695 | c->idxmsk64 &= cntr_mask | (fixed_cntr_mask << INTEL_PMC_IDX_FIXED); | 
|---|
| 6696 | c->weight = hweight64(c->idxmsk64); | 
|---|
| 6697 | } | 
|---|
| 6698 | } | 
|---|
| 6699 |  | 
|---|
| 6700 | static void intel_pmu_check_extra_regs(struct extra_reg *extra_regs) |
|---|
| 6701 | { | 
|---|
| 6702 | struct extra_reg *er; | 
|---|
| 6703 |  | 
|---|
| 6704 | /* | 
|---|
| 6705 | * Access extra MSR may cause #GP under certain circumstances. | 
|---|
| 6706 | * E.g. KVM doesn't support offcore event | 
|---|
| 6707 | * Check all extra_regs here. | 
|---|
| 6708 | */ | 
|---|
| 6709 | if (!extra_regs) | 
|---|
| 6710 | return; | 
|---|
| 6711 |  | 
|---|
| 6712 | for (er = extra_regs; er->msr; er++) { | 
|---|
| 6713 | er->extra_msr_access = check_msr(er->msr, 0x11UL); |
|---|
| 6714 | /* Disable LBR select mapping */ | 
|---|
| 6715 | if ((er->idx == EXTRA_REG_LBR) && !er->extra_msr_access) | 
|---|
| 6716 | x86_pmu.lbr_sel_map = NULL; | 
|---|
| 6717 | } | 
|---|
| 6718 | } | 
|---|
| 6719 |  | 
|---|
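| | /* PerfMon v6: event-select and counter MSRs are laid out at a fixed stride of MSR_IA32_PMC_V6_STEP per counter index. */ |
|---|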
| 6720 | static inline int intel_pmu_v6_addr_offset(int index, bool eventsel) | 
|---|
| 6721 | { | 
|---|
| 6722 | return MSR_IA32_PMC_V6_STEP * index; | 
|---|
| 6723 | } | 
|---|
| 6724 |  | 
|---|
| 6725 | static const struct { enum hybrid_pmu_type id; char *name; } intel_hybrid_pmu_type_map[] __initconst = { | 
|---|
| 6726 | { hybrid_small, "cpu_atom"}, | 
|---|
| 6727 | { hybrid_big, "cpu_core"}, | 
|---|
| 6728 | { hybrid_tiny, "cpu_lowpower"}, | 
|---|
| 6729 | }; | 
|---|
| 6730 |  | 
|---|
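| | /* Allocate one struct x86_hybrid_pmu per type bit set in @pmus and seed it with the boot PMU's counter masks and capabilities. */ |
|---|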
| 6731 | static __always_inline int intel_pmu_init_hybrid(enum hybrid_pmu_type pmus) | 
|---|
| 6732 | { | 
|---|
| 6733 | unsigned long pmus_mask = pmus; | 
|---|
| 6734 | struct x86_hybrid_pmu *pmu; | 
|---|
| 6735 | int idx = 0, bit; | 
|---|
| 6736 |  | 
|---|
| 6737 | x86_pmu.num_hybrid_pmus = hweight_long(pmus_mask); |
|---|
| 6738 | x86_pmu.hybrid_pmu = kcalloc(x86_pmu.num_hybrid_pmus, | 
|---|
| 6739 | sizeof(struct x86_hybrid_pmu), | 
|---|
| 6740 | GFP_KERNEL); | 
|---|
| 6741 | if (!x86_pmu.hybrid_pmu) | 
|---|
| 6742 | return -ENOMEM; | 
|---|
| 6743 |  | 
|---|
| 6744 | static_branch_enable(&perf_is_hybrid); | 
|---|
| 6745 | x86_pmu.filter = intel_pmu_filter; | 
|---|
| 6746 |  | 
|---|
| 6747 | for_each_set_bit(bit, &pmus_mask, ARRAY_SIZE(intel_hybrid_pmu_type_map)) { | 
|---|
| 6748 | pmu = &x86_pmu.hybrid_pmu[idx++]; | 
|---|
| 6749 | pmu->pmu_type = intel_hybrid_pmu_type_map[bit].id; | 
|---|
| 6750 | pmu->name = intel_hybrid_pmu_type_map[bit].name; | 
|---|
| 6751 |  | 
|---|
| 6752 | pmu->cntr_mask64 = x86_pmu.cntr_mask64; | 
|---|
| 6753 | pmu->fixed_cntr_mask64 = x86_pmu.fixed_cntr_mask64; | 
|---|
| 6754 | pmu->pebs_events_mask = intel_pmu_pebs_mask(pmu->cntr_mask64); |
|---|
| 6755 | pmu->config_mask = X86_RAW_EVENT_MASK; | 
|---|
| 6756 | pmu->unconstrained = (struct event_constraint) | 
|---|
| 6757 | __EVENT_CONSTRAINT(0, pmu->cntr_mask64, | 
|---|
| 6758 | 0, x86_pmu_num_counters(&pmu->pmu), 0, 0); | 
|---|
| 6759 |  | 
|---|
| 6760 | pmu->intel_cap.capabilities = x86_pmu.intel_cap.capabilities; | 
|---|
| 6761 | if (pmu->pmu_type & hybrid_small_tiny) { | 
|---|
| 6762 | pmu->intel_cap.perf_metrics = 0; | 
|---|
| 6763 | pmu->mid_ack = true; | 
|---|
| 6764 | } else if (pmu->pmu_type & hybrid_big) { | 
|---|
| 6765 | pmu->intel_cap.perf_metrics = 1; | 
|---|
| 6766 | pmu->late_ack = true; | 
|---|
| 6767 | } | 
|---|
| 6768 | } | 
|---|
| 6769 |  | 
|---|
| 6770 | return 0; | 
|---|
| 6771 | } | 
|---|
| 6772 |  | 
|---|
| 6773 | static __always_inline void intel_pmu_ref_cycles_ext(void) | 
|---|
| 6774 | { | 
|---|
| 6775 | if (!(x86_pmu.events_maskl & (INTEL_PMC_MSK_FIXED_REF_CYCLES >> INTEL_PMC_IDX_FIXED))) | 
|---|
| 6776 | intel_perfmon_event_map[PERF_COUNT_HW_REF_CPU_CYCLES] = 0x013c; | 
|---|
| 6777 | } | 
|---|
| 6778 |  | 
|---|
| 6779 | static __always_inline void intel_pmu_init_glc(struct pmu *pmu) | 
|---|
| 6780 | { | 
|---|
| 6781 | x86_pmu.late_ack = true; | 
|---|
| 6782 | x86_pmu.limit_period = glc_limit_period; | 
|---|
| 6783 | x86_pmu.pebs_aliases = NULL; | 
|---|
| 6784 | x86_pmu.pebs_prec_dist = true; | 
|---|
| 6785 | x86_pmu.pebs_block = true; | 
|---|
| 6786 | x86_pmu.flags |= PMU_FL_HAS_RSP_1; | 
|---|
| 6787 | x86_pmu.flags |= PMU_FL_NO_HT_SHARING; | 
|---|
| 6788 | x86_pmu.flags |= PMU_FL_INSTR_LATENCY; | 
|---|
| 6789 | x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xc9, .umask=0x04); | 
|---|
| 6790 | x86_pmu.lbr_pt_coexist = true; | 
|---|
| 6791 | x86_pmu.num_topdown_events = 8; | 
|---|
| 6792 | static_call_update(intel_pmu_update_topdown_event, | 
|---|
| 6793 | &icl_update_topdown_event); | 
|---|
| 6794 | static_call_update(intel_pmu_set_topdown_event_period, | 
|---|
| 6795 | &icl_set_topdown_event_period); | 
|---|
| 6796 |  | 
|---|
| 6797 | memcpy(hybrid_var(pmu, hw_cache_event_ids), glc_hw_cache_event_ids, sizeof(hw_cache_event_ids)); |
|---|
| 6798 | memcpy(hybrid_var(pmu, hw_cache_extra_regs), glc_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); |
|---|
| 6799 | hybrid(pmu, event_constraints) = intel_glc_event_constraints; | 
|---|
| 6800 | hybrid(pmu, pebs_constraints) = intel_glc_pebs_event_constraints; | 
|---|
| 6801 |  | 
|---|
| 6802 | intel_pmu_ref_cycles_ext(); | 
|---|
| 6803 | } | 
|---|
| 6804 |  | 
|---|
| 6805 | static __always_inline void intel_pmu_init_grt(struct pmu *pmu) | 
|---|
| 6806 | { | 
|---|
| 6807 | x86_pmu.mid_ack = true; | 
|---|
| 6808 | x86_pmu.limit_period = glc_limit_period; | 
|---|
| 6809 | x86_pmu.pebs_aliases = NULL; | 
|---|
| 6810 | x86_pmu.pebs_prec_dist = true; | 
|---|
| 6811 | x86_pmu.pebs_block = true; | 
|---|
| 6812 | x86_pmu.lbr_pt_coexist = true; | 
|---|
| 6813 | x86_pmu.flags |= PMU_FL_HAS_RSP_1; | 
|---|
| 6814 | x86_pmu.flags |= PMU_FL_INSTR_LATENCY; | 
|---|
| 6815 |  | 
|---|
| 6816 | memcpy(hybrid_var(pmu, hw_cache_event_ids), glp_hw_cache_event_ids, sizeof(hw_cache_event_ids)); |
|---|
| 6817 | memcpy(hybrid_var(pmu, hw_cache_extra_regs), tnt_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); |
|---|
| 6818 | hybrid_var(pmu, hw_cache_event_ids)[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1; | 
|---|
| 6819 | hybrid(pmu, event_constraints) = intel_grt_event_constraints; | 
|---|
| 6820 | hybrid(pmu, pebs_constraints) = intel_grt_pebs_event_constraints; | 
|---|
| 6821 | hybrid(pmu, extra_regs) = intel_grt_extra_regs; | 
|---|
| 6822 |  | 
|---|
| 6823 | intel_pmu_ref_cycles_ext(); | 
|---|
| 6824 | } | 
|---|
| 6825 |  | 
|---|
| 6826 | static __always_inline void intel_pmu_init_lnc(struct pmu *pmu) | 
|---|
| 6827 | { | 
|---|
| 6828 | intel_pmu_init_glc(pmu); | 
|---|
| 6829 | hybrid(pmu, event_constraints) = intel_lnc_event_constraints; | 
|---|
| 6830 | hybrid(pmu, pebs_constraints) = intel_lnc_pebs_event_constraints; | 
|---|
| 6831 | hybrid(pmu, extra_regs) = intel_lnc_extra_regs; | 
|---|
| 6832 | } | 
|---|
| 6833 |  | 
|---|
| 6834 | static __always_inline void intel_pmu_init_skt(struct pmu *pmu) | 
|---|
| 6835 | { | 
|---|
| 6836 | intel_pmu_init_grt(pmu); | 
|---|
| 6837 | hybrid(pmu, event_constraints) = intel_skt_event_constraints; | 
|---|
| 6838 | hybrid(pmu, extra_regs) = intel_cmt_extra_regs; | 
|---|
| 6839 | static_call_update(intel_pmu_enable_acr_event, intel_pmu_enable_acr); | 
|---|
| 6840 | } | 
|---|
| 6841 |  | 
|---|
| 6842 | __init int intel_pmu_init(void) | 
|---|
| 6843 | { | 
|---|
| 6844 | struct attribute **extra_skl_attr = &empty_attrs; |
|---|
| 6845 | struct attribute **extra_attr = &empty_attrs; |
|---|
| 6846 | struct attribute **td_attr    = &empty_attrs; | 
|---|
| 6847 | struct attribute **mem_attr   = &empty_attrs; | 
|---|
| 6848 | struct attribute **tsx_attr   = &empty_attrs; | 
|---|
| 6849 | union cpuid10_edx edx; | 
|---|
| 6850 | union cpuid10_eax eax; | 
|---|
| 6851 | union cpuid10_ebx ebx; | 
|---|
| 6852 | unsigned int fixed_mask; | 
|---|
| 6853 | bool pmem = false; | 
|---|
| 6854 | int version, i; | 
|---|
| 6855 | char *name; | 
|---|
| 6856 | struct x86_hybrid_pmu *pmu; | 
|---|
| 6857 |  | 
|---|
| 6858 | /* Architectural Perfmon was introduced starting with Core "Yonah" */ | 
|---|
| 6859 | if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) { | 
|---|
| 6860 | switch (boot_cpu_data.x86) { | 
|---|
| 6861 | case  6: | 
|---|
| 6862 | if (boot_cpu_data.x86_vfm < INTEL_CORE_YONAH) | 
|---|
| 6863 | return p6_pmu_init(); | 
|---|
| 6864 | break; | 
|---|
| 6865 | case 11: | 
|---|
| 6866 | return knc_pmu_init(); | 
|---|
| 6867 | case 15: | 
|---|
| 6868 | return p4_pmu_init(); | 
|---|
| 6869 | } | 
|---|
| 6870 |  | 
|---|
| 6871 | pr_cont( "unsupported CPU family %d model %d ", | 
|---|
| 6872 | boot_cpu_data.x86, boot_cpu_data.x86_model); | 
|---|
| 6873 | return -ENODEV; | 
|---|
| 6874 | } | 
|---|
| 6875 |  | 
|---|
| 6876 | /* | 
|---|
| 6877 | * Check whether the Architectural PerfMon supports | 
|---|
| 6878 | * Branch Misses Retired hw_event or not. | 
|---|
| 6879 | */ | 
|---|
| 6880 | cpuid(10, &eax.full, &ebx.full, &fixed_mask, &edx.full); |
|---|
| 6881 | if (eax.split.mask_length < ARCH_PERFMON_EVENTS_COUNT) | 
|---|
| 6882 | return -ENODEV; | 
|---|
| 6883 |  | 
|---|
| 6884 | version = eax.split.version_id; | 
|---|
| 6885 | if (version < 2) | 
|---|
| 6886 | x86_pmu = core_pmu; | 
|---|
| 6887 | else | 
|---|
| 6888 | x86_pmu = intel_pmu; | 
|---|
| 6889 |  | 
|---|
| 6890 | x86_pmu.version			= version; | 
|---|
| 6891 | x86_pmu.cntr_mask64		= GENMASK_ULL(eax.split.num_counters - 1, 0); | 
|---|
| 6892 | x86_pmu.cntval_bits		= eax.split.bit_width; | 
|---|
| 6893 | x86_pmu.cntval_mask		= (1ULL << eax.split.bit_width) - 1; | 
|---|
| 6894 |  | 
|---|
| 6895 | x86_pmu.events_maskl		= ebx.full; | 
|---|
| 6896 | x86_pmu.events_mask_len		= eax.split.mask_length; | 
|---|
| 6897 |  | 
|---|
| 6898 | x86_pmu.pebs_events_mask	= intel_pmu_pebs_mask(x86_pmu.cntr_mask64); |
|---|
| 6899 | x86_pmu.pebs_capable		= PEBS_COUNTER_MASK; | 
|---|
| 6900 | x86_pmu.config_mask		= X86_RAW_EVENT_MASK; | 
|---|
| 6901 |  | 
|---|
| 6902 | /* | 
|---|
| 6903 | * Quirk: v2 perfmon does not report fixed-purpose events, so | 
|---|
| 6904 | * assume at least 3 events, when not running in a hypervisor: | 
|---|
| 6905 | */ | 
|---|
| 6906 | if (version > 1 && version < 5) { | 
|---|
| 6907 | int assume = 3 * !boot_cpu_has(X86_FEATURE_HYPERVISOR); | 
|---|
| 6908 |  | 
|---|
| 6909 | x86_pmu.fixed_cntr_mask64 = | 
|---|
| 6910 | GENMASK_ULL(max((int)edx.split.num_counters_fixed, assume) - 1, 0); | 
|---|
| 6911 | } else if (version >= 5) | 
|---|
| 6912 | x86_pmu.fixed_cntr_mask64 = fixed_mask; | 
|---|
| 6913 |  | 
|---|
| 6914 | if (boot_cpu_has(X86_FEATURE_PDCM)) { | 
|---|
| 6915 | u64 capabilities; | 
|---|
| 6916 |  | 
|---|
| 6917 | rdmsrq(MSR_IA32_PERF_CAPABILITIES, capabilities); | 
|---|
| 6918 | x86_pmu.intel_cap.capabilities = capabilities; | 
|---|
| 6919 | } | 
|---|
| 6920 |  | 
|---|
| 6921 | if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32) { | 
|---|
| 6922 | x86_pmu.lbr_reset = intel_pmu_lbr_reset_32; | 
|---|
| 6923 | x86_pmu.lbr_read = intel_pmu_lbr_read_32; | 
|---|
| 6924 | } | 
|---|
| 6925 |  | 
|---|
| 6926 | if (boot_cpu_has(X86_FEATURE_ARCH_LBR)) | 
|---|
| 6927 | intel_pmu_arch_lbr_init(); | 
|---|
| 6928 |  | 
|---|
| 6929 | intel_pebs_init(); | 
|---|
| 6930 |  | 
|---|
| 6931 | x86_add_quirk(intel_arch_events_quirk); /* Install first, so it runs last */ | 
|---|
| 6932 |  | 
|---|
| 6933 | if (version >= 5) { | 
|---|
| 6934 | x86_pmu.intel_cap.anythread_deprecated = edx.split.anythread_deprecated; | 
|---|
| 6935 | if (x86_pmu.intel_cap.anythread_deprecated) | 
|---|
| 6936 | pr_cont( " AnyThread deprecated, "); | 
|---|
| 6937 | } | 
|---|
| 6938 |  | 
|---|
| 6939 | /* | 
|---|
| 6940 | * Many features on and after V6 require dynamic constraint, | 
|---|
| 6941 | * e.g., Arch PEBS, ACR. | 
|---|
| 6942 | */ | 
|---|
| 6943 | if (version >= 6) | 
|---|
| 6944 | x86_pmu.flags |= PMU_FL_DYN_CONSTRAINT; | 
|---|
| 6945 | /* | 
|---|
| 6946 | * Install the hw-cache-events table: | 
|---|
| 6947 | */ | 
|---|
| 6948 | switch (boot_cpu_data.x86_vfm) { | 
|---|
| 6949 | case INTEL_CORE_YONAH: | 
|---|
| 6950 | pr_cont( "Core events, "); | 
|---|
| 6951 | name = "core"; | 
|---|
| 6952 | break; | 
|---|
| 6953 |  | 
|---|
| 6954 | case INTEL_CORE2_MEROM: | 
|---|
| 6955 | x86_add_quirk(intel_clovertown_quirk); | 
|---|
| 6956 | fallthrough; | 
|---|
| 6957 |  | 
|---|
| 6958 | case INTEL_CORE2_MEROM_L: | 
|---|
| 6959 | case INTEL_CORE2_PENRYN: | 
|---|
| 6960 | case INTEL_CORE2_DUNNINGTON: | 
|---|
| 6961 | memcpy(hw_cache_event_ids, core2_hw_cache_event_ids, |
|---|
| 6962 | sizeof(hw_cache_event_ids)); |
|---|
| 6963 |  | 
|---|
| 6964 | intel_pmu_lbr_init_core(); | 
|---|
| 6965 |  | 
|---|
| 6966 | x86_pmu.event_constraints = intel_core2_event_constraints; | 
|---|
| 6967 | x86_pmu.pebs_constraints = intel_core2_pebs_event_constraints; | 
|---|
| 6968 | pr_cont( "Core2 events, "); | 
|---|
| 6969 | name = "core2"; | 
|---|
| 6970 | break; | 
|---|
| 6971 |  | 
|---|
| 6972 | case INTEL_NEHALEM: | 
|---|
| 6973 | case INTEL_NEHALEM_EP: | 
|---|
| 6974 | case INTEL_NEHALEM_EX: | 
|---|
| 6975 | memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids, |
|---|
| 6976 | sizeof(hw_cache_event_ids)); |
|---|
| 6977 | memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs, |
|---|
| 6978 | sizeof(hw_cache_extra_regs)); |
|---|
| 6979 |  | 
|---|
| 6980 | intel_pmu_lbr_init_nhm(); | 
|---|
| 6981 |  | 
|---|
| 6982 | x86_pmu.event_constraints = intel_nehalem_event_constraints; | 
|---|
| 6983 | x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints; | 
|---|
| 6984 | x86_pmu.enable_all = intel_pmu_nhm_enable_all; | 
|---|
| 6985 | x86_pmu.extra_regs = intel_nehalem_extra_regs; | 
|---|
| 6986 | x86_pmu.limit_period = nhm_limit_period; | 
|---|
| 6987 |  | 
|---|
| 6988 | mem_attr = nhm_mem_events_attrs; | 
|---|
| 6989 |  | 
|---|
| 6990 | /* UOPS_ISSUED.STALLED_CYCLES */ | 
|---|
| 6991 | intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = | 
|---|
| 6992 | X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1); | 
|---|
| 6993 | /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */ | 
|---|
| 6994 | intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = | 
|---|
| 6995 | X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1); | 
|---|
| 6996 |  | 
|---|
| 6997 | intel_pmu_pebs_data_source_nhm(); | 
|---|
| 6998 | x86_add_quirk(intel_nehalem_quirk); | 
|---|
| 6999 | x86_pmu.pebs_no_tlb = 1; | 
|---|
| 7000 | extra_attr = nhm_format_attr; | 
|---|
| 7001 |  | 
|---|
| 7002 | pr_cont( "Nehalem events, "); | 
|---|
| 7003 | name = "nehalem"; | 
|---|
| 7004 | break; | 
|---|
| 7005 |  | 
|---|
| 7006 | case INTEL_ATOM_BONNELL: | 
|---|
| 7007 | case INTEL_ATOM_BONNELL_MID: | 
|---|
| 7008 | case INTEL_ATOM_SALTWELL: | 
|---|
| 7009 | case INTEL_ATOM_SALTWELL_MID: | 
|---|
| 7010 | case INTEL_ATOM_SALTWELL_TABLET: | 
|---|
| 7011 | memcpy(hw_cache_event_ids, atom_hw_cache_event_ids, |
|---|
| 7012 | sizeof(hw_cache_event_ids)); |
|---|
| 7013 |  | 
|---|
| 7014 | intel_pmu_lbr_init_atom(); | 
|---|
| 7015 |  | 
|---|
| 7016 | x86_pmu.event_constraints = intel_gen_event_constraints; | 
|---|
| 7017 | x86_pmu.pebs_constraints = intel_atom_pebs_event_constraints; | 
|---|
| 7018 | x86_pmu.pebs_aliases = intel_pebs_aliases_core2; | 
|---|
| 7019 | pr_cont( "Atom events, "); | 
|---|
| 7020 | name = "bonnell"; | 
|---|
| 7021 | break; | 
|---|
| 7022 |  | 
|---|
| 7023 | case INTEL_ATOM_SILVERMONT: | 
|---|
| 7024 | case INTEL_ATOM_SILVERMONT_D: | 
|---|
| 7025 | case INTEL_ATOM_SILVERMONT_MID: | 
|---|
| 7026 | case INTEL_ATOM_AIRMONT: | 
|---|
| 7027 | case INTEL_ATOM_SILVERMONT_MID2: | 
|---|
| 7028 | memcpy(hw_cache_event_ids, slm_hw_cache_event_ids, |
|---|
| 7029 | sizeof(hw_cache_event_ids)); |
|---|
| 7030 | memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs, |
|---|
| 7031 | sizeof(hw_cache_extra_regs)); |
|---|
| 7032 |  | 
|---|
| 7033 | intel_pmu_lbr_init_slm(); | 
|---|
| 7034 |  | 
|---|
| 7035 | x86_pmu.event_constraints = intel_slm_event_constraints; | 
|---|
| 7036 | x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints; | 
|---|
| 7037 | x86_pmu.extra_regs = intel_slm_extra_regs; | 
|---|
| 7038 | x86_pmu.flags |= PMU_FL_HAS_RSP_1; | 
|---|
| 7039 | td_attr = slm_events_attrs; | 
|---|
| 7040 | extra_attr = slm_format_attr; | 
|---|
| 7041 | pr_cont( "Silvermont events, "); | 
|---|
| 7042 | name = "silvermont"; | 
|---|
| 7043 | break; | 
|---|
| 7044 |  | 
|---|
| 7045 | case INTEL_ATOM_GOLDMONT: | 
|---|
| 7046 | case INTEL_ATOM_GOLDMONT_D: | 
|---|
| 7047 | memcpy(hw_cache_event_ids, glm_hw_cache_event_ids, |
|---|
| 7048 | sizeof(hw_cache_event_ids)); |
|---|
| 7049 | memcpy(hw_cache_extra_regs, glm_hw_cache_extra_regs, |
|---|
| 7050 | sizeof(hw_cache_extra_regs)); |
|---|
| 7051 |  | 
|---|
| 7052 | intel_pmu_lbr_init_skl(); | 
|---|
| 7053 |  | 
|---|
| 7054 | x86_pmu.event_constraints = intel_slm_event_constraints; | 
|---|
| 7055 | x86_pmu.pebs_constraints = intel_glm_pebs_event_constraints; | 
|---|
| 7056 | x86_pmu.extra_regs = intel_glm_extra_regs; | 
|---|
| 7057 | /* | 
|---|
| 7058 | * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS | 
|---|
| 7059 | * for precise cycles. | 
|---|
| 7060 | * :pp is identical to :ppp | 
|---|
| 7061 | */ | 
|---|
| 7062 | x86_pmu.pebs_aliases = NULL; | 
|---|
| 7063 | x86_pmu.pebs_prec_dist = true; | 
|---|
| 7064 | x86_pmu.lbr_pt_coexist = true; | 
|---|
| 7065 | x86_pmu.flags |= PMU_FL_HAS_RSP_1; | 
|---|
| 7066 | td_attr = glm_events_attrs; | 
|---|
| 7067 | extra_attr = slm_format_attr; | 
|---|
| 7068 | pr_cont( "Goldmont events, "); | 
|---|
| 7069 | name = "goldmont"; | 
|---|
| 7070 | break; | 
|---|
| 7071 |  | 
|---|
| 7072 | case INTEL_ATOM_GOLDMONT_PLUS: | 
|---|
| 7073 | memcpy(hw_cache_event_ids, glp_hw_cache_event_ids, |
|---|
| 7074 | sizeof(hw_cache_event_ids)); |
|---|
| 7075 | memcpy(hw_cache_extra_regs, glp_hw_cache_extra_regs, |
|---|
| 7076 | sizeof(hw_cache_extra_regs)); |
|---|
| 7077 |  | 
|---|
| 7078 | intel_pmu_lbr_init_skl(); | 
|---|
| 7079 |  | 
|---|
| 7080 | x86_pmu.event_constraints = intel_slm_event_constraints; | 
|---|
| 7081 | x86_pmu.extra_regs = intel_glm_extra_regs; | 
|---|
| 7082 | /* | 
|---|
| 7083 | * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS | 
|---|
| 7084 | * for precise cycles. | 
|---|
| 7085 | */ | 
|---|
| 7086 | x86_pmu.pebs_aliases = NULL; | 
|---|
| 7087 | x86_pmu.pebs_prec_dist = true; | 
|---|
| 7088 | x86_pmu.lbr_pt_coexist = true; | 
|---|
| 7089 | x86_pmu.pebs_capable = ~0ULL; | 
|---|
| 7090 | x86_pmu.flags |= PMU_FL_HAS_RSP_1; | 
|---|
| 7091 | x86_pmu.flags |= PMU_FL_PEBS_ALL; | 
|---|
| 7092 | x86_pmu.get_event_constraints = glp_get_event_constraints; | 
|---|
| 7093 | td_attr = glm_events_attrs; | 
|---|
| 7094 | /* Goldmont Plus has 4-wide pipeline */ | 
|---|
| 7095 | event_attr_td_total_slots_scale_glm.event_str = "4"; | 
|---|
| 7096 | extra_attr = slm_format_attr; | 
|---|
| 7097 | pr_cont( "Goldmont plus events, "); | 
|---|
| 7098 | name = "goldmont_plus"; | 
|---|
| 7099 | break; | 
|---|
| 7100 |  | 
|---|
| 7101 | case INTEL_ATOM_TREMONT_D: | 
|---|
| 7102 | case INTEL_ATOM_TREMONT: | 
|---|
| 7103 | case INTEL_ATOM_TREMONT_L: | 
|---|
| 7104 | x86_pmu.late_ack = true; | 
|---|
| 7105 | memcpy(hw_cache_event_ids, glp_hw_cache_event_ids, |
|---|
| 7106 | sizeof(hw_cache_event_ids)); |
|---|
| 7107 | memcpy(hw_cache_extra_regs, tnt_hw_cache_extra_regs, |
|---|
| 7108 | sizeof(hw_cache_extra_regs)); |
|---|
| 7109 | hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1; | 
|---|
| 7110 |  | 
|---|
| 7111 | intel_pmu_lbr_init_skl(); | 
|---|
| 7112 |  | 
|---|
| 7113 | x86_pmu.event_constraints = intel_slm_event_constraints; | 
|---|
| 7114 | x86_pmu.extra_regs = intel_tnt_extra_regs; | 
|---|
| 7115 | /* | 
|---|
| 7116 | * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS | 
|---|
| 7117 | * for precise cycles. | 
|---|
| 7118 | */ | 
|---|
| 7119 | x86_pmu.pebs_aliases = NULL; | 
|---|
| 7120 | x86_pmu.pebs_prec_dist = true; | 
|---|
| 7121 | x86_pmu.lbr_pt_coexist = true; | 
|---|
| 7122 | x86_pmu.flags |= PMU_FL_HAS_RSP_1; | 
|---|
| 7123 | x86_pmu.get_event_constraints = tnt_get_event_constraints; | 
|---|
| 7124 | td_attr = tnt_events_attrs; | 
|---|
| 7125 | extra_attr = slm_format_attr; | 
|---|
| 7126 | pr_cont( "Tremont events, "); | 
|---|
| 7127 | name = "Tremont"; | 
|---|
| 7128 | break; | 
|---|
| 7129 |  | 
|---|
| 7130 | case INTEL_ATOM_GRACEMONT: | 
|---|
| 7131 | intel_pmu_init_grt(NULL); | 
|---|
| 7132 | intel_pmu_pebs_data_source_grt(); | 
|---|
| 7133 | x86_pmu.pebs_latency_data = grt_latency_data; | 
|---|
| 7134 | x86_pmu.get_event_constraints = tnt_get_event_constraints; | 
|---|
| 7135 | td_attr = tnt_events_attrs; | 
|---|
| 7136 | mem_attr = grt_mem_attrs; | 
|---|
| 7137 | extra_attr = nhm_format_attr; | 
|---|
| 7138 | pr_cont( "Gracemont events, "); | 
|---|
| 7139 | name = "gracemont"; | 
|---|
| 7140 | break; | 
|---|
| 7141 |  | 
|---|
| 7142 | case INTEL_ATOM_CRESTMONT: | 
|---|
| 7143 | case INTEL_ATOM_CRESTMONT_X: | 
|---|
| 7144 | intel_pmu_init_grt(NULL); | 
|---|
| 7145 | x86_pmu.extra_regs = intel_cmt_extra_regs; | 
|---|
| 7146 | intel_pmu_pebs_data_source_cmt(); | 
|---|
| 7147 | x86_pmu.pebs_latency_data = cmt_latency_data; | 
|---|
| 7148 | x86_pmu.get_event_constraints = cmt_get_event_constraints; | 
|---|
| 7149 | td_attr = cmt_events_attrs; | 
|---|
| 7150 | mem_attr = grt_mem_attrs; | 
|---|
| 7151 | extra_attr = cmt_format_attr; | 
|---|
| 7152 | pr_cont( "Crestmont events, "); | 
|---|
| 7153 | name = "crestmont"; | 
|---|
| 7154 | break; | 
|---|
| 7155 |  | 
|---|
| 7156 | case INTEL_ATOM_DARKMONT_X: | 
|---|
| 7157 | intel_pmu_init_skt(NULL); | 
|---|
| 7158 | intel_pmu_pebs_data_source_cmt(); | 
|---|
| 7159 | x86_pmu.pebs_latency_data = cmt_latency_data; | 
|---|
| 7160 | x86_pmu.get_event_constraints = cmt_get_event_constraints; | 
|---|
| 7161 | td_attr = skt_events_attrs; | 
|---|
| 7162 | mem_attr = grt_mem_attrs; | 
|---|
| 7163 | extra_attr = cmt_format_attr; | 
|---|
| 7164 | pr_cont( "Darkmont events, "); | 
|---|
| 7165 | name = "darkmont"; | 
|---|
| 7166 | break; | 
|---|
| 7167 |  | 
|---|
| 7168 | case INTEL_WESTMERE: | 
|---|
| 7169 | case INTEL_WESTMERE_EP: | 
|---|
| 7170 | case INTEL_WESTMERE_EX: | 
|---|
| 7171 | memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids, |
|---|
| 7172 | sizeof(hw_cache_event_ids)); |
|---|
| 7173 | memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs, |
|---|
| 7174 | sizeof(hw_cache_extra_regs)); |
|---|
| 7175 |  | 
|---|
| 7176 | intel_pmu_lbr_init_nhm(); | 
|---|
| 7177 |  | 
|---|
| 7178 | x86_pmu.event_constraints = intel_westmere_event_constraints; | 
|---|
| 7179 | x86_pmu.enable_all = intel_pmu_nhm_enable_all; | 
|---|
| 7180 | x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints; | 
|---|
| 7181 | x86_pmu.extra_regs = intel_westmere_extra_regs; | 
|---|
| 7182 | x86_pmu.flags |= PMU_FL_HAS_RSP_1; | 
|---|
| 7183 |  | 
|---|
| 7184 | mem_attr = nhm_mem_events_attrs; | 
|---|
| 7185 |  | 
|---|
| 7186 | /* UOPS_ISSUED.STALLED_CYCLES */ | 
|---|
| 7187 | intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = | 
|---|
| 7188 | X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1); | 
|---|
| 7189 | /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */ | 
|---|
| 7190 | intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = | 
|---|
| 7191 | X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1); | 
|---|
| 7192 |  | 
|---|
| 7193 | intel_pmu_pebs_data_source_nhm(); | 
|---|
| 7194 | extra_attr = nhm_format_attr; | 
|---|
| 7195 | pr_cont( "Westmere events, "); | 
|---|
| 7196 | name = "westmere"; | 
|---|
| 7197 | break; | 
|---|
| 7198 |  | 
|---|
| 7199 | case INTEL_SANDYBRIDGE: | 
|---|
| 7200 | case INTEL_SANDYBRIDGE_X: | 
|---|
| 7201 | x86_add_quirk(intel_sandybridge_quirk); | 
|---|
| 7202 | x86_add_quirk(intel_ht_bug); | 
|---|
| 7203 | memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, |
|---|
| 7204 | sizeof(hw_cache_event_ids)); |
|---|
| 7205 | memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs, |
|---|
| 7206 | sizeof(hw_cache_extra_regs)); |
|---|
| 7207 |  | 
|---|
| 7208 | intel_pmu_lbr_init_snb(); | 
|---|
| 7209 |  | 
|---|
| 7210 | x86_pmu.event_constraints = intel_snb_event_constraints; | 
|---|
| 7211 | x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints; | 
|---|
| 7212 | x86_pmu.pebs_aliases = intel_pebs_aliases_snb; | 
|---|
| 7213 | if (boot_cpu_data.x86_vfm == INTEL_SANDYBRIDGE_X) | 
|---|
| 7214 | x86_pmu.extra_regs = intel_snbep_extra_regs; | 
|---|
| 7215 | else | 
|---|
| 7216 | x86_pmu.extra_regs = intel_snb_extra_regs; | 
|---|
| 7217 |  | 
|---|
| 7218 |  | 
|---|
| 7219 | /* all extra regs are per-cpu when HT is on */ | 
|---|
| 7220 | x86_pmu.flags |= PMU_FL_HAS_RSP_1; | 
|---|
| 7221 | x86_pmu.flags |= PMU_FL_NO_HT_SHARING; | 
|---|
| 7222 |  | 
|---|
| 7223 | td_attr  = snb_events_attrs; | 
|---|
| 7224 | mem_attr = snb_mem_events_attrs; | 
|---|
| 7225 |  | 
|---|
| 7226 | /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */ | 
|---|
| 7227 | intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = | 
|---|
| 7228 | X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1); | 
|---|
| 7229 | /* UOPS_DISPATCHED.THREAD,c=1,i=1 to count stall cycles*/ | 
|---|
| 7230 | intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = | 
|---|
| 7231 | X86_CONFIG(.event=0xb1, .umask=0x01, .inv=1, .cmask=1); | 
|---|
| 7232 |  | 
|---|
| 7233 | extra_attr = nhm_format_attr; | 
|---|
| 7234 |  | 
|---|
| 7235 | pr_cont( "SandyBridge events, "); | 
|---|
| 7236 | name = "sandybridge"; | 
|---|
| 7237 | break; | 
|---|
| 7238 |  | 
|---|
| 7239 | case INTEL_IVYBRIDGE: | 
|---|
| 7240 | case INTEL_IVYBRIDGE_X: | 
|---|
| 7241 | x86_add_quirk(intel_ht_bug); | 
|---|
| 7242 | memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, |
|---|
| 7243 | sizeof(hw_cache_event_ids)); |
|---|
| 7244 | /* dTLB-load-misses on IVB is different than SNB */ | 
|---|
| 7245 | hw_cache_event_ids[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = 0x8108; /* DTLB_LOAD_MISSES.DEMAND_LD_MISS_CAUSES_A_WALK */ | 
|---|
| 7246 |  | 
|---|
| 7247 | memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs, |
|---|
| 7248 | sizeof(hw_cache_extra_regs)); |
|---|
| 7249 |  | 
|---|
| 7250 | intel_pmu_lbr_init_snb(); | 
|---|
| 7251 |  | 
|---|
| 7252 | x86_pmu.event_constraints = intel_ivb_event_constraints; | 
|---|
| 7253 | x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints; | 
|---|
| 7254 | x86_pmu.pebs_aliases = intel_pebs_aliases_ivb; | 
|---|
| 7255 | x86_pmu.pebs_prec_dist = true; | 
|---|
| 7256 | if (boot_cpu_data.x86_vfm == INTEL_IVYBRIDGE_X) | 
|---|
| 7257 | x86_pmu.extra_regs = intel_snbep_extra_regs; | 
|---|
| 7258 | else | 
|---|
| 7259 | x86_pmu.extra_regs = intel_snb_extra_regs; | 
|---|
| 7260 | /* all extra regs are per-cpu when HT is on */ | 
|---|
| 7261 | x86_pmu.flags |= PMU_FL_HAS_RSP_1; | 
|---|
| 7262 | x86_pmu.flags |= PMU_FL_NO_HT_SHARING; | 
|---|
| 7263 |  | 
|---|
| 7264 | td_attr  = snb_events_attrs; | 
|---|
| 7265 | mem_attr = snb_mem_events_attrs; | 
|---|
| 7266 |  | 
|---|
| 7267 | /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */ | 
|---|
| 7268 | intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = | 
|---|
| 7269 | X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1); | 
|---|
| 7270 |  | 
|---|
| 7271 | extra_attr = nhm_format_attr; | 
|---|
| 7272 |  | 
|---|
| 7273 | pr_cont( "IvyBridge events, "); | 
|---|
| 7274 | name = "ivybridge"; | 
|---|
| 7275 | break; | 
|---|
| 7276 |  | 
|---|
| 7277 |  | 
|---|
| 7278 | case INTEL_HASWELL: | 
|---|
| 7279 | case INTEL_HASWELL_X: | 
|---|
| 7280 | case INTEL_HASWELL_L: | 
|---|
| 7281 | case INTEL_HASWELL_G: | 
|---|
| 7282 | x86_add_quirk(intel_ht_bug); | 
|---|
| 7283 | x86_add_quirk(intel_pebs_isolation_quirk); | 
|---|
| 7284 | x86_pmu.late_ack = true; | 
|---|
| 7285 | memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids)); |
|---|
| 7286 | memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); |
|---|
| 7287 |  | 
|---|
| 7288 | intel_pmu_lbr_init_hsw(); | 
|---|
| 7289 |  | 
|---|
| 7290 | x86_pmu.event_constraints = intel_hsw_event_constraints; | 
|---|
| 7291 | x86_pmu.pebs_constraints = intel_hsw_pebs_event_constraints; | 
|---|
| 7292 | x86_pmu.extra_regs = intel_snbep_extra_regs; | 
|---|
| 7293 | x86_pmu.pebs_aliases = intel_pebs_aliases_ivb; | 
|---|
| 7294 | x86_pmu.pebs_prec_dist = true; | 
|---|
| 7295 | /* all extra regs are per-cpu when HT is on */ | 
|---|
| 7296 | x86_pmu.flags |= PMU_FL_HAS_RSP_1; | 
|---|
| 7297 | x86_pmu.flags |= PMU_FL_NO_HT_SHARING; | 
|---|
| 7298 |  | 
|---|
| 7299 | x86_pmu.hw_config = hsw_hw_config; | 
|---|
| 7300 | x86_pmu.get_event_constraints = hsw_get_event_constraints; | 
|---|
| 7301 | x86_pmu.limit_period = hsw_limit_period; | 
|---|
| 7302 | x86_pmu.lbr_double_abort = true; | 
|---|
| 7303 | extra_attr = boot_cpu_has(X86_FEATURE_RTM) ? | 
|---|
| 7304 | hsw_format_attr : nhm_format_attr; | 
|---|
| 7305 | td_attr  = hsw_events_attrs; | 
|---|
| 7306 | mem_attr = hsw_mem_events_attrs; | 
|---|
| 7307 | tsx_attr = hsw_tsx_events_attrs; | 
|---|
| 7308 | pr_cont( "Haswell events, "); | 
|---|
| 7309 | name = "haswell"; | 
|---|
| 7310 | break; | 
|---|
| 7311 |  | 
|---|
| 7312 | case INTEL_BROADWELL: | 
|---|
| 7313 | case INTEL_BROADWELL_D: | 
|---|
| 7314 | case INTEL_BROADWELL_G: | 
|---|
| 7315 | case INTEL_BROADWELL_X: | 
|---|
| 7316 | x86_add_quirk(intel_pebs_isolation_quirk); | 
|---|
| 7317 | x86_pmu.late_ack = true; | 
|---|
| 7318 | memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids)); |
|---|
| 7319 | memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); |
|---|
| 7320 |  | 
|---|
| 7321 | /* L3_MISS_LOCAL_DRAM is BIT(26) in Broadwell */ | 
|---|
| 7322 | hw_cache_extra_regs[C(LL)][C(OP_READ)][C(RESULT_MISS)] = HSW_DEMAND_READ | | 
|---|
| 7323 | BDW_L3_MISS|HSW_SNOOP_DRAM; | 
|---|
| 7324 | hw_cache_extra_regs[C(LL)][C(OP_WRITE)][C(RESULT_MISS)] = HSW_DEMAND_WRITE|BDW_L3_MISS| | 
|---|
| 7325 | HSW_SNOOP_DRAM; | 
|---|
| 7326 | hw_cache_extra_regs[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = HSW_DEMAND_READ| | 
|---|
| 7327 | BDW_L3_MISS_LOCAL|HSW_SNOOP_DRAM; | 
|---|
| 7328 | hw_cache_extra_regs[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = HSW_DEMAND_WRITE| | 
|---|
| 7329 | BDW_L3_MISS_LOCAL|HSW_SNOOP_DRAM; | 
|---|
| 7330 |  | 
|---|
| 7331 | intel_pmu_lbr_init_hsw(); | 
|---|
| 7332 |  | 
|---|
| 7333 | x86_pmu.event_constraints = intel_bdw_event_constraints; | 
|---|
| 7334 | x86_pmu.pebs_constraints = intel_bdw_pebs_event_constraints; | 
|---|
| 7335 | x86_pmu.extra_regs = intel_snbep_extra_regs; | 
|---|
| 7336 | x86_pmu.pebs_aliases = intel_pebs_aliases_ivb; | 
|---|
| 7337 | x86_pmu.pebs_prec_dist = true; | 
|---|
| 7338 | /* all extra regs are per-cpu when HT is on */ | 
|---|
| 7339 | x86_pmu.flags |= PMU_FL_HAS_RSP_1; | 
|---|
| 7340 | x86_pmu.flags |= PMU_FL_NO_HT_SHARING; | 
|---|
| 7341 |  | 
|---|
| 7342 | x86_pmu.hw_config = hsw_hw_config; | 
|---|
| 7343 | x86_pmu.get_event_constraints = hsw_get_event_constraints; | 
|---|
| 7344 | x86_pmu.limit_period = bdw_limit_period; | 
|---|
| 7345 | extra_attr = boot_cpu_has(X86_FEATURE_RTM) ? | 
|---|
| 7346 | hsw_format_attr : nhm_format_attr; | 
|---|
| 7347 | td_attr  = hsw_events_attrs; | 
|---|
| 7348 | mem_attr = hsw_mem_events_attrs; | 
|---|
| 7349 | tsx_attr = hsw_tsx_events_attrs; | 
|---|
| 7350 | pr_cont( "Broadwell events, "); | 
|---|
| 7351 | name = "broadwell"; | 
|---|
| 7352 | break; | 
|---|
| 7353 |  | 
|---|
| 7354 | case INTEL_XEON_PHI_KNL: | 
|---|
| 7355 | case INTEL_XEON_PHI_KNM: | 
|---|
| 7356 | memcpy(hw_cache_event_ids, |
|---|
| 7357 | slm_hw_cache_event_ids, sizeof(hw_cache_event_ids)); |
|---|
| 7358 | memcpy(hw_cache_extra_regs, |
|---|
| 7359 | knl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); |
|---|
| 7360 | intel_pmu_lbr_init_knl(); | 
|---|
| 7361 |  | 
|---|
| 7362 | x86_pmu.event_constraints = intel_slm_event_constraints; | 
|---|
| 7363 | x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints; | 
|---|
| 7364 | x86_pmu.extra_regs = intel_knl_extra_regs; | 
|---|
| 7365 |  | 
|---|
| 7366 | /* all extra regs are per-cpu when HT is on */ | 
|---|
| 7367 | x86_pmu.flags |= PMU_FL_HAS_RSP_1; | 
|---|
| 7368 | x86_pmu.flags |= PMU_FL_NO_HT_SHARING; | 
|---|
| 7369 | extra_attr = slm_format_attr; | 
|---|
| 7370 | pr_cont( "Knights Landing/Mill events, "); | 
|---|
| 7371 | name = "knights-landing"; | 
|---|
| 7372 | break; | 
|---|
| 7373 |  | 
|---|
| 7374 | case INTEL_SKYLAKE_X: | 
|---|
| 7375 | pmem = true; | 
|---|
| 7376 | fallthrough; | 
|---|
| 7377 | case INTEL_SKYLAKE_L: | 
|---|
| 7378 | case INTEL_SKYLAKE: | 
|---|
| 7379 | case INTEL_KABYLAKE_L: | 
|---|
| 7380 | case INTEL_KABYLAKE: | 
|---|
| 7381 | case INTEL_COMETLAKE_L: | 
|---|
| 7382 | case INTEL_COMETLAKE: | 
|---|
| 7383 | x86_add_quirk(intel_pebs_isolation_quirk); | 
|---|
| 7384 | x86_pmu.late_ack = true; | 
|---|
| 7385 | memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids)); |
|---|
| 7386 | memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); |
|---|
| 7387 | intel_pmu_lbr_init_skl(); | 
|---|
| 7388 |  | 
|---|
| 7389 | /* INT_MISC.RECOVERY_CYCLES has umask 1 in Skylake */ | 
|---|
| 7390 | event_attr_td_recovery_bubbles.event_str_noht = | 
|---|
| 7391 | "event=0xd,umask=0x1,cmask=1"; | 
|---|
| 7392 | event_attr_td_recovery_bubbles.event_str_ht = | 
|---|
| 7393 | "event=0xd,umask=0x1,cmask=1,any=1"; | 
|---|
| 7394 |  | 
|---|
| 7395 | x86_pmu.event_constraints = intel_skl_event_constraints; | 
|---|
| 7396 | x86_pmu.pebs_constraints = intel_skl_pebs_event_constraints; | 
|---|
| 7397 | x86_pmu.extra_regs = intel_skl_extra_regs; | 
|---|
| 7398 | x86_pmu.pebs_aliases = intel_pebs_aliases_skl; | 
|---|
| 7399 | x86_pmu.pebs_prec_dist = true; | 
|---|
| 7400 | /* all extra regs are per-cpu when HT is on */ | 
|---|
| 7401 | x86_pmu.flags |= PMU_FL_HAS_RSP_1; | 
|---|
| 7402 | x86_pmu.flags |= PMU_FL_NO_HT_SHARING; | 
|---|
| 7403 |  | 
|---|
| 7404 | x86_pmu.hw_config = hsw_hw_config; | 
|---|
| 7405 | x86_pmu.get_event_constraints = hsw_get_event_constraints; | 
|---|
| 7406 | extra_attr = boot_cpu_has(X86_FEATURE_RTM) ? | 
|---|
| 7407 | hsw_format_attr : nhm_format_attr; | 
|---|
| 7408 | extra_skl_attr = skl_format_attr; | 
|---|
| 7409 | td_attr  = hsw_events_attrs; | 
|---|
| 7410 | mem_attr = hsw_mem_events_attrs; | 
|---|
| 7411 | tsx_attr = hsw_tsx_events_attrs; | 
|---|
| 7412 | intel_pmu_pebs_data_source_skl(pmem); | 
|---|
| 7413 |  | 
|---|
| 7414 | /* | 
|---|
| 7415 | * Processors with CPUID.RTM_ALWAYS_ABORT have TSX deprecated by default. | 
|---|
| 7416 | * TSX force abort hooks are not required on these systems. Only deploy | 
|---|
| 7417 | * workaround when microcode has not enabled X86_FEATURE_RTM_ALWAYS_ABORT. | 
|---|
| 7418 | */ | 
|---|
| 7419 | if (boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT) && | 
|---|
| 7420 | !boot_cpu_has(X86_FEATURE_RTM_ALWAYS_ABORT)) { | 
|---|
| 7421 | x86_pmu.flags |= PMU_FL_TFA; | 
|---|
| 7422 | x86_pmu.get_event_constraints = tfa_get_event_constraints; | 
|---|
| 7423 | x86_pmu.enable_all = intel_tfa_pmu_enable_all; | 
|---|
| 7424 | x86_pmu.commit_scheduling = intel_tfa_commit_scheduling; | 
|---|
| 7425 | } | 
|---|
| 7426 |  | 
|---|
| 7427 | pr_cont( "Skylake events, "); | 
|---|
| 7428 | name = "skylake"; | 
|---|
| 7429 | break; | 
|---|
| 7430 |  | 
|---|
| 7431 | case INTEL_ICELAKE_X: | 
|---|
| 7432 | case INTEL_ICELAKE_D: | 
|---|
| 7433 | x86_pmu.pebs_ept = 1; | 
|---|
| 7434 | pmem = true; | 
|---|
| 7435 | fallthrough; | 
|---|
| 7436 | case INTEL_ICELAKE_L: | 
|---|
| 7437 | case INTEL_ICELAKE: | 
|---|
| 7438 | case INTEL_TIGERLAKE_L: | 
|---|
| 7439 | case INTEL_TIGERLAKE: | 
|---|
| 7440 | case INTEL_ROCKETLAKE: | 
|---|
| 7441 | x86_pmu.late_ack = true; | 
|---|
| 7442 | memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids)); |
|---|
| 7443 | memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); |
|---|
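|  | /* Marking the entry -1 reports this generic cache event as unsupported. */ |
|---|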
| 7444 | hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1; | 
|---|
| 7445 | intel_pmu_lbr_init_skl(); | 
|---|
| 7446 |  | 
|---|
| 7447 | x86_pmu.event_constraints = intel_icl_event_constraints; | 
|---|
| 7448 | x86_pmu.pebs_constraints = intel_icl_pebs_event_constraints; | 
|---|
| 7449 | x86_pmu.extra_regs = intel_icl_extra_regs; | 
|---|
| 7450 | x86_pmu.pebs_aliases = NULL; | 
|---|
| 7451 | x86_pmu.pebs_prec_dist = true; | 
|---|
| 7452 | x86_pmu.flags |= PMU_FL_HAS_RSP_1; | 
|---|
| 7453 | x86_pmu.flags |= PMU_FL_NO_HT_SHARING; | 
|---|
| 7454 |  | 
|---|
| 7455 | x86_pmu.hw_config = hsw_hw_config; | 
|---|
| 7456 | x86_pmu.get_event_constraints = icl_get_event_constraints; | 
|---|
| 7457 | extra_attr = boot_cpu_has(X86_FEATURE_RTM) ? | 
|---|
| 7458 | hsw_format_attr : nhm_format_attr; | 
|---|
| 7459 | extra_skl_attr = skl_format_attr; | 
|---|
| 7460 | mem_attr = icl_events_attrs; | 
|---|
| 7461 | td_attr = icl_td_events_attrs; | 
|---|
| 7462 | tsx_attr = icl_tsx_events_attrs; | 
|---|
| 7463 | x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xc9, .umask=0x04); | 
|---|
| 7464 | x86_pmu.lbr_pt_coexist = true; | 
|---|
| 7465 | intel_pmu_pebs_data_source_skl(pmem); | 
|---|
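|  | /* |
|---|
|  | * Ice Lake introduces the PERF_METRICS MSR with four level-1 topdown |
|---|
|  | * metrics; the icl_* topdown helpers installed below are expected to |
|---|
|  | * read it and manage the period of the slots/metrics events. |
|---|
|  | */ |
|---|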
| 7466 | x86_pmu.num_topdown_events = 4; | 
|---|
| 7467 | static_call_update(intel_pmu_update_topdown_event, | 
|---|
| 7468 | &icl_update_topdown_event); | 
|---|
| 7469 | static_call_update(intel_pmu_set_topdown_event_period, | 
|---|
| 7470 | &icl_set_topdown_event_period); | 
|---|
| 7471 | pr_cont( "Icelake events, "); | 
|---|
| 7472 | name = "icelake"; | 
|---|
| 7473 | break; | 
|---|
| 7474 |  | 
|---|
| 7475 | case INTEL_SAPPHIRERAPIDS_X: | 
|---|
| 7476 | case INTEL_EMERALDRAPIDS_X: | 
|---|
| 7477 | x86_pmu.flags |= PMU_FL_MEM_LOADS_AUX; | 
|---|
| 7478 | x86_pmu.extra_regs = intel_glc_extra_regs; | 
|---|
| 7479 | pr_cont( "Sapphire Rapids events, "); | 
|---|
| 7480 | name = "sapphire_rapids"; | 
|---|
| 7481 | goto glc_common; | 
|---|
| 7482 |  | 
|---|
| 7483 | case INTEL_GRANITERAPIDS_X: | 
|---|
| 7484 | case INTEL_GRANITERAPIDS_D: | 
|---|
| 7485 | x86_pmu.extra_regs = intel_rwc_extra_regs; | 
|---|
| 7486 | pr_cont( "Granite Rapids events, "); | 
|---|
| 7487 | name = "granite_rapids"; | 
|---|
| 7488 |  | 
|---|
| 7489 | glc_common: | 
|---|
| 7490 | intel_pmu_init_glc(NULL); | 
|---|
| 7491 | x86_pmu.pebs_ept = 1; | 
|---|
| 7492 | x86_pmu.hw_config = hsw_hw_config; | 
|---|
| 7493 | x86_pmu.get_event_constraints = glc_get_event_constraints; | 
|---|
| 7494 | extra_attr = boot_cpu_has(X86_FEATURE_RTM) ? | 
|---|
| 7495 | hsw_format_attr : nhm_format_attr; | 
|---|
| 7496 | extra_skl_attr = skl_format_attr; | 
|---|
| 7497 | mem_attr = glc_events_attrs; | 
|---|
| 7498 | td_attr = glc_td_events_attrs; | 
|---|
| 7499 | tsx_attr = glc_tsx_events_attrs; | 
|---|
| 7500 | intel_pmu_pebs_data_source_skl(true); |
|---|
| 7501 | break; | 
|---|
| 7502 |  | 
|---|
| 7503 | case INTEL_ALDERLAKE: | 
|---|
| 7504 | case INTEL_ALDERLAKE_L: | 
|---|
| 7505 | case INTEL_RAPTORLAKE: | 
|---|
| 7506 | case INTEL_RAPTORLAKE_P: | 
|---|
| 7507 | case INTEL_RAPTORLAKE_S: | 
|---|
| 7508 | /* | 
|---|
| 7509 | * Alder Lake has two CPU types, Core and Atom. |
|---|
| 7510 | * | 
|---|
| 7511 | * Initialize the common PerfMon capabilities here. | 
|---|
| 7512 | */ | 
|---|
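|  | /* |
|---|
|  | * Each hybrid PMU type is registered with perf separately, so the big |
|---|
|  | * and small cores typically show up as distinct event sources (e.g. |
|---|
|  | * "cpu_core" and "cpu_atom") rather than as a single "cpu" PMU. |
|---|
|  | */ |
|---|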
| 7513 | intel_pmu_init_hybrid(hybrid_big_small); |
|---|
| 7514 |  | 
|---|
| 7515 | x86_pmu.pebs_latency_data = grt_latency_data; | 
|---|
| 7516 | x86_pmu.get_event_constraints = adl_get_event_constraints; | 
|---|
| 7517 | x86_pmu.hw_config = adl_hw_config; | 
|---|
| 7518 | x86_pmu.get_hybrid_cpu_type = adl_get_hybrid_cpu_type; | 
|---|
| 7519 |  | 
|---|
| 7520 | td_attr = adl_hybrid_events_attrs; | 
|---|
| 7521 | mem_attr = adl_hybrid_mem_attrs; | 
|---|
| 7522 | tsx_attr = adl_hybrid_tsx_attrs; | 
|---|
| 7523 | extra_attr = boot_cpu_has(X86_FEATURE_RTM) ? | 
|---|
| 7524 | adl_hybrid_extra_attr_rtm : adl_hybrid_extra_attr; | 
|---|
| 7525 |  | 
|---|
| 7526 | /* Initialize big core specific PerfMon capabilities.*/ | 
|---|
| 7527 | pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX]; | 
|---|
| 7528 | intel_pmu_init_glc(&pmu->pmu); |
|---|
| 7529 | if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU)) { | 
|---|
| 7530 | pmu->cntr_mask64 <<= 2; | 
|---|
| 7531 | pmu->cntr_mask64 |= 0x3; | 
|---|
| 7532 | pmu->fixed_cntr_mask64 <<= 1; | 
|---|
| 7533 | pmu->fixed_cntr_mask64 |= 0x1; | 
|---|
| 7534 | } else { | 
|---|
| 7535 | pmu->cntr_mask64 = x86_pmu.cntr_mask64; | 
|---|
| 7536 | pmu->fixed_cntr_mask64 = x86_pmu.fixed_cntr_mask64; | 
|---|
| 7537 | } | 
|---|
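|  | /* |
|---|
|  | * Presumably leaf 0xA only enumerates the counters common to both core |
|---|
|  | * types when X86_FEATURE_HYBRID_CPU is set, which is why the P-core |
|---|
|  | * masks above are widened by two GP counters and one fixed counter. |
|---|
|  | */ |
|---|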
| 7538 |  | 
|---|
| 7539 | /* | 
|---|
| 7540 | * Quirk: on some Alder Lake machines, when all E-cores are disabled in |
|---|
| 7541 | * the BIOS, leaf 0xA enumerates all counters of the P-cores. However, |
|---|
| 7542 | * X86_FEATURE_HYBRID_CPU is still set. The code above would then |
|---|
| 7543 | * mistakenly add extra counters for the P-cores. Correct the number of |
|---|
| 7544 | * counters here. |
|---|
| 7545 | */ | 
|---|
| 7546 | if ((x86_pmu_num_counters(&pmu->pmu) > 8) || (x86_pmu_num_counters_fixed(&pmu->pmu) > 4)) { |
|---|
| 7547 | pmu->cntr_mask64 = x86_pmu.cntr_mask64; | 
|---|
| 7548 | pmu->fixed_cntr_mask64 = x86_pmu.fixed_cntr_mask64; | 
|---|
| 7549 | } | 
|---|
| 7550 |  | 
|---|
| 7551 | pmu->pebs_events_mask = intel_pmu_pebs_mask(pmu->cntr_mask64); |
|---|
| 7552 | pmu->unconstrained = (struct event_constraint) | 
|---|
| 7553 | __EVENT_CONSTRAINT(0, pmu->cntr_mask64, | 
|---|
| 7554 | 0, x86_pmu_num_counters(&pmu->pmu), 0, 0); | 
|---|
| 7555 |  | 
|---|
| 7556 | pmu->extra_regs = intel_glc_extra_regs; | 
|---|
| 7557 |  | 
|---|
| 7558 | /* Initialize Atom core specific PerfMon capabilities.*/ | 
|---|
| 7559 | pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX]; | 
|---|
| 7560 | intel_pmu_init_grt(&pmu->pmu); |
|---|
| 7561 |  | 
|---|
| 7562 | x86_pmu.flags |= PMU_FL_MEM_LOADS_AUX; | 
|---|
| 7563 | intel_pmu_pebs_data_source_adl(); | 
|---|
| 7564 | pr_cont( "Alderlake Hybrid events, "); | 
|---|
| 7565 | name = "alderlake_hybrid"; | 
|---|
| 7566 | break; | 
|---|
| 7567 |  | 
|---|
| 7568 | case INTEL_METEORLAKE: | 
|---|
| 7569 | case INTEL_METEORLAKE_L: | 
|---|
| 7570 | case INTEL_ARROWLAKE_U: | 
|---|
| 7571 | intel_pmu_init_hybrid(hybrid_big_small); |
|---|
| 7572 |  | 
|---|
| 7573 | x86_pmu.pebs_latency_data = cmt_latency_data; | 
|---|
| 7574 | x86_pmu.get_event_constraints = mtl_get_event_constraints; | 
|---|
| 7575 | x86_pmu.hw_config = adl_hw_config; | 
|---|
| 7576 |  | 
|---|
| 7577 | td_attr = adl_hybrid_events_attrs; | 
|---|
| 7578 | mem_attr = mtl_hybrid_mem_attrs; | 
|---|
| 7579 | tsx_attr = adl_hybrid_tsx_attrs; | 
|---|
| 7580 | extra_attr = boot_cpu_has(X86_FEATURE_RTM) ? | 
|---|
| 7581 | mtl_hybrid_extra_attr_rtm : mtl_hybrid_extra_attr; | 
|---|
| 7582 |  | 
|---|
| 7583 | /* Initialize big core specific PerfMon capabilities.*/ | 
|---|
| 7584 | pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX]; | 
|---|
| 7585 | intel_pmu_init_glc(&pmu->pmu); |
|---|
| 7586 | pmu->extra_regs = intel_rwc_extra_regs; | 
|---|
| 7587 |  | 
|---|
| 7588 | /* Initialize Atom core specific PerfMon capabilities.*/ | 
|---|
| 7589 | pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX]; | 
|---|
| 7590 | intel_pmu_init_grt(&pmu->pmu); |
|---|
| 7591 | pmu->extra_regs = intel_cmt_extra_regs; | 
|---|
| 7592 |  | 
|---|
| 7593 | intel_pmu_pebs_data_source_mtl(); | 
|---|
| 7594 | pr_cont( "Meteorlake Hybrid events, "); | 
|---|
| 7595 | name = "meteorlake_hybrid"; | 
|---|
| 7596 | break; | 
|---|
| 7597 |  | 
|---|
| 7598 | case INTEL_PANTHERLAKE_L: | 
|---|
| 7599 | pr_cont( "Pantherlake Hybrid events, "); | 
|---|
| 7600 | name = "pantherlake_hybrid"; | 
|---|
| 7601 | goto lnl_common; | 
|---|
| 7602 |  | 
|---|
| 7603 | case INTEL_LUNARLAKE_M: | 
|---|
| 7604 | case INTEL_ARROWLAKE: | 
|---|
| 7605 | pr_cont( "Lunarlake Hybrid events, "); | 
|---|
| 7606 | name = "lunarlake_hybrid"; | 
|---|
| 7607 |  | 
|---|
| 7608 | lnl_common: | 
|---|
| 7609 | intel_pmu_init_hybrid(hybrid_big_small); |
|---|
| 7610 |  | 
|---|
| 7611 | x86_pmu.pebs_latency_data = lnl_latency_data; | 
|---|
| 7612 | x86_pmu.get_event_constraints = mtl_get_event_constraints; | 
|---|
| 7613 | x86_pmu.hw_config = adl_hw_config; | 
|---|
| 7614 |  | 
|---|
| 7615 | td_attr = lnl_hybrid_events_attrs; | 
|---|
| 7616 | mem_attr = mtl_hybrid_mem_attrs; | 
|---|
| 7617 | tsx_attr = adl_hybrid_tsx_attrs; | 
|---|
| 7618 | extra_attr = boot_cpu_has(X86_FEATURE_RTM) ? | 
|---|
| 7619 | mtl_hybrid_extra_attr_rtm : mtl_hybrid_extra_attr; | 
|---|
| 7620 |  | 
|---|
| 7621 | /* Initialize big core specific PerfMon capabilities.*/ | 
|---|
| 7622 | pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX]; | 
|---|
| 7623 | intel_pmu_init_lnc(&pmu->pmu); |
|---|
| 7624 |  | 
|---|
| 7625 | /* Initialize Atom core specific PerfMon capabilities.*/ | 
|---|
| 7626 | pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX]; | 
|---|
| 7627 | intel_pmu_init_skt(&pmu->pmu); |
|---|
| 7628 |  | 
|---|
| 7629 | intel_pmu_pebs_data_source_lnl(); | 
|---|
| 7630 | break; | 
|---|
| 7631 |  | 
|---|
| 7632 | case INTEL_ARROWLAKE_H: | 
|---|
| 7633 | intel_pmu_init_hybrid(hybrid_big_small_tiny); |
|---|
| 7634 |  | 
|---|
| 7635 | x86_pmu.pebs_latency_data = arl_h_latency_data; | 
|---|
| 7636 | x86_pmu.get_event_constraints = arl_h_get_event_constraints; | 
|---|
| 7637 | x86_pmu.hw_config = arl_h_hw_config; | 
|---|
| 7638 |  | 
|---|
| 7639 | td_attr = arl_h_hybrid_events_attrs; | 
|---|
| 7640 | mem_attr = arl_h_hybrid_mem_attrs; | 
|---|
| 7641 | tsx_attr = adl_hybrid_tsx_attrs; | 
|---|
| 7642 | extra_attr = boot_cpu_has(X86_FEATURE_RTM) ? | 
|---|
| 7643 | mtl_hybrid_extra_attr_rtm : mtl_hybrid_extra_attr; | 
|---|
| 7644 |  | 
|---|
| 7645 | /* Initialize big core specific PerfMon capabilities. */ | 
|---|
| 7646 | pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX]; | 
|---|
| 7647 | intel_pmu_init_lnc(&pmu->pmu); |
|---|
| 7648 |  | 
|---|
| 7649 | /* Initialize Atom core specific PerfMon capabilities. */ | 
|---|
| 7650 | pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX]; | 
|---|
| 7651 | intel_pmu_init_skt(&pmu->pmu); |
|---|
| 7652 |  | 
|---|
| 7653 | /* Initialize Lower Power Atom specific PerfMon capabilities. */ | 
|---|
| 7654 | pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_TINY_IDX]; | 
|---|
| 7655 | intel_pmu_init_grt(&pmu->pmu); |
|---|
| 7656 | pmu->extra_regs = intel_cmt_extra_regs; | 
|---|
| 7657 |  | 
|---|
| 7658 | intel_pmu_pebs_data_source_arl_h(); | 
|---|
| 7659 | pr_cont( "ArrowLake-H Hybrid events, "); | 
|---|
| 7660 | name = "arrowlake_h_hybrid"; | 
|---|
| 7661 | break; | 
|---|
| 7662 |  | 
|---|
| 7663 | default: | 
|---|
| 7664 | switch (x86_pmu.version) { | 
|---|
| 7665 | case 1: | 
|---|
| 7666 | x86_pmu.event_constraints = intel_v1_event_constraints; | 
|---|
| 7667 | pr_cont( "generic architected perfmon v1, "); | 
|---|
| 7668 | name = "generic_arch_v1"; | 
|---|
| 7669 | break; | 
|---|
| 7670 | case 2: | 
|---|
| 7671 | case 3: | 
|---|
| 7672 | case 4: | 
|---|
| 7673 | /* | 
|---|
| 7674 | * default constraints for v2 and up | 
|---|
| 7675 | */ | 
|---|
| 7676 | x86_pmu.event_constraints = intel_gen_event_constraints; | 
|---|
| 7677 | pr_cont( "generic architected perfmon, "); | 
|---|
| 7678 | name = "generic_arch_v2+"; | 
|---|
| 7679 | break; | 
|---|
| 7680 | default: | 
|---|
| 7681 | /* | 
|---|
| 7682 | * The default constraints for v5 and later support up to |
|---|
| 7683 | * 16 fixed counters. For fixed counter 4 and later, the |
|---|
| 7684 | * pseudo-encoding is applied. |
|---|
| 7685 | * The constraint table may be cut short, according to the CPUID |
|---|
| 7686 | * enumeration, by inserting EVENT_CONSTRAINT_END. |
|---|
| 7687 | */ | 
|---|
| 7688 | if (fls64(x86_pmu.fixed_cntr_mask64) > INTEL_PMC_MAX_FIXED) |
|---|
| 7689 | x86_pmu.fixed_cntr_mask64 &= GENMASK_ULL(INTEL_PMC_MAX_FIXED - 1, 0); | 
|---|
| 7690 | intel_v5_gen_event_constraints[fls64(x86_pmu.fixed_cntr_mask64)].weight = -1; |
|---|
| 7691 | x86_pmu.event_constraints = intel_v5_gen_event_constraints; | 
|---|
| 7692 | pr_cont( "generic architected perfmon, "); | 
|---|
| 7693 | name = "generic_arch_v5+"; | 
|---|
| 7694 | break; | 
|---|
| 7695 | } | 
|---|
| 7696 | } | 
|---|
| 7697 |  | 
|---|
| 7698 | snprintf(pmu_name_str, sizeof(pmu_name_str), "%s", name); |
|---|
| 7699 |  | 
|---|
| 7700 | if (!is_hybrid()) { | 
|---|
| 7701 | group_events_td.attrs  = td_attr; | 
|---|
| 7702 | group_events_mem.attrs = mem_attr; | 
|---|
| 7703 | group_events_tsx.attrs = tsx_attr; | 
|---|
| 7704 | group_format_extra.attrs = extra_attr; | 
|---|
| 7705 | group_format_extra_skl.attrs = extra_skl_attr; | 
|---|
| 7706 |  | 
|---|
| 7707 | x86_pmu.attr_update = attr_update; | 
|---|
| 7708 | } else { | 
|---|
| 7709 | hybrid_group_events_td.attrs  = td_attr; | 
|---|
| 7710 | hybrid_group_events_mem.attrs = mem_attr; | 
|---|
| 7711 | hybrid_group_events_tsx.attrs = tsx_attr; | 
|---|
| 7712 | hybrid_group_format_extra.attrs = extra_attr; | 
|---|
| 7713 |  | 
|---|
| 7714 | x86_pmu.attr_update = hybrid_attr_update; | 
|---|
| 7715 | } | 
|---|
| 7716 |  | 
|---|
| 7717 | /* | 
|---|
| 7718 | * The archPerfmonExt leaf (0x23) provides an enhanced, per-core |
|---|
| 7719 | * enumeration of PMU architectural features. For non-hybrid systems, |
|---|
| 7720 | * every core has the same PMU capabilities, so updating x86_pmu from |
|---|
| 7721 | * the boot CPU is sufficient. For hybrid systems, x86_pmu keeps the |
|---|
| 7722 | * common capabilities; still keep the values from leaf 0xa here and |
|---|
| 7723 | * do the core-specific update later, when a new core type comes |
|---|
| 7724 | * online. |
|---|
| 7725 | */ | 
|---|
| 7726 | if (!is_hybrid() && boot_cpu_has(X86_FEATURE_ARCH_PERFMON_EXT)) | 
|---|
| 7727 | update_pmu_cap(NULL); | 
|---|
| 7728 |  | 
|---|
| 7729 | intel_pmu_check_counters_mask(&x86_pmu.cntr_mask64, |
|---|
| 7730 | &x86_pmu.fixed_cntr_mask64, |
|---|
| 7731 | &x86_pmu.intel_ctrl); |
|---|
| 7732 |  | 
|---|
| 7733 | /* AnyThread may be deprecated on arch perfmon v5 or later */ | 
|---|
| 7734 | if (x86_pmu.intel_cap.anythread_deprecated) | 
|---|
| 7735 | x86_pmu.format_attrs = intel_arch_formats_attr; | 
|---|
| 7736 |  | 
|---|
| 7737 | intel_pmu_check_event_constraints(x86_pmu.event_constraints, |
|---|
| 7738 | x86_pmu.cntr_mask64, |
|---|
| 7739 | x86_pmu.fixed_cntr_mask64, |
|---|
| 7740 | x86_pmu.intel_ctrl); |
|---|
| 7741 | /* | 
|---|
| 7742 | * Accessing the LBR MSRs may cause a #GP under certain circumstances. |
|---|
| 7743 | * Check all LBR MSRs here. |
|---|
| 7744 | * Disable LBR access if any LBR MSR cannot be accessed. |
|---|
| 7745 | */ | 
|---|
| 7746 | if (x86_pmu.lbr_tos && !check_msr(x86_pmu.lbr_tos, 0x3UL)) |
|---|
| 7747 | x86_pmu.lbr_nr = 0; | 
|---|
| 7748 | for (i = 0; i < x86_pmu.lbr_nr; i++) { | 
|---|
| 7749 | if (!(check_msr(x86_pmu.lbr_from + i, 0xffffUL) && |
|---|
| 7750 | check_msr(x86_pmu.lbr_to + i, 0xffffUL))) |
|---|
| 7751 | x86_pmu.lbr_nr = 0; | 
|---|
| 7752 | } | 
|---|
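|  | /* |
|---|
|  | * check_msr() is assumed to probe each MSR with a safe write of the |
|---|
|  | * masked bits, a read-back and a restore (skipping the probe under a |
|---|
|  | * hypervisor), so a single inaccessible LBR MSR disables the whole |
|---|
|  | * LBR stack above. |
|---|
|  | */ |
|---|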
| 7753 |  | 
|---|
| 7754 | if (x86_pmu.lbr_nr) { | 
|---|
| 7755 | intel_pmu_lbr_init(); | 
|---|
| 7756 |  | 
|---|
| 7757 | pr_cont( "%d-deep LBR, ", x86_pmu.lbr_nr); | 
|---|
| 7758 |  | 
|---|
| 7759 | /* only support branch_stack snapshot for perfmon >= v2 */ | 
|---|
| 7760 | if (x86_pmu.disable_all == intel_pmu_disable_all) { | 
|---|
| 7761 | if (boot_cpu_has(X86_FEATURE_ARCH_LBR)) { | 
|---|
| 7762 | static_call_update(perf_snapshot_branch_stack, | 
|---|
| 7763 | intel_pmu_snapshot_arch_branch_stack); | 
|---|
| 7764 | } else { | 
|---|
| 7765 | static_call_update(perf_snapshot_branch_stack, | 
|---|
| 7766 | intel_pmu_snapshot_branch_stack); | 
|---|
| 7767 | } | 
|---|
| 7768 | } | 
|---|
| 7769 | } | 
|---|
| 7770 |  | 
|---|
| 7771 | intel_pmu_check_extra_regs(x86_pmu.extra_regs); |
|---|
| 7772 |  | 
|---|
| 7773 | /* Support full width counters using alternative MSR range */ | 
|---|
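|  | /* |
|---|
|  | * Writes to the legacy counter MSRs are sign-extended from bit 31, so |
|---|
|  | * without full-width writes max_period stays capped at 31 bits; the |
|---|
|  | * alternative PMC MSR range accepts writes of the full counter width. |
|---|
|  | */ |
|---|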
| 7774 | if (x86_pmu.intel_cap.full_width_write) { | 
|---|
| 7775 | x86_pmu.max_period = x86_pmu.cntval_mask >> 1; | 
|---|
| 7776 | x86_pmu.perfctr = MSR_IA32_PMC0; | 
|---|
| 7777 | pr_cont( "full-width counters, "); | 
|---|
| 7778 | } | 
|---|
| 7779 |  | 
|---|
| 7780 | /* Support V6+ MSR Aliasing */ | 
|---|
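|  | /* |
|---|
|  | * In the v6 alias region, each counter's config and counter registers |
|---|
|  | * appear to be laid out at a fixed per-counter stride, which the |
|---|
|  | * intel_pmu_v6_addr_offset() helper accounts for. |
|---|
|  | */ |
|---|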
| 7781 | if (x86_pmu.version >= 6) { | 
|---|
| 7782 | x86_pmu.perfctr = MSR_IA32_PMC_V6_GP0_CTR; | 
|---|
| 7783 | x86_pmu.eventsel = MSR_IA32_PMC_V6_GP0_CFG_A; | 
|---|
| 7784 | x86_pmu.fixedctr = MSR_IA32_PMC_V6_FX0_CTR; | 
|---|
| 7785 | x86_pmu.addr_offset = intel_pmu_v6_addr_offset; | 
|---|
| 7786 | } | 
|---|
| 7787 |  | 
|---|
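|  | /* |
|---|
|  | * GLOBAL_CTRL_EN_PERF_METRICS gates the built-in topdown metrics (the |
|---|
|  | * PERF_METRICS MSR paired with the fixed SLOTS counter); for hybrid, |
|---|
|  | * the per-PMU control is presumably set up when each core type comes |
|---|
|  | * online. |
|---|
|  | */ |
|---|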
| 7788 | if (!is_hybrid() && x86_pmu.intel_cap.perf_metrics) | 
|---|
| 7789 | x86_pmu.intel_ctrl |= GLOBAL_CTRL_EN_PERF_METRICS; | 
|---|
| 7790 |  | 
|---|
| 7791 | if (x86_pmu.intel_cap.pebs_timing_info) | 
|---|
| 7792 | x86_pmu.flags |= PMU_FL_RETIRE_LATENCY; | 
|---|
| 7793 |  | 
|---|
| 7794 | intel_aux_output_init(); | 
|---|
| 7795 |  | 
|---|
| 7796 | return 0; | 
|---|
| 7797 | } | 
|---|
| 7798 |  | 
|---|
| 7799 | /* | 
|---|
| 7800 | * HT bug: phase 2 init | 
|---|
| 7801 | * Called once we have valid topology information to check | 
|---|
| 7802 | * whether or not HT is enabled. |
|---|
| 7803 | * If HT is off, then we disable the workaround. |
|---|
| 7804 | */ | 
|---|
| 7805 | static __init int fixup_ht_bug(void) | 
|---|
| 7806 | { | 
|---|
| 7807 | int c; | 
|---|
| 7808 | /* | 
|---|
| 7809 | * problem not present on this CPU model, nothing to do | 
|---|
| 7810 | */ | 
|---|
| 7811 | if (!(x86_pmu.flags & PMU_FL_EXCL_ENABLED)) | 
|---|
| 7812 | return 0; | 
|---|
| 7813 |  | 
|---|
| 7814 | if (topology_max_smt_threads() > 1) { | 
|---|
| 7815 | pr_info( "PMU erratum BJ122, BV98, HSD29 worked around, HT is on\n"); | 
|---|
| 7816 | return 0; | 
|---|
| 7817 | } | 
|---|
| 7818 |  | 
|---|
| 7819 | cpus_read_lock(); | 
|---|
| 7820 |  | 
|---|
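|  | /* |
|---|
|  | * The perf-based hardlockup detector owns a counter on each CPU; stop |
|---|
|  | * it while the exclusive-counter scheduling hooks are torn down and |
|---|
|  | * restart it below. |
|---|
|  | */ |
|---|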
| 7821 | hardlockup_detector_perf_stop(); | 
|---|
| 7822 |  | 
|---|
| 7823 | x86_pmu.flags &= ~(PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED); | 
|---|
| 7824 |  | 
|---|
| 7825 | x86_pmu.start_scheduling = NULL; | 
|---|
| 7826 | x86_pmu.commit_scheduling = NULL; | 
|---|
| 7827 | x86_pmu.stop_scheduling = NULL; | 
|---|
| 7828 |  | 
|---|
| 7829 | hardlockup_detector_perf_restart(); | 
|---|
| 7830 |  | 
|---|
| 7831 | for_each_online_cpu(c) | 
|---|
| 7832 | free_excl_cntrs(&per_cpu(cpu_hw_events, c)); |
|---|
| 7833 |  | 
|---|
| 7834 | cpus_read_unlock(); | 
|---|
| 7835 | pr_info( "PMU erratum BJ122, BV98, HSD29 workaround disabled, HT off\n"); | 
|---|
| 7836 | return 0; | 
|---|
| 7837 | } | 
|---|
| 7838 | subsys_initcall(fixup_ht_bug) | 
|---|
| 7839 |  | 
|---|