/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/slab.h>
#include <linux/pci.h>
#include <asm/apicdef.h>
#include <asm/intel-family.h>
#include <linux/io-64-nonatomic-lo-hi.h>

#include <linux/perf_event.h>
#include "../perf_event.h"

#define UNCORE_PMU_NAME_LEN 32
#define UNCORE_PMU_HRTIMER_INTERVAL (60LL * NSEC_PER_SEC)
#define UNCORE_SNB_IMC_HRTIMER_INTERVAL (5ULL * NSEC_PER_SEC)

#define UNCORE_FIXED_EVENT 0xff
#define UNCORE_PMC_IDX_MAX_GENERIC 8
#define UNCORE_PMC_IDX_MAX_FIXED 1
#define UNCORE_PMC_IDX_MAX_FREERUNNING 1
#define UNCORE_PMC_IDX_FIXED UNCORE_PMC_IDX_MAX_GENERIC
#define UNCORE_PMC_IDX_FREERUNNING (UNCORE_PMC_IDX_FIXED + \
				    UNCORE_PMC_IDX_MAX_FIXED)
#define UNCORE_PMC_IDX_MAX (UNCORE_PMC_IDX_FREERUNNING + \
			    UNCORE_PMC_IDX_MAX_FREERUNNING)
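
/*
 * With the values above, the per-box counter index space works out to:
 * generic counters occupy indices 0-7, the fixed counter sits at index 8
 * (UNCORE_PMC_IDX_FIXED), the free running pseudo-counter at index 9
 * (UNCORE_PMC_IDX_FREERUNNING), and UNCORE_PMC_IDX_MAX is 10.
 */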

#define UNCORE_PCI_DEV_FULL_DATA(dev, func, type, idx) \
	((dev << 24) | (func << 16) | (type << 8) | idx)
#define UNCORE_PCI_DEV_DATA(type, idx) ((type << 8) | idx)
#define UNCORE_PCI_DEV_DEV(data) ((data >> 24) & 0xff)
#define UNCORE_PCI_DEV_FUNC(data) ((data >> 16) & 0xff)
#define UNCORE_PCI_DEV_TYPE(data) ((data >> 8) & 0xff)
#define UNCORE_PCI_DEV_IDX(data) (data & 0xff)
#define UNCORE_EXTRA_PCI_DEV 0xff
#define UNCORE_EXTRA_PCI_DEV_MAX 4
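
/*
 * Illustration (the values are arbitrary): UNCORE_PCI_DEV_FULL_DATA(3, 0, 2, 1)
 * packs device 3, function 0, box type 2 and box index 1 into 0x03000201;
 * UNCORE_PCI_DEV_DEV()/_FUNC()/_TYPE()/_IDX() recover the individual fields
 * from that value. UNCORE_PCI_DEV_DATA() packs only the type/idx pair.
 */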

#define UNCORE_EVENT_CONSTRAINT(c, n) EVENT_CONSTRAINT(c, n, 0xff)

#define UNCORE_IGNORE_END -1

struct pci_extra_dev {
	struct pci_dev *dev[UNCORE_EXTRA_PCI_DEV_MAX];
};

struct intel_uncore_ops;
struct intel_uncore_pmu;
struct intel_uncore_box;
struct uncore_event_desc;
struct freerunning_counters;
struct intel_uncore_topology;

struct intel_uncore_type {
	const char *name;
	int num_counters;
	int num_boxes;
	int perf_ctr_bits;
	int fixed_ctr_bits;
	int num_freerunning_types;
	int type_id;
	unsigned perf_ctr;
	unsigned event_ctl;
	unsigned event_mask;
	unsigned event_mask_ext;
	unsigned fixed_ctr;
	unsigned fixed_ctl;
	unsigned box_ctl;
	union {
		unsigned msr_offset;
		unsigned mmio_offset;
	};
	unsigned mmio_map_size;
	unsigned num_shared_regs:8;
	unsigned single_fixed:1;
	unsigned pair_ctr_ctl:1;
	union {
		u64 *msr_offsets;
		u64 *pci_offsets;
		u64 *mmio_offsets;
	};
	struct event_constraint unconstrainted;
	struct event_constraint *constraints;
	struct intel_uncore_pmu *pmus;
	struct intel_uncore_ops *ops;
	struct uncore_event_desc *event_descs;
	struct freerunning_counters *freerunning;
	const struct attribute_group *attr_groups[4];
	const struct attribute_group **attr_update;
	struct pmu *pmu; /* for custom pmu ops */
	struct rb_root *boxes;
	/*
	 * The uncore PMU stores the relevant platform topology configuration
	 * here to identify which platform component each PMON block of this
	 * type is supposed to monitor.
	 */
	struct intel_uncore_topology **topology;
	/*
	 * Optional callbacks for managing the mapping of uncore units to PMONs
	 */
	int (*get_topology)(struct intel_uncore_type *type);
	void (*set_mapping)(struct intel_uncore_type *type);
	void (*cleanup_mapping)(struct intel_uncore_type *type);
	/*
	 * Optional callback for cleaning up extra uncore units
	 */
	void (*cleanup_extra_boxes)(struct intel_uncore_type *type);
};

#define pmu_group attr_groups[0]
#define format_group attr_groups[1]
#define events_group attr_groups[2]

struct intel_uncore_ops {
	void (*init_box)(struct intel_uncore_box *);
	void (*exit_box)(struct intel_uncore_box *);
	void (*disable_box)(struct intel_uncore_box *);
	void (*enable_box)(struct intel_uncore_box *);
	void (*disable_event)(struct intel_uncore_box *, struct perf_event *);
	void (*enable_event)(struct intel_uncore_box *, struct perf_event *);
	u64 (*read_counter)(struct intel_uncore_box *, struct perf_event *);
	int (*hw_config)(struct intel_uncore_box *, struct perf_event *);
	struct event_constraint *(*get_constraint)(struct intel_uncore_box *,
						   struct perf_event *);
	void (*put_constraint)(struct intel_uncore_box *, struct perf_event *);
};

struct intel_uncore_pmu {
	struct pmu pmu;
	char name[UNCORE_PMU_NAME_LEN];
	int pmu_idx;
	bool registered;
	atomic_t activeboxes;
	cpumask_t cpu_mask;
	struct intel_uncore_type *type;
	struct intel_uncore_box **boxes;
};

struct intel_uncore_extra_reg {
	raw_spinlock_t lock;
	u64 config, config1, config2;
	atomic_t ref;
};

struct intel_uncore_box {
	int dieid;	/* Logical die ID */
	int n_active;	/* number of active events */
	int n_events;
	int cpu;	/* cpu to collect events */
	unsigned long flags;
	atomic_t refcnt;
	struct perf_event *events[UNCORE_PMC_IDX_MAX];
	struct perf_event *event_list[UNCORE_PMC_IDX_MAX];
	struct event_constraint *event_constraint[UNCORE_PMC_IDX_MAX];
	unsigned long active_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
	u64 tags[UNCORE_PMC_IDX_MAX];
	struct pci_dev *pci_dev;
	struct intel_uncore_pmu *pmu;
	u64 hrtimer_duration;	/* hrtimer timeout for this box */
	struct hrtimer hrtimer;
	struct list_head list;
	struct list_head active_list;
	void __iomem *io_addr;
	struct intel_uncore_extra_reg shared_regs[];
};

/* CFL uncore 8th cbox MSRs */
#define CFL_UNC_CBO_7_PERFEVTSEL0 0xf70
#define CFL_UNC_CBO_7_PER_CTR0 0xf76

#define UNCORE_BOX_FLAG_INITIATED 0
/* event config registers are 8 bytes apart */
#define UNCORE_BOX_FLAG_CTL_OFFS8 1
/* CFL 8th CBOX has different MSR space */
#define UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS 2

struct uncore_event_desc {
	struct device_attribute attr;
	const char *config;
};

struct freerunning_counters {
	unsigned int counter_base;
	unsigned int counter_offset;
	unsigned int box_offset;
	unsigned int num_counters;
	unsigned int bits;
	unsigned *box_offsets;
};

struct uncore_iio_topology {
	int pci_bus_no;
	int segment;
};

struct uncore_upi_topology {
	int die_to;
	int pmu_idx_to;
	int enabled;
};

struct intel_uncore_topology {
	int pmu_idx;
	union {
		void *untyped;
		struct uncore_iio_topology *iio;
		struct uncore_upi_topology *upi;
	};
};

struct pci2phy_map {
	struct list_head list;
	int segment;
	int pbus_to_dieid[256];
};

struct pci2phy_map *__find_pci2phy_map(int segment);
int uncore_pcibus_to_dieid(struct pci_bus *bus);
int uncore_die_to_segment(int die);
int uncore_device_to_die(struct pci_dev *dev);

ssize_t uncore_event_show(struct device *dev,
			  struct device_attribute *attr, char *buf);

static inline struct intel_uncore_pmu *dev_to_uncore_pmu(struct device *dev)
{
	return container_of(dev_get_drvdata(dev), struct intel_uncore_pmu, pmu);
}

#define to_device_attribute(n) container_of(n, struct device_attribute, attr)
#define to_dev_ext_attribute(n) container_of(n, struct dev_ext_attribute, attr)
#define attr_to_ext_attr(n) to_dev_ext_attribute(to_device_attribute(n))

extern int __uncore_max_dies;
#define uncore_max_dies() (__uncore_max_dies)

#define INTEL_UNCORE_EVENT_DESC(_name, _config) \
{ \
	.attr = __ATTR(_name, 0444, uncore_event_show, NULL), \
	.config = _config, \
}

#define DEFINE_UNCORE_FORMAT_ATTR(_var, _name, _format) \
static ssize_t __uncore_##_var##_show(struct device *dev, \
				      struct device_attribute *attr, \
				      char *page) \
{ \
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \
	return sprintf(page, _format "\n"); \
} \
static struct device_attribute format_attr_##_var = \
	__ATTR(_name, 0444, __uncore_##_var##_show, NULL)
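
/*
 * Illustrative uses of the two helpers above, roughly as they appear in the
 * per-platform uncore drivers (the exact names/strings here are examples,
 * not taken from this file):
 *
 *	DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
 *	INTEL_UNCORE_EVENT_DESC(data_reads, "event=0x01"),
 *
 * The first defines a sysfs "format" attribute that reports the config bit
 * layout string; the second fills a struct uncore_event_desc entry and wires
 * uncore_event_show() (declared above) in as its sysfs show handler.
 */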

static inline bool uncore_pmc_fixed(int idx)
{
	return idx == UNCORE_PMC_IDX_FIXED;
}

static inline bool uncore_pmc_freerunning(int idx)
{
	return idx == UNCORE_PMC_IDX_FREERUNNING;
}

static inline bool uncore_mmio_is_valid_offset(struct intel_uncore_box *box,
					       unsigned long offset)
{
	if (offset < box->pmu->type->mmio_map_size)
		return true;

	pr_warn_once("perf uncore: Invalid offset 0x%lx exceeds mapped area of %s.\n",
		     offset, box->pmu->type->name);

	return false;
}

static inline
unsigned int uncore_mmio_box_ctl(struct intel_uncore_box *box)
{
	return box->pmu->type->box_ctl +
	       box->pmu->type->mmio_offset * box->pmu->pmu_idx;
}

static inline unsigned uncore_pci_box_ctl(struct intel_uncore_box *box)
{
	return box->pmu->type->box_ctl;
}

static inline unsigned uncore_pci_fixed_ctl(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctl;
}

static inline unsigned uncore_pci_fixed_ctr(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctr;
}

static inline
unsigned uncore_pci_event_ctl(struct intel_uncore_box *box, int idx)
{
	if (test_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags))
		return idx * 8 + box->pmu->type->event_ctl;

	return idx * 4 + box->pmu->type->event_ctl;
}

static inline
unsigned uncore_pci_perf_ctr(struct intel_uncore_box *box, int idx)
{
	return idx * 8 + box->pmu->type->perf_ctr;
}

static inline unsigned uncore_msr_box_offset(struct intel_uncore_box *box)
{
	struct intel_uncore_pmu *pmu = box->pmu;

	return pmu->type->msr_offsets ?
	       pmu->type->msr_offsets[pmu->pmu_idx] :
	       pmu->type->msr_offset * pmu->pmu_idx;
}

static inline unsigned uncore_msr_box_ctl(struct intel_uncore_box *box)
{
	if (!box->pmu->type->box_ctl)
		return 0;
	return box->pmu->type->box_ctl + uncore_msr_box_offset(box);
}

static inline unsigned uncore_msr_fixed_ctl(struct intel_uncore_box *box)
{
	if (!box->pmu->type->fixed_ctl)
		return 0;
	return box->pmu->type->fixed_ctl + uncore_msr_box_offset(box);
}

static inline unsigned uncore_msr_fixed_ctr(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctr + uncore_msr_box_offset(box);
}

/*
 * In the uncore documentation there is no event code assigned to the free
 * running counters. Some events need to be defined to select them, so they
 * are encoded as event-code + umask-code.
 *
 * The event code for all free running counters is 0xff, which is the same
 * as for the fixed counters.
 *
 * The umask code is used to distinguish a fixed counter from a free running
 * counter, and to distinguish the different types of free running counters.
 * - For fixed counters, the umask code is 0x0X.
 *   X is the index of the fixed counter, starting from 0.
 * - For free running counters, the umask code uses the rest of the space
 *   and has the form 0xXY.
 *   X is the type of free running counter, starting from 1.
 *   Y is the index of the free running counter within that type, starting
 *   from 0.
 *
 * For example, there are three types of IIO free running counters on the
 * Skylake server: IO CLOCKS counters, BANDWIDTH counters and UTILIZATION
 * counters. The event code for all of them is 0xff.
 * 'ioclk' is the first counter of IO CLOCKS. IO CLOCKS is the first type,
 * whose umask codes start at 0x10.
 * So 'ioclk' is encoded as event=0xff,umask=0x10.
 * 'bw_in_port2' is the third counter of the BANDWIDTH type. BANDWIDTH is
 * the second type, whose umask codes start at 0x20.
 * So 'bw_in_port2' is encoded as event=0xff,umask=0x22.
 */
static inline unsigned int uncore_freerunning_idx(u64 config)
{
	return ((config >> 8) & 0xf);
}

#define UNCORE_FREERUNNING_UMASK_START 0x10

static inline unsigned int uncore_freerunning_type(u64 config)
{
	return ((((config >> 8) - UNCORE_FREERUNNING_UMASK_START) >> 4) & 0xf);
}
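
/*
 * Worked example using the encodings from the comment above: for 'ioclk',
 * config = 0x10ff, so uncore_freerunning_type() yields
 * ((0x10 - 0x10) >> 4) & 0xf = 0 and uncore_freerunning_idx() yields
 * 0x10 & 0xf = 0. For 'bw_in_port2', config = 0x22ff, giving type
 * ((0x22 - 0x10) >> 4) & 0xf = 1 and idx 0x22 & 0xf = 2.
 */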

static inline
unsigned int uncore_freerunning_counter(struct intel_uncore_box *box,
					struct perf_event *event)
{
	unsigned int type = uncore_freerunning_type(event->hw.config);
	unsigned int idx = uncore_freerunning_idx(event->hw.config);
	struct intel_uncore_pmu *pmu = box->pmu;

	return pmu->type->freerunning[type].counter_base +
	       pmu->type->freerunning[type].counter_offset * idx +
	       (pmu->type->freerunning[type].box_offsets ?
		pmu->type->freerunning[type].box_offsets[pmu->pmu_idx] :
		pmu->type->freerunning[type].box_offset * pmu->pmu_idx);
}

static inline
unsigned uncore_msr_event_ctl(struct intel_uncore_box *box, int idx)
{
	if (test_bit(UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS, &box->flags)) {
		return CFL_UNC_CBO_7_PERFEVTSEL0 +
		       (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx);
	} else {
		return box->pmu->type->event_ctl +
		       (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
		       uncore_msr_box_offset(box);
	}
}

static inline
unsigned uncore_msr_perf_ctr(struct intel_uncore_box *box, int idx)
{
	if (test_bit(UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS, &box->flags)) {
		return CFL_UNC_CBO_7_PER_CTR0 +
		       (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx);
	} else {
		return box->pmu->type->perf_ctr +
		       (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
		       uncore_msr_box_offset(box);
	}
}

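/*
 * The generic accessors below pick the PCI/MMIO register layout when the box
 * is backed by a PCI device or an MMIO mapping (pci_dev or io_addr is set),
 * and fall back to the MSR layout otherwise.
 */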
static inline
unsigned uncore_fixed_ctl(struct intel_uncore_box *box)
{
	if (box->pci_dev || box->io_addr)
		return uncore_pci_fixed_ctl(box);
	else
		return uncore_msr_fixed_ctl(box);
}

static inline
unsigned uncore_fixed_ctr(struct intel_uncore_box *box)
{
	if (box->pci_dev || box->io_addr)
		return uncore_pci_fixed_ctr(box);
	else
		return uncore_msr_fixed_ctr(box);
}

static inline
unsigned uncore_event_ctl(struct intel_uncore_box *box, int idx)
{
	if (box->pci_dev || box->io_addr)
		return uncore_pci_event_ctl(box, idx);
	else
		return uncore_msr_event_ctl(box, idx);
}

static inline
unsigned uncore_perf_ctr(struct intel_uncore_box *box, int idx)
{
	if (box->pci_dev || box->io_addr)
		return uncore_pci_perf_ctr(box, idx);
	else
		return uncore_msr_perf_ctr(box, idx);
}

static inline int uncore_perf_ctr_bits(struct intel_uncore_box *box)
{
	return box->pmu->type->perf_ctr_bits;
}

static inline int uncore_fixed_ctr_bits(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctr_bits;
}

static inline
unsigned int uncore_freerunning_bits(struct intel_uncore_box *box,
				     struct perf_event *event)
{
	unsigned int type = uncore_freerunning_type(event->hw.config);

	return box->pmu->type->freerunning[type].bits;
}

static inline int uncore_num_freerunning(struct intel_uncore_box *box,
					 struct perf_event *event)
{
	unsigned int type = uncore_freerunning_type(event->hw.config);

	return box->pmu->type->freerunning[type].num_counters;
}

static inline int uncore_num_freerunning_types(struct intel_uncore_box *box,
					       struct perf_event *event)
{
	return box->pmu->type->num_freerunning_types;
}

static inline bool check_valid_freerunning_event(struct intel_uncore_box *box,
						 struct perf_event *event)
{
	unsigned int type = uncore_freerunning_type(event->hw.config);
	unsigned int idx = uncore_freerunning_idx(event->hw.config);

	return (type < uncore_num_freerunning_types(box, event)) &&
	       (idx < uncore_num_freerunning(box, event));
}

static inline int uncore_num_counters(struct intel_uncore_box *box)
{
	return box->pmu->type->num_counters;
}

static inline bool is_freerunning_event(struct perf_event *event)
{
	u64 cfg = event->attr.config;

	return ((cfg & UNCORE_FIXED_EVENT) == UNCORE_FIXED_EVENT) &&
	       (((cfg >> 8) & 0xff) >= UNCORE_FREERUNNING_UMASK_START);
}

/* Check and reject invalid config */
static inline int uncore_freerunning_hw_config(struct intel_uncore_box *box,
					       struct perf_event *event)
{
	if (is_freerunning_event(event))
		return 0;

	return -EINVAL;
}

static inline void uncore_disable_event(struct intel_uncore_box *box,
					struct perf_event *event)
{
	box->pmu->type->ops->disable_event(box, event);
}

static inline void uncore_enable_event(struct intel_uncore_box *box,
				       struct perf_event *event)
{
	box->pmu->type->ops->enable_event(box, event);
}

static inline u64 uncore_read_counter(struct intel_uncore_box *box,
				      struct perf_event *event)
{
	return box->pmu->type->ops->read_counter(box, event);
}

static inline void uncore_box_init(struct intel_uncore_box *box)
{
	if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
		if (box->pmu->type->ops->init_box)
			box->pmu->type->ops->init_box(box);
	}
}

static inline void uncore_box_exit(struct intel_uncore_box *box)
{
	if (test_and_clear_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
		if (box->pmu->type->ops->exit_box)
			box->pmu->type->ops->exit_box(box);
	}
}

static inline bool uncore_box_is_fake(struct intel_uncore_box *box)
{
	return (box->dieid < 0);
}

static inline struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event)
{
	return container_of(event->pmu, struct intel_uncore_pmu, pmu);
}

static inline struct intel_uncore_box *uncore_event_to_box(struct perf_event *event)
{
	return event->pmu_private;
}

struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu);
u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event);
void uncore_mmio_exit_box(struct intel_uncore_box *box);
u64 uncore_mmio_read_counter(struct intel_uncore_box *box,
			     struct perf_event *event);
void uncore_pmu_start_hrtimer(struct intel_uncore_box *box);
void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box);
void uncore_pmu_event_start(struct perf_event *event, int flags);
void uncore_pmu_event_stop(struct perf_event *event, int flags);
int uncore_pmu_event_add(struct perf_event *event, int flags);
void uncore_pmu_event_del(struct perf_event *event, int flags);
void uncore_pmu_event_read(struct perf_event *event);
void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event);
struct event_constraint *
uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event);
void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event);
u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx);
void uncore_get_alias_name(char *pmu_name, struct intel_uncore_pmu *pmu);

extern struct intel_uncore_type *empty_uncore[];
extern struct intel_uncore_type **uncore_msr_uncores;
extern struct intel_uncore_type **uncore_pci_uncores;
extern struct intel_uncore_type **uncore_mmio_uncores;
extern struct pci_driver *uncore_pci_driver;
extern struct pci_driver *uncore_pci_sub_driver;
extern raw_spinlock_t pci2phy_map_lock;
extern struct list_head pci2phy_map_head;
extern struct pci_extra_dev *uncore_extra_pci_dev;
extern struct event_constraint uncore_constraint_empty;
extern int spr_uncore_units_ignore[];
extern int gnr_uncore_units_ignore[];

/* uncore_snb.c */
int snb_uncore_pci_init(void);
int ivb_uncore_pci_init(void);
int hsw_uncore_pci_init(void);
int bdw_uncore_pci_init(void);
int skl_uncore_pci_init(void);
void snb_uncore_cpu_init(void);
void nhm_uncore_cpu_init(void);
void skl_uncore_cpu_init(void);
void icl_uncore_cpu_init(void);
void tgl_uncore_cpu_init(void);
void adl_uncore_cpu_init(void);
void lnl_uncore_cpu_init(void);
void mtl_uncore_cpu_init(void);
void ptl_uncore_cpu_init(void);
void tgl_uncore_mmio_init(void);
void tgl_l_uncore_mmio_init(void);
void adl_uncore_mmio_init(void);
void lnl_uncore_mmio_init(void);
void ptl_uncore_mmio_init(void);
int snb_pci2phy_map_init(int devid);

/* uncore_snbep.c */
int snbep_uncore_pci_init(void);
void snbep_uncore_cpu_init(void);
int ivbep_uncore_pci_init(void);
void ivbep_uncore_cpu_init(void);
int hswep_uncore_pci_init(void);
void hswep_uncore_cpu_init(void);
int bdx_uncore_pci_init(void);
void bdx_uncore_cpu_init(void);
int knl_uncore_pci_init(void);
void knl_uncore_cpu_init(void);
int skx_uncore_pci_init(void);
void skx_uncore_cpu_init(void);
int snr_uncore_pci_init(void);
void snr_uncore_cpu_init(void);
void snr_uncore_mmio_init(void);
int icx_uncore_pci_init(void);
void icx_uncore_cpu_init(void);
void icx_uncore_mmio_init(void);
int spr_uncore_pci_init(void);
void spr_uncore_cpu_init(void);
void spr_uncore_mmio_init(void);
int gnr_uncore_pci_init(void);
void gnr_uncore_cpu_init(void);
void gnr_uncore_mmio_init(void);

/* uncore_nhmex.c */
void nhmex_uncore_cpu_init(void);