// SPDX-License-Identifier: GPL-2.0-only
/*
 * Shared support code for AMD K8 northbridges and derivatives.
 * Copyright 2006 Andi Kleen, SUSE Labs.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/pci_ids.h>

#include <asm/amd/nb.h>
#include <asm/cpuid/api.h>

static u32 *flush_words;

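/*
 * PCI device IDs of the pre-Zen northbridge "misc" (function 3) devices.
 * Used by early_is_amd_nb() below to recognize a northbridge from its raw
 * config-space vendor/device word.
 */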
static const struct pci_device_id amd_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
	{}
};

const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
	{ 0x00, 0x18, 0x20 },
	{ 0xff, 0x00, 0x20 },
	{ 0xfe, 0x00, 0x20 },
	{ }
};

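/* Cached per-node northbridge state, filled in once by amd_cache_northbridges(). */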
static struct amd_northbridge_info amd_northbridges;

u16 amd_nb_num(void)
{
	return amd_northbridges.num;
}
EXPORT_SYMBOL_GPL(amd_nb_num);

bool amd_nb_has_feature(unsigned int feature)
{
	return ((amd_northbridges.flags & feature) == feature);
}
EXPORT_SYMBOL_GPL(amd_nb_has_feature);

struct amd_northbridge *node_to_amd_nb(int node)
{
	return (node < amd_northbridges.num) ? &amd_northbridges.nb[node] : NULL;
}
EXPORT_SYMBOL_GPL(node_to_amd_nb);

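/*
 * Discover the northbridge PCI devices for every node: function 3 ("misc")
 * is required, function 4 ("link") is optional.  Also set the feature flags
 * (GART, L3 index disable, L3 partitioning) based on the CPU family.
 */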
static int amd_cache_northbridges(void)
{
	struct amd_northbridge *nb;
	u16 i;

	if (amd_northbridges.num)
		return 0;

	amd_northbridges.num = amd_num_nodes();

	nb = kcalloc(amd_northbridges.num, sizeof(struct amd_northbridge), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	amd_northbridges.nb = nb;

	for (i = 0; i < amd_northbridges.num; i++) {
		node_to_amd_nb(i)->misc = amd_node_get_func(i, 3);

		/*
		 * Each Northbridge must have a 'misc' device.
		 * If not, then uninitialize everything.
		 */
		if (!node_to_amd_nb(i)->misc) {
			amd_northbridges.num = 0;
			kfree(nb);
			return -ENODEV;
		}

		node_to_amd_nb(i)->link = amd_node_get_func(i, 4);
	}

	if (amd_gart_present())
		amd_northbridges.flags |= AMD_NB_GART;

	if (!cpuid_amd_hygon_has_l3_cache())
		return 0;

	/*
	 * Some CPU families support L3 Cache Index Disable. There are some
	 * limitations because of E382 and E388 on family 0x10.
	 */
	if (boot_cpu_data.x86 == 0x10 &&
	    boot_cpu_data.x86_model >= 0x8 &&
	    (boot_cpu_data.x86_model > 0x9 ||
	     boot_cpu_data.x86_stepping >= 0x1))
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	/* L3 cache partitioning is supported on family 0x15 */
	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;

	return 0;
}

/*
 * Ignores subdevice/subvendor but as far as I can figure out
 * they're useless anyways
 */
bool __init early_is_amd_nb(u32 device)
{
	const struct pci_device_id *id;
	u32 vendor = device & 0xffff;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
		return false;

	if (cpu_feature_enabled(X86_FEATURE_ZEN))
		return false;

	device >>= 16;
	for (id = amd_nb_misc_ids; id->vendor; id++)
		if (vendor == id->vendor && device == id->device)
			return true;
	return false;
}

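/*
 * Derive the MMCONFIG (ECAM) aperture from the Fam10h MMIO_CONF_BASE MSR:
 * the aperture starts at the configured base and spans 1 MiB per bus,
 * i.e. 2^(busrange + 20) bytes.
 */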
struct resource *amd_get_mmconfig_range(struct resource *res)
{
	u64 base, msr;
	unsigned int segn_busn_bits;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
		return NULL;

	/* Assume CPUs from Fam10h have mmconfig, although not all VMs do */
	if (boot_cpu_data.x86 < 0x10 ||
	    rdmsrq_safe(MSR_FAM10H_MMIO_CONF_BASE, &msr))
		return NULL;

	/* mmconfig is not enabled */
	if (!(msr & FAM10H_MMIO_CONF_ENABLE))
		return NULL;

	base = msr & (FAM10H_MMIO_CONF_BASE_MASK << FAM10H_MMIO_CONF_BASE_SHIFT);

	segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
			 FAM10H_MMIO_CONF_BUSRANGE_MASK;

	res->flags = IORESOURCE_MEM;
	res->start = base;
	res->end = base + (1ULL << (segn_busn_bits + 20)) - 1;
	return res;
}

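/*
 * Return the mask of enabled L3 subcaches for @cpu's compute unit, read
 * from register 0x1d4 of the node's "link" (function 4) device: four
 * enable bits per compute unit.
 */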
int amd_get_subcaches(int cpu)
{
	struct pci_dev *link = node_to_amd_nb(topology_amd_node_id(cpu))->link;
	unsigned int mask;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		return 0;

	pci_read_config_dword(link, 0x1d4, &mask);

	return (mask >> (4 * cpu_data(cpu).topo.core_id)) & 0xf;
}

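/*
 * Program the four L3 subcache enable bits for @cpu's compute unit.  BAN
 * mode (bits 19-20 of register 0x1b8 in the "misc" device) is temporarily
 * switched off while any subcache is disabled and restored once the
 * partitioning returns to its reset state.
 */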
int amd_set_subcaches(int cpu, unsigned long mask)
{
	static unsigned int reset, ban;
	struct amd_northbridge *nb = node_to_amd_nb(topology_amd_node_id(cpu));
	unsigned int reg;
	int cuid;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
		return -EINVAL;

	/* if necessary, collect reset state of L3 partitioning and BAN mode */
	if (reset == 0) {
		pci_read_config_dword(nb->link, 0x1d4, &reset);
		pci_read_config_dword(nb->misc, 0x1b8, &ban);
		ban &= 0x180000;
	}

	/* deactivate BAN mode if any subcaches are to be disabled */
	if (mask != 0xf) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
	}

	cuid = cpu_data(cpu).topo.core_id;
	mask <<= 4 * cuid;
	mask |= (0xf ^ (1 << cuid)) << 26;

	pci_write_config_dword(nb->link, 0x1d4, mask);

	/* reset BAN mode if L3 partitioning returned to reset state */
	pci_read_config_dword(nb->link, 0x1d4, &reg);
	if (reg == reset) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		reg &= ~0x180000;
		pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
	}

	return 0;
}

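/*
 * Snapshot each northbridge's GART flush word (register 0x9c of the "misc"
 * device) so that amd_flush_garts() can later set the flush bit without
 * disturbing the rest of the register.
 */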
static void amd_cache_gart(void)
{
	u16 i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	flush_words = kmalloc_array(amd_northbridges.num, sizeof(u32), GFP_KERNEL);
	if (!flush_words) {
		amd_northbridges.flags &= ~AMD_NB_GART;
		pr_notice("Cannot initialize GART flush words, GART support disabled\n");
		return;
	}

	for (i = 0; i != amd_northbridges.num; i++)
		pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c, &flush_words[i]);
}

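/* Trigger a GART flush on every northbridge and wait for all of them to complete. */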
void amd_flush_garts(void)
{
	int flushed, i;
	unsigned long flags;
	static DEFINE_SPINLOCK(gart_lock);

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	/*
	 * Avoid races between AGP and IOMMU. In theory it's not needed
	 * but I'm not sure if the hardware won't lose flush requests
	 * when another is pending. This whole thing is so expensive anyways
	 * that it doesn't matter to serialize more. -AK
	 */
	spin_lock_irqsave(&gart_lock, flags);
	flushed = 0;
	for (i = 0; i < amd_northbridges.num; i++) {
		pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				       flush_words[i] | 1);
		flushed++;
	}
	for (i = 0; i < amd_northbridges.num; i++) {
		u32 w;
		/* Make sure the hardware actually executed the flush. */
		for (;;) {
			pci_read_config_dword(node_to_amd_nb(i)->misc,
					      0x9c, &w);
			if (!(w & 1))
				break;
			cpu_relax();
		}
	}
	spin_unlock_irqrestore(&gart_lock, flags);
	if (!flushed)
		pr_notice("nothing to flush?\n");
}
EXPORT_SYMBOL_GPL(amd_flush_garts);

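/* Erratum 688 workaround: set bits 3 and 14 of the IC_CFG MSR on each CPU. */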
static void __fix_erratum_688(void *info)
{
#define MSR_AMD64_IC_CFG 0xC0011021

	msr_set_bit(MSR_AMD64_IC_CFG, 3);
	msr_set_bit(MSR_AMD64_IC_CFG, 14);
}

/* Apply erratum 688 fix so machines without a BIOS fix work. */
static __init void fix_erratum_688(void)
{
	struct pci_dev *F4;
	u32 val;

	if (boot_cpu_data.x86 != 0x14)
		return;

	if (!amd_northbridges.num)
		return;

	F4 = node_to_amd_nb(0)->link;
	if (!F4)
		return;

	if (pci_read_config_dword(F4, 0x164, &val))
		return;

	if (val & BIT(2))
		return;

	on_each_cpu(__fix_erratum_688, NULL, 0);

	pr_info("x86/cpu/AMD: CPU erratum 688 worked around\n");
}

static __init int init_amd_nbs(void)
{
	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
		return 0;

	amd_cache_northbridges();
	amd_cache_gart();

	fix_erratum_688();

	return 0;
}

/* This has to go after the PCI subsystem */
fs_initcall(init_amd_nbs);