1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
4 * Author: Joerg Roedel <jroedel@suse.de>
5 * Leo Duran <leo.duran@amd.com>
6 */
7
8#define pr_fmt(fmt) "AMD-Vi: " fmt
9#define dev_fmt(fmt) pr_fmt(fmt)
10
11#include <linux/pci.h>
12#include <linux/acpi.h>
13#include <linux/list.h>
14#include <linux/bitmap.h>
15#include <linux/syscore_ops.h>
16#include <linux/interrupt.h>
17#include <linux/msi.h>
18#include <linux/irq.h>
19#include <linux/amd-iommu.h>
20#include <linux/export.h>
21#include <linux/kmemleak.h>
22#include <linux/cc_platform.h>
23#include <linux/iopoll.h>
24#include <asm/pci-direct.h>
25#include <asm/iommu.h>
26#include <asm/apic.h>
27#include <asm/gart.h>
28#include <asm/x86_init.h>
29#include <asm/io_apic.h>
30#include <asm/irq_remapping.h>
31#include <asm/set_memory.h>
32#include <asm/sev.h>
33
34#include <linux/crash_dump.h>
35
36#include "amd_iommu.h"
37#include "../irq_remapping.h"
38#include "../iommu-pages.h"
39
40/*
41 * definitions for the ACPI scanning code
42 */
43#define IVRS_HEADER_LENGTH 48
44
45#define ACPI_IVHD_TYPE_MAX_SUPPORTED 0x40
46#define ACPI_IVMD_TYPE_ALL 0x20
47#define ACPI_IVMD_TYPE 0x21
48#define ACPI_IVMD_TYPE_RANGE 0x22
49
50#define IVHD_DEV_ALL 0x01
51#define IVHD_DEV_SELECT 0x02
52#define IVHD_DEV_SELECT_RANGE_START 0x03
53#define IVHD_DEV_RANGE_END 0x04
54#define IVHD_DEV_ALIAS 0x42
55#define IVHD_DEV_ALIAS_RANGE 0x43
56#define IVHD_DEV_EXT_SELECT 0x46
57#define IVHD_DEV_EXT_SELECT_RANGE 0x47
58#define IVHD_DEV_SPECIAL 0x48
59#define IVHD_DEV_ACPI_HID 0xf0
60
61#define UID_NOT_PRESENT 0
62#define UID_IS_INTEGER 1
63#define UID_IS_CHARACTER 2
64
65#define IVHD_SPECIAL_IOAPIC 1
66#define IVHD_SPECIAL_HPET 2
67
68#define IVHD_FLAG_HT_TUN_EN_MASK 0x01
69#define IVHD_FLAG_PASSPW_EN_MASK 0x02
70#define IVHD_FLAG_RESPASSPW_EN_MASK 0x04
71#define IVHD_FLAG_ISOC_EN_MASK 0x08
72
73#define IVMD_FLAG_EXCL_RANGE 0x08
74#define IVMD_FLAG_IW 0x04
75#define IVMD_FLAG_IR 0x02
76#define IVMD_FLAG_UNITY_MAP 0x01
77
78#define ACPI_DEVFLAG_INITPASS 0x01
79#define ACPI_DEVFLAG_EXTINT 0x02
80#define ACPI_DEVFLAG_NMI 0x04
81#define ACPI_DEVFLAG_SYSMGT1 0x10
82#define ACPI_DEVFLAG_SYSMGT2 0x20
83#define ACPI_DEVFLAG_LINT0 0x40
84#define ACPI_DEVFLAG_LINT1 0x80
85#define ACPI_DEVFLAG_ATSDIS 0x10000000
86
87#define IVRS_GET_SBDF_ID(seg, bus, dev, fn) (((seg & 0xffff) << 16) | ((bus & 0xff) << 8) \
88 | ((dev & 0x1f) << 3) | (fn & 0x7))
89
90/*
91 * ACPI table definitions
92 *
93 * These data structures are laid over the table to parse the important values
94 * out of it.
95 */
96
/*
 * Structure describing one IOMMU in the ACPI table. Typically followed by one
 * or more ivhd_entry structures.
 */
101struct ivhd_header {
102 u8 type;
103 u8 flags;
104 u16 length;
105 u16 devid;
106 u16 cap_ptr;
107 u64 mmio_phys;
108 u16 pci_seg;
109 u16 info;
110 u32 efr_attr;
111
112 /* Following only valid on IVHD type 11h and 40h */
113 u64 efr_reg; /* Exact copy of MMIO_EXT_FEATURES */
114 u64 efr_reg2;
115} __attribute__((packed));
116
117/*
118 * A device entry describing which devices a specific IOMMU translates and
119 * which requestor ids they use.
120 */
121struct ivhd_entry {
122 u8 type;
123 u16 devid;
124 u8 flags;
125 struct_group(ext_hid,
126 u32 ext;
127 u32 hidh;
128 );
129 u64 cid;
130 u8 uidf;
131 u8 uidl;
132 u8 uid;
133} __attribute__((packed));
134
135/*
136 * An AMD IOMMU memory definition structure. It defines things like exclusion
137 * ranges for devices and regions that should be unity mapped.
138 */
139struct ivmd_header {
140 u8 type;
141 u8 flags;
142 u16 length;
143 u16 devid;
144 u16 aux;
145 u16 pci_seg;
146 u8 resv[6];
147 u64 range_start;
148 u64 range_length;
149} __attribute__((packed));
150
151bool amd_iommu_dump;
152bool amd_iommu_irq_remap __read_mostly;
153
154enum protection_domain_mode amd_iommu_pgtable = PD_MODE_V1;
155/* Host page table level */
156u8 amd_iommu_hpt_level;
157/* Guest page table level */
158int amd_iommu_gpt_level = PAGE_MODE_4_LEVEL;
159
160int amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
161static int amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE;
162
163static bool amd_iommu_detected;
164static bool amd_iommu_disabled __initdata;
165static bool amd_iommu_force_enable __initdata;
166static bool amd_iommu_irtcachedis;
167static int amd_iommu_target_ivhd_type;
168
169/* Global EFR and EFR2 registers */
170u64 amd_iommu_efr;
171u64 amd_iommu_efr2;
172
/* Host (v1) page table is not supported */
bool amd_iommu_hatdis;

/* Whether SNP is enabled on the system */
bool amd_iommu_snp_en;
178EXPORT_SYMBOL(amd_iommu_snp_en);
179
180LIST_HEAD(amd_iommu_pci_seg_list); /* list of all PCI segments */
181LIST_HEAD(amd_iommu_list); /* list of all AMD IOMMUs in the system */
182LIST_HEAD(amd_ivhd_dev_flags_list); /* list of all IVHD device entry settings */
183
184/* Number of IOMMUs present in the system */
185static int amd_iommus_present;
186
187/* IOMMUs have a non-present cache? */
188bool amd_iommu_np_cache __read_mostly;
189bool amd_iommu_iotlb_sup __read_mostly = true;
190
191static bool amd_iommu_pc_present __read_mostly;
192bool amdr_ivrs_remap_support __read_mostly;
193
194bool amd_iommu_force_isolation __read_mostly;
195
196unsigned long amd_iommu_pgsize_bitmap __ro_after_init = AMD_IOMMU_PGSIZES;
197
198enum iommu_init_state {
199 IOMMU_START_STATE,
200 IOMMU_IVRS_DETECTED,
201 IOMMU_ACPI_FINISHED,
202 IOMMU_ENABLED,
203 IOMMU_PCI_INIT,
204 IOMMU_INTERRUPTS_EN,
205 IOMMU_INITIALIZED,
206 IOMMU_NOT_FOUND,
207 IOMMU_INIT_ERROR,
208 IOMMU_CMDLINE_DISABLED,
209};
210
211/* Early ioapic and hpet maps from kernel command line */
212#define EARLY_MAP_SIZE 4
213static struct devid_map __initdata early_ioapic_map[EARLY_MAP_SIZE];
214static struct devid_map __initdata early_hpet_map[EARLY_MAP_SIZE];
215static struct acpihid_map_entry __initdata early_acpihid_map[EARLY_MAP_SIZE];
216
217static int __initdata early_ioapic_map_size;
218static int __initdata early_hpet_map_size;
219static int __initdata early_acpihid_map_size;
220
221static bool __initdata cmdline_maps;
222
223static enum iommu_init_state init_state = IOMMU_START_STATE;
224
225static int amd_iommu_enable_interrupts(void);
226static void init_device_table_dma(struct amd_iommu_pci_seg *pci_seg);
227
228static bool amd_iommu_pre_enabled = true;
229
230static u32 amd_iommu_ivinfo __initdata;
231
232bool translation_pre_enabled(struct amd_iommu *iommu)
233{
234 return (iommu->flags & AMD_IOMMU_FLAG_TRANS_PRE_ENABLED);
235}
236
237static void clear_translation_pre_enabled(struct amd_iommu *iommu)
238{
239 iommu->flags &= ~AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
240}
241
static void init_translation_status(struct amd_iommu *iommu)
{
	u64 ctrl;

	ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	if (ctrl & (1 << CONTROL_IOMMU_EN))
		iommu->flags |= AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
}
250
251int amd_iommu_get_num_iommus(void)
252{
253 return amd_iommus_present;
254}
255
256bool amd_iommu_ht_range_ignore(void)
257{
258 return check_feature2(FEATURE_HT_RANGE_IGNORE);
259}
260
/*
 * Iterate through all the IOMMUs to compute the common EFR/EFR2
 * masks and warn about any inconsistency found.
 */
265static __init void get_global_efr(void)
266{
267 struct amd_iommu *iommu;
268
269 for_each_iommu(iommu) {
270 u64 tmp = iommu->features;
271 u64 tmp2 = iommu->features2;
272
273 if (list_is_first(list: &iommu->list, head: &amd_iommu_list)) {
274 amd_iommu_efr = tmp;
275 amd_iommu_efr2 = tmp2;
276 continue;
277 }
278
279 if (amd_iommu_efr == tmp &&
280 amd_iommu_efr2 == tmp2)
281 continue;
282
283 pr_err(FW_BUG
284 "Found inconsistent EFR/EFR2 %#llx,%#llx (global %#llx,%#llx) on iommu%d (%04x:%02x:%02x.%01x).\n",
285 tmp, tmp2, amd_iommu_efr, amd_iommu_efr2,
286 iommu->index, iommu->pci_seg->id,
287 PCI_BUS_NUM(iommu->devid), PCI_SLOT(iommu->devid),
288 PCI_FUNC(iommu->devid));
289
290 amd_iommu_efr &= tmp;
291 amd_iommu_efr2 &= tmp2;
292 }
293
294 pr_info("Using global IVHD EFR:%#llx, EFR2:%#llx\n", amd_iommu_efr, amd_iommu_efr2);
295}
296
297/*
298 * For IVHD type 0x11/0x40, EFR is also available via IVHD.
299 * Default to IVHD EFR since it is available sooner
300 * (i.e. before PCI init).
301 */
302static void __init early_iommu_features_init(struct amd_iommu *iommu,
303 struct ivhd_header *h)
304{
305 if (amd_iommu_ivinfo & IOMMU_IVINFO_EFRSUP) {
306 iommu->features = h->efr_reg;
307 iommu->features2 = h->efr_reg2;
308 }
309 if (amd_iommu_ivinfo & IOMMU_IVINFO_DMA_REMAP)
310 amdr_ivrs_remap_support = true;
311}
312
/* Access to l1 and l2 indexed register spaces */

static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address)
{
	u32 val;

	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
	pci_read_config_dword(iommu->dev, 0xfc, &val);
	return val;
}

static void iommu_write_l1(struct amd_iommu *iommu, u16 l1, u8 address, u32 val)
{
	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16 | 1 << 31));
	pci_write_config_dword(iommu->dev, 0xfc, val);
	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
}

static u32 iommu_read_l2(struct amd_iommu *iommu, u8 address)
{
	u32 val;

	pci_write_config_dword(iommu->dev, 0xf0, address);
	pci_read_config_dword(iommu->dev, 0xf4, &val);
	return val;
}

static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val)
{
	pci_write_config_dword(iommu->dev, 0xf0, (address | 1 << 8));
	pci_write_config_dword(iommu->dev, 0xf4, val);
}
345
346/****************************************************************************
347 *
348 * AMD IOMMU MMIO register space handling functions
349 *
350 * These functions are used to program the IOMMU device registers in
351 * MMIO space required for that driver.
352 *
353 ****************************************************************************/
354
/*
 * This function sets the exclusion range in the IOMMU. DMA accesses to the
 * exclusion range are passed through untranslated.
 */
359static void iommu_set_exclusion_range(struct amd_iommu *iommu)
360{
361 u64 start = iommu->exclusion_start & PAGE_MASK;
362 u64 limit = (start + iommu->exclusion_length - 1) & PAGE_MASK;
363 u64 entry;
364
365 if (!iommu->exclusion_start)
366 return;
367
368 entry = start | MMIO_EXCL_ENABLE_MASK;
369 memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
370 &entry, sizeof(entry));
371
372 entry = limit;
373 memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
374 &entry, sizeof(entry));
375}
376
static void iommu_set_cwwb_range(struct amd_iommu *iommu)
{
	u64 start = iommu_virt_to_phys((void *)iommu->cmd_sem);
	u64 entry = start & PM_ADDR_MASK;

	if (!check_feature(FEATURE_SNP))
		return;

	/* Note:
	 * Re-purpose Exclusion base/limit registers for Completion wait
	 * write-back base/limit.
	 */
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
		    &entry, sizeof(entry));

	/* Note:
	 * Default to 4 Kbytes, which can be specified by setting base
	 * address equal to the limit address.
	 */
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
		    &entry, sizeof(entry));
}
399
400/* Programs the physical address of the device table into the IOMMU hardware */
401static void iommu_set_device_table(struct amd_iommu *iommu)
402{
403 u64 entry;
404 u32 dev_table_size = iommu->pci_seg->dev_table_size;
405 void *dev_table = (void *)get_dev_table(iommu);
406
407 BUG_ON(iommu->mmio_base == NULL);
408
409 if (is_kdump_kernel())
410 return;
411
	entry = iommu_virt_to_phys(dev_table);
413 entry |= (dev_table_size >> 12) - 1;
414 memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET,
415 &entry, sizeof(entry));
416}
417
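/*
 * Read-modify-write a field in the IOMMU control register: clear the bits
 * selected by 'mask' (shifted by 'shift') and program 'val' into them.
 */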
static void iommu_feature_set(struct amd_iommu *iommu, u64 val, u64 mask, u8 shift)
{
	u64 ctrl;

	ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	mask <<= shift;
	ctrl &= ~mask;
	ctrl |= (val << shift) & mask;
	writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

/* Generic functions to enable/disable certain features of the IOMMU. */
void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
{
	iommu_feature_set(iommu, 1ULL, 1ULL, bit);
}

static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
{
	iommu_feature_set(iommu, 0ULL, 1ULL, bit);
}
439
440/* Function to enable the hardware */
441static void iommu_enable(struct amd_iommu *iommu)
442{
443 iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
444}
445
446static void iommu_disable(struct amd_iommu *iommu)
447{
448 if (!iommu->mmio_base)
449 return;
450
451 /* Disable command buffer */
452 iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
453
454 /* Disable event logging and event interrupts */
455 iommu_feature_disable(iommu, CONTROL_EVT_INT_EN);
456 iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
457
458 /* Disable IOMMU GA_LOG */
459 iommu_feature_disable(iommu, CONTROL_GALOG_EN);
460 iommu_feature_disable(iommu, CONTROL_GAINT_EN);
461
462 /* Disable IOMMU PPR logging */
463 iommu_feature_disable(iommu, CONTROL_PPRLOG_EN);
464 iommu_feature_disable(iommu, CONTROL_PPRINT_EN);
465
466 /* Disable IOMMU hardware itself */
467 iommu_feature_disable(iommu, CONTROL_IOMMU_EN);
468
469 /* Clear IRTE cache disabling bit */
470 iommu_feature_disable(iommu, CONTROL_IRTCACHEDIS);
471}
472
473/*
474 * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in
475 * the system has one.
476 */
static u8 __iomem * __init iommu_map_mmio_space(u64 address, u64 end)
{
	if (!request_mem_region(address, end, "amd_iommu")) {
		pr_err("Can not reserve memory region %llx-%llx for mmio\n",
		       address, end);
		pr_err("This is a BIOS bug. Please contact your hardware vendor\n");
		return NULL;
	}

	return (u8 __iomem *)ioremap(address, end);
}

static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
{
	if (iommu->mmio_base)
		iounmap(iommu->mmio_base);
	release_mem_region(iommu->mmio_phys, iommu->mmio_phys_end);
}
495
496static inline u32 get_ivhd_header_size(struct ivhd_header *h)
497{
498 u32 size = 0;
499
500 switch (h->type) {
501 case 0x10:
502 size = 24;
503 break;
504 case 0x11:
505 case 0x40:
506 size = 40;
507 break;
508 }
509 return size;
510}
511
512/****************************************************************************
513 *
 * The functions below belong to the first pass of AMD IOMMU ACPI table
 * parsing. In this pass we try to find the highest device id this
 * code has to handle. Based on this information the size of the shared data
 * structures is determined later.
518 *
519 ****************************************************************************/
520
521/*
522 * This function calculates the length of a given IVHD entry
523 */
524static inline int ivhd_entry_length(u8 *ivhd)
525{
526 u32 type = ((struct ivhd_entry *)ivhd)->type;
527
528 if (type < 0x80) {
529 return 0x04 << (*ivhd >> 6);
530 } else if (type == IVHD_DEV_ACPI_HID) {
531 /* For ACPI_HID, offset 21 is uid len */
532 return *((u8 *)ivhd + 21) + 22;
533 }
534 return 0;
535}
536
/*
 * After reading the highest device id from the IOMMU PCI capability header,
 * this function checks whether a higher device id is defined in the ACPI table.
 */
541static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
542{
543 u8 *p = (void *)h, *end = (void *)h;
544 struct ivhd_entry *dev;
545 int last_devid = -EINVAL;
546
547 u32 ivhd_size = get_ivhd_header_size(h);
548
549 if (!ivhd_size) {
550 pr_err("Unsupported IVHD type %#x\n", h->type);
551 return -EINVAL;
552 }
553
554 p += ivhd_size;
555 end += h->length;
556
557 while (p < end) {
558 dev = (struct ivhd_entry *)p;
559 switch (dev->type) {
560 case IVHD_DEV_ALL:
561 /* Use maximum BDF value for DEV_ALL */
562 return 0xffff;
563 case IVHD_DEV_SELECT:
564 case IVHD_DEV_RANGE_END:
565 case IVHD_DEV_ALIAS:
566 case IVHD_DEV_EXT_SELECT:
567 /* all the above subfield types refer to device ids */
568 if (dev->devid > last_devid)
569 last_devid = dev->devid;
570 break;
571 default:
572 break;
573 }
574 p += ivhd_entry_length(ivhd: p);
575 }
576
577 WARN_ON(p != end);
578
579 return last_devid;
580}
581
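/*
 * Validate the IVRS table checksum: the byte-wise sum over the whole table,
 * including the checksum field itself, must be zero.
 */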
582static int __init check_ivrs_checksum(struct acpi_table_header *table)
583{
584 int i;
585 u8 checksum = 0, *p = (u8 *)table;
586
587 for (i = 0; i < table->length; ++i)
588 checksum += p[i];
589 if (checksum != 0) {
590 /* ACPI table corrupt */
591 pr_err(FW_BUG "IVRS invalid checksum\n");
592 return -ENODEV;
593 }
594
595 return 0;
596}
597
598/*
599 * Iterate over all IVHD entries in the ACPI table and find the highest device
600 * id which we need to handle. This is the first of three functions which parse
601 * the ACPI table. So we check the checksum here.
602 */
603static int __init find_last_devid_acpi(struct acpi_table_header *table, u16 pci_seg)
604{
605 u8 *p = (u8 *)table, *end = (u8 *)table;
606 struct ivhd_header *h;
607 int last_devid, last_bdf = 0;
608
609 p += IVRS_HEADER_LENGTH;
610
611 end += table->length;
612 while (p < end) {
613 h = (struct ivhd_header *)p;
614 if (h->pci_seg == pci_seg &&
615 h->type == amd_iommu_target_ivhd_type) {
616 last_devid = find_last_devid_from_ivhd(h);
617
618 if (last_devid < 0)
619 return -EINVAL;
620 if (last_devid > last_bdf)
621 last_bdf = last_devid;
622 }
623 p += h->length;
624 }
625 WARN_ON(p != end);
626
627 return last_bdf;
628}
629
630/****************************************************************************
631 *
 * The following functions belong to the code path which parses the ACPI table
 * for the second time. In this parsing iteration we allocate IOMMU specific
 * data structures, initialize the per PCI segment device/alias/rlookup tables
 * and also do basic hardware initialization.
636 *
637 ****************************************************************************/
638
639/* Allocate per PCI segment device table */
640static inline int __init alloc_dev_table(struct amd_iommu_pci_seg *pci_seg)
641{
642 pci_seg->dev_table = iommu_alloc_pages_sz(GFP_KERNEL | GFP_DMA32,
643 size: pci_seg->dev_table_size);
644 if (!pci_seg->dev_table)
645 return -ENOMEM;
646
647 return 0;
648}
649
650static inline void free_dev_table(struct amd_iommu_pci_seg *pci_seg)
651{
652 if (is_kdump_kernel())
653 memunmap(addr: (void *)pci_seg->dev_table);
654 else
655 iommu_free_pages(virt: pci_seg->dev_table);
656 pci_seg->dev_table = NULL;
657}
658
659/* Allocate per PCI segment IOMMU rlookup table. */
660static inline int __init alloc_rlookup_table(struct amd_iommu_pci_seg *pci_seg)
661{
662 pci_seg->rlookup_table = kvcalloc(pci_seg->last_bdf + 1,
663 sizeof(*pci_seg->rlookup_table),
664 GFP_KERNEL);
665 if (pci_seg->rlookup_table == NULL)
666 return -ENOMEM;
667
668 return 0;
669}
670
671static inline void free_rlookup_table(struct amd_iommu_pci_seg *pci_seg)
672{
673 kvfree(addr: pci_seg->rlookup_table);
674 pci_seg->rlookup_table = NULL;
675}
676
677static inline int __init alloc_irq_lookup_table(struct amd_iommu_pci_seg *pci_seg)
678{
679 pci_seg->irq_lookup_table = kvcalloc(pci_seg->last_bdf + 1,
680 sizeof(*pci_seg->irq_lookup_table),
681 GFP_KERNEL);
682 if (pci_seg->irq_lookup_table == NULL)
683 return -ENOMEM;
684
685 return 0;
686}
687
688static inline void free_irq_lookup_table(struct amd_iommu_pci_seg *pci_seg)
689{
690 kvfree(addr: pci_seg->irq_lookup_table);
691 pci_seg->irq_lookup_table = NULL;
692}
693
694static int __init alloc_alias_table(struct amd_iommu_pci_seg *pci_seg)
695{
696 int i;
697
698 pci_seg->alias_table = kvmalloc_array(pci_seg->last_bdf + 1,
699 sizeof(*pci_seg->alias_table),
700 GFP_KERNEL);
701 if (!pci_seg->alias_table)
702 return -ENOMEM;
703
	/*
	 * let all alias entries point to themselves
	 */
707 for (i = 0; i <= pci_seg->last_bdf; ++i)
708 pci_seg->alias_table[i] = i;
709
710 return 0;
711}
712
713static void __init free_alias_table(struct amd_iommu_pci_seg *pci_seg)
714{
715 kvfree(addr: pci_seg->alias_table);
716 pci_seg->alias_table = NULL;
717}
718
719static inline void *iommu_memremap(unsigned long paddr, size_t size)
720{
721 phys_addr_t phys;
722
723 if (!paddr)
724 return NULL;
725
	/*
	 * Obtain the true physical address in the kdump kernel when SME is
	 * enabled. A previous kernel with SME enabled followed by a kdump
	 * kernel without SME support is currently not supported.
	 */
	phys = __sme_clr(paddr);
732
733 if (cc_platform_has(attr: CC_ATTR_HOST_MEM_ENCRYPT))
734 return (__force void *)ioremap_encrypted(phys_addr: phys, size);
735 else
736 return memremap(offset: phys, size, flags: MEMREMAP_WB);
737}
738
739/*
740 * Allocates the command buffer. This buffer is per AMD IOMMU. We can
741 * write commands to that buffer later and the IOMMU will execute them
742 * asynchronously
743 */
744static int __init alloc_command_buffer(struct amd_iommu *iommu)
745{
746 iommu->cmd_buf = iommu_alloc_pages_sz(GFP_KERNEL, CMD_BUFFER_SIZE);
747
748 return iommu->cmd_buf ? 0 : -ENOMEM;
749}
750
/*
 * The interrupt handler has processed all pending events and adjusted the
 * head and tail pointers. Reset the overflow mask and restart logging.
 */
755void amd_iommu_restart_log(struct amd_iommu *iommu, const char *evt_type,
756 u8 cntrl_intr, u8 cntrl_log,
757 u32 status_run_mask, u32 status_overflow_mask)
758{
759 u32 status;
760
761 status = readl(addr: iommu->mmio_base + MMIO_STATUS_OFFSET);
762 if (status & status_run_mask)
763 return;
764
765 pr_info_ratelimited("IOMMU %s log restarting\n", evt_type);
766
767 iommu_feature_disable(iommu, bit: cntrl_log);
768 iommu_feature_disable(iommu, bit: cntrl_intr);
769
770 writel(val: status_overflow_mask, addr: iommu->mmio_base + MMIO_STATUS_OFFSET);
771
772 iommu_feature_enable(iommu, bit: cntrl_intr);
773 iommu_feature_enable(iommu, bit: cntrl_log);
774}
775
776/*
777 * This function restarts event logging in case the IOMMU experienced
778 * an event log buffer overflow.
779 */
780void amd_iommu_restart_event_logging(struct amd_iommu *iommu)
781{
782 amd_iommu_restart_log(iommu, evt_type: "Event", CONTROL_EVT_INT_EN,
783 CONTROL_EVT_LOG_EN, MMIO_STATUS_EVT_RUN_MASK,
784 MMIO_STATUS_EVT_OVERFLOW_MASK);
785}
786
/*
 * This function restarts GA logging in case the IOMMU experienced
 * a GA log buffer overflow.
 */
791void amd_iommu_restart_ga_log(struct amd_iommu *iommu)
792{
793 amd_iommu_restart_log(iommu, evt_type: "GA", CONTROL_GAINT_EN,
794 CONTROL_GALOG_EN, MMIO_STATUS_GALOG_RUN_MASK,
795 MMIO_STATUS_GALOG_OVERFLOW_MASK);
796}
797
798/*
799 * This function resets the command buffer if the IOMMU stopped fetching
800 * commands from it.
801 */
802static void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu)
803{
804 iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
805
806 writel(val: 0x00, addr: iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
807 writel(val: 0x00, addr: iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
808 iommu->cmd_buf_head = 0;
809 iommu->cmd_buf_tail = 0;
810
811 iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
812}
813
814/*
815 * This function writes the command buffer address to the hardware and
816 * enables it.
817 */
818static void iommu_enable_command_buffer(struct amd_iommu *iommu)
819{
820 u64 entry;
821
822 BUG_ON(iommu->cmd_buf == NULL);
823
824 if (!is_kdump_kernel()) {
825 /*
826 * Command buffer is re-used for kdump kernel and setting
827 * of MMIO register is not required.
828 */
829 entry = iommu_virt_to_phys(vaddr: iommu->cmd_buf);
830 entry |= MMIO_CMD_SIZE_512;
831 memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
832 &entry, sizeof(entry));
833 }
834
835 amd_iommu_reset_cmd_buffer(iommu);
836}
837
838/*
839 * This function disables the command buffer
840 */
841static void iommu_disable_command_buffer(struct amd_iommu *iommu)
842{
843 iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
844}
845
846static void __init free_command_buffer(struct amd_iommu *iommu)
847{
848 iommu_free_pages(virt: iommu->cmd_buf);
849}
850
851void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu, gfp_t gfp,
852 size_t size)
853{
854 void *buf;
855
856 size = PAGE_ALIGN(size);
857 buf = iommu_alloc_pages_sz(gfp, size);
858 if (!buf)
859 return NULL;
860 if (check_feature(FEATURE_SNP) &&
861 set_memory_4k(addr: (unsigned long)buf, numpages: size / PAGE_SIZE)) {
862 iommu_free_pages(virt: buf);
863 return NULL;
864 }
865
866 return buf;
867}
868
869/* allocates the memory where the IOMMU will log its events to */
870static int __init alloc_event_buffer(struct amd_iommu *iommu)
871{
872 iommu->evt_buf = iommu_alloc_4k_pages(iommu, GFP_KERNEL,
873 EVT_BUFFER_SIZE);
874
875 return iommu->evt_buf ? 0 : -ENOMEM;
876}
877
878static void iommu_enable_event_buffer(struct amd_iommu *iommu)
879{
880 u64 entry;
881
882 BUG_ON(iommu->evt_buf == NULL);
883
884 if (!is_kdump_kernel()) {
885 /*
886 * Event buffer is re-used for kdump kernel and setting
887 * of MMIO register is not required.
888 */
889 entry = iommu_virt_to_phys(vaddr: iommu->evt_buf) | EVT_LEN_MASK;
890 memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
891 &entry, sizeof(entry));
892 }
893
894 /* set head and tail to zero manually */
895 writel(val: 0x00, addr: iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
896 writel(val: 0x00, addr: iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);
897
898 iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
899}
900
901/*
902 * This function disables the event log buffer
903 */
904static void iommu_disable_event_buffer(struct amd_iommu *iommu)
905{
906 iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
907}
908
909static void __init free_event_buffer(struct amd_iommu *iommu)
910{
911 iommu_free_pages(virt: iommu->evt_buf);
912}
913
914static void free_ga_log(struct amd_iommu *iommu)
915{
916#ifdef CONFIG_IRQ_REMAP
917 iommu_free_pages(iommu->ga_log);
918 iommu_free_pages(iommu->ga_log_tail);
919#endif
920}
921
922#ifdef CONFIG_IRQ_REMAP
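/*
 * Program the GA log base/tail registers, enable GA interrupts and logging,
 * and wait for the GA log to start running.
 */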
923static int iommu_ga_log_enable(struct amd_iommu *iommu)
924{
925 u32 status, i;
926 u64 entry;
927
928 if (!iommu->ga_log)
929 return -EINVAL;
930
931 entry = iommu_virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512;
932 memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET,
933 &entry, sizeof(entry));
934 entry = (iommu_virt_to_phys(iommu->ga_log_tail) &
935 (BIT_ULL(52)-1)) & ~7ULL;
936 memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET,
937 &entry, sizeof(entry));
938 writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
939 writel(0x00, iommu->mmio_base + MMIO_GA_TAIL_OFFSET);
940
941
942 iommu_feature_enable(iommu, CONTROL_GAINT_EN);
943 iommu_feature_enable(iommu, CONTROL_GALOG_EN);
944
945 for (i = 0; i < MMIO_STATUS_TIMEOUT; ++i) {
946 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
947 if (status & (MMIO_STATUS_GALOG_RUN_MASK))
948 break;
949 udelay(10);
950 }
951
952 if (WARN_ON(i >= MMIO_STATUS_TIMEOUT))
953 return -EINVAL;
954
955 return 0;
956}
957
958static int iommu_init_ga_log(struct amd_iommu *iommu)
959{
960 if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
961 return 0;
962
963 iommu->ga_log = iommu_alloc_pages_sz(GFP_KERNEL, GA_LOG_SIZE);
964 if (!iommu->ga_log)
965 goto err_out;
966
967 iommu->ga_log_tail = iommu_alloc_pages_sz(GFP_KERNEL, 8);
968 if (!iommu->ga_log_tail)
969 goto err_out;
970
971 return 0;
972err_out:
973 free_ga_log(iommu);
974 return -EINVAL;
975}
976#endif /* CONFIG_IRQ_REMAP */
977
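/*
 * Allocate the completion-wait write-back (CWWB) semaphore page and record
 * its physical address for later use in completion-wait commands.
 */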
978static int __init alloc_cwwb_sem(struct amd_iommu *iommu)
979{
980 iommu->cmd_sem = iommu_alloc_4k_pages(iommu, GFP_KERNEL, size: 1);
981 if (!iommu->cmd_sem)
982 return -ENOMEM;
983 iommu->cmd_sem_paddr = iommu_virt_to_phys(vaddr: (void *)iommu->cmd_sem);
984 return 0;
985}
986
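/*
 * For a kdump kernel: read the event buffer address programmed by the
 * previous kernel from the MMIO register and remap that buffer instead of
 * allocating a new one.
 */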
987static int __init remap_event_buffer(struct amd_iommu *iommu)
988{
989 u64 paddr;
990
991 pr_info_once("Re-using event buffer from the previous kernel\n");
992 paddr = readq(addr: iommu->mmio_base + MMIO_EVT_BUF_OFFSET) & PM_ADDR_MASK;
993 iommu->evt_buf = iommu_memremap(paddr, EVT_BUFFER_SIZE);
994
995 return iommu->evt_buf ? 0 : -ENOMEM;
996}
997
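/* Same as above, but for the command buffer programmed by the previous kernel. */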
998static int __init remap_command_buffer(struct amd_iommu *iommu)
999{
1000 u64 paddr;
1001
1002 pr_info_once("Re-using command buffer from the previous kernel\n");
1003 paddr = readq(addr: iommu->mmio_base + MMIO_CMD_BUF_OFFSET) & PM_ADDR_MASK;
1004 iommu->cmd_buf = iommu_memremap(paddr, CMD_BUFFER_SIZE);
1005
1006 return iommu->cmd_buf ? 0 : -ENOMEM;
1007}
1008
1009static int __init remap_or_alloc_cwwb_sem(struct amd_iommu *iommu)
1010{
1011 u64 paddr;
1012
1013 if (check_feature(FEATURE_SNP)) {
1014 /*
1015 * When SNP is enabled, the exclusion base register is used for the
1016 * completion wait buffer (CWB) address. Read and re-use it.
1017 */
1018 pr_info_once("Re-using CWB buffers from the previous kernel\n");
1019 paddr = readq(addr: iommu->mmio_base + MMIO_EXCL_BASE_OFFSET) & PM_ADDR_MASK;
1020 iommu->cmd_sem = iommu_memremap(paddr, PAGE_SIZE);
1021 if (!iommu->cmd_sem)
1022 return -ENOMEM;
1023 iommu->cmd_sem_paddr = paddr;
1024 } else {
1025 return alloc_cwwb_sem(iommu);
1026 }
1027
1028 return 0;
1029}
1030
1031static int __init alloc_iommu_buffers(struct amd_iommu *iommu)
1032{
1033 int ret;
1034
	/*
	 * For a kdump boot, reuse/remap the completion-wait semaphore,
	 * command buffer and event buffer allocated by the previous kernel.
	 */
1039 if (is_kdump_kernel()) {
1040 ret = remap_or_alloc_cwwb_sem(iommu);
1041 if (ret)
1042 return ret;
1043
1044 ret = remap_command_buffer(iommu);
1045 if (ret)
1046 return ret;
1047
1048 ret = remap_event_buffer(iommu);
1049 if (ret)
1050 return ret;
1051 } else {
1052 ret = alloc_cwwb_sem(iommu);
1053 if (ret)
1054 return ret;
1055
1056 ret = alloc_command_buffer(iommu);
1057 if (ret)
1058 return ret;
1059
1060 ret = alloc_event_buffer(iommu);
1061 if (ret)
1062 return ret;
1063 }
1064
1065 return 0;
1066}
1067
1068static void __init free_cwwb_sem(struct amd_iommu *iommu)
1069{
1070 if (iommu->cmd_sem)
1071 iommu_free_pages(virt: (void *)iommu->cmd_sem);
1072}
1073static void __init unmap_cwwb_sem(struct amd_iommu *iommu)
1074{
1075 if (iommu->cmd_sem) {
1076 if (check_feature(FEATURE_SNP))
1077 memunmap(addr: (void *)iommu->cmd_sem);
1078 else
1079 iommu_free_pages(virt: (void *)iommu->cmd_sem);
1080 }
1081}
1082
1083static void __init unmap_command_buffer(struct amd_iommu *iommu)
1084{
1085 memunmap(addr: (void *)iommu->cmd_buf);
1086}
1087
1088static void __init unmap_event_buffer(struct amd_iommu *iommu)
1089{
1090 memunmap(addr: iommu->evt_buf);
1091}
1092
1093static void __init free_iommu_buffers(struct amd_iommu *iommu)
1094{
1095 if (is_kdump_kernel()) {
1096 unmap_cwwb_sem(iommu);
1097 unmap_command_buffer(iommu);
1098 unmap_event_buffer(iommu);
1099 } else {
1100 free_cwwb_sem(iommu);
1101 free_command_buffer(iommu);
1102 free_event_buffer(iommu);
1103 }
1104}
1105
1106static void iommu_enable_xt(struct amd_iommu *iommu)
1107{
1108#ifdef CONFIG_IRQ_REMAP
1109 /*
1110 * XT mode (32-bit APIC destination ID) requires
1111 * GA mode (128-bit IRTE support) as a prerequisite.
1112 */
1113 if (AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir) &&
1114 amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
1115 iommu_feature_enable(iommu, CONTROL_XT_EN);
1116#endif /* CONFIG_IRQ_REMAP */
1117}
1118
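/* Enable guest translation (GT) support if the IOMMU advertises the feature. */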
1119static void iommu_enable_gt(struct amd_iommu *iommu)
1120{
1121 if (!check_feature(FEATURE_GT))
1122 return;
1123
1124 iommu_feature_enable(iommu, CONTROL_GT_EN);
1125}
1126
1127/* sets a specific bit in the device table entry. */
1128static void set_dte_bit(struct dev_table_entry *dte, u8 bit)
1129{
1130 int i = (bit >> 6) & 0x03;
1131 int _bit = bit & 0x3f;
1132
1133 dte->data[i] |= (1UL << _bit);
1134}
1135
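/*
 * Remap the device table programmed by the previous kernel so the kdump
 * kernel can reuse its entries. Returns false if the old table cannot be
 * trusted or remapped.
 */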
1136static bool __reuse_device_table(struct amd_iommu *iommu)
1137{
1138 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
1139 u32 lo, hi, old_devtb_size;
1140 phys_addr_t old_devtb_phys;
1141 u64 entry;
1142
	/* Each IOMMU uses a separate device table of the same size */
	lo = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET);
	hi = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET + 4);
	entry = (((u64) hi) << 32) + lo;
1147
1148 old_devtb_size = ((entry & ~PAGE_MASK) + 1) << 12;
1149 if (old_devtb_size != pci_seg->dev_table_size) {
1150 pr_err("The device table size of IOMMU:%d is not expected!\n",
1151 iommu->index);
1152 return false;
1153 }
1154
	/*
	 * When SME is enabled in the first kernel, the entry includes the
	 * memory encryption mask (sme_me_mask). We must remove the memory
	 * encryption mask to obtain the true physical address in the kdump
	 * kernel.
	 */
1160 old_devtb_phys = __sme_clr(entry) & PAGE_MASK;
1161
1162 if (old_devtb_phys >= 0x100000000ULL) {
1163 pr_err("The address of old device table is above 4G, not trustworthy!\n");
1164 return false;
1165 }
1166
1167 /*
1168 * Re-use the previous kernel's device table for kdump.
1169 */
1170 pci_seg->old_dev_tbl_cpy = iommu_memremap(paddr: old_devtb_phys, size: pci_seg->dev_table_size);
1171 if (pci_seg->old_dev_tbl_cpy == NULL) {
1172 pr_err("Failed to remap memory for reusing old device table!\n");
1173 return false;
1174 }
1175
1176 return true;
1177}
1178
1179static bool reuse_device_table(void)
1180{
1181 struct amd_iommu *iommu;
1182 struct amd_iommu_pci_seg *pci_seg;
1183
1184 if (!amd_iommu_pre_enabled)
1185 return false;
1186
1187 pr_warn("Translation is already enabled - trying to reuse translation structures\n");
1188
	/*
	 * All IOMMUs within a PCI segment share a common device table.
	 * Hence reuse the device table only once per PCI segment.
	 */
1193 for_each_pci_segment(pci_seg) {
1194 for_each_iommu(iommu) {
1195 if (pci_seg->id != iommu->pci_seg->id)
1196 continue;
1197 if (!__reuse_device_table(iommu))
1198 return false;
1199 break;
1200 }
1201 }
1202
1203 return true;
1204}
1205
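/*
 * Return the IVHD-provided DTE settings for @devid in segment @segid, using
 * the smallest devid range that contains the device, or NULL if none matches.
 */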
1206struct dev_table_entry *amd_iommu_get_ivhd_dte_flags(u16 segid, u16 devid)
1207{
1208 struct ivhd_dte_flags *e;
1209 unsigned int best_len = UINT_MAX;
1210 struct dev_table_entry *dte = NULL;
1211
1212 for_each_ivhd_dte_flags(e) {
		/*
		 * Need to go through the whole list to find the smallest range
		 * that contains the devid.
		 */
1217 if ((e->segid == segid) &&
1218 (e->devid_first <= devid) && (devid <= e->devid_last)) {
1219 unsigned int len = e->devid_last - e->devid_first;
1220
1221 if (len < best_len) {
1222 dte = &(e->dte);
1223 best_len = len;
1224 }
1225 }
1226 }
1227 return dte;
1228}
1229
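/* Check whether an identical segid/devid range has already been recorded. */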
1230static bool search_ivhd_dte_flags(u16 segid, u16 first, u16 last)
1231{
1232 struct ivhd_dte_flags *e;
1233
1234 for_each_ivhd_dte_flags(e) {
1235 if ((e->segid == segid) &&
1236 (e->devid_first == first) &&
1237 (e->devid_last == last))
1238 return true;
1239 }
1240 return false;
1241}
1242
1243/*
1244 * This function takes the device specific flags read from the ACPI
1245 * table and sets up the device table entry with that information
1246 */
1247static void __init
1248set_dev_entry_from_acpi_range(struct amd_iommu *iommu, u16 first, u16 last,
1249 u32 flags, u32 ext_flags)
1250{
1251 int i;
1252 struct dev_table_entry dte = {};
1253
1254 /* Parse IVHD DTE setting flags and store information */
1255 if (flags) {
1256 struct ivhd_dte_flags *d;
1257
1258 if (search_ivhd_dte_flags(segid: iommu->pci_seg->id, first, last))
1259 return;
1260
1261 d = kzalloc(sizeof(struct ivhd_dte_flags), GFP_KERNEL);
1262 if (!d)
1263 return;
1264
1265 pr_debug("%s: devid range %#x:%#x\n", __func__, first, last);
1266
1267 if (flags & ACPI_DEVFLAG_INITPASS)
1268 set_dte_bit(dte: &dte, DEV_ENTRY_INIT_PASS);
1269 if (flags & ACPI_DEVFLAG_EXTINT)
1270 set_dte_bit(dte: &dte, DEV_ENTRY_EINT_PASS);
1271 if (flags & ACPI_DEVFLAG_NMI)
1272 set_dte_bit(dte: &dte, DEV_ENTRY_NMI_PASS);
1273 if (flags & ACPI_DEVFLAG_SYSMGT1)
1274 set_dte_bit(dte: &dte, DEV_ENTRY_SYSMGT1);
1275 if (flags & ACPI_DEVFLAG_SYSMGT2)
1276 set_dte_bit(dte: &dte, DEV_ENTRY_SYSMGT2);
1277 if (flags & ACPI_DEVFLAG_LINT0)
1278 set_dte_bit(dte: &dte, DEV_ENTRY_LINT0_PASS);
1279 if (flags & ACPI_DEVFLAG_LINT1)
1280 set_dte_bit(dte: &dte, DEV_ENTRY_LINT1_PASS);
1281
1282 /* Apply erratum 63, which needs info in initial_dte */
1283 if (FIELD_GET(DTE_DATA1_SYSMGT_MASK, dte.data[1]) == 0x1)
1284 dte.data[0] |= DTE_FLAG_IW;
1285
1286 memcpy(to: &d->dte, from: &dte, len: sizeof(dte));
1287 d->segid = iommu->pci_seg->id;
1288 d->devid_first = first;
1289 d->devid_last = last;
1290 list_add_tail(new: &d->list, head: &amd_ivhd_dev_flags_list);
1291 }
1292
1293 for (i = first; i <= last; i++) {
1294 if (flags) {
1295 struct dev_table_entry *dev_table = get_dev_table(iommu);
1296
1297 memcpy(to: &dev_table[i], from: &dte, len: sizeof(dte));
1298 }
1299 amd_iommu_set_rlookup_table(iommu, devid: i);
1300 }
1301}
1302
1303static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
1304 u16 devid, u32 flags, u32 ext_flags)
1305{
1306 set_dev_entry_from_acpi_range(iommu, first: devid, last: devid, flags, ext_flags);
1307}
1308
1309int __init add_special_device(u8 type, u8 id, u32 *devid, bool cmd_line)
1310{
1311 struct devid_map *entry;
1312 struct list_head *list;
1313
1314 if (type == IVHD_SPECIAL_IOAPIC)
1315 list = &ioapic_map;
1316 else if (type == IVHD_SPECIAL_HPET)
1317 list = &hpet_map;
1318 else
1319 return -EINVAL;
1320
1321 list_for_each_entry(entry, list, list) {
1322 if (!(entry->id == id && entry->cmd_line))
1323 continue;
1324
1325 pr_info("Command-line override present for %s id %d - ignoring\n",
1326 type == IVHD_SPECIAL_IOAPIC ? "IOAPIC" : "HPET", id);
1327
1328 *devid = entry->devid;
1329
1330 return 0;
1331 }
1332
1333 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1334 if (!entry)
1335 return -ENOMEM;
1336
1337 entry->id = id;
1338 entry->devid = *devid;
1339 entry->cmd_line = cmd_line;
1340
1341 list_add_tail(new: &entry->list, head: list);
1342
1343 return 0;
1344}
1345
1346static int __init add_acpi_hid_device(u8 *hid, u8 *uid, u32 *devid,
1347 bool cmd_line)
1348{
1349 struct acpihid_map_entry *entry;
1350 struct list_head *list = &acpihid_map;
1351
1352 list_for_each_entry(entry, list, list) {
1353 if (strcmp(entry->hid, hid) ||
1354 (*uid && *entry->uid && strcmp(entry->uid, uid)) ||
1355 !entry->cmd_line)
1356 continue;
1357
1358 pr_info("Command-line override for hid:%s uid:%s\n",
1359 hid, uid);
1360 *devid = entry->devid;
1361 return 0;
1362 }
1363
1364 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1365 if (!entry)
1366 return -ENOMEM;
1367
1368 memcpy(to: entry->uid, from: uid, len: strlen(uid));
1369 memcpy(to: entry->hid, from: hid, len: strlen(hid));
1370 entry->devid = *devid;
1371 entry->cmd_line = cmd_line;
1372 entry->root_devid = (entry->devid & (~0x7));
1373
1374 pr_info("%s, add hid:%s, uid:%s, rdevid:%#x\n",
1375 entry->cmd_line ? "cmd" : "ivrs",
1376 entry->hid, entry->uid, entry->root_devid);
1377
1378 list_add_tail(new: &entry->list, head: list);
1379 return 0;
1380}
1381
1382static int __init add_early_maps(void)
1383{
1384 int i, ret;
1385
1386 for (i = 0; i < early_ioapic_map_size; ++i) {
1387 ret = add_special_device(IVHD_SPECIAL_IOAPIC,
1388 id: early_ioapic_map[i].id,
1389 devid: &early_ioapic_map[i].devid,
1390 cmd_line: early_ioapic_map[i].cmd_line);
1391 if (ret)
1392 return ret;
1393 }
1394
1395 for (i = 0; i < early_hpet_map_size; ++i) {
1396 ret = add_special_device(IVHD_SPECIAL_HPET,
1397 id: early_hpet_map[i].id,
1398 devid: &early_hpet_map[i].devid,
1399 cmd_line: early_hpet_map[i].cmd_line);
1400 if (ret)
1401 return ret;
1402 }
1403
1404 for (i = 0; i < early_acpihid_map_size; ++i) {
1405 ret = add_acpi_hid_device(hid: early_acpihid_map[i].hid,
1406 uid: early_acpihid_map[i].uid,
1407 devid: &early_acpihid_map[i].devid,
1408 cmd_line: early_acpihid_map[i].cmd_line);
1409 if (ret)
1410 return ret;
1411 }
1412
1413 return 0;
1414}
1415
1416/*
1417 * Takes a pointer to an AMD IOMMU entry in the ACPI table and
1418 * initializes the hardware and our data structures with it.
1419 */
1420static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
1421 struct ivhd_header *h)
1422{
1423 u8 *p = (u8 *)h;
1424 u8 *end = p, flags = 0;
1425 u16 devid = 0, devid_start = 0, devid_to = 0, seg_id;
1426 u32 dev_i, ext_flags = 0;
1427 bool alias = false;
1428 struct ivhd_entry *e;
1429 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
1430 u32 ivhd_size;
1431 int ret;
1432
1433
1434 ret = add_early_maps();
1435 if (ret)
1436 return ret;
1437
1438 amd_iommu_apply_ivrs_quirks();
1439
1440 /*
1441 * First save the recommended feature enable bits from ACPI
1442 */
1443 iommu->acpi_flags = h->flags;
1444
1445 /*
1446 * Done. Now parse the device entries
1447 */
1448 ivhd_size = get_ivhd_header_size(h);
1449 if (!ivhd_size) {
1450 pr_err("Unsupported IVHD type %#x\n", h->type);
1451 return -EINVAL;
1452 }
1453
1454 p += ivhd_size;
1455
1456 end += h->length;
1457
1458
1459 while (p < end) {
1460 e = (struct ivhd_entry *)p;
1461 seg_id = pci_seg->id;
1462
1463 switch (e->type) {
1464 case IVHD_DEV_ALL:
1465
1466 DUMP_printk(" DEV_ALL\t\t\tsetting: %#02x\n", e->flags);
1467 set_dev_entry_from_acpi_range(iommu, first: 0, last: pci_seg->last_bdf, flags: e->flags, ext_flags: 0);
1468 break;
1469 case IVHD_DEV_SELECT:
1470
1471 DUMP_printk(" DEV_SELECT\t\t\tdevid: %04x:%02x:%02x.%x flags: %#02x\n",
1472 seg_id, PCI_BUS_NUM(e->devid),
1473 PCI_SLOT(e->devid),
1474 PCI_FUNC(e->devid),
1475 e->flags);
1476
1477 devid = e->devid;
1478 set_dev_entry_from_acpi(iommu, devid, flags: e->flags, ext_flags: 0);
1479 break;
1480 case IVHD_DEV_SELECT_RANGE_START:
1481
1482 DUMP_printk(" DEV_SELECT_RANGE_START\tdevid: %04x:%02x:%02x.%x flags: %#02x\n",
1483 seg_id, PCI_BUS_NUM(e->devid),
1484 PCI_SLOT(e->devid),
1485 PCI_FUNC(e->devid),
1486 e->flags);
1487
1488 devid_start = e->devid;
1489 flags = e->flags;
1490 ext_flags = 0;
1491 alias = false;
1492 break;
1493 case IVHD_DEV_ALIAS:
1494
1495 DUMP_printk(" DEV_ALIAS\t\t\tdevid: %04x:%02x:%02x.%x flags: %#02x devid_to: %02x:%02x.%x\n",
1496 seg_id, PCI_BUS_NUM(e->devid),
1497 PCI_SLOT(e->devid),
1498 PCI_FUNC(e->devid),
1499 e->flags,
1500 PCI_BUS_NUM(e->ext >> 8),
1501 PCI_SLOT(e->ext >> 8),
1502 PCI_FUNC(e->ext >> 8));
1503
1504 devid = e->devid;
1505 devid_to = e->ext >> 8;
1506 set_dev_entry_from_acpi(iommu, devid , flags: e->flags, ext_flags: 0);
1507 set_dev_entry_from_acpi(iommu, devid: devid_to, flags: e->flags, ext_flags: 0);
1508 pci_seg->alias_table[devid] = devid_to;
1509 break;
1510 case IVHD_DEV_ALIAS_RANGE:
1511
1512 DUMP_printk(" DEV_ALIAS_RANGE\t\tdevid: %04x:%02x:%02x.%x flags: %#02x devid_to: %04x:%02x:%02x.%x\n",
1513 seg_id, PCI_BUS_NUM(e->devid),
1514 PCI_SLOT(e->devid),
1515 PCI_FUNC(e->devid),
1516 e->flags,
1517 seg_id, PCI_BUS_NUM(e->ext >> 8),
1518 PCI_SLOT(e->ext >> 8),
1519 PCI_FUNC(e->ext >> 8));
1520
1521 devid_start = e->devid;
1522 flags = e->flags;
1523 devid_to = e->ext >> 8;
1524 ext_flags = 0;
1525 alias = true;
1526 break;
1527 case IVHD_DEV_EXT_SELECT:
1528
1529 DUMP_printk(" DEV_EXT_SELECT\t\tdevid: %04x:%02x:%02x.%x flags: %#02x ext: %08x\n",
1530 seg_id, PCI_BUS_NUM(e->devid),
1531 PCI_SLOT(e->devid),
1532 PCI_FUNC(e->devid),
1533 e->flags, e->ext);
1534
1535 devid = e->devid;
1536 set_dev_entry_from_acpi(iommu, devid, flags: e->flags,
1537 ext_flags: e->ext);
1538 break;
1539 case IVHD_DEV_EXT_SELECT_RANGE:
1540
1541 DUMP_printk(" DEV_EXT_SELECT_RANGE\tdevid: %04x:%02x:%02x.%x flags: %#02x ext: %08x\n",
1542 seg_id, PCI_BUS_NUM(e->devid),
1543 PCI_SLOT(e->devid),
1544 PCI_FUNC(e->devid),
1545 e->flags, e->ext);
1546
1547 devid_start = e->devid;
1548 flags = e->flags;
1549 ext_flags = e->ext;
1550 alias = false;
1551 break;
1552 case IVHD_DEV_RANGE_END:
1553
1554 DUMP_printk(" DEV_RANGE_END\t\tdevid: %04x:%02x:%02x.%x\n",
1555 seg_id, PCI_BUS_NUM(e->devid),
1556 PCI_SLOT(e->devid),
1557 PCI_FUNC(e->devid));
1558
1559 devid = e->devid;
1560 if (alias) {
1561 for (dev_i = devid_start; dev_i <= devid; ++dev_i)
1562 pci_seg->alias_table[dev_i] = devid_to;
1563 set_dev_entry_from_acpi(iommu, devid: devid_to, flags, ext_flags);
1564 }
1565 set_dev_entry_from_acpi_range(iommu, first: devid_start, last: devid, flags, ext_flags);
1566 break;
1567 case IVHD_DEV_SPECIAL: {
1568 u8 handle, type;
1569 const char *var;
1570 u32 devid;
1571 int ret;
1572
1573 handle = e->ext & 0xff;
1574 devid = PCI_SEG_DEVID_TO_SBDF(seg_id, (e->ext >> 8));
1575 type = (e->ext >> 24) & 0xff;
1576
1577 if (type == IVHD_SPECIAL_IOAPIC)
1578 var = "IOAPIC";
1579 else if (type == IVHD_SPECIAL_HPET)
1580 var = "HPET";
1581 else
1582 var = "UNKNOWN";
1583
1584 DUMP_printk(" DEV_SPECIAL(%s[%d])\t\tdevid: %04x:%02x:%02x.%x, flags: %#02x\n",
1585 var, (int)handle,
1586 seg_id, PCI_BUS_NUM(devid),
1587 PCI_SLOT(devid),
1588 PCI_FUNC(devid),
1589 e->flags);
1590
1591 ret = add_special_device(type, id: handle, devid: &devid, cmd_line: false);
1592 if (ret)
1593 return ret;
1594
1595 /*
1596 * add_special_device might update the devid in case a
1597 * command-line override is present. So call
1598 * set_dev_entry_from_acpi after add_special_device.
1599 */
1600 set_dev_entry_from_acpi(iommu, devid, flags: e->flags, ext_flags: 0);
1601
1602 break;
1603 }
1604 case IVHD_DEV_ACPI_HID: {
1605 u32 devid;
1606 u8 hid[ACPIHID_HID_LEN];
1607 u8 uid[ACPIHID_UID_LEN];
1608 int ret;
1609
1610 if (h->type != 0x40) {
1611 pr_err(FW_BUG "Invalid IVHD device type %#x\n",
1612 e->type);
1613 break;
1614 }
1615
1616 BUILD_BUG_ON(sizeof(e->ext_hid) != ACPIHID_HID_LEN - 1);
1617 memcpy(to: hid, from: &e->ext_hid, ACPIHID_HID_LEN - 1);
1618 hid[ACPIHID_HID_LEN - 1] = '\0';
1619
1620 if (!(*hid)) {
1621 pr_err(FW_BUG "Invalid HID.\n");
1622 break;
1623 }
1624
1625 uid[0] = '\0';
1626 switch (e->uidf) {
1627 case UID_NOT_PRESENT:
1628
1629 if (e->uidl != 0)
1630 pr_warn(FW_BUG "Invalid UID length.\n");
1631
1632 break;
1633 case UID_IS_INTEGER:
1634
1635 sprintf(buf: uid, fmt: "%d", e->uid);
1636
1637 break;
1638 case UID_IS_CHARACTER:
1639
1640 memcpy(to: uid, from: &e->uid, len: e->uidl);
1641 uid[e->uidl] = '\0';
1642
1643 break;
1644 default:
1645 break;
1646 }
1647
1648 devid = PCI_SEG_DEVID_TO_SBDF(seg_id, e->devid);
1649 DUMP_printk(" DEV_ACPI_HID(%s[%s])\t\tdevid: %04x:%02x:%02x.%x, flags: %#02x\n",
1650 hid, uid, seg_id,
1651 PCI_BUS_NUM(devid),
1652 PCI_SLOT(devid),
1653 PCI_FUNC(devid),
1654 e->flags);
1655
1656 flags = e->flags;
1657
1658 ret = add_acpi_hid_device(hid, uid, devid: &devid, cmd_line: false);
1659 if (ret)
1660 return ret;
1661
			/*
			 * add_acpi_hid_device might update the devid in case a
			 * command-line override is present. So call
			 * set_dev_entry_from_acpi after add_acpi_hid_device.
			 */
1667 set_dev_entry_from_acpi(iommu, devid, flags: e->flags, ext_flags: 0);
1668
1669 break;
1670 }
1671 default:
1672 break;
1673 }
1674
1675 p += ivhd_entry_length(ivhd: p);
1676 }
1677
1678 return 0;
1679}
1680
1681/* Allocate PCI segment data structure */
1682static struct amd_iommu_pci_seg *__init alloc_pci_segment(u16 id,
1683 struct acpi_table_header *ivrs_base)
1684{
1685 struct amd_iommu_pci_seg *pci_seg;
1686 int last_bdf;
1687
	/*
	 * First parse the ACPI tables to find the largest Bus/Dev/Func we need
	 * to handle in this PCI segment. Based on this information the shared
	 * data structures for the PCI segments in the system will be allocated.
	 */
1693 last_bdf = find_last_devid_acpi(table: ivrs_base, pci_seg: id);
1694 if (last_bdf < 0)
1695 return NULL;
1696
1697 pci_seg = kzalloc(sizeof(struct amd_iommu_pci_seg), GFP_KERNEL);
1698 if (pci_seg == NULL)
1699 return NULL;
1700
1701 pci_seg->last_bdf = last_bdf;
1702 DUMP_printk("PCI segment : 0x%0x, last bdf : 0x%04x\n", id, last_bdf);
1703 pci_seg->dev_table_size =
1704 max(roundup_pow_of_two((last_bdf + 1) * DEV_TABLE_ENTRY_SIZE),
1705 SZ_4K);
1706
1707 pci_seg->id = id;
1708 init_llist_head(list: &pci_seg->dev_data_list);
1709 INIT_LIST_HEAD(list: &pci_seg->unity_map);
1710 list_add_tail(new: &pci_seg->list, head: &amd_iommu_pci_seg_list);
1711
1712 if (alloc_dev_table(pci_seg))
1713 return NULL;
1714 if (alloc_alias_table(pci_seg))
1715 return NULL;
1716 if (alloc_rlookup_table(pci_seg))
1717 return NULL;
1718
1719 return pci_seg;
1720}
1721
1722static struct amd_iommu_pci_seg *__init get_pci_segment(u16 id,
1723 struct acpi_table_header *ivrs_base)
1724{
1725 struct amd_iommu_pci_seg *pci_seg;
1726
1727 for_each_pci_segment(pci_seg) {
1728 if (pci_seg->id == id)
1729 return pci_seg;
1730 }
1731
1732 return alloc_pci_segment(id, ivrs_base);
1733}
1734
1735static void __init free_pci_segments(void)
1736{
1737 struct amd_iommu_pci_seg *pci_seg, *next;
1738
1739 for_each_pci_segment_safe(pci_seg, next) {
1740 list_del(entry: &pci_seg->list);
1741 free_irq_lookup_table(pci_seg);
1742 free_rlookup_table(pci_seg);
1743 free_alias_table(pci_seg);
1744 free_dev_table(pci_seg);
1745 kfree(objp: pci_seg);
1746 }
1747}
1748
1749static void __init free_sysfs(struct amd_iommu *iommu)
1750{
1751 if (iommu->iommu.dev) {
1752 iommu_device_unregister(iommu: &iommu->iommu);
1753 iommu_device_sysfs_remove(iommu: &iommu->iommu);
1754 }
1755}
1756
1757static void __init free_iommu_one(struct amd_iommu *iommu)
1758{
1759 free_sysfs(iommu);
1760 free_iommu_buffers(iommu);
1761 amd_iommu_free_ppr_log(iommu);
1762 free_ga_log(iommu);
1763 iommu_unmap_mmio_space(iommu);
1764 amd_iommu_iopf_uninit(iommu);
1765}
1766
1767static void __init free_iommu_all(void)
1768{
1769 struct amd_iommu *iommu, *next;
1770
1771 for_each_iommu_safe(iommu, next) {
1772 list_del(entry: &iommu->list);
1773 free_iommu_one(iommu);
1774 kfree(objp: iommu);
1775 }
1776}
1777
1778/*
1779 * Family15h Model 10h-1fh erratum 746 (IOMMU Logging May Stall Translations)
1780 * Workaround:
 * BIOS should disable L2B miscellaneous clock gating by setting
1782 * L2_L2B_CK_GATE_CONTROL[CKGateL2BMiscDisable](D0F2xF4_x90[2]) = 1b
1783 */
1784static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
1785{
1786 u32 value;
1787
1788 if ((boot_cpu_data.x86 != 0x15) ||
1789 (boot_cpu_data.x86_model < 0x10) ||
1790 (boot_cpu_data.x86_model > 0x1f))
1791 return;
1792
1793 pci_write_config_dword(dev: iommu->dev, where: 0xf0, val: 0x90);
1794 pci_read_config_dword(dev: iommu->dev, where: 0xf4, val: &value);
1795
1796 if (value & BIT(2))
1797 return;
1798
1799 /* Select NB indirect register 0x90 and enable writing */
1800 pci_write_config_dword(dev: iommu->dev, where: 0xf0, val: 0x90 | (1 << 8));
1801
1802 pci_write_config_dword(dev: iommu->dev, where: 0xf4, val: value | 0x4);
1803 pci_info(iommu->dev, "Applying erratum 746 workaround\n");
1804
1805 /* Clear the enable writing bit */
1806 pci_write_config_dword(dev: iommu->dev, where: 0xf0, val: 0x90);
1807}
1808
1809/*
1810 * Family15h Model 30h-3fh (IOMMU Mishandles ATS Write Permission)
1811 * Workaround:
1812 * BIOS should enable ATS write permission check by setting
1813 * L2_DEBUG_3[AtsIgnoreIWDis](D0F2xF4_x47[0]) = 1b
1814 */
1815static void amd_iommu_ats_write_check_workaround(struct amd_iommu *iommu)
1816{
1817 u32 value;
1818
1819 if ((boot_cpu_data.x86 != 0x15) ||
1820 (boot_cpu_data.x86_model < 0x30) ||
1821 (boot_cpu_data.x86_model > 0x3f))
1822 return;
1823
1824 /* Test L2_DEBUG_3[AtsIgnoreIWDis] == 1 */
1825 value = iommu_read_l2(iommu, address: 0x47);
1826
1827 if (value & BIT(0))
1828 return;
1829
1830 /* Set L2_DEBUG_3[AtsIgnoreIWDis] = 1 */
1831 iommu_write_l2(iommu, address: 0x47, val: value | BIT(0));
1832
1833 pci_info(iommu->dev, "Applying ATS write check workaround\n");
1834}
1835
1836/*
1837 * This function glues the initialization function for one IOMMU
1838 * together and also allocates the command buffer and programs the
1839 * hardware. It does NOT enable the IOMMU. This is done afterwards.
1840 */
1841static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h,
1842 struct acpi_table_header *ivrs_base)
1843{
1844 struct amd_iommu_pci_seg *pci_seg;
1845
1846 pci_seg = get_pci_segment(id: h->pci_seg, ivrs_base);
1847 if (pci_seg == NULL)
1848 return -ENOMEM;
1849 iommu->pci_seg = pci_seg;
1850
1851 raw_spin_lock_init(&iommu->lock);
1852 atomic64_set(v: &iommu->cmd_sem_val, i: 0);
1853
1854 /* Add IOMMU to internal data structures */
1855 list_add_tail(new: &iommu->list, head: &amd_iommu_list);
1856 iommu->index = amd_iommus_present++;
1857
1858 if (unlikely(iommu->index >= MAX_IOMMUS)) {
1859 WARN(1, "System has more IOMMUs than supported by this driver\n");
1860 return -ENOSYS;
1861 }
1862
1863 /*
1864 * Copy data from ACPI table entry to the iommu struct
1865 */
1866 iommu->devid = h->devid;
1867 iommu->cap_ptr = h->cap_ptr;
1868 iommu->mmio_phys = h->mmio_phys;
1869
1870 switch (h->type) {
1871 case 0x10:
1872 /* Check if IVHD EFR contains proper max banks/counters */
1873 if ((h->efr_attr != 0) &&
1874 ((h->efr_attr & (0xF << 13)) != 0) &&
1875 ((h->efr_attr & (0x3F << 17)) != 0))
1876 iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
1877 else
1878 iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
1879
1880 /* GAM requires GA mode. */
1881 if ((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0)
1882 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
1883 break;
1884 case 0x11:
1885 case 0x40:
1886 if (h->efr_reg & (1 << 9))
1887 iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
1888 else
1889 iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
1890
1891 /* XT and GAM require GA mode. */
1892 if ((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0) {
1893 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
1894 break;
1895 }
1896
1897 if (h->efr_reg & BIT(IOMMU_EFR_XTSUP_SHIFT))
1898 amd_iommu_xt_mode = IRQ_REMAP_X2APIC_MODE;
1899
1900 if (h->efr_attr & BIT(IOMMU_IVHD_ATTR_HATDIS_SHIFT)) {
1901 pr_warn_once("Host Address Translation is not supported.\n");
1902 amd_iommu_hatdis = true;
1903 }
1904
1905 early_iommu_features_init(iommu, h);
1906
1907 break;
1908 default:
1909 return -EINVAL;
1910 }
1911
1912 iommu->mmio_base = iommu_map_mmio_space(address: iommu->mmio_phys,
1913 end: iommu->mmio_phys_end);
1914 if (!iommu->mmio_base)
1915 return -ENOMEM;
1916
1917 return init_iommu_from_acpi(iommu, h);
1918}
1919
1920static int __init init_iommu_one_late(struct amd_iommu *iommu)
1921{
1922 int ret;
1923
1924 ret = alloc_iommu_buffers(iommu);
1925 if (ret)
1926 return ret;
1927
1928 iommu->int_enabled = false;
1929
1930 init_translation_status(iommu);
1931 if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
1932 iommu_disable(iommu);
1933 clear_translation_pre_enabled(iommu);
1934 pr_warn("Translation was enabled for IOMMU:%d but we are not in kdump mode\n",
1935 iommu->index);
1936 }
1937 if (amd_iommu_pre_enabled)
1938 amd_iommu_pre_enabled = translation_pre_enabled(iommu);
1939
1940 if (amd_iommu_irq_remap) {
1941 ret = amd_iommu_create_irq_domain(iommu);
1942 if (ret)
1943 return ret;
1944 }
1945
1946 /*
1947 * Make sure IOMMU is not considered to translate itself. The IVRS
1948 * table tells us so, but this is a lie!
1949 */
1950 iommu->pci_seg->rlookup_table[iommu->devid] = NULL;
1951
1952 return 0;
1953}
1954
1955/**
1956 * get_highest_supported_ivhd_type - Look up the appropriate IVHD type
1957 * @ivrs: Pointer to the IVRS header
1958 *
 * This function searches through all IVHD blocks for the highest supported
 * IVHD type.
1960 */
1961static u8 get_highest_supported_ivhd_type(struct acpi_table_header *ivrs)
1962{
1963 u8 *base = (u8 *)ivrs;
1964 struct ivhd_header *ivhd = (struct ivhd_header *)
1965 (base + IVRS_HEADER_LENGTH);
1966 u8 last_type = ivhd->type;
1967 u16 devid = ivhd->devid;
1968
1969 while (((u8 *)ivhd - base < ivrs->length) &&
1970 (ivhd->type <= ACPI_IVHD_TYPE_MAX_SUPPORTED)) {
1971 u8 *p = (u8 *) ivhd;
1972
1973 if (ivhd->devid == devid)
1974 last_type = ivhd->type;
1975 ivhd = (struct ivhd_header *)(p + ivhd->length);
1976 }
1977
1978 return last_type;
1979}
1980
1981/*
1982 * Iterates over all IOMMU entries in the ACPI table, allocates the
1983 * IOMMU structure and initializes it with init_iommu_one()
1984 */
1985static int __init init_iommu_all(struct acpi_table_header *table)
1986{
1987 u8 *p = (u8 *)table, *end = (u8 *)table;
1988 struct ivhd_header *h;
1989 struct amd_iommu *iommu;
1990 int ret;
1991
1992 end += table->length;
1993 p += IVRS_HEADER_LENGTH;
1994
1995 /* Phase 1: Process all IVHD blocks */
1996 while (p < end) {
1997 h = (struct ivhd_header *)p;
1998 if (*p == amd_iommu_target_ivhd_type) {
1999
2000 DUMP_printk("device: %04x:%02x:%02x.%01x cap: %04x "
2001 "flags: %01x info %04x\n",
2002 h->pci_seg, PCI_BUS_NUM(h->devid),
2003 PCI_SLOT(h->devid), PCI_FUNC(h->devid),
2004 h->cap_ptr, h->flags, h->info);
2005 DUMP_printk(" mmio-addr: %016llx\n",
2006 h->mmio_phys);
2007
2008 iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
2009 if (iommu == NULL)
2010 return -ENOMEM;
2011
2012			ret = init_iommu_one(iommu, h, table);
2013 if (ret)
2014 return ret;
2015 }
2016 p += h->length;
2017
2018 }
2019 WARN_ON(p != end);
2020
2021 /* Phase 2 : Early feature support check */
2022 get_global_efr();
2023
2024 /* Phase 3 : Enabling IOMMU features */
2025 for_each_iommu(iommu) {
2026 ret = init_iommu_one_late(iommu);
2027 if (ret)
2028 return ret;
2029 }
2030
2031 return 0;
2032}
2033
2034static void init_iommu_perf_ctr(struct amd_iommu *iommu)
2035{
2036 u64 val;
2037 struct pci_dev *pdev = iommu->dev;
2038
2039 if (!check_feature(FEATURE_PC))
2040 return;
2041
2042 amd_iommu_pc_present = true;
2043
2044 pci_info(pdev, "IOMMU performance counters supported\n");
2045
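	/*
	 * The counter configuration register reports the number of banks in
	 * bits 17:12 and the number of counters per bank in bits 10:7.
	 */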
2046	val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET);
2047 iommu->max_banks = (u8) ((val >> 12) & 0x3f);
2048 iommu->max_counters = (u8) ((val >> 7) & 0xf);
2049
2050 return;
2051}
2052
2053static ssize_t amd_iommu_show_cap(struct device *dev,
2054 struct device_attribute *attr,
2055 char *buf)
2056{
2057 struct amd_iommu *iommu = dev_to_amd_iommu(dev);
2058	return sysfs_emit(buf, "%x\n", iommu->cap);
2059}
2060static DEVICE_ATTR(cap, S_IRUGO, amd_iommu_show_cap, NULL);
2061
2062static ssize_t amd_iommu_show_features(struct device *dev,
2063 struct device_attribute *attr,
2064 char *buf)
2065{
2066	return sysfs_emit(buf, "%llx:%llx\n", amd_iommu_efr, amd_iommu_efr2);
2067}
2068static DEVICE_ATTR(features, S_IRUGO, amd_iommu_show_features, NULL);
2069
2070static struct attribute *amd_iommu_attrs[] = {
2071 &dev_attr_cap.attr,
2072 &dev_attr_features.attr,
2073 NULL,
2074};
2075
2076static struct attribute_group amd_iommu_group = {
2077 .name = "amd-iommu",
2078 .attrs = amd_iommu_attrs,
2079};
2080
2081static const struct attribute_group *amd_iommu_groups[] = {
2082 &amd_iommu_group,
2083 NULL,
2084};
2085
2086/*
2087 * Note: IVHD types 0x11 and 0x40 also contain an exact copy
2088 * of the IOMMU Extended Feature Register [MMIO Offset 0030h].
2089 * Default to EFR in IVHD since it is available sooner (i.e. before PCI init).
2090 */
2091static void __init late_iommu_features_init(struct amd_iommu *iommu)
2092{
2093 u64 features, features2;
2094
2095 if (!(iommu->cap & (1 << IOMMU_CAP_EFR)))
2096 return;
2097
2098 /* read extended feature bits */
2099	features = readq(iommu->mmio_base + MMIO_EXT_FEATURES);
2100	features2 = readq(iommu->mmio_base + MMIO_EXT_FEATURES2);
2101
2102 if (!amd_iommu_efr) {
2103 amd_iommu_efr = features;
2104 amd_iommu_efr2 = features2;
2105 return;
2106 }
2107
2108 /*
2109 * Sanity check and warn if EFR values from
2110 * IVHD and MMIO conflict.
2111 */
2112 if (features != amd_iommu_efr ||
2113 features2 != amd_iommu_efr2) {
2114 pr_warn(FW_WARN
2115 "EFR mismatch. Use IVHD EFR (%#llx : %#llx), EFR2 (%#llx : %#llx).\n",
2116 features, amd_iommu_efr,
2117 features2, amd_iommu_efr2);
2118 }
2119}
2120
2121static int __init iommu_init_pci(struct amd_iommu *iommu)
2122{
2123 int cap_ptr = iommu->cap_ptr;
2124 int ret;
2125
2126	iommu->dev = pci_get_domain_bus_and_slot(iommu->pci_seg->id,
2127						  PCI_BUS_NUM(iommu->devid),
2128						  iommu->devid & 0xff);
2129 if (!iommu->dev)
2130 return -ENODEV;
2131
2132 /* ACPI _PRT won't have an IRQ for IOMMU */
2133 iommu->dev->irq_managed = 1;
2134
2135	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
2136			      &iommu->cap);
2137
2138 if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
2139 amd_iommu_iotlb_sup = false;
2140
2141 late_iommu_features_init(iommu);
2142
2143 if (check_feature(FEATURE_GT)) {
2144 int glxval;
2145 u64 pasmax;
2146
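		/* EFR[PASmax] encodes the supported PASID width as PASmax + 1 bits. */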
2147 pasmax = FIELD_GET(FEATURE_PASMAX, amd_iommu_efr);
2148 iommu->iommu.max_pasids = (1 << (pasmax + 1)) - 1;
2149
2150 BUG_ON(iommu->iommu.max_pasids & ~PASID_MASK);
2151
2152 glxval = FIELD_GET(FEATURE_GLX, amd_iommu_efr);
2153
2154 if (amd_iommu_max_glx_val == -1)
2155 amd_iommu_max_glx_val = glxval;
2156 else
2157 amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval);
2158
2159 iommu_enable_gt(iommu);
2160 }
2161
2162 if (check_feature(FEATURE_PPR) && amd_iommu_alloc_ppr_log(iommu))
2163 return -ENOMEM;
2164
2165 if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE)) {
2166 pr_info("Using strict mode due to virtualization\n");
2167 iommu_set_dma_strict();
2168 amd_iommu_np_cache = true;
2169 }
2170
2171 init_iommu_perf_ctr(iommu);
2172
2173	if (is_rd890_iommu(iommu->dev)) {
2174 int i, j;
2175
2176		iommu->root_pdev =
2177			pci_get_domain_bus_and_slot(iommu->pci_seg->id,
2178						    iommu->dev->bus->number,
2179						    PCI_DEVFN(0, 0));
2180
2181 /*
2182 * Some rd890 systems may not be fully reconfigured by the
2183 * BIOS, so it's necessary for us to store this information so
2184 * it can be reprogrammed on resume
2185 */
2186		pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4,
2187				      &iommu->stored_addr_lo);
2188		pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8,
2189				      &iommu->stored_addr_hi);
2190
2191 /* Low bit locks writes to configuration space */
2192 iommu->stored_addr_lo &= ~1;
2193
2194 for (i = 0; i < 6; i++)
2195 for (j = 0; j < 0x12; j++)
2196				iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j);
2197
2198 for (i = 0; i < 0x83; i++)
2199			iommu->stored_l2[i] = iommu_read_l2(iommu, i);
2200 }
2201
2202 amd_iommu_erratum_746_workaround(iommu);
2203 amd_iommu_ats_write_check_workaround(iommu);
2204
2205	ret = iommu_device_sysfs_add(&iommu->iommu, &iommu->dev->dev,
2206				     amd_iommu_groups, "ivhd%d", iommu->index);
2207 if (ret)
2208 return ret;
2209
2210 /*
2211	 * Allocate the per-IOMMU IOPF queue here so that PRI-capable
2212	 * devices can be added to it in the device attach path.
2213 */
2214 if (amd_iommu_gt_ppr_supported()) {
2215 ret = amd_iommu_iopf_init(iommu);
2216 if (ret)
2217 return ret;
2218 }
2219
2220	ret = iommu_device_register(&iommu->iommu, &amd_iommu_ops, NULL);
2221 if (ret || amd_iommu_pgtable == PD_MODE_NONE) {
2222 /*
2223 * Remove sysfs if DMA translation is not supported by the
2224		 * IOMMU. Do not return an error so that IRQ remapping can still
2225		 * be enabled in state_next(); DTE[V, TV] must eventually be set to 0.
2226 */
2227		iommu_device_sysfs_remove(&iommu->iommu);
2228 }
2229
2230	return pci_enable_device(iommu->dev);
2231}
2232
2233static void print_iommu_info(void)
2234{
2235 int i;
2236 static const char * const feat_str[] = {
2237 "PreF", "PPR", "X2APIC", "NX", "GT", "[5]",
2238 "IA", "GA", "HE", "PC"
2239 };
2240
2241 if (amd_iommu_efr) {
2242 pr_info("Extended features (%#llx, %#llx):", amd_iommu_efr, amd_iommu_efr2);
2243
2244 for (i = 0; i < ARRAY_SIZE(feat_str); ++i) {
2245			if (check_feature(1ULL << i))
2246 pr_cont(" %s", feat_str[i]);
2247 }
2248
2249 if (check_feature(FEATURE_GAM_VAPIC))
2250 pr_cont(" GA_vAPIC");
2251
2252 if (check_feature(FEATURE_SNP))
2253 pr_cont(" SNP");
2254
2255 pr_cont("\n");
2256 }
2257
2258 if (irq_remapping_enabled) {
2259 pr_info("Interrupt remapping enabled\n");
2260 if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
2261 pr_info("X2APIC enabled\n");
2262 }
2263 if (amd_iommu_pgtable == PD_MODE_V2) {
2264 pr_info("V2 page table enabled (Paging mode : %d level)\n",
2265 amd_iommu_gpt_level);
2266 }
2267}
2268
2269static int __init amd_iommu_init_pci(void)
2270{
2271 struct amd_iommu *iommu;
2272 struct amd_iommu_pci_seg *pci_seg;
2273 int ret;
2274
2275 /* Init global identity domain before registering IOMMU */
2276 amd_iommu_init_identity_domain();
2277
2278 for_each_iommu(iommu) {
2279 ret = iommu_init_pci(iommu);
2280 if (ret) {
2281 pr_err("IOMMU%d: Failed to initialize IOMMU Hardware (error=%d)!\n",
2282 iommu->index, ret);
2283 goto out;
2284 }
2285 /* Need to setup range after PCI init */
2286 iommu_set_cwwb_range(iommu);
2287 }
2288
2289 /*
2290 * Order is important here to make sure any unity map requirements are
2291 * fulfilled. The unity mappings are created and written to the device
2292 * table during the iommu_init_pci() call.
2293 *
2294 * After that we call init_device_table_dma() to make sure any
2295 * uninitialized DTE will block DMA, and in the end we flush the caches
2296 * of all IOMMUs to make sure the changes to the device table are
2297 * active.
2298 */
2299 for_each_pci_segment(pci_seg)
2300 init_device_table_dma(pci_seg);
2301
2302 for_each_iommu(iommu)
2303 amd_iommu_flush_all_caches(iommu);
2304
2305 print_iommu_info();
2306
2307out:
2308 return ret;
2309}
2310
2311/****************************************************************************
2312 *
2313 * The following functions initialize the MSI interrupts for all IOMMUs
2314 * in the system. It's a bit challenging because there could be multiple
2315 * IOMMUs per PCI BDF but we can call pci_enable_msi(x) only once per
2316 * pci_dev.
2317 *
2318 ****************************************************************************/
2319
2320static int iommu_setup_msi(struct amd_iommu *iommu)
2321{
2322 int r;
2323
2324	r = pci_enable_msi(iommu->dev);
2325 if (r)
2326 return r;
2327
2328	r = request_threaded_irq(iommu->dev->irq,
2329				 amd_iommu_int_handler,
2330				 amd_iommu_int_thread,
2331				 0, "AMD-Vi",
2332				 iommu);
2333
2334 if (r) {
2335		pci_disable_msi(iommu->dev);
2336 return r;
2337 }
2338
2339 return 0;
2340}
2341
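/*
 * Layout of the 64-bit INTCAPXT MMIO registers used to route IOMMU
 * interrupts when the system runs in x2APIC mode. The destination APIC ID
 * is split into a low 24-bit and a high 8-bit field.
 */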
2342union intcapxt {
2343 u64 capxt;
2344 struct {
2345 u64 reserved_0 : 2,
2346 dest_mode_logical : 1,
2347 reserved_1 : 5,
2348 destid_0_23 : 24,
2349 vector : 8,
2350 reserved_2 : 16,
2351 destid_24_31 : 8;
2352 };
2353} __attribute__ ((packed));
2354
2355
2356static struct irq_chip intcapxt_controller;
2357
2358static int intcapxt_irqdomain_activate(struct irq_domain *domain,
2359 struct irq_data *irqd, bool reserve)
2360{
2361 return 0;
2362}
2363
2364static void intcapxt_irqdomain_deactivate(struct irq_domain *domain,
2365 struct irq_data *irqd)
2366{
2367}
2368
2369
2370static int intcapxt_irqdomain_alloc(struct irq_domain *domain, unsigned int virq,
2371 unsigned int nr_irqs, void *arg)
2372{
2373 struct irq_alloc_info *info = arg;
2374 int i, ret;
2375
2376 if (!info || info->type != X86_IRQ_ALLOC_TYPE_AMDVI)
2377 return -EINVAL;
2378
2379	ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
2380 if (ret < 0)
2381 return ret;
2382
2383 for (i = virq; i < virq + nr_irqs; i++) {
2384		struct irq_data *irqd = irq_domain_get_irq_data(domain, i);
2385
2386 irqd->chip = &intcapxt_controller;
2387 irqd->hwirq = info->hwirq;
2388 irqd->chip_data = info->data;
2389		__irq_set_handler(i, handle_edge_irq, 0, "edge");
2390 }
2391
2392 return ret;
2393}
2394
2395static void intcapxt_irqdomain_free(struct irq_domain *domain, unsigned int virq,
2396 unsigned int nr_irqs)
2397{
2398 irq_domain_free_irqs_top(domain, virq, nr_irqs);
2399}
2400
2401
2402static void intcapxt_unmask_irq(struct irq_data *irqd)
2403{
2404 struct amd_iommu *iommu = irqd->chip_data;
2405	struct irq_cfg *cfg = irqd_cfg(irqd);
2406 union intcapxt xt;
2407
2408 xt.capxt = 0ULL;
2409 xt.dest_mode_logical = apic->dest_mode_logical;
2410 xt.vector = cfg->vector;
2411 xt.destid_0_23 = cfg->dest_apicid & GENMASK(23, 0);
2412 xt.destid_24_31 = cfg->dest_apicid >> 24;
2413
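	/* irqd->hwirq holds the MMIO offset of the INTCAPXT register to program. */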
2414	writeq(xt.capxt, iommu->mmio_base + irqd->hwirq);
2415}
2416
2417static void intcapxt_mask_irq(struct irq_data *irqd)
2418{
2419 struct amd_iommu *iommu = irqd->chip_data;
2420
2421	writeq(0, iommu->mmio_base + irqd->hwirq);
2422}
2423
2424
2425static int intcapxt_set_affinity(struct irq_data *irqd,
2426 const struct cpumask *mask, bool force)
2427{
2428 struct irq_data *parent = irqd->parent_data;
2429 int ret;
2430
2431 ret = parent->chip->irq_set_affinity(parent, mask, force);
2432 if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
2433 return ret;
2434 return 0;
2435}
2436
2437static int intcapxt_set_wake(struct irq_data *irqd, unsigned int on)
2438{
2439 return on ? -EOPNOTSUPP : 0;
2440}
2441
2442static struct irq_chip intcapxt_controller = {
2443 .name = "IOMMU-MSI",
2444 .irq_unmask = intcapxt_unmask_irq,
2445 .irq_mask = intcapxt_mask_irq,
2446 .irq_ack = irq_chip_ack_parent,
2447 .irq_retrigger = irq_chip_retrigger_hierarchy,
2448 .irq_set_affinity = intcapxt_set_affinity,
2449 .irq_set_wake = intcapxt_set_wake,
2450 .flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_MOVE_DEFERRED,
2451};
2452
2453static const struct irq_domain_ops intcapxt_domain_ops = {
2454 .alloc = intcapxt_irqdomain_alloc,
2455 .free = intcapxt_irqdomain_free,
2456 .activate = intcapxt_irqdomain_activate,
2457 .deactivate = intcapxt_irqdomain_deactivate,
2458};
2459
2460
2461static struct irq_domain *iommu_irqdomain;
2462
2463static struct irq_domain *iommu_get_irqdomain(void)
2464{
2465 struct fwnode_handle *fn;
2466
2467 /* No need for locking here (yet) as the init is single-threaded */
2468 if (iommu_irqdomain)
2469 return iommu_irqdomain;
2470
2471	fn = irq_domain_alloc_named_fwnode("AMD-Vi-MSI");
2472 if (!fn)
2473 return NULL;
2474
2475	iommu_irqdomain = irq_domain_create_hierarchy(x86_vector_domain, 0, 0,
2476						      fn, &intcapxt_domain_ops,
2477						      NULL);
2478 if (!iommu_irqdomain)
2479		irq_domain_free_fwnode(fn);
2480
2481 return iommu_irqdomain;
2482}
2483
2484static int __iommu_setup_intcapxt(struct amd_iommu *iommu, const char *devname,
2485 int hwirq, irq_handler_t thread_fn)
2486{
2487 struct irq_domain *domain;
2488 struct irq_alloc_info info;
2489 int irq, ret;
2490	int node = dev_to_node(&iommu->dev->dev);
2491
2492 domain = iommu_get_irqdomain();
2493 if (!domain)
2494 return -ENXIO;
2495
2496	init_irq_alloc_info(&info, NULL);
2497 info.type = X86_IRQ_ALLOC_TYPE_AMDVI;
2498 info.data = iommu;
2499 info.hwirq = hwirq;
2500
2501	irq = irq_domain_alloc_irqs(domain, 1, node, &info);
2502 if (irq < 0) {
2503 irq_domain_remove(domain);
2504 return irq;
2505 }
2506
2507	ret = request_threaded_irq(irq, amd_iommu_int_handler,
2508				   thread_fn, 0, devname, iommu);
2509 if (ret) {
2510		irq_domain_free_irqs(irq, 1);
2511 irq_domain_remove(domain);
2512 return ret;
2513 }
2514
2515 return 0;
2516}
2517
2518static int iommu_setup_intcapxt(struct amd_iommu *iommu)
2519{
2520 int ret;
2521
2522	snprintf(iommu->evt_irq_name, sizeof(iommu->evt_irq_name),
2523		 "AMD-Vi%d-Evt", iommu->index);
2524	ret = __iommu_setup_intcapxt(iommu, iommu->evt_irq_name,
2525				     MMIO_INTCAPXT_EVT_OFFSET,
2526				     amd_iommu_int_thread_evtlog);
2527 if (ret)
2528 return ret;
2529
2530	snprintf(iommu->ppr_irq_name, sizeof(iommu->ppr_irq_name),
2531		 "AMD-Vi%d-PPR", iommu->index);
2532	ret = __iommu_setup_intcapxt(iommu, iommu->ppr_irq_name,
2533				     MMIO_INTCAPXT_PPR_OFFSET,
2534				     amd_iommu_int_thread_pprlog);
2535 if (ret)
2536 return ret;
2537
2538#ifdef CONFIG_IRQ_REMAP
2539 snprintf(iommu->ga_irq_name, sizeof(iommu->ga_irq_name),
2540 "AMD-Vi%d-GA", iommu->index);
2541 ret = __iommu_setup_intcapxt(iommu, iommu->ga_irq_name,
2542 MMIO_INTCAPXT_GALOG_OFFSET,
2543 amd_iommu_int_thread_galog);
2544#endif
2545
2546 return ret;
2547}
2548
2549static int iommu_init_irq(struct amd_iommu *iommu)
2550{
2551 int ret;
2552
2553 if (iommu->int_enabled)
2554 goto enable_faults;
2555
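	/*
	 * In x2APIC (XT) mode the IOMMU signals its interrupts through the
	 * INTCAPXT registers rather than through its MSI capability.
	 */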
2556 if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
2557 ret = iommu_setup_intcapxt(iommu);
2558 else if (iommu->dev->msi_cap)
2559 ret = iommu_setup_msi(iommu);
2560 else
2561 ret = -ENODEV;
2562
2563 if (ret)
2564 return ret;
2565
2566 iommu->int_enabled = true;
2567enable_faults:
2568
2569 if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
2570 iommu_feature_enable(iommu, CONTROL_INTCAPXT_EN);
2571
2572 iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);
2573
2574 return 0;
2575}
2576
2577/****************************************************************************
2578 *
2579 * The next functions belong to the third pass of parsing the ACPI
2580 * table. In this last pass the memory mapping requirements are
2581 * gathered (like exclusion and unity mapping ranges).
2582 *
2583 ****************************************************************************/
2584
2585static void __init free_unity_maps(void)
2586{
2587 struct unity_map_entry *entry, *next;
2588 struct amd_iommu_pci_seg *p, *pci_seg;
2589
2590 for_each_pci_segment_safe(pci_seg, p) {
2591 list_for_each_entry_safe(entry, next, &pci_seg->unity_map, list) {
2592			list_del(&entry->list);
2593			kfree(entry);
2594 }
2595 }
2596}
2597
2598/* called for unity map ACPI definition */
2599static int __init init_unity_map_range(struct ivmd_header *m,
2600 struct acpi_table_header *ivrs_base)
2601{
2602 struct unity_map_entry *e = NULL;
2603 struct amd_iommu_pci_seg *pci_seg;
2604 char *s;
2605
2606	pci_seg = get_pci_segment(m->pci_seg, ivrs_base);
2607 if (pci_seg == NULL)
2608 return -ENOMEM;
2609
2610 e = kzalloc(sizeof(*e), GFP_KERNEL);
2611 if (e == NULL)
2612 return -ENOMEM;
2613
2614 switch (m->type) {
2615 default:
2616		kfree(e);
2617 return 0;
2618 case ACPI_IVMD_TYPE:
2619		s = "IVMD_TYPE\t\t\t";
2620 e->devid_start = e->devid_end = m->devid;
2621 break;
2622 case ACPI_IVMD_TYPE_ALL:
2623 s = "IVMD_TYPE_ALL\t\t";
2624 e->devid_start = 0;
2625 e->devid_end = pci_seg->last_bdf;
2626 break;
2627 case ACPI_IVMD_TYPE_RANGE:
2628 s = "IVMD_TYPE_RANGE\t\t";
2629 e->devid_start = m->devid;
2630 e->devid_end = m->aux;
2631 break;
2632 }
2633 e->address_start = PAGE_ALIGN(m->range_start);
2634 e->address_end = e->address_start + PAGE_ALIGN(m->range_length);
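	/*
	 * m->flags carries IR in bit 1 and IW in bit 2; shifting right by one
	 * yields the driver's IR/IW protection bit positions.
	 */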
2635 e->prot = m->flags >> 1;
2636
2637 /*
2638 * Treat per-device exclusion ranges as r/w unity-mapped regions
2639	 * since some buggy BIOSes may overwrite the exclusion range
2640	 * (the exclusion_start and exclusion_length members) when
2641	 * multiple exclusion ranges (IVMD entries) are defined in the
2642	 * ACPI table.
2643 */
2644 if (m->flags & IVMD_FLAG_EXCL_RANGE)
2645 e->prot = (IVMD_FLAG_IW | IVMD_FLAG_IR) >> 1;
2646
2647 DUMP_printk("%s devid_start: %04x:%02x:%02x.%x devid_end: "
2648 "%04x:%02x:%02x.%x range_start: %016llx range_end: %016llx"
2649 " flags: %x\n", s, m->pci_seg,
2650 PCI_BUS_NUM(e->devid_start), PCI_SLOT(e->devid_start),
2651 PCI_FUNC(e->devid_start), m->pci_seg,
2652 PCI_BUS_NUM(e->devid_end),
2653 PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end),
2654 e->address_start, e->address_end, m->flags);
2655
2656	list_add_tail(&e->list, &pci_seg->unity_map);
2657
2658 return 0;
2659}
2660
2661/* iterates over all memory definitions we find in the ACPI table */
2662static int __init init_memory_definitions(struct acpi_table_header *table)
2663{
2664 u8 *p = (u8 *)table, *end = (u8 *)table;
2665 struct ivmd_header *m;
2666
2667 end += table->length;
2668 p += IVRS_HEADER_LENGTH;
2669
2670 while (p < end) {
2671 m = (struct ivmd_header *)p;
2672 if (m->flags & (IVMD_FLAG_UNITY_MAP | IVMD_FLAG_EXCL_RANGE))
2673			init_unity_map_range(m, table);
2674
2675 p += m->length;
2676 }
2677
2678 return 0;
2679}
2680
2681/*
2682 * Init the device table to not allow DMA access for devices
2683 */
2684static void init_device_table_dma(struct amd_iommu_pci_seg *pci_seg)
2685{
2686 u32 devid;
2687 struct dev_table_entry *dev_table = pci_seg->dev_table;
2688
2689 if (!dev_table || amd_iommu_pgtable == PD_MODE_NONE)
2690 return;
2691
2692 for (devid = 0; devid <= pci_seg->last_bdf; ++devid) {
2693		set_dte_bit(&dev_table[devid], DEV_ENTRY_VALID);
2694		if (!amd_iommu_snp_en)
2695			set_dte_bit(&dev_table[devid], DEV_ENTRY_TRANSLATION);
2696 }
2697}
2698
2699static void __init uninit_device_table_dma(struct amd_iommu_pci_seg *pci_seg)
2700{
2701 u32 devid;
2702 struct dev_table_entry *dev_table = pci_seg->dev_table;
2703
2704 if (dev_table == NULL)
2705 return;
2706
2707 for (devid = 0; devid <= pci_seg->last_bdf; ++devid) {
2708 dev_table[devid].data[0] = 0ULL;
2709 dev_table[devid].data[1] = 0ULL;
2710 }
2711}
2712
2713static void init_device_table(void)
2714{
2715 struct amd_iommu_pci_seg *pci_seg;
2716 u32 devid;
2717
2718 if (!amd_iommu_irq_remap)
2719 return;
2720
2721 for_each_pci_segment(pci_seg) {
2722 for (devid = 0; devid <= pci_seg->last_bdf; ++devid)
2723			set_dte_bit(&pci_seg->dev_table[devid], DEV_ENTRY_IRQ_TBL_EN);
2724 }
2725}
2726
2727static void iommu_init_flags(struct amd_iommu *iommu)
2728{
2729 iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ?
2730 iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
2731 iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);
2732
2733 iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ?
2734 iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
2735 iommu_feature_disable(iommu, CONTROL_PASSPW_EN);
2736
2737 iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
2738 iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
2739 iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);
2740
2741 iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ?
2742 iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
2743 iommu_feature_disable(iommu, CONTROL_ISOC_EN);
2744
2745 /*
2746 * make IOMMU memory accesses cache coherent
2747 */
2748 iommu_feature_enable(iommu, CONTROL_COHERENT_EN);
2749
2750 /* Set IOTLB invalidation timeout to 1s */
2751 iommu_feature_set(iommu, CTRL_INV_TO_1S, CTRL_INV_TO_MASK, CONTROL_INV_TIMEOUT);
2752
2753 /* Enable Enhanced Peripheral Page Request Handling */
2754 if (check_feature(FEATURE_EPHSUP))
2755 iommu_feature_enable(iommu, CONTROL_EPH_EN);
2756}
2757
2758static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
2759{
2760 int i, j;
2761 u32 ioc_feature_control;
2762 struct pci_dev *pdev = iommu->root_pdev;
2763
2764 /* RD890 BIOSes may not have completely reconfigured the iommu */
2765	if (!is_rd890_iommu(iommu->dev) || !pdev)
2766 return;
2767
2768 /*
2769 * First, we need to ensure that the iommu is enabled. This is
2770 * controlled by a register in the northbridge
2771 */
2772
2773 /* Select Northbridge indirect register 0x75 and enable writing */
2774	pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7));
2775	pci_read_config_dword(pdev, 0x64, &ioc_feature_control);
2776
2777 /* Enable the iommu */
2778 if (!(ioc_feature_control & 0x1))
2779		pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1);
2780
2781 /* Restore the iommu BAR */
2782	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
2783			       iommu->stored_addr_lo);
2784	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 8,
2785			       iommu->stored_addr_hi);
2786
2787 /* Restore the l1 indirect regs for each of the 6 l1s */
2788 for (i = 0; i < 6; i++)
2789 for (j = 0; j < 0x12; j++)
2790			iommu_write_l1(iommu, i, j, iommu->stored_l1[i][j]);
2791
2792 /* Restore the l2 indirect regs */
2793 for (i = 0; i < 0x83; i++)
2794		iommu_write_l2(iommu, i, iommu->stored_l2[i]);
2795
2796 /* Lock PCI setup registers */
2797	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
2798			       iommu->stored_addr_lo | 1);
2799}
2800
2801static void iommu_enable_ga(struct amd_iommu *iommu)
2802{
2803#ifdef CONFIG_IRQ_REMAP
2804 switch (amd_iommu_guest_ir) {
2805 case AMD_IOMMU_GUEST_IR_VAPIC:
2806 case AMD_IOMMU_GUEST_IR_LEGACY_GA:
2807 iommu_feature_enable(iommu, CONTROL_GA_EN);
2808 iommu->irte_ops = &irte_128_ops;
2809 break;
2810 default:
2811 iommu->irte_ops = &irte_32_ops;
2812 break;
2813 }
2814#endif
2815}
2816
2817static void iommu_disable_irtcachedis(struct amd_iommu *iommu)
2818{
2819 iommu_feature_disable(iommu, CONTROL_IRTCACHEDIS);
2820}
2821
2822static void iommu_enable_irtcachedis(struct amd_iommu *iommu)
2823{
2824 u64 ctrl;
2825
2826 if (!amd_iommu_irtcachedis)
2827 return;
2828
2829 /*
2830 * Note:
2831	 * Support for the IRTCacheDis feature is determined by
2832 * checking if the bit is writable.
2833 */
2834 iommu_feature_enable(iommu, CONTROL_IRTCACHEDIS);
2835	ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
2836 ctrl &= (1ULL << CONTROL_IRTCACHEDIS);
2837 if (ctrl)
2838 iommu->irtcachedis_enabled = true;
2839 pr_info("iommu%d (%#06x) : IRT cache is %s\n",
2840 iommu->index, iommu->devid,
2841 iommu->irtcachedis_enabled ? "disabled" : "enabled");
2842}
2843
2844static void iommu_enable_2k_int(struct amd_iommu *iommu)
2845{
2846 if (!FEATURE_NUM_INT_REMAP_SUP_2K(amd_iommu_efr2))
2847 return;
2848
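	/* Select the 2K-entry interrupt remapping mode advertised in EFR2. */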
2849 iommu_feature_set(iommu,
2850 CONTROL_NUM_INT_REMAP_MODE_2K,
2851 CONTROL_NUM_INT_REMAP_MODE_MASK,
2852 CONTROL_NUM_INT_REMAP_MODE);
2853}
2854
2855static void early_enable_iommu(struct amd_iommu *iommu)
2856{
2857 iommu_disable(iommu);
2858 iommu_init_flags(iommu);
2859 iommu_set_device_table(iommu);
2860 iommu_enable_command_buffer(iommu);
2861 iommu_enable_event_buffer(iommu);
2862 iommu_set_exclusion_range(iommu);
2863 iommu_enable_gt(iommu);
2864 iommu_enable_ga(iommu);
2865 iommu_enable_xt(iommu);
2866 iommu_enable_irtcachedis(iommu);
2867 iommu_enable_2k_int(iommu);
2868 iommu_enable(iommu);
2869 amd_iommu_flush_all_caches(iommu);
2870}
2871
2872/*
2873 * This function finally enables all IOMMUs found in the system after
2874 * they have been initialized.
2875 *
2876 * Or, if running in a kdump kernel with all IOMMUs pre-enabled, it tries to
2877 * reuse the old content of the device table entries. If that is not the case,
2878 * or reusing fails, it just continues as a normal kernel would.
2879 */
2880static void early_enable_iommus(void)
2881{
2882 struct amd_iommu *iommu;
2883 struct amd_iommu_pci_seg *pci_seg;
2884
2885 if (!reuse_device_table()) {
2886 /*
2887		 * If we get here because reusing the device table from the old
2888		 * kernel failed (with all IOMMUs pre-enabled), print an error
2889		 * message and free the allocated old_dev_tbl_cpy.
2890 */
2891 if (amd_iommu_pre_enabled) {
2892 pr_err("Failed to reuse DEV table from previous kernel.\n");
2893 /*
2894			 * Bail out early if the DEV table from the previous kernel
2895			 * cannot be remapped/reused while SNP is enabled, as IOMMU
2896			 * commands will time out without it and cause a kdump boot panic.
2897 */
2898 BUG_ON(check_feature(FEATURE_SNP));
2899 }
2900
2901 for_each_pci_segment(pci_seg) {
2902 if (pci_seg->old_dev_tbl_cpy != NULL) {
2903				memunmap((void *)pci_seg->old_dev_tbl_cpy);
2904 pci_seg->old_dev_tbl_cpy = NULL;
2905 }
2906 }
2907
2908 for_each_iommu(iommu) {
2909 clear_translation_pre_enabled(iommu);
2910 early_enable_iommu(iommu);
2911 }
2912 } else {
2913 pr_info("Reused DEV table from previous kernel.\n");
2914
2915 for_each_pci_segment(pci_seg) {
2916			iommu_free_pages(pci_seg->dev_table);
2917 pci_seg->dev_table = pci_seg->old_dev_tbl_cpy;
2918 }
2919
2920 for_each_iommu(iommu) {
2921 iommu_disable_command_buffer(iommu);
2922 iommu_disable_event_buffer(iommu);
2923 iommu_disable_irtcachedis(iommu);
2924 iommu_enable_command_buffer(iommu);
2925 iommu_enable_event_buffer(iommu);
2926 iommu_enable_ga(iommu);
2927 iommu_enable_xt(iommu);
2928 iommu_enable_irtcachedis(iommu);
2929 iommu_enable_2k_int(iommu);
2930 iommu_set_device_table(iommu);
2931 amd_iommu_flush_all_caches(iommu);
2932 }
2933 }
2934}
2935
2936static void enable_iommus_ppr(void)
2937{
2938 struct amd_iommu *iommu;
2939
2940 if (!amd_iommu_gt_ppr_supported())
2941 return;
2942
2943 for_each_iommu(iommu)
2944 amd_iommu_enable_ppr_log(iommu);
2945}
2946
2947static void enable_iommus_vapic(void)
2948{
2949#ifdef CONFIG_IRQ_REMAP
2950 u32 status, i;
2951 struct amd_iommu *iommu;
2952
2953 for_each_iommu(iommu) {
2954 /*
2955 * Disable GALog if already running. It could have been enabled
2956 * in the previous boot before kdump.
2957 */
2958 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
2959 if (!(status & MMIO_STATUS_GALOG_RUN_MASK))
2960 continue;
2961
2962 iommu_feature_disable(iommu, CONTROL_GALOG_EN);
2963 iommu_feature_disable(iommu, CONTROL_GAINT_EN);
2964
2965 /*
2966		 * Poll until the GALogRun bit reads back as zero before the
2967		 * GA Log registers can safely be set or modified.
2968 */
2969 for (i = 0; i < MMIO_STATUS_TIMEOUT; ++i) {
2970 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
2971 if (!(status & MMIO_STATUS_GALOG_RUN_MASK))
2972 break;
2973 udelay(10);
2974 }
2975
2976 if (WARN_ON(i >= MMIO_STATUS_TIMEOUT))
2977 return;
2978 }
2979
2980 if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
2981 !check_feature(FEATURE_GAM_VAPIC)) {
2982 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
2983 return;
2984 }
2985
2986 if (amd_iommu_snp_en &&
2987 !FEATURE_SNPAVICSUP_GAM(amd_iommu_efr2)) {
2988 pr_warn("Force to disable Virtual APIC due to SNP\n");
2989 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
2990 return;
2991 }
2992
2993 /* Enabling GAM and SNPAVIC support */
2994 for_each_iommu(iommu) {
2995 if (iommu_init_ga_log(iommu) ||
2996 iommu_ga_log_enable(iommu))
2997 return;
2998
2999 iommu_feature_enable(iommu, CONTROL_GAM_EN);
3000 if (amd_iommu_snp_en)
3001 iommu_feature_enable(iommu, CONTROL_SNPAVIC_EN);
3002 }
3003
3004 amd_iommu_irq_ops.capability |= (1 << IRQ_POSTING_CAP);
3005 pr_info("Virtual APIC enabled\n");
3006#endif
3007}
3008
3009static void disable_iommus(void)
3010{
3011 struct amd_iommu *iommu;
3012
3013 for_each_iommu(iommu)
3014 iommu_disable(iommu);
3015
3016#ifdef CONFIG_IRQ_REMAP
3017 if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
3018 amd_iommu_irq_ops.capability &= ~(1 << IRQ_POSTING_CAP);
3019#endif
3020}
3021
3022/*
3023 * Suspend/Resume support
3024 * disable suspend until real resume implemented
3025 */
3026
3027static void amd_iommu_resume(void)
3028{
3029 struct amd_iommu *iommu;
3030
3031 for_each_iommu(iommu)
3032 iommu_apply_resume_quirks(iommu);
3033
3034 /* re-load the hardware */
3035 for_each_iommu(iommu)
3036 early_enable_iommu(iommu);
3037
3038 amd_iommu_enable_interrupts();
3039}
3040
3041static int amd_iommu_suspend(void)
3042{
3043 /* disable IOMMUs to go out of the way for BIOS */
3044 disable_iommus();
3045
3046 return 0;
3047}
3048
3049static struct syscore_ops amd_iommu_syscore_ops = {
3050 .suspend = amd_iommu_suspend,
3051 .resume = amd_iommu_resume,
3052};
3053
3054static void __init free_iommu_resources(void)
3055{
3056 free_iommu_all();
3057 free_pci_segments();
3058}
3059
3060/* SB IOAPIC is always on this device in AMD systems */
3061#define IOAPIC_SB_DEVID ((0x00 << 8) | PCI_DEVFN(0x14, 0))
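/* i.e. segment/bus 0, device 0x14, function 0 */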
3062
3063static bool __init check_ioapic_information(void)
3064{
3065 const char *fw_bug = FW_BUG;
3066 bool ret, has_sb_ioapic;
3067 int idx;
3068
3069 has_sb_ioapic = false;
3070 ret = false;
3071
3072 /*
3073 * If we have map overrides on the kernel command line the
3074 * messages in this function might not describe firmware bugs
3075 * anymore - so be careful
3076 */
3077 if (cmdline_maps)
3078 fw_bug = "";
3079
3080 for (idx = 0; idx < nr_ioapics; idx++) {
3081		int devid, id = mpc_ioapic_id(idx);
3082
3083 devid = get_ioapic_devid(id);
3084 if (devid < 0) {
3085 pr_err("%s: IOAPIC[%d] not in IVRS table\n",
3086 fw_bug, id);
3087 ret = false;
3088 } else if (devid == IOAPIC_SB_DEVID) {
3089 has_sb_ioapic = true;
3090 ret = true;
3091 }
3092 }
3093
3094 if (!has_sb_ioapic) {
3095 /*
3096 * We expect the SB IOAPIC to be listed in the IVRS
3097 * table. The system timer is connected to the SB IOAPIC
3098 * and if we don't have it in the list the system will
3099 * panic at boot time. This situation usually happens
3100 * when the BIOS is buggy and provides us the wrong
3101 * device id for the IOAPIC in the system.
3102 */
3103 pr_err("%s: No southbridge IOAPIC found\n", fw_bug);
3104 }
3105
3106 if (!ret)
3107 pr_err("Disabling interrupt remapping\n");
3108
3109 return ret;
3110}
3111
3112static void __init free_dma_resources(void)
3113{
3114	ida_destroy(&pdom_ids);
3115
3116 free_unity_maps();
3117}
3118
3119static void __init ivinfo_init(void *ivrs)
3120{
3121 amd_iommu_ivinfo = *((u32 *)(ivrs + IOMMU_IVINFO_OFFSET));
3122}
3123
3124/*
3125 * This is the hardware init function for AMD IOMMU in the system.
3126 * This function is called either from amd_iommu_init or from the interrupt
3127 * remapping setup code.
3128 *
3129 * This function basically parses the ACPI table for AMD IOMMU (IVRS)
3130 * four times:
3131 *
3132 * 1 pass) Discover the most comprehensive IVHD type to use.
3133 *
3134 * 2 pass) Find the highest PCI device id the driver has to handle.
3135 * Upon this information the size of the data structures is
3136 * determined that needs to be allocated.
3137 *
3138 * 3 pass) Initialize the data structures just allocated with the
3139 * information in the ACPI table about available AMD IOMMUs
3140 * in the system. It also maps the PCI devices in the
3141 * system to specific IOMMUs
3142 *
3143 * 4 pass) After the basic data structures are allocated and
3144 * initialized we update them with information about memory
3145 * remapping requirements parsed out of the ACPI table in
3146 * this last pass.
3147 *
3148 * After everything is set up the IOMMUs are enabled and the necessary
3149 * hotplug and suspend notifiers are registered.
3150 */
3151static int __init early_amd_iommu_init(void)
3152{
3153 struct acpi_table_header *ivrs_base;
3154 int ret;
3155 acpi_status status;
3156 u8 efr_hats;
3157
3158 if (!amd_iommu_detected)
3159 return -ENODEV;
3160
3161	status = acpi_get_table("IVRS", 0, &ivrs_base);
3162 if (status == AE_NOT_FOUND)
3163 return -ENODEV;
3164 else if (ACPI_FAILURE(status)) {
3165		const char *err = acpi_format_exception(status);
3166 pr_err("IVRS table error: %s\n", err);
3167 return -EINVAL;
3168 }
3169
3170 if (!boot_cpu_has(X86_FEATURE_CX16)) {
3171 pr_err("Failed to initialize. The CMPXCHG16B feature is required.\n");
3172 ret = -EINVAL;
3173 goto out;
3174 }
3175
3176 /*
3177 * Validate checksum here so we don't need to do it when
3178 * we actually parse the table
3179 */
3180	ret = check_ivrs_checksum(ivrs_base);
3181 if (ret)
3182 goto out;
3183
3184	ivinfo_init(ivrs_base);
3185
3186	amd_iommu_target_ivhd_type = get_highest_supported_ivhd_type(ivrs_base);
3187 DUMP_printk("Using IVHD type %#x\n", amd_iommu_target_ivhd_type);
3188
3189 /*
3190	 * Now that the data structures are allocated and basically initialized,
3191	 * start the real ACPI table scan.
3192 */
3193	ret = init_iommu_all(ivrs_base);
3194 if (ret)
3195 goto out;
3196
3197 /* 5 level guest page table */
3198 if (cpu_feature_enabled(X86_FEATURE_LA57) &&
3199 FIELD_GET(FEATURE_GATS, amd_iommu_efr) == GUEST_PGTABLE_5_LEVEL)
3200 amd_iommu_gpt_level = PAGE_MODE_5_LEVEL;
3201
3202 efr_hats = FIELD_GET(FEATURE_HATS, amd_iommu_efr);
3203 if (efr_hats != 0x3) {
3204 /*
3205		 * The EFR[HATS] field specifies the maximum host translation
3206		 * level supported, with level 4 (HATS == 0) as the base.
3207 */
3208 amd_iommu_hpt_level = efr_hats + PAGE_MODE_4_LEVEL;
3209 } else {
3210 pr_warn_once(FW_BUG "Disable host address translation due to invalid translation level (%#x).\n",
3211 efr_hats);
3212 amd_iommu_hatdis = true;
3213 }
3214
3215 if (amd_iommu_pgtable == PD_MODE_V2) {
3216 if (!amd_iommu_v2_pgtbl_supported()) {
3217 pr_warn("Cannot enable v2 page table for DMA-API. Fallback to v1.\n");
3218 amd_iommu_pgtable = PD_MODE_V1;
3219 }
3220 }
3221
3222 if (amd_iommu_hatdis) {
3223 /*
3224 * Host (v1) page table is not available. Attempt to use
3225 * Guest (v2) page table.
3226 */
3227 if (amd_iommu_v2_pgtbl_supported())
3228 amd_iommu_pgtable = PD_MODE_V2;
3229 else
3230 amd_iommu_pgtable = PD_MODE_NONE;
3231 }
3232
3233 /* Disable any previously enabled IOMMUs */
3234 if (!is_kdump_kernel() || amd_iommu_disabled)
3235 disable_iommus();
3236
3237 if (amd_iommu_irq_remap)
3238 amd_iommu_irq_remap = check_ioapic_information();
3239
3240 if (amd_iommu_irq_remap) {
3241 struct amd_iommu_pci_seg *pci_seg;
3242 ret = -ENOMEM;
3243 for_each_pci_segment(pci_seg) {
3244 if (alloc_irq_lookup_table(pci_seg))
3245 goto out;
3246 }
3247 }
3248
3249	ret = init_memory_definitions(ivrs_base);
3250 if (ret)
3251 goto out;
3252
3253 /* init the device table */
3254 init_device_table();
3255
3256out:
3257 /* Don't leak any ACPI memory */
3258	acpi_put_table(ivrs_base);
3259
3260 return ret;
3261}
3262
3263static int amd_iommu_enable_interrupts(void)
3264{
3265 struct amd_iommu *iommu;
3266 int ret = 0;
3267
3268 for_each_iommu(iommu) {
3269 ret = iommu_init_irq(iommu);
3270 if (ret)
3271 goto out;
3272 }
3273
3274 /*
3275 * Interrupt handler is ready to process interrupts. Enable
3276 * PPR and GA log interrupt for all IOMMUs.
3277 */
3278 enable_iommus_vapic();
3279 enable_iommus_ppr();
3280
3281out:
3282 return ret;
3283}
3284
3285static bool __init detect_ivrs(void)
3286{
3287 struct acpi_table_header *ivrs_base;
3288 acpi_status status;
3289 int i;
3290
3291	status = acpi_get_table("IVRS", 0, &ivrs_base);
3292 if (status == AE_NOT_FOUND)
3293 return false;
3294 else if (ACPI_FAILURE(status)) {
3295		const char *err = acpi_format_exception(status);
3296 pr_err("IVRS table error: %s\n", err);
3297 return false;
3298 }
3299
3300	acpi_put_table(ivrs_base);
3301
3302 if (amd_iommu_force_enable)
3303 goto out;
3304
3305 /* Don't use IOMMU if there is Stoney Ridge graphics */
3306 for (i = 0; i < 32; i++) {
3307 u32 pci_id;
3308
3309		pci_id = read_pci_config(0, i, 0, 0);
3310 if ((pci_id & 0xffff) == 0x1002 && (pci_id >> 16) == 0x98e4) {
3311 pr_info("Disable IOMMU on Stoney Ridge\n");
3312 return false;
3313 }
3314 }
3315
3316out:
3317 /* Make sure ACS will be enabled during PCI probe */
3318 pci_request_acs();
3319
3320 return true;
3321}
3322
3323static __init void iommu_snp_enable(void)
3324{
3325#ifdef CONFIG_KVM_AMD_SEV
3326 if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
3327 return;
3328 /*
3329	 * SNP support requires that the IOMMU is enabled and configured
3330	 * with V1 page tables (DTE[Mode] = 0 is not supported).
3331 */
3332 if (no_iommu || iommu_default_passthrough()) {
3333 pr_warn("SNP: IOMMU disabled or configured in passthrough mode, SNP cannot be supported.\n");
3334 goto disable_snp;
3335 }
3336
3337 if (amd_iommu_pgtable != PD_MODE_V1) {
3338 pr_warn("SNP: IOMMU is configured with V2 page table mode, SNP cannot be supported.\n");
3339 goto disable_snp;
3340 }
3341
3342 amd_iommu_snp_en = check_feature(FEATURE_SNP);
3343 if (!amd_iommu_snp_en) {
3344 pr_warn("SNP: IOMMU SNP feature not enabled, SNP cannot be supported.\n");
3345 goto disable_snp;
3346 }
3347
3348 /*
3349 * Enable host SNP support once SNP support is checked on IOMMU.
3350 */
3351 if (snp_rmptable_init()) {
3352 pr_warn("SNP: RMP initialization failed, SNP cannot be supported.\n");
3353 goto disable_snp;
3354 }
3355
3356 pr_info("IOMMU SNP support enabled.\n");
3357 return;
3358
3359disable_snp:
3360 cc_platform_clear(CC_ATTR_HOST_SEV_SNP);
3361#endif
3362}
3363
3364/****************************************************************************
3365 *
3366 * AMD IOMMU Initialization State Machine
3367 *
3368 ****************************************************************************/
3369
3370static int __init state_next(void)
3371{
3372 int ret = 0;
3373
3374 switch (init_state) {
3375 case IOMMU_START_STATE:
3376 if (!detect_ivrs()) {
3377 init_state = IOMMU_NOT_FOUND;
3378 ret = -ENODEV;
3379 } else {
3380 init_state = IOMMU_IVRS_DETECTED;
3381 }
3382 break;
3383 case IOMMU_IVRS_DETECTED:
3384 if (amd_iommu_disabled) {
3385 init_state = IOMMU_CMDLINE_DISABLED;
3386 ret = -EINVAL;
3387 } else {
3388 ret = early_amd_iommu_init();
3389 init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED;
3390 }
3391 break;
3392 case IOMMU_ACPI_FINISHED:
3393 early_enable_iommus();
3394 x86_platform.iommu_shutdown = disable_iommus;
3395 init_state = IOMMU_ENABLED;
3396 break;
3397 case IOMMU_ENABLED:
3398		register_syscore_ops(&amd_iommu_syscore_ops);
3399 iommu_snp_enable();
3400 ret = amd_iommu_init_pci();
3401 init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT;
3402 break;
3403 case IOMMU_PCI_INIT:
3404 ret = amd_iommu_enable_interrupts();
3405 init_state = ret ? IOMMU_INIT_ERROR : IOMMU_INTERRUPTS_EN;
3406 break;
3407 case IOMMU_INTERRUPTS_EN:
3408 init_state = IOMMU_INITIALIZED;
3409 break;
3410 case IOMMU_INITIALIZED:
3411 /* Nothing to do */
3412 break;
3413 case IOMMU_NOT_FOUND:
3414 case IOMMU_INIT_ERROR:
3415 case IOMMU_CMDLINE_DISABLED:
3416 /* Error states => do nothing */
3417 ret = -EINVAL;
3418 break;
3419 default:
3420 /* Unknown state */
3421 BUG();
3422 }
3423
3424 if (ret) {
3425 free_dma_resources();
3426 if (!irq_remapping_enabled) {
3427 disable_iommus();
3428 free_iommu_resources();
3429 } else {
3430 struct amd_iommu *iommu;
3431 struct amd_iommu_pci_seg *pci_seg;
3432
3433 for_each_pci_segment(pci_seg)
3434 uninit_device_table_dma(pci_seg);
3435
3436 for_each_iommu(iommu)
3437 amd_iommu_flush_all_caches(iommu);
3438 }
3439 }
3440 return ret;
3441}
3442
3443static int __init iommu_go_to_state(enum iommu_init_state state)
3444{
3445 int ret = -EINVAL;
3446
3447 while (init_state != state) {
3448 if (init_state == IOMMU_NOT_FOUND ||
3449 init_state == IOMMU_INIT_ERROR ||
3450 init_state == IOMMU_CMDLINE_DISABLED)
3451 break;
3452 ret = state_next();
3453 }
3454
3455 /*
3456	 * SNP platform initialization requires IOMMUs to be fully configured.
3457 * If the SNP support on IOMMUs has NOT been checked, simply mark SNP
3458 * as unsupported. If the SNP support on IOMMUs has been checked and
3459 * host SNP support enabled but RMP enforcement has not been enabled
3460 * in IOMMUs, then the system is in a half-baked state, but can limp
3461 * along as all memory should be Hypervisor-Owned in the RMP. WARN,
3462 * but leave SNP as "supported" to avoid confusing the kernel.
3463 */
3464	if (ret && cc_platform_has(CC_ATTR_HOST_SEV_SNP) &&
3465	    !WARN_ON_ONCE(amd_iommu_snp_en))
3466		cc_platform_clear(CC_ATTR_HOST_SEV_SNP);
3467
3468 return ret;
3469}
3470
3471#ifdef CONFIG_IRQ_REMAP
3472int __init amd_iommu_prepare(void)
3473{
3474 int ret;
3475
3476 amd_iommu_irq_remap = true;
3477
3478 ret = iommu_go_to_state(IOMMU_ACPI_FINISHED);
3479 if (ret) {
3480 amd_iommu_irq_remap = false;
3481 return ret;
3482 }
3483
3484 return amd_iommu_irq_remap ? 0 : -ENODEV;
3485}
3486
3487int __init amd_iommu_enable(void)
3488{
3489 int ret;
3490
3491 ret = iommu_go_to_state(IOMMU_ENABLED);
3492 if (ret)
3493 return ret;
3494
3495 irq_remapping_enabled = 1;
3496 return amd_iommu_xt_mode;
3497}
3498
3499void amd_iommu_disable(void)
3500{
3501 amd_iommu_suspend();
3502}
3503
3504int amd_iommu_reenable(int mode)
3505{
3506 amd_iommu_resume();
3507
3508 return 0;
3509}
3510
3511int amd_iommu_enable_faulting(unsigned int cpu)
3512{
3513 /* We enable MSI later when PCI is initialized */
3514 return 0;
3515}
3516#endif
3517
3518/*
3519 * This is the core init function for AMD IOMMU hardware in the system.
3520 * This function is called from the generic x86 DMA layer initialization
3521 * code.
3522 */
3523static int __init amd_iommu_init(void)
3524{
3525 int ret;
3526
3527	ret = iommu_go_to_state(IOMMU_INITIALIZED);
3528#ifdef CONFIG_GART_IOMMU
3529 if (ret && list_empty(&amd_iommu_list)) {
3530 /*
3531 * We failed to initialize the AMD IOMMU - try fallback
3532 * to GART if possible.
3533 */
3534 gart_iommu_init();
3535 }
3536#endif
3537
3538 if (!ret)
3539 amd_iommu_debugfs_setup();
3540
3541 return ret;
3542}
3543
3544static bool amd_iommu_sme_check(void)
3545{
3546	if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT) ||
3547 (boot_cpu_data.x86 != 0x17))
3548 return true;
3549
3550 /* For Fam17h, a specific level of support is required */
3551 if (boot_cpu_data.microcode >= 0x08001205)
3552 return true;
3553
3554 if ((boot_cpu_data.microcode >= 0x08001126) &&
3555 (boot_cpu_data.microcode <= 0x080011ff))
3556 return true;
3557
3558 pr_notice("IOMMU not currently supported when SME is active\n");
3559
3560 return false;
3561}
3562
3563/****************************************************************************
3564 *
3565 * Early detect code. This code runs at IOMMU detection time in the DMA
3566 * layer. It just looks if there is an IVRS ACPI table to detect AMD
3567 * IOMMUs
3568 *
3569 ****************************************************************************/
3570void __init amd_iommu_detect(void)
3571{
3572 int ret;
3573
3574 if (no_iommu || (iommu_detected && !gart_iommu_aperture))
3575 goto disable_snp;
3576
3577 if (!amd_iommu_sme_check())
3578 goto disable_snp;
3579
3580	ret = iommu_go_to_state(IOMMU_IVRS_DETECTED);
3581 if (ret)
3582 goto disable_snp;
3583
3584 amd_iommu_detected = true;
3585 iommu_detected = 1;
3586 x86_init.iommu.iommu_init = amd_iommu_init;
3587 return;
3588
3589disable_snp:
3590	if (cc_platform_has(CC_ATTR_HOST_SEV_SNP))
3591		cc_platform_clear(CC_ATTR_HOST_SEV_SNP);
3592}
3593
3594/****************************************************************************
3595 *
3596 * Parsing functions for the AMD IOMMU specific kernel command line
3597 * options.
3598 *
3599 ****************************************************************************/
3600
3601static int __init parse_amd_iommu_dump(char *str)
3602{
3603 amd_iommu_dump = true;
3604
3605 return 1;
3606}
3607
3608static int __init parse_amd_iommu_intr(char *str)
3609{
3610 for (; *str; ++str) {
3611 if (strncmp(str, "legacy", 6) == 0) {
3612 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
3613 break;
3614 }
3615 if (strncmp(str, "vapic", 5) == 0) {
3616 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
3617 break;
3618 }
3619 }
3620 return 1;
3621}
3622
3623static int __init parse_amd_iommu_options(char *str)
3624{
3625 if (!str)
3626 return -EINVAL;
3627
3628 while (*str) {
3629 if (strncmp(str, "fullflush", 9) == 0) {
3630 pr_warn("amd_iommu=fullflush deprecated; use iommu.strict=1 instead\n");
3631 iommu_set_dma_strict();
3632 } else if (strncmp(str, "force_enable", 12) == 0) {
3633 amd_iommu_force_enable = true;
3634 } else if (strncmp(str, "off", 3) == 0) {
3635 amd_iommu_disabled = true;
3636 } else if (strncmp(str, "force_isolation", 15) == 0) {
3637 amd_iommu_force_isolation = true;
3638 } else if (strncmp(str, "pgtbl_v1", 8) == 0) {
3639 amd_iommu_pgtable = PD_MODE_V1;
3640 } else if (strncmp(str, "pgtbl_v2", 8) == 0) {
3641 amd_iommu_pgtable = PD_MODE_V2;
3642 } else if (strncmp(str, "irtcachedis", 11) == 0) {
3643 amd_iommu_irtcachedis = true;
3644 } else if (strncmp(str, "nohugepages", 11) == 0) {
3645 pr_info("Restricting V1 page-sizes to 4KiB");
3646 amd_iommu_pgsize_bitmap = AMD_IOMMU_PGSIZES_4K;
3647 } else if (strncmp(str, "v2_pgsizes_only", 15) == 0) {
3648 pr_info("Restricting V1 page-sizes to 4KiB/2MiB/1GiB");
3649 amd_iommu_pgsize_bitmap = AMD_IOMMU_PGSIZES_V2;
3650 } else {
3651 pr_notice("Unknown option - '%s'\n", str);
3652 }
3653
3654 str += strcspn(str, ",");
3655 while (*str == ',')
3656 str++;
3657 }
3658
3659 return 1;
3660}
3661
3662static int __init parse_ivrs_ioapic(char *str)
3663{
3664 u32 seg = 0, bus, dev, fn;
3665 int id, i;
3666 u32 devid;
3667
3668 if (sscanf(str, "=%d@%x:%x.%x", &id, &bus, &dev, &fn) == 4 ||
3669 sscanf(str, "=%d@%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn) == 5)
3670 goto found;
3671
3672 if (sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn) == 4 ||
3673 sscanf(str, "[%d]=%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn) == 5) {
3674 pr_warn("ivrs_ioapic%s option format deprecated; use ivrs_ioapic=%d@%04x:%02x:%02x.%d instead\n",
3675 str, id, seg, bus, dev, fn);
3676 goto found;
3677 }
3678
3679 pr_err("Invalid command line: ivrs_ioapic%s\n", str);
3680 return 1;
3681
3682found:
3683 if (early_ioapic_map_size == EARLY_MAP_SIZE) {
3684 pr_err("Early IOAPIC map overflow - ignoring ivrs_ioapic%s\n",
3685 str);
3686 return 1;
3687 }
3688
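	/* For example, seg 0x0000, bus 0xf0, dev 0x1f, fn 0x7 encodes to 0x0000f0ff. */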
3689 devid = IVRS_GET_SBDF_ID(seg, bus, dev, fn);
3690
3691 cmdline_maps = true;
3692 i = early_ioapic_map_size++;
3693 early_ioapic_map[i].id = id;
3694 early_ioapic_map[i].devid = devid;
3695 early_ioapic_map[i].cmd_line = true;
3696
3697 return 1;
3698}
3699
3700static int __init parse_ivrs_hpet(char *str)
3701{
3702 u32 seg = 0, bus, dev, fn;
3703 int id, i;
3704 u32 devid;
3705
3706 if (sscanf(str, "=%d@%x:%x.%x", &id, &bus, &dev, &fn) == 4 ||
3707 sscanf(str, "=%d@%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn) == 5)
3708 goto found;
3709
3710 if (sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn) == 4 ||
3711 sscanf(str, "[%d]=%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn) == 5) {
3712 pr_warn("ivrs_hpet%s option format deprecated; use ivrs_hpet=%d@%04x:%02x:%02x.%d instead\n",
3713 str, id, seg, bus, dev, fn);
3714 goto found;
3715 }
3716
3717 pr_err("Invalid command line: ivrs_hpet%s\n", str);
3718 return 1;
3719
3720found:
3721 if (early_hpet_map_size == EARLY_MAP_SIZE) {
3722 pr_err("Early HPET map overflow - ignoring ivrs_hpet%s\n",
3723 str);
3724 return 1;
3725 }
3726
3727 devid = IVRS_GET_SBDF_ID(seg, bus, dev, fn);
3728
3729 cmdline_maps = true;
3730 i = early_hpet_map_size++;
3731 early_hpet_map[i].id = id;
3732 early_hpet_map[i].devid = devid;
3733 early_hpet_map[i].cmd_line = true;
3734
3735 return 1;
3736}
3737
3738#define ACPIID_LEN (ACPIHID_UID_LEN + ACPIHID_HID_LEN)
3739
3740static int __init parse_ivrs_acpihid(char *str)
3741{
3742 u32 seg = 0, bus, dev, fn;
3743 char *hid, *uid, *p, *addr;
3744 char acpiid[ACPIID_LEN + 1] = { }; /* size with NULL terminator */
3745 int i;
3746
3747 addr = strchr(str, '@');
3748 if (!addr) {
3749 addr = strchr(str, '=');
3750 if (!addr)
3751 goto not_found;
3752
3753 ++addr;
3754
3755 if (strlen(addr) > ACPIID_LEN)
3756 goto not_found;
3757
3758 if (sscanf(str, "[%x:%x.%x]=%s", &bus, &dev, &fn, acpiid) == 4 ||
3759 sscanf(str, "[%x:%x:%x.%x]=%s", &seg, &bus, &dev, &fn, acpiid) == 5) {
3760 pr_warn("ivrs_acpihid%s option format deprecated; use ivrs_acpihid=%s@%04x:%02x:%02x.%d instead\n",
3761 str, acpiid, seg, bus, dev, fn);
3762 goto found;
3763 }
3764 goto not_found;
3765 }
3766
3767 /* We have the '@', make it the terminator to get just the acpiid */
3768 *addr++ = 0;
3769
3770 if (strlen(str) > ACPIID_LEN)
3771 goto not_found;
3772
3773 if (sscanf(str, "=%s", acpiid) != 1)
3774 goto not_found;
3775
3776 if (sscanf(addr, "%x:%x.%x", &bus, &dev, &fn) == 3 ||
3777 sscanf(addr, "%x:%x:%x.%x", &seg, &bus, &dev, &fn) == 4)
3778 goto found;
3779
3780not_found:
3781 pr_err("Invalid command line: ivrs_acpihid%s\n", str);
3782 return 1;
3783
3784found:
3785 p = acpiid;
3786 hid = strsep(&p, ":");
3787 uid = p;
3788
3789 if (!hid || !(*hid) || !uid) {
3790 pr_err("Invalid command line: hid or uid\n");
3791 return 1;
3792 }
3793
3794 /*
3795 * Ignore leading zeroes after ':', so e.g., AMDI0095:00
3796 * will match AMDI0095:0 in the second strcmp in acpi_dev_hid_uid_match
3797 */
3798 while (*uid == '0' && *(uid + 1))
3799 uid++;
3800
3801 if (strlen(hid) >= ACPIHID_HID_LEN) {
3802 pr_err("Invalid command line: hid is too long\n");
3803 return 1;
3804 } else if (strlen(uid) >= ACPIHID_UID_LEN) {
3805 pr_err("Invalid command line: uid is too long\n");
3806 return 1;
3807 }
3808
3809 i = early_acpihid_map_size++;
3810	memcpy(early_acpihid_map[i].hid, hid, strlen(hid));
3811	memcpy(early_acpihid_map[i].uid, uid, strlen(uid));
3812 early_acpihid_map[i].devid = IVRS_GET_SBDF_ID(seg, bus, dev, fn);
3813 early_acpihid_map[i].cmd_line = true;
3814
3815 return 1;
3816}
3817
3818__setup("amd_iommu_dump", parse_amd_iommu_dump);
3819__setup("amd_iommu=", parse_amd_iommu_options);
3820__setup("amd_iommu_intr=", parse_amd_iommu_intr);
3821__setup("ivrs_ioapic", parse_ivrs_ioapic);
3822__setup("ivrs_hpet", parse_ivrs_hpet);
3823__setup("ivrs_acpihid", parse_ivrs_acpihid);
3824
3825bool amd_iommu_pasid_supported(void)
3826{
3827 /* CPU page table size should match IOMMU guest page table size */
3828 if (cpu_feature_enabled(X86_FEATURE_LA57) &&
3829 amd_iommu_gpt_level != PAGE_MODE_5_LEVEL)
3830 return false;
3831
3832 /*
3833 * Since DTE[Mode]=0 is prohibited on SNP-enabled system
3834 * (i.e. EFR[SNPSup]=1), IOMMUv2 page table cannot be used without
3835 * setting up IOMMUv1 page table.
3836 */
3837 return amd_iommu_gt_ppr_supported() && !amd_iommu_snp_en;
3838}
3839
3840struct amd_iommu *get_amd_iommu(unsigned int idx)
3841{
3842 unsigned int i = 0;
3843 struct amd_iommu *iommu;
3844
3845 for_each_iommu(iommu)
3846 if (i++ == idx)
3847 return iommu;
3848 return NULL;
3849}
3850
3851/****************************************************************************
3852 *
3853 * IOMMU EFR Performance Counter support functionality. This code allows
3854 * access to the IOMMU PC functionality.
3855 *
3856 ****************************************************************************/
3857
3858u8 amd_iommu_pc_get_max_banks(unsigned int idx)
3859{
3860 struct amd_iommu *iommu = get_amd_iommu(idx);
3861
3862 if (iommu)
3863 return iommu->max_banks;
3864
3865 return 0;
3866}
3867
3868bool amd_iommu_pc_supported(void)
3869{
3870 return amd_iommu_pc_present;
3871}
3872
3873u8 amd_iommu_pc_get_max_counters(unsigned int idx)
3874{
3875 struct amd_iommu *iommu = get_amd_iommu(idx);
3876
3877 if (iommu)
3878 return iommu->max_counters;
3879
3880 return 0;
3881}
3882
3883static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
3884 u8 fxn, u64 *value, bool is_write)
3885{
3886 u32 offset;
3887 u32 max_offset_lim;
3888
3889 /* Make sure the IOMMU PC resource is available */
3890 if (!amd_iommu_pc_present)
3891 return -ENODEV;
3892
3893 /* Check for valid iommu and pc register indexing */
3894 if (WARN_ON(!iommu || (fxn > 0x28) || (fxn & 7)))
3895 return -ENODEV;
3896
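	/*
	 * Counter banks start at MMIO offset 0x40000; each bank spans 0x1000
	 * and each counter 0x100 within its bank.
	 */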
3897 offset = (u32)(((0x40 | bank) << 12) | (cntr << 8) | fxn);
3898
3899 /* Limit the offset to the hw defined mmio region aperture */
3900 max_offset_lim = (u32)(((0x40 | iommu->max_banks) << 12) |
3901 (iommu->max_counters << 8) | 0x28);
3902 if ((offset < MMIO_CNTR_REG_OFFSET) ||
3903 (offset > max_offset_lim))
3904 return -EINVAL;
3905
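	/* Counters are 48 bits wide and are accessed as two 32-bit MMIO words. */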
3906 if (is_write) {
3907 u64 val = *value & GENMASK_ULL(47, 0);
3908
3909		writel((u32)val, iommu->mmio_base + offset);
3910		writel((val >> 32), iommu->mmio_base + offset + 4);
3911 } else {
3912		*value = readl(iommu->mmio_base + offset + 4);
3913		*value <<= 32;
3914		*value |= readl(iommu->mmio_base + offset);
3915 *value &= GENMASK_ULL(47, 0);
3916 }
3917
3918 return 0;
3919}
3920
3921int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value)
3922{
3923 if (!iommu)
3924 return -EINVAL;
3925
3926	return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, false);
3927}
3928
3929int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value)
3930{
3931 if (!iommu)
3932 return -EINVAL;
3933
3934	return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, true);
3935}
3936
3937#ifdef CONFIG_KVM_AMD_SEV
3938static int iommu_page_make_shared(void *page)
3939{
3940 unsigned long paddr, pfn;
3941
3942 paddr = iommu_virt_to_phys(page);
3943	/* The C-bit may be set in the paddr */
3944 pfn = __sme_clr(paddr) >> PAGE_SHIFT;
3945
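	/*
	 * Only a 2M-aligned PFN can start a huge RMP entry, so only those
	 * need the RMP lookup and a possible PSMASH before being made shared.
	 */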
3946 if (!(pfn % PTRS_PER_PMD)) {
3947 int ret, level;
3948 bool assigned;
3949
3950 ret = snp_lookup_rmpentry(pfn, &assigned, &level);
3951 if (ret) {
3952 pr_warn("IOMMU PFN %lx RMP lookup failed, ret %d\n", pfn, ret);
3953 return ret;
3954 }
3955
3956 if (!assigned) {
3957 pr_warn("IOMMU PFN %lx not assigned in RMP table\n", pfn);
3958 return -EINVAL;
3959 }
3960
3961 if (level > PG_LEVEL_4K) {
3962 ret = psmash(pfn);
3963 if (!ret)
3964 goto done;
3965
3966 pr_warn("PSMASH failed for IOMMU PFN %lx huge RMP entry, ret: %d, level: %d\n",
3967 pfn, ret, level);
3968 return ret;
3969 }
3970 }
3971
3972done:
3973 return rmp_make_shared(pfn, PG_LEVEL_4K);
3974}
3975
3976static int iommu_make_shared(void *va, size_t size)
3977{
3978 void *page;
3979 int ret;
3980
3981 if (!va)
3982 return 0;
3983
3984 for (page = va; page < (va + size); page += PAGE_SIZE) {
3985 ret = iommu_page_make_shared(page);
3986 if (ret)
3987 return ret;
3988 }
3989
3990 return 0;
3991}
3992
3993int amd_iommu_snp_disable(void)
3994{
3995 struct amd_iommu *iommu;
3996 int ret;
3997
3998 if (!amd_iommu_snp_en)
3999 return 0;
4000
4001 for_each_iommu(iommu) {
4002 ret = iommu_make_shared(iommu->evt_buf, EVT_BUFFER_SIZE);
4003 if (ret)
4004 return ret;
4005
4006 ret = iommu_make_shared(iommu->ppr_log, PPR_LOG_SIZE);
4007 if (ret)
4008 return ret;
4009
4010 ret = iommu_make_shared((void *)iommu->cmd_sem, PAGE_SIZE);
4011 if (ret)
4012 return ret;
4013 }
4014
4015 return 0;
4016}
4017EXPORT_SYMBOL_GPL(amd_iommu_snp_disable);
4018#endif
4019