/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright © 2006-2015, Intel Corporation.
 *
 * Authors: Ashok Raj <ashok.raj@intel.com>
 *          Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *          David Woodhouse <David.Woodhouse@intel.com>
 */

#ifndef _INTEL_IOMMU_H_
#define _INTEL_IOMMU_H_

#include <linux/types.h>
#include <linux/iova.h>
#include <linux/io.h>
#include <linux/idr.h>
#include <linux/mmu_notifier.h>
#include <linux/list.h>
#include <linux/iommu.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/dmar.h>
#include <linux/bitfield.h>
#include <linux/xarray.h>
#include <linux/perf_event.h>
#include <linux/pci.h>

#include <asm/cacheflush.h>
#include <asm/iommu.h>
#include <uapi/linux/iommufd.h>

/*
 * VT-d hardware uses 4KiB page size regardless of host page size.
 */
#define VTD_PAGE_SHIFT		(12)
#define VTD_PAGE_SIZE		(1UL << VTD_PAGE_SHIFT)
#define VTD_PAGE_MASK		(((u64)-1) << VTD_PAGE_SHIFT)
#define VTD_PAGE_ALIGN(addr)	(((addr) + VTD_PAGE_SIZE - 1) & VTD_PAGE_MASK)

#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)

#define VTD_STRIDE_SHIFT	(9)
#define VTD_STRIDE_MASK		(((u64)-1) << VTD_STRIDE_SHIFT)

#define DMA_PTE_READ		BIT_ULL(0)
#define DMA_PTE_WRITE		BIT_ULL(1)
#define DMA_PTE_LARGE_PAGE	BIT_ULL(7)
#define DMA_PTE_SNP		BIT_ULL(11)

#define DMA_FL_PTE_PRESENT	BIT_ULL(0)
#define DMA_FL_PTE_US		BIT_ULL(2)
#define DMA_FL_PTE_ACCESS	BIT_ULL(5)
#define DMA_FL_PTE_DIRTY	BIT_ULL(6)

#define DMA_SL_PTE_DIRTY_BIT	9
#define DMA_SL_PTE_DIRTY	BIT_ULL(DMA_SL_PTE_DIRTY_BIT)

#define ADDR_WIDTH_5LEVEL	(57)
#define ADDR_WIDTH_4LEVEL	(48)

#define CONTEXT_TT_MULTI_LEVEL	0
#define CONTEXT_TT_DEV_IOTLB	1
#define CONTEXT_TT_PASS_THROUGH 2
#define CONTEXT_PASIDE		BIT_ULL(3)

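/*
 * Example (illustrative only, not part of the driver API; the helper
 * name is hypothetical): composing a minimal present, writable
 * second-level PTE value for a 4KiB page from the bits above.
 */
static inline u64 example_make_sl_pte(u64 paddr)
{
	/* Page-frame address lives in bits 12-63, R/W in bits 0-1. */
	return (paddr & VTD_PAGE_MASK) | DMA_PTE_READ | DMA_PTE_WRITE;
}
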
/*
 * Intel IOMMU register specification per version 1.0 public spec.
 */
#define	DMAR_VER_REG	0x0	/* Arch version supported by this IOMMU */
#define	DMAR_CAP_REG	0x8	/* Hardware supported capabilities */
#define	DMAR_ECAP_REG	0x10	/* Extended capabilities supported */
#define	DMAR_GCMD_REG	0x18	/* Global command register */
#define	DMAR_GSTS_REG	0x1c	/* Global status register */
#define	DMAR_RTADDR_REG	0x20	/* Root entry table */
#define	DMAR_CCMD_REG	0x28	/* Context command reg */
#define	DMAR_FSTS_REG	0x34	/* Fault Status register */
#define	DMAR_FECTL_REG	0x38	/* Fault control register */
#define	DMAR_FEDATA_REG	0x3c	/* Fault event interrupt data register */
#define	DMAR_FEADDR_REG	0x40	/* Fault event interrupt addr register */
#define	DMAR_FEUADDR_REG 0x44	/* Upper address register */
#define	DMAR_PMEN_REG	0x64	/* Enable Protected Memory Region */
#define	DMAR_PLMBASE_REG 0x68	/* PMRR Low addr */
#define	DMAR_PLMLIMIT_REG 0x6c	/* PMRR low limit */
#define	DMAR_PHMBASE_REG 0x70	/* pmrr high base addr */
#define	DMAR_PHMLIMIT_REG 0x78	/* pmrr high limit */
#define DMAR_IQH_REG	0x80	/* Invalidation queue head register */
#define DMAR_IQT_REG	0x88	/* Invalidation queue tail register */
#define DMAR_IQ_SHIFT	4	/* Invalidation queue head/tail shift */
#define DMAR_IQA_REG	0x90	/* Invalidation queue addr register */
#define DMAR_ICS_REG	0x9c	/* Invalidation complete status register */
#define DMAR_IQER_REG	0xb0	/* Invalidation queue error record register */
#define DMAR_IRTA_REG	0xb8	/* Interrupt remapping table addr register */
#define DMAR_PQH_REG	0xc0	/* Page request queue head register */
#define DMAR_PQT_REG	0xc8	/* Page request queue tail register */
#define DMAR_PQA_REG	0xd0	/* Page request queue address register */
#define DMAR_PRS_REG	0xdc	/* Page request status register */
#define DMAR_PECTL_REG	0xe0	/* Page request event control register */
#define	DMAR_PEDATA_REG	0xe4	/* Page request event interrupt data register */
#define	DMAR_PEADDR_REG	0xe8	/* Page request event interrupt addr register */
#define	DMAR_PEUADDR_REG 0xec	/* Page request event Upper address register */
#define DMAR_MTRRCAP_REG 0x100	/* MTRR capability register */
#define DMAR_MTRRDEF_REG 0x108	/* MTRR default type register */
#define DMAR_MTRR_FIX64K_00000_REG 0x120 /* MTRR Fixed range registers */
#define DMAR_MTRR_FIX16K_80000_REG 0x128
#define DMAR_MTRR_FIX16K_A0000_REG 0x130
#define DMAR_MTRR_FIX4K_C0000_REG 0x138
#define DMAR_MTRR_FIX4K_C8000_REG 0x140
#define DMAR_MTRR_FIX4K_D0000_REG 0x148
#define DMAR_MTRR_FIX4K_D8000_REG 0x150
#define DMAR_MTRR_FIX4K_E0000_REG 0x158
#define DMAR_MTRR_FIX4K_E8000_REG 0x160
#define DMAR_MTRR_FIX4K_F0000_REG 0x168
#define DMAR_MTRR_FIX4K_F8000_REG 0x170
#define DMAR_MTRR_PHYSBASE0_REG 0x180 /* MTRR Variable range registers */
#define DMAR_MTRR_PHYSMASK0_REG 0x188
#define DMAR_MTRR_PHYSBASE1_REG 0x190
#define DMAR_MTRR_PHYSMASK1_REG 0x198
#define DMAR_MTRR_PHYSBASE2_REG 0x1a0
#define DMAR_MTRR_PHYSMASK2_REG 0x1a8
#define DMAR_MTRR_PHYSBASE3_REG 0x1b0
#define DMAR_MTRR_PHYSMASK3_REG 0x1b8
#define DMAR_MTRR_PHYSBASE4_REG 0x1c0
#define DMAR_MTRR_PHYSMASK4_REG 0x1c8
#define DMAR_MTRR_PHYSBASE5_REG 0x1d0
#define DMAR_MTRR_PHYSMASK5_REG 0x1d8
#define DMAR_MTRR_PHYSBASE6_REG 0x1e0
#define DMAR_MTRR_PHYSMASK6_REG 0x1e8
#define DMAR_MTRR_PHYSBASE7_REG 0x1f0
#define DMAR_MTRR_PHYSMASK7_REG 0x1f8
#define DMAR_MTRR_PHYSBASE8_REG 0x200
#define DMAR_MTRR_PHYSMASK8_REG 0x208
#define DMAR_MTRR_PHYSBASE9_REG 0x210
#define DMAR_MTRR_PHYSMASK9_REG 0x218
#define DMAR_PERFCAP_REG	0x300
#define DMAR_PERFCFGOFF_REG	0x310
#define DMAR_PERFOVFOFF_REG	0x318
#define DMAR_PERFCNTROFF_REG	0x31c
#define DMAR_PERFINTRSTS_REG	0x324
#define DMAR_PERFINTRCTL_REG	0x328
#define DMAR_PERFEVNTCAP_REG	0x380
#define DMAR_ECMD_REG		0x400
#define DMAR_ECEO_REG		0x408
#define DMAR_ECRSP_REG		0x410
#define DMAR_ECCAP_REG		0x430

#define DMAR_IQER_REG_IQEI(reg)		FIELD_GET(GENMASK_ULL(3, 0), reg)
#define DMAR_IQER_REG_ITESID(reg)	FIELD_GET(GENMASK_ULL(47, 32), reg)
#define DMAR_IQER_REG_ICESID(reg)	FIELD_GET(GENMASK_ULL(63, 48), reg)

#define OFFSET_STRIDE		(9)

#define dmar_readq(a) readq(a)
#define dmar_writeq(a,v) writeq(v,a)
#define dmar_readl(a) readl(a)
#define dmar_writel(a, v) writel(v, a)

#define DMAR_VER_MAJOR(v)		(((v) & 0xf0) >> 4)
#define DMAR_VER_MINOR(v)		((v) & 0x0f)

/*
 * Decoding Capability Register
 */
#define cap_esrtps(c)		(((c) >> 63) & 1)
#define cap_esirtps(c)		(((c) >> 62) & 1)
#define cap_ecmds(c)		(((c) >> 61) & 1)
#define cap_fl5lp_support(c)	(((c) >> 60) & 1)
#define cap_pi_support(c)	(((c) >> 59) & 1)
#define cap_fl1gp_support(c)	(((c) >> 56) & 1)
#define cap_read_drain(c)	(((c) >> 55) & 1)
#define cap_write_drain(c)	(((c) >> 54) & 1)
#define cap_max_amask_val(c)	(((c) >> 48) & 0x3f)
#define cap_num_fault_regs(c)	((((c) >> 40) & 0xff) + 1)
#define cap_pgsel_inv(c)	(((c) >> 39) & 1)

#define cap_super_page_val(c)	(((c) >> 34) & 0xf)

#define cap_fault_reg_offset(c)	((((c) >> 24) & 0x3ff) * 16)
#define cap_max_fault_reg_offset(c) \
	(cap_fault_reg_offset(c) + cap_num_fault_regs(c) * 16)

#define cap_zlr(c)		(((c) >> 22) & 1)
#define cap_isoch(c)		(((c) >> 23) & 1)
#define cap_mgaw(c)		((((c) >> 16) & 0x3f) + 1)
#define cap_sagaw(c)		(((c) >> 8) & 0x1f)
#define cap_caching_mode(c)	(((c) >> 7) & 1)
#define cap_phmr(c)		(((c) >> 6) & 1)
#define cap_plmr(c)		(((c) >> 5) & 1)
#define cap_rwbf(c)		(((c) >> 4) & 1)
#define cap_afl(c)		(((c) >> 3) & 1)
#define cap_ndoms(c)		(((unsigned long)1) << (4 + 2 * ((c) & 0x7)))
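
/*
 * Example (illustrative sketch; the helper name is hypothetical): a
 * capability check built from the decode macros above, testing whether
 * first-level 5-level paging is advertised and the maximum guest
 * address width covers a 57-bit address space.
 */
static inline bool example_cap_has_5level_fl(u64 cap)
{
	return cap_fl5lp_support(cap) && cap_mgaw(cap) >= ADDR_WIDTH_5LEVEL;
}
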
/*
 * Extended Capability Register
 */

#define ecap_pms(e)		(((e) >> 51) & 0x1)
#define ecap_rps(e)		(((e) >> 49) & 0x1)
#define ecap_smpwc(e)		(((e) >> 48) & 0x1)
#define ecap_flts(e)		(((e) >> 47) & 0x1)
#define ecap_slts(e)		(((e) >> 46) & 0x1)
#define ecap_slads(e)		(((e) >> 45) & 0x1)
#define ecap_smts(e)		(((e) >> 43) & 0x1)
#define ecap_dit(e)		(((e) >> 41) & 0x1)
#define ecap_pds(e)		(((e) >> 42) & 0x1)
#define ecap_pasid(e)		(((e) >> 40) & 0x1)
#define ecap_pss(e)		(((e) >> 35) & 0x1f)
#define ecap_eafs(e)		(((e) >> 34) & 0x1)
#define ecap_nwfs(e)		(((e) >> 33) & 0x1)
#define ecap_srs(e)		(((e) >> 31) & 0x1)
#define ecap_ers(e)		(((e) >> 30) & 0x1)
#define ecap_prs(e)		(((e) >> 29) & 0x1)
#define ecap_broken_pasid(e)	(((e) >> 28) & 0x1)
#define ecap_dis(e)		(((e) >> 27) & 0x1)
#define ecap_nest(e)		(((e) >> 26) & 0x1)
#define ecap_mts(e)		(((e) >> 25) & 0x1)
#define ecap_iotlb_offset(e)	((((e) >> 8) & 0x3ff) * 16)
#define ecap_max_iotlb_offset(e) (ecap_iotlb_offset(e) + 16)
#define ecap_coherent(e)	((e) & 0x1)
#define ecap_qis(e)		((e) & 0x2)
#define ecap_pass_through(e)	(((e) >> 6) & 0x1)
#define ecap_eim_support(e)	(((e) >> 4) & 0x1)
#define ecap_ir_support(e)	(((e) >> 3) & 0x1)
#define ecap_dev_iotlb_support(e)	(((e) >> 2) & 0x1)
#define ecap_max_handle_mask(e) (((e) >> 20) & 0xf)
#define ecap_sc_support(e)	(((e) >> 7) & 0x1) /* Snooping Control */

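/*
 * Example (illustrative; hypothetical helper): the scalable-mode PASID
 * gate expressed directly on a raw extended-capability value. The
 * driver folds the intel_iommu_sm knob into the same test via the
 * sm_supported()/pasid_supported() macros further down.
 */
static inline bool example_ecap_allows_pasid(u64 ecap)
{
	return ecap_smts(ecap) && ecap_pasid(ecap);
}
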
/*
 * Decoding Perf Capability Register
 */
#define pcap_num_cntr(p)	((p) & 0xffff)
#define pcap_cntr_width(p)	(((p) >> 16) & 0x7f)
#define pcap_num_event_group(p)	(((p) >> 24) & 0x1f)
#define pcap_filters_mask(p)	(((p) >> 32) & 0x1f)
#define pcap_interrupt(p)	(((p) >> 50) & 0x1)
/* The counter stride is calculated as 2 ^ (x+10) bytes */
#define pcap_cntr_stride(p)	(1ULL << ((((p) >> 52) & 0x7) + 10))

/*
 * Decoding Perf Event Capability Register
 */
#define pecap_es(p)		((p) & 0xfffffff)

/* Virtual command interface capability */
#define vccap_pasid(v)		(((v) & DMA_VCS_PAS)) /* PASID allocation */

/* IOTLB_REG */
#define DMA_TLB_FLUSH_GRANU_OFFSET  60
#define DMA_TLB_GLOBAL_FLUSH (((u64)1) << 60)
#define DMA_TLB_DSI_FLUSH (((u64)2) << 60)
#define DMA_TLB_PSI_FLUSH (((u64)3) << 60)
#define DMA_TLB_IIRG(type) ((type >> 60) & 3)
#define DMA_TLB_IAIG(val) (((val) >> 57) & 3)
#define DMA_TLB_READ_DRAIN (((u64)1) << 49)
#define DMA_TLB_WRITE_DRAIN (((u64)1) << 48)
#define DMA_TLB_DID(id)	(((u64)((id) & 0xffff)) << 32)
#define DMA_TLB_IVT (((u64)1) << 63)
#define DMA_TLB_IH_NONLEAF (((u64)1) << 6)
#define DMA_TLB_MAX_SIZE (0x3f)
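
/*
 * Example (sketch; the helper name is hypothetical): composing the
 * value written to the IOTLB invalidate register for a
 * domain-selective flush. IVT arms the invalidation; the granularity
 * and domain id select its scope.
 */
static inline u64 example_iotlb_dsi_cmd(u16 did)
{
	return DMA_TLB_IVT | DMA_TLB_DSI_FLUSH | DMA_TLB_DID(did);
}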

/* INVALID_DESC */
#define DMA_CCMD_INVL_GRANU_OFFSET  61
#define DMA_ID_TLB_GLOBAL_FLUSH	(((u64)1) << 4)
#define DMA_ID_TLB_DSI_FLUSH	(((u64)2) << 4)
#define DMA_ID_TLB_PSI_FLUSH	(((u64)3) << 4)
#define DMA_ID_TLB_READ_DRAIN	(((u64)1) << 7)
#define DMA_ID_TLB_WRITE_DRAIN	(((u64)1) << 6)
#define DMA_ID_TLB_DID(id)	(((u64)((id & 0xffff) << 16)))
#define DMA_ID_TLB_IH_NONLEAF	(((u64)1) << 6)
#define DMA_ID_TLB_ADDR(addr)	(addr)
#define DMA_ID_TLB_ADDR_MASK(mask)	(mask)

/* PMEN_REG */
#define DMA_PMEN_EPM (((u32)1)<<31)
#define DMA_PMEN_PRS (((u32)1)<<0)

/* GCMD_REG */
#define DMA_GCMD_TE (((u32)1) << 31)
#define DMA_GCMD_SRTP (((u32)1) << 30)
#define DMA_GCMD_SFL (((u32)1) << 29)
#define DMA_GCMD_EAFL (((u32)1) << 28)
#define DMA_GCMD_WBF (((u32)1) << 27)
#define DMA_GCMD_QIE (((u32)1) << 26)
#define DMA_GCMD_SIRTP (((u32)1) << 24)
#define DMA_GCMD_IRE (((u32)1) << 25)
#define DMA_GCMD_CFI (((u32)1) << 23)

/* GSTS_REG */
#define DMA_GSTS_TES (((u32)1) << 31)
#define DMA_GSTS_RTPS (((u32)1) << 30)
#define DMA_GSTS_FLS (((u32)1) << 29)
#define DMA_GSTS_AFLS (((u32)1) << 28)
#define DMA_GSTS_WBFS (((u32)1) << 27)
#define DMA_GSTS_QIES (((u32)1) << 26)
#define DMA_GSTS_IRTPS (((u32)1) << 24)
#define DMA_GSTS_IRES (((u32)1) << 25)
#define DMA_GSTS_CFIS (((u32)1) << 23)

/* DMA_RTADDR_REG */
#define DMA_RTADDR_SMT (((u64)1) << 10)

/* CCMD_REG */
#define DMA_CCMD_ICC (((u64)1) << 63)
#define DMA_CCMD_GLOBAL_INVL (((u64)1) << 61)
#define DMA_CCMD_DOMAIN_INVL (((u64)2) << 61)
#define DMA_CCMD_DEVICE_INVL (((u64)3) << 61)
#define DMA_CCMD_FM(m) (((u64)((m) & 0x3)) << 32)
#define DMA_CCMD_MASK_NOBIT 0
#define DMA_CCMD_MASK_1BIT 1
#define DMA_CCMD_MASK_2BIT 2
#define DMA_CCMD_MASK_3BIT 3
#define DMA_CCMD_SID(s) (((u64)((s) & 0xffff)) << 16)
#define DMA_CCMD_DID(d) ((u64)((d) & 0xffff))

/* ECMD_REG */
#define DMA_MAX_NUM_ECMD		256
#define DMA_MAX_NUM_ECMDCAP		(DMA_MAX_NUM_ECMD / 64)
#define DMA_ECMD_REG_STEP		8
#define DMA_ECMD_ENABLE			0xf0
#define DMA_ECMD_DISABLE		0xf1
#define DMA_ECMD_FREEZE			0xf4
#define DMA_ECMD_UNFREEZE		0xf5
#define DMA_ECMD_OA_SHIFT		16
#define DMA_ECMD_ECRSP_IP		0x1
#define DMA_ECMD_ECCAP3			3
#define DMA_ECMD_ECCAP3_ECNTS		BIT_ULL(48)
#define DMA_ECMD_ECCAP3_DCNTS		BIT_ULL(49)
#define DMA_ECMD_ECCAP3_FCNTS		BIT_ULL(52)
#define DMA_ECMD_ECCAP3_UFCNTS		BIT_ULL(53)
#define DMA_ECMD_ECCAP3_ESSENTIAL	(DMA_ECMD_ECCAP3_ECNTS |	\
					 DMA_ECMD_ECCAP3_DCNTS |	\
					 DMA_ECMD_ECCAP3_FCNTS |	\
					 DMA_ECMD_ECCAP3_UFCNTS)

/* FECTL_REG */
#define DMA_FECTL_IM (((u32)1) << 31)

/* FSTS_REG */
#define DMA_FSTS_PFO (1 << 0) /* Primary Fault Overflow */
#define DMA_FSTS_PPF (1 << 1) /* Primary Pending Fault */
#define DMA_FSTS_IQE (1 << 4) /* Invalidation Queue Error */
#define DMA_FSTS_ICE (1 << 5) /* Invalidation Completion Error */
#define DMA_FSTS_ITE (1 << 6) /* Invalidation Time-out Error */
#define DMA_FSTS_PRO (1 << 7) /* Page Request Overflow */
#define dma_fsts_fault_record_index(s) (((s) >> 8) & 0xff)

/* FRCD_REG, 32 bits access */
#define DMA_FRCD_F (((u32)1) << 31)
#define dma_frcd_type(d) ((d >> 30) & 1)
#define dma_frcd_fault_reason(c) (c & 0xff)
#define dma_frcd_source_id(c) (c & 0xffff)
#define dma_frcd_pasid_value(c) (((c) >> 8) & 0xfffff)
#define dma_frcd_pasid_present(c) (((c) >> 31) & 1)
/* low 64 bit */
#define dma_frcd_page_addr(d) (d & (((u64)-1) << PAGE_SHIFT))

/* PRS_REG */
#define DMA_PRS_PPR	((u32)1)
#define DMA_PRS_PRO	((u32)2)

#define DMA_VCS_PAS	((u64)1)

/* PERFINTRSTS_REG */
#define DMA_PERFINTRSTS_PIS	((u32)1)

#define IOMMU_WAIT_OP(iommu, offset, op, cond, sts)			\
do {									\
	cycles_t start_time = get_cycles();				\
	while (1) {							\
		sts = op(iommu->reg + offset);				\
		if (cond)						\
			break;						\
		if (DMAR_OPERATION_TIMEOUT < (get_cycles() - start_time))\
			panic("DMAR hardware is malfunctioning\n");	\
		cpu_relax();						\
	}								\
} while (0)

#define QI_LENGTH	256	/* queue length */

enum {
	QI_FREE,
	QI_IN_USE,
	QI_DONE,
	QI_ABORT
};

#define QI_CC_TYPE		0x1
#define QI_IOTLB_TYPE		0x2
#define QI_DIOTLB_TYPE		0x3
#define QI_IEC_TYPE		0x4
#define QI_IWD_TYPE		0x5
#define QI_EIOTLB_TYPE		0x6
#define QI_PC_TYPE		0x7
#define QI_DEIOTLB_TYPE		0x8
#define QI_PGRP_RESP_TYPE	0x9
#define QI_PSTRM_RESP_TYPE	0xa

#define QI_IEC_SELECTIVE	(((u64)1) << 4)
#define QI_IEC_IIDEX(idx)	(((u64)(idx & 0xffff) << 32))
#define QI_IEC_IM(m)		(((u64)(m & 0x1f) << 27))

#define QI_IWD_STATUS_DATA(d)	(((u64)d) << 32)
#define QI_IWD_STATUS_WRITE	(((u64)1) << 5)
#define QI_IWD_FENCE		(((u64)1) << 6)
#define QI_IWD_PRQ_DRAIN	(((u64)1) << 7)

#define QI_IOTLB_DID(did)	(((u64)did) << 16)
#define QI_IOTLB_DR(dr)		(((u64)dr) << 7)
#define QI_IOTLB_DW(dw)		(((u64)dw) << 6)
#define QI_IOTLB_GRAN(gran)	(((u64)gran) >> (DMA_TLB_FLUSH_GRANU_OFFSET-4))
#define QI_IOTLB_ADDR(addr)	(((u64)addr) & VTD_PAGE_MASK)
#define QI_IOTLB_IH(ih)		(((u64)ih) << 6)
#define QI_IOTLB_AM(am)		(((u8)am) & 0x3f)

#define QI_CC_FM(fm)		(((u64)fm) << 48)
#define QI_CC_SID(sid)		(((u64)sid) << 32)
#define QI_CC_DID(did)		(((u64)did) << 16)
#define QI_CC_GRAN(gran)	(((u64)gran) >> (DMA_CCMD_INVL_GRANU_OFFSET-4))

#define QI_DEV_IOTLB_SID(sid)	((u64)((sid) & 0xffff) << 32)
#define QI_DEV_IOTLB_QDEP(qdep)	(((qdep) & 0x1f) << 16)
#define QI_DEV_IOTLB_ADDR(addr)	((u64)(addr) & VTD_PAGE_MASK)
#define QI_DEV_IOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | \
				   ((u64)((pfsid >> 4) & 0xfff) << 52))
#define QI_DEV_IOTLB_SIZE	1
#define QI_DEV_IOTLB_MAX_INVS	32

#define QI_PC_PASID(pasid)	(((u64)pasid) << 32)
#define QI_PC_DID(did)		(((u64)did) << 16)
#define QI_PC_GRAN(gran)	(((u64)gran) << 4)

/* PASID cache invalidation granu */
#define QI_PC_ALL_PASIDS	0
#define QI_PC_PASID_SEL		1
#define QI_PC_GLOBAL		3

#define QI_EIOTLB_ADDR(addr)	((u64)(addr) & VTD_PAGE_MASK)
#define QI_EIOTLB_IH(ih)	(((u64)ih) << 6)
#define QI_EIOTLB_AM(am)	(((u64)am) & 0x3f)
#define QI_EIOTLB_PASID(pasid)	(((u64)pasid) << 32)
#define QI_EIOTLB_DID(did)	(((u64)did) << 16)
#define QI_EIOTLB_GRAN(gran)	(((u64)gran) << 4)

/* QI Dev-IOTLB inv granu */
#define QI_DEV_IOTLB_GRAN_ALL		1
#define QI_DEV_IOTLB_GRAN_PASID_SEL	0

#define QI_DEV_EIOTLB_ADDR(a)	((u64)(a) & VTD_PAGE_MASK)
#define QI_DEV_EIOTLB_SIZE	(((u64)1) << 11)
#define QI_DEV_EIOTLB_PASID(p)	((u64)((p) & 0xfffff) << 32)
#define QI_DEV_EIOTLB_SID(sid)	((u64)((sid) & 0xffff) << 16)
#define QI_DEV_EIOTLB_QDEP(qd)	((u64)((qd) & 0x1f) << 4)
#define QI_DEV_EIOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | \
				    ((u64)((pfsid >> 4) & 0xfff) << 52))
#define QI_DEV_EIOTLB_MAX_INVS	32

/* Page group response descriptor QW0 */
#define QI_PGRP_PASID_P(p)	(((u64)(p)) << 4)
#define QI_PGRP_RESP_CODE(res)	(((u64)(res)) << 12)
#define QI_PGRP_DID(rid)	(((u64)(rid)) << 16)
#define QI_PGRP_PASID(pasid)	(((u64)(pasid)) << 32)

/* Page group response descriptor QW1 */
#define QI_PGRP_IDX(idx)	(((u64)(idx)) << 3)


#define QI_RESP_SUCCESS		0x0
#define QI_RESP_INVALID		0x1
#define QI_RESP_FAILURE		0xf

#define QI_GRAN_NONG_PASID		2
#define QI_GRAN_PSI_PASID		3

#define qi_shift(iommu)		(DMAR_IQ_SHIFT + !!ecap_smts((iommu)->ecap))

struct qi_desc {
	u64 qw0;
	u64 qw1;
	u64 qw2;
	u64 qw3;
};

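/*
 * Example (sketch; illustrative only): filling an invalidation-wait
 * descriptor that asks the hardware to store QI_DONE to a status
 * variable on completion. The driver builds equivalent descriptors
 * inside qi_submit_sync().
 */
static inline void example_build_wait_desc(struct qi_desc *desc, int *status)
{
	desc->qw0 = QI_IWD_STATUS_DATA(QI_DONE) | QI_IWD_STATUS_WRITE |
		    QI_IWD_TYPE;
	desc->qw1 = virt_to_phys(status);	/* status write-back address */
	desc->qw2 = 0;
	desc->qw3 = 0;
}
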
struct q_inval {
	raw_spinlock_t  q_lock;
	void		*desc;          /* invalidation queue */
	int             *desc_status;   /* desc status */
	int             free_head;      /* first free entry */
	int             free_tail;      /* last free entry */
	int             free_cnt;
};

/* Page Request Queue depth */
#define PRQ_ORDER	4
#define PRQ_SIZE	(SZ_4K << PRQ_ORDER)
#define PRQ_RING_MASK	(PRQ_SIZE - 0x20)
#define PRQ_DEPTH	(PRQ_SIZE >> 5)
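
/*
 * Worked numbers for the defines above: PRQ_ORDER 4 makes the ring
 * SZ_4K << 4 = 64KiB. Each page request descriptor is 32 bytes, so
 * PRQ_DEPTH = 64KiB >> 5 = 2048 entries, and PRQ_RING_MASK masks a
 * 32-byte-aligned head/tail offset modulo the ring size.
 */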

struct dmar_pci_notify_info;

#ifdef CONFIG_IRQ_REMAP
#define INTR_REMAP_TABLE_REG_SIZE	0xf
#define INTR_REMAP_TABLE_REG_SIZE_MASK  0xf

#define INTR_REMAP_TABLE_ENTRIES	65536

struct irq_domain;

struct ir_table {
	struct irte *base;
	unsigned long *bitmap;
};

void intel_irq_remap_add_device(struct dmar_pci_notify_info *info);
#else
static inline void
intel_irq_remap_add_device(struct dmar_pci_notify_info *info) { }
#endif

struct iommu_flush {
	void (*flush_context)(struct intel_iommu *iommu, u16 did, u16 sid,
			      u8 fm, u64 type);
	void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
			    unsigned int size_order, u64 type);
};

enum {
	SR_DMAR_FECTL_REG,
	SR_DMAR_FEDATA_REG,
	SR_DMAR_FEADDR_REG,
	SR_DMAR_FEUADDR_REG,
	MAX_SR_DMAR_REGS
};

#define VTD_FLAG_TRANS_PRE_ENABLED	(1 << 0)
#define VTD_FLAG_IRQ_REMAP_PRE_ENABLED	(1 << 1)
#define VTD_FLAG_SVM_CAPABLE		(1 << 2)

#define sm_supported(iommu)	(intel_iommu_sm && ecap_smts((iommu)->ecap))
#define pasid_supported(iommu)	(sm_supported(iommu) &&			\
				 ecap_pasid((iommu)->ecap))
#define ssads_supported(iommu)	(sm_supported(iommu) &&			\
				 ecap_slads((iommu)->ecap) &&		\
				 ecap_smpwc((iommu)->ecap))
#define nested_supported(iommu)	(sm_supported(iommu) &&			\
				 ecap_nest((iommu)->ecap))

struct pasid_entry;
struct pasid_state_entry;
struct page_req_dsc;

/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
	u64     lo;
	u64     hi;
};

/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: aval
 * 8-23: domain id
 */
struct context_entry {
	u64 lo;
	u64 hi;
};

struct iommu_domain_info {
	struct intel_iommu *iommu;
	unsigned int refcnt;		/* Refcount of devices per iommu */
	u16 did;			/* Domain ids per IOMMU. Use u16 since
					 * domain ids are 16 bit wide according
					 * to VT-d spec, section 9.3 */
};

/*
 * We start simply by using a fixed size for the batched descriptors. This
 * size is currently sufficient for our needs. Future improvements could
 * involve dynamically allocating the batch buffer based on actual demand,
 * allowing us to adjust the batch size for optimal performance in different
 * scenarios.
 */
#define QI_MAX_BATCHED_DESC_COUNT 16
struct qi_batch {
	struct qi_desc descs[QI_MAX_BATCHED_DESC_COUNT];
	unsigned int index;
};

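/*
 * Example (sketch; hypothetical helper): the accumulate side of the
 * batching pattern. The driver's cache-flush paths append descriptors
 * like this and submit the whole array once it fills up or the flush
 * operation completes.
 */
static inline bool example_batch_add(struct qi_batch *batch,
				     const struct qi_desc *desc)
{
	if (batch->index >= QI_MAX_BATCHED_DESC_COUNT)
		return false;	/* caller must submit and reset first */
	batch->descs[batch->index++] = *desc;
	return true;
}
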
struct dmar_domain {
	int	nid;			/* node id */
	struct xarray iommu_array;	/* Attached IOMMU array */

	u8 iommu_coherency: 1;		/* indicate coherency of iommu access */
	u8 force_snooping : 1;		/* Create IOPTEs with snoop control */
	u8 set_pte_snp:1;
	u8 use_first_level:1;		/* DMA translation for the domain goes
					 * through the first level page table,
					 * otherwise, goes through the second
					 * level.
					 */
	u8 dirty_tracking:1;		/* Dirty tracking is enabled */
	u8 nested_parent:1;		/* Has other domains nested on it */
	u8 has_mappings:1;		/* Has mappings configured through
					 * iommu_map() interface.
					 */
	u8 iotlb_sync_map:1;		/* Need to flush IOTLB cache or write
					 * buffer when creating mappings.
					 */

	spinlock_t lock;		/* Protect device tracking lists */
	struct list_head devices;	/* all devices' list */
	struct list_head dev_pasids;	/* all attached pasids */

	spinlock_t cache_lock;		/* Protect the cache tag list */
	struct list_head cache_tags;	/* Cache tag list */
	struct qi_batch *qi_batch;	/* Batched QI descriptors */

	int		iommu_superpage;/* Level of superpages supported:
					   0 == 4KiB (no superpages), 1 == 2MiB,
					   2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
	union {
		/* DMA remapping domain */
		struct {
			/* virtual address */
			struct dma_pte	*pgd;
			/* max guest address width */
			int		gaw;
			/*
			 * adjusted guest address width:
			 *   0: level 2 30-bit
			 *   1: level 3 39-bit
			 *   2: level 4 48-bit
			 *   3: level 5 57-bit
			 */
			int		agaw;
			/* maximum mapped address */
			u64		max_addr;
			/* Protect the s1_domains list */
			spinlock_t	s1_lock;
			/* Track s1_domains nested on this domain */
			struct list_head s1_domains;
		};

		/* Nested user domain */
		struct {
			/* parent page table which the user domain is nested on */
			struct dmar_domain *s2_domain;
			/* page table attributes */
			struct iommu_hwpt_vtd_s1 s1_cfg;
			/* link to parent domain siblings */
			struct list_head s2_link;
		};

		/* SVA domain */
		struct {
			struct mmu_notifier notifier;
		};
	};

	struct iommu_domain domain;	/* generic domain data structure for
					   iommu core */
};

/*
 * In theory, the VT-d 4.0 spec can support up to 2 ^ 16 counters.
 * But in practice, there are only 14 counters for the existing
 * platform. Setting the max number of counters to 64 should be good
 * enough for a long time. Also, supporting more than 64 counters
 * requires more extras, e.g., extra freeze and overflow registers,
 * which is not necessary for now.
 */
#define IOMMU_PMU_IDX_MAX		64

struct iommu_pmu {
	struct intel_iommu	*iommu;
	u32			num_cntr;	/* Number of counters */
	u32			num_eg;		/* Number of event group */
	u32			cntr_width;	/* Counter width */
	u32			cntr_stride;	/* Counter Stride */
	u32			filter;		/* Bitmask of filter support */
	void __iomem		*base;		/* the PerfMon base address */
	void __iomem		*cfg_reg;	/* counter configuration base address */
	void __iomem		*cntr_reg;	/* counter 0 address */
	void __iomem		*overflow;	/* overflow status register */

	u64			*evcap;		/* Indicates all supported events */
	u32			**cntr_evcap;	/* Supported events of each counter. */

	struct pmu		pmu;
	DECLARE_BITMAP(used_mask, IOMMU_PMU_IDX_MAX);
	struct perf_event	*event_list[IOMMU_PMU_IDX_MAX];
	unsigned char		irq_name[16];
};

#define IOMMU_IRQ_ID_OFFSET_PRQ		(DMAR_UNITS_SUPPORTED)
#define IOMMU_IRQ_ID_OFFSET_PERF	(2 * DMAR_UNITS_SUPPORTED)

struct intel_iommu {
	void __iomem	*reg; /* Pointer to hardware regs, virtual addr */
	u64		reg_phys; /* physical address of hw register set */
	u64		reg_size; /* size of hw register set */
	u64		cap;
	u64		ecap;
	u64		vccap;
	u64		ecmdcap[DMA_MAX_NUM_ECMDCAP];
	u32		gcmd; /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */
	raw_spinlock_t	register_lock; /* protect register handling */
	int		seq_id;	/* sequence id of the iommu */
	int		agaw; /* agaw of this iommu */
	int		msagaw; /* max sagaw of this iommu */
	unsigned int	irq, pr_irq, perf_irq;
	u16		segment;     /* PCI segment# */
	unsigned char	name[16];    /* Device Name */

#ifdef CONFIG_INTEL_IOMMU
	/* mutex to protect domain_ida */
	struct mutex	did_lock;
	struct ida	domain_ida; /* domain id allocator */
	unsigned long	*copied_tables; /* bitmap of copied tables */
	spinlock_t	lock; /* protect context, domain ids */
	struct root_entry *root_entry; /* virtual address */

	struct iommu_flush flush;
#endif
	struct page_req_dsc *prq;
	unsigned char prq_name[16];    /* Name for PRQ interrupt */
	unsigned long prq_seq_number;
	struct completion prq_complete;
	struct iopf_queue *iopf_queue;
	unsigned char iopfq_name[16];
	/* Synchronization between fault report and iommu device release. */
	struct mutex iopf_lock;
	struct q_inval  *qi;            /* Queued invalidation info */
	u32 iommu_state[MAX_SR_DMAR_REGS]; /* Store iommu states between suspend and resume. */

	/* rb tree for all probed devices */
	struct rb_root device_rbtree;
	/* protect the device_rbtree */
	spinlock_t device_rbtree_lock;

#ifdef CONFIG_IRQ_REMAP
	struct ir_table *ir_table;	/* Interrupt remapping info */
	struct irq_domain *ir_domain;
#endif
	struct iommu_device iommu;  /* IOMMU core code handle */
	int		node;
	u32		flags;      /* Software defined flags */

	struct dmar_drhd_unit *drhd;
	void *perf_statistic;

	struct iommu_pmu *pmu;
};

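/*
 * Example (illustrative sketch, not the driver's actual helper): the
 * global-command handshake pattern used with DMAR_GCMD_REG. Software
 * writes the cached command bits plus the new one, then polls
 * DMAR_GSTS_REG via IOMMU_WAIT_OP() until the matching status bit is
 * reflected. Shown for queued-invalidation enable; assumes the usual
 * timer/cycles headers are available, as in the driver's .c files.
 */
static inline void example_enable_qi_gcmd(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	dmar_writel(iommu->reg + DMAR_GCMD_REG, iommu->gcmd | DMA_GCMD_QIE);
	/* Spin until hardware acknowledges the command in GSTS. */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl,
		      (sts & DMA_GSTS_QIES), sts);
	iommu->gcmd |= DMA_GCMD_QIE;
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}
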
/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	u32 segment;		/* PCI segment number */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	u16 pfsid;		/* SRIOV physical function source ID */
	u8 pasid_supported:3;
	u8 pasid_enabled:1;
	u8 pri_supported:1;
	u8 pri_enabled:1;
	u8 ats_supported:1;
	u8 ats_enabled:1;
	u8 dtlb_extra_inval:1;	/* Quirk for devices need extra flush */
	u8 domain_attached:1;	/* Device has domain attached */
	u8 ats_qdep;
	unsigned int iopf_refcount;
	struct device *dev; /* it's NULL for PCIe-to-PCI bridge */
	struct intel_iommu *iommu; /* IOMMU used by this device */
	struct dmar_domain *domain; /* pointer to domain */
	struct pasid_table *pasid_table; /* pasid table */
	/* device tracking node (lookup by PCI RID) */
	struct rb_node node;
#ifdef CONFIG_INTEL_IOMMU_DEBUGFS
	struct dentry *debugfs_dentry; /* pointer to device directory dentry */
#endif
};

struct dev_pasid_info {
	struct list_head link_domain;	/* link to domain siblings */
	struct device *dev;
	ioasid_t pasid;
#ifdef CONFIG_INTEL_IOMMU_DEBUGFS
	struct dentry *debugfs_dentry; /* pointer to pasid directory dentry */
#endif
};

static inline void __iommu_flush_cache(
	struct intel_iommu *iommu, void *addr, int size)
{
	if (!ecap_coherent(iommu->ecap))
		clflush_cache_range(addr, size);
}

/* Convert generic struct iommu_domain to private struct dmar_domain */
static inline struct dmar_domain *to_dmar_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct dmar_domain, domain);
}

/*
 * Domain ID 0 and 1 are reserved:
 *
 * If Caching mode is set, then invalid translations are tagged
 * with domain-id 0, hence we need to pre-allocate it. We also
 * use domain-id 0 as a marker for non-allocated domain-id, so
 * make sure it is not used for a real domain.
 *
 * Vt-d spec rev3.0 (section 6.2.3.1) requires that each pasid
 * entry for first-level or pass-through translation modes should
 * be programmed with a domain id different from those used for
 * second-level or nested translation. We reserve a domain id for
 * this purpose. This domain id is also used for identity domain
 * in legacy mode.
 */
#define FLPT_DEFAULT_DID		1
#define IDA_START_DID			2

/* Retrieve the domain ID allocated to the domain on this IOMMU */
static inline u16
domain_id_iommu(struct dmar_domain *domain, struct intel_iommu *iommu)
{
	struct iommu_domain_info *info =
		xa_load(&domain->iommu_array, iommu->seq_id);

	return info->did;
}

static inline u16
iommu_domain_did(struct iommu_domain *domain, struct intel_iommu *iommu)
{
	if (domain->type == IOMMU_DOMAIN_SVA ||
	    domain->type == IOMMU_DOMAIN_IDENTITY)
		return FLPT_DEFAULT_DID;
	return domain_id_iommu(to_dmar_domain(domain), iommu);
}

static inline bool dev_is_real_dma_subdevice(struct device *dev)
{
	return dev && dev_is_pci(dev) &&
	       pci_real_dma_dev(to_pci_dev(dev)) != to_pci_dev(dev);
}

/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-10: available
 * 11: snoop behavior
 * 12-63: Host physical address
 */
struct dma_pte {
	u64 val;
};

static inline void dma_clear_pte(struct dma_pte *pte)
{
	pte->val = 0;
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
	return pte->val & VTD_PAGE_MASK;
#else
	/* Must have a full atomic 64-bit read */
	return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
#endif
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) != 0;
}

static inline bool dma_sl_pte_test_and_clear_dirty(struct dma_pte *pte,
						   unsigned long flags)
{
	if (flags & IOMMU_DIRTY_NO_CLEAR)
		return (pte->val & DMA_SL_PTE_DIRTY) != 0;

	return test_and_clear_bit(DMA_SL_PTE_DIRTY_BIT,
				  (unsigned long *)&pte->val);
}

static inline bool dma_pte_superpage(struct dma_pte *pte)
{
	return (pte->val & DMA_PTE_LARGE_PAGE);
}

static inline bool first_pte_in_page(struct dma_pte *pte)
{
	return IS_ALIGNED((unsigned long)pte, VTD_PAGE_SIZE);
}

static inline int nr_pte_to_next_page(struct dma_pte *pte)
{
	return first_pte_in_page(pte) ? BIT_ULL(VTD_STRIDE_SHIFT) :
		(struct dma_pte *)ALIGN((unsigned long)pte, VTD_PAGE_SIZE) - pte;
}

static inline bool context_present(struct context_entry *context)
{
	return (context->lo & 1);
}

#define LEVEL_STRIDE		(9)
#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)
#define MAX_AGAW_WIDTH		(64)
#define MAX_AGAW_PFN_WIDTH	(MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)

static inline int agaw_to_level(int agaw)
{
	return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
	return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
}

static inline int width_to_agaw(int width)
{
	return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
}

static inline unsigned int level_to_offset_bits(int level)
{
	return (level - 1) * LEVEL_STRIDE;
}

static inline int pfn_level_offset(u64 pfn, int level)
{
	return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

static inline u64 level_mask(int level)
{
	return -1ULL << level_to_offset_bits(level);
}

static inline u64 level_size(int level)
{
	return 1ULL << level_to_offset_bits(level);
}

static inline u64 align_to_level(u64 pfn, int level)
{
	return (pfn + level_size(level) - 1) & level_mask(level);
}

static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
{
	return 1UL << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
}

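/*
 * Example (minimal sketch; hypothetical helper): a read-only page-table
 * walk built from the level helpers above. Starting from the top
 * level, each step indexes 9 bits of the pfn and descends through the
 * PTE's physical pointer; large pages and allocation are not handled.
 */
static inline struct dma_pte *
example_pfn_to_pte(struct dma_pte *pgd, u64 pfn, int total_level)
{
	struct dma_pte *parent = pgd, *pte = NULL;
	int level;

	for (level = total_level; level > 0; level--) {
		pte = &parent[pfn_level_offset(pfn, level)];
		if (level == 1 || !dma_pte_present(pte))
			break;
		parent = phys_to_virt(dma_pte_addr(pte));
	}
	return pte;
}
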
static inline void context_set_present(struct context_entry *context)
{
	context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
	context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
						unsigned long value)
{
	context->lo &= (((u64)-1) << 4) | 3;
	context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
					    unsigned long value)
{
	context->lo &= ~VTD_PAGE_MASK;
	context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
					     unsigned long value)
{
	context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
					 unsigned long value)
{
	context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline void context_set_pasid(struct context_entry *context)
{
	context->lo |= CONTEXT_PASIDE;
}

static inline int context_domain_id(struct context_entry *c)
{
	return (c->hi >> 8) & 0xffff;
}

static inline void context_clear_entry(struct context_entry *context)
{
	context->lo = 0;
	context->hi = 0;
}

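/*
 * Example (sketch; the helper itself is hypothetical): the order in
 * which the setters above compose a legacy-mode context entry for
 * multi-level translation, mirroring the driver's context-mapping
 * path.
 */
static inline void example_fill_context(struct context_entry *ce,
					u64 pgd_phys, u16 did, int agaw)
{
	context_clear_entry(ce);
	context_set_domain_id(ce, did);
	context_set_address_width(ce, agaw);
	context_set_address_root(ce, pgd_phys);
	context_set_translation_type(ce, CONTEXT_TT_MULTI_LEVEL);
	context_set_fault_enable(ce);
	context_set_present(ce);	/* present last, entry now live */
}
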
#ifdef CONFIG_INTEL_IOMMU
static inline bool context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	if (!iommu->copied_tables)
		return false;

	return test_bit(((long)bus << 8) | devfn, iommu->copied_tables);
}

static inline void
set_context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	set_bit(((long)bus << 8) | devfn, iommu->copied_tables);
}

static inline void
clear_context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	clear_bit(((long)bus << 8) | devfn, iommu->copied_tables);
}
#endif /* CONFIG_INTEL_IOMMU */

/*
 * Set the RID_PASID field of a scalable mode context entry. The
 * IOMMU hardware will use the PASID value set in this field for
 * DMA translations of DMA requests without PASID.
 */
static inline void
context_set_sm_rid2pasid(struct context_entry *context, unsigned long pasid)
{
	context->hi |= pasid & ((1 << 20) - 1);
}

/*
 * Set the DTE (Device-TLB Enable) field of a scalable mode context
 * entry.
 */
static inline void context_set_sm_dte(struct context_entry *context)
{
	context->lo |= BIT_ULL(2);
}

/*
 * Set the PRE (Page Request Enable) field of a scalable mode context
 * entry.
 */
static inline void context_set_sm_pre(struct context_entry *context)
{
	context->lo |= BIT_ULL(4);
}

/*
 * Clear the PRE (Page Request Enable) field of a scalable mode context
 * entry.
 */
static inline void context_clear_sm_pre(struct context_entry *context)
{
	context->lo &= ~BIT_ULL(4);
}

/* Returns a number of VTD pages, but aligned to MM page size */
static inline unsigned long aligned_nrpages(unsigned long host_addr, size_t size)
{
	host_addr &= ~PAGE_MASK;
	return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
}

/* Return a size from number of VTD pages. */
static inline unsigned long nrpages_to_size(unsigned long npages)
{
	return npages << VTD_PAGE_SHIFT;
}

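/*
 * Worked example for aligned_nrpages() above, assuming 4KiB MM pages:
 * a buffer at host address 0x1800 of size 0x2000 occupies page offsets
 * 0x800..0x2800, so PAGE_ALIGN(0x800 + 0x2000) = 0x3000 and the result
 * is 3 VT-d pages, even though the raw size is only two pages.
 */
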
static inline void qi_desc_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
				 unsigned int size_order, u64 type,
				 struct qi_desc *desc)
{
	u8 dw = 0, dr = 0;
	int ih = 0;

	if (cap_write_drain(iommu->cap))
		dw = 1;

	if (cap_read_drain(iommu->cap))
		dr = 1;

	desc->qw0 = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
	desc->qw1 = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
		| QI_IOTLB_AM(size_order);
	desc->qw2 = 0;
	desc->qw3 = 0;
}

static inline void qi_desc_dev_iotlb(u16 sid, u16 pfsid, u16 qdep, u64 addr,
				     unsigned int mask, struct qi_desc *desc)
{
	if (mask) {
		addr |= (1ULL << (VTD_PAGE_SHIFT + mask - 1)) - 1;
		desc->qw1 = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
	} else {
		desc->qw1 = QI_DEV_IOTLB_ADDR(addr);
	}

	if (qdep >= QI_DEV_IOTLB_MAX_INVS)
		qdep = 0;

	desc->qw0 = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
		    QI_DIOTLB_TYPE | QI_DEV_IOTLB_PFSID(pfsid);
	desc->qw2 = 0;
	desc->qw3 = 0;
}

static inline void qi_desc_piotlb(u16 did, u32 pasid, u64 addr,
				  unsigned long npages, bool ih,
				  struct qi_desc *desc)
{
	if (npages == -1) {
		desc->qw0 = QI_EIOTLB_PASID(pasid) |
			QI_EIOTLB_DID(did) |
			QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
			QI_EIOTLB_TYPE;
		desc->qw1 = 0;
	} else {
		int mask = ilog2(__roundup_pow_of_two(npages));
		unsigned long align = (1ULL << (VTD_PAGE_SHIFT + mask));

		if (WARN_ON_ONCE(!IS_ALIGNED(addr, align)))
			addr = ALIGN_DOWN(addr, align);

		desc->qw0 = QI_EIOTLB_PASID(pasid) |
			QI_EIOTLB_DID(did) |
			QI_EIOTLB_GRAN(QI_GRAN_PSI_PASID) |
			QI_EIOTLB_TYPE;
		desc->qw1 = QI_EIOTLB_ADDR(addr) |
			QI_EIOTLB_IH(ih) |
			QI_EIOTLB_AM(mask);
	}
}

static inline void qi_desc_dev_iotlb_pasid(u16 sid, u16 pfsid, u32 pasid,
					   u16 qdep, u64 addr,
					   unsigned int size_order,
					   struct qi_desc *desc)
{
	unsigned long mask = 1UL << (VTD_PAGE_SHIFT + size_order - 1);

	desc->qw0 = QI_DEV_EIOTLB_PASID(pasid) | QI_DEV_EIOTLB_SID(sid) |
		QI_DEV_EIOTLB_QDEP(qdep) | QI_DEIOTLB_TYPE |
		QI_DEV_IOTLB_PFSID(pfsid);

	/*
	 * If the S bit is 0, we only flush a single page. If the S bit is
	 * set, the least significant zero bit indicates the invalidation
	 * address range. VT-d spec 6.5.2.6.
	 * e.g. address bit 12[0] indicates 8KB, 13[0] indicates 16KB.
	 * size order = 0 is PAGE_SIZE 4KB
	 * Max Invs Pending (MIP) is set to 0 for now until we have DIT in
	 * ECAP.
	 */
	if (!IS_ALIGNED(addr, VTD_PAGE_SIZE << size_order))
		pr_warn_ratelimited("Invalidate non-aligned address %llx, order %d\n",
				    addr, size_order);

	/* Take page address */
	desc->qw1 = QI_DEV_EIOTLB_ADDR(addr);

	if (size_order) {
		/*
		 * Existing 0s in the address below size_order may be the
		 * least significant bit, so we must set them to 1s to avoid
		 * a smaller size than desired.
		 */
		desc->qw1 |= GENMASK_ULL(size_order + VTD_PAGE_SHIFT - 1,
					 VTD_PAGE_SHIFT);
		/* Clear the size_order bit to indicate the size */
		desc->qw1 &= ~mask;
		/* Set the S bit to indicate flushing more than 1 page */
		desc->qw1 |= QI_DEV_EIOTLB_SIZE;
	}
}

| 1204 | /* Convert value to context PASID directory size field coding. */ | 
|---|
| 1205 | #define context_pdts(pds)	(((pds) & 0x7) << 9) | 
|---|
| 1206 |  | 
|---|
| 1207 | struct dmar_drhd_unit *dmar_find_matched_drhd_unit(struct pci_dev *dev); | 
|---|
| 1208 |  | 
|---|
| 1209 | int dmar_enable_qi(struct intel_iommu *iommu); | 
|---|
| 1210 | void dmar_disable_qi(struct intel_iommu *iommu); | 
|---|
| 1211 | int dmar_reenable_qi(struct intel_iommu *iommu); | 
|---|
| 1212 | void qi_global_iec(struct intel_iommu *iommu); | 
|---|
| 1213 |  | 
|---|
| 1214 | void qi_flush_context(struct intel_iommu *iommu, u16 did, | 
|---|
| 1215 | u16 sid, u8 fm, u64 type); | 
|---|
| 1216 | void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr, | 
|---|
| 1217 | unsigned int size_order, u64 type); | 
|---|
| 1218 | void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid, | 
|---|
| 1219 | u16 qdep, u64 addr, unsigned mask); | 
|---|
| 1220 |  | 
|---|
| 1221 | void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr, | 
|---|
| 1222 | unsigned long npages, bool ih); | 
|---|
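|  | /* | 
|---|
|  |  * Usage sketch (editor's illustration, assuming a live @iommu with QI | 
|---|
|  |  * enabled): flush every IOTLB entry tagged with @pasid, then only a | 
|---|
|  |  * 16-page range. ih == false asks hardware to drop cached | 
|---|
|  |  * paging-structure entries as well. | 
|---|
|  |  */ | 
|---|
|  | static inline void example_piotlb_flush(struct intel_iommu *iommu, u16 did, | 
|---|
|  | u32 pasid, u64 addr) | 
|---|
|  | { | 
|---|
|  | /* npages == -1 selects non-global, PASID-wide granularity */ | 
|---|
|  | qi_flush_piotlb(iommu, did, pasid, 0, -1, false); | 
|---|
|  | /* ranged flush: @addr must be aligned to the rounded-up size */ | 
|---|
|  | qi_flush_piotlb(iommu, did, pasid, addr, 16, false); | 
|---|
|  | } | 
|---|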
| 1223 |  | 
|---|
| 1224 | void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid, | 
|---|
| 1225 | u32 pasid, u16 qdep, u64 addr, | 
|---|
| 1226 | unsigned int size_order); | 
|---|
| 1227 | void quirk_extra_dev_tlb_flush(struct device_domain_info *info, | 
|---|
| 1228 | unsigned long address, unsigned long pages, | 
|---|
| 1229 | u32 pasid, u16 qdep); | 
|---|
| 1230 | void qi_flush_pasid_cache(struct intel_iommu *iommu, u16 did, u64 granu, | 
|---|
| 1231 | u32 pasid); | 
|---|
| 1232 |  | 
|---|
| 1233 | int qi_submit_sync(struct intel_iommu *iommu, struct qi_desc *desc, | 
|---|
| 1234 | unsigned int count, unsigned long options); | 
|---|
| 1235 |  | 
|---|
| 1236 | /* | 
|---|
| 1237 | * Options used in qi_submit_sync: | 
|---|
| 1238 | * QI_OPT_WAIT_DRAIN - Wait for PRQ drain completion, spec 6.5.2.8. | 
|---|
| 1239 | */ | 
|---|
| 1240 | #define QI_OPT_WAIT_DRAIN		BIT(0) | 
|---|
| 1241 | void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr, | 
|---|
| 1242 | unsigned int size_order, u64 type); | 
|---|
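|  | /* | 
|---|
|  |  * Usage sketch (editor's illustration): build one device-IOTLB | 
|---|
|  |  * invalidation descriptor with qi_desc_dev_iotlb_pasid() and submit | 
|---|
|  |  * it synchronously, draining pending page requests first. | 
|---|
|  |  */ | 
|---|
|  | static inline int example_qi_submit(struct intel_iommu *iommu, u16 sid, | 
|---|
|  | u16 pfsid, u32 pasid, u16 qdep, u64 addr) | 
|---|
|  | { | 
|---|
|  | struct qi_desc desc = {}; | 
|---|
|  |  | 
|---|
|  | qi_desc_dev_iotlb_pasid(sid, pfsid, pasid, qdep, addr, 0, &desc); | 
|---|
|  | return qi_submit_sync(iommu, &desc, 1, QI_OPT_WAIT_DRAIN); | 
|---|
|  | } | 
|---|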
| 1243 |  | 
|---|
| 1244 | int domain_attach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu); | 
|---|
| 1245 | void domain_detach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu); | 
|---|
| 1246 | void device_block_translation(struct device *dev); | 
|---|
| 1247 | int paging_domain_compatible(struct iommu_domain *domain, struct device *dev); | 
|---|
| 1248 |  | 
|---|
| 1249 | struct dev_pasid_info * | 
|---|
| 1250 | domain_add_dev_pasid(struct iommu_domain *domain, | 
|---|
| 1251 | struct device *dev, ioasid_t pasid); | 
|---|
| 1252 | void domain_remove_dev_pasid(struct iommu_domain *domain, | 
|---|
| 1253 | struct device *dev, ioasid_t pasid); | 
|---|
| 1254 |  | 
|---|
| 1255 | int __domain_setup_first_level(struct intel_iommu *iommu, struct device *dev, | 
|---|
| 1256 | ioasid_t pasid, u16 did, phys_addr_t fsptptr, | 
|---|
| 1257 | int flags, struct iommu_domain *old); | 
|---|
| 1258 |  | 
|---|
| 1259 | int dmar_ir_support(void); | 
|---|
| 1260 |  | 
|---|
| 1261 | void iommu_flush_write_buffer(struct intel_iommu *iommu); | 
|---|
| 1262 | struct iommu_domain * | 
|---|
| 1263 | intel_iommu_domain_alloc_nested(struct device *dev, struct iommu_domain *parent, | 
|---|
| 1264 | u32 flags, | 
|---|
| 1265 | const struct iommu_user_data *user_data); | 
|---|
| 1266 | struct device *device_rbtree_find(struct intel_iommu *iommu, u16 rid); | 
|---|
| 1267 |  | 
|---|
| 1268 | enum cache_tag_type { | 
|---|
| 1269 | CACHE_TAG_IOTLB, | 
|---|
| 1270 | CACHE_TAG_DEVTLB, | 
|---|
| 1271 | CACHE_TAG_NESTING_IOTLB, | 
|---|
| 1272 | CACHE_TAG_NESTING_DEVTLB, | 
|---|
| 1273 | }; | 
|---|
| 1274 |  | 
|---|
| 1275 | struct cache_tag { | 
|---|
| 1276 | struct list_head node; | 
|---|
| 1277 | enum cache_tag_type type; | 
|---|
| 1278 | struct intel_iommu *iommu; | 
|---|
| 1279 | /* | 
|---|
| 1280 | * The @dev field represents the location of the cache. An IOTLB | 
|---|
| 1281 | * resides on the IOMMU hardware, so @dev points to the IOMMU's | 
|---|
| 1282 | * device. A DevTLB resides in a PCIe endpoint, so @dev points to | 
|---|
| 1283 | * that endpoint's device. | 
|---|
| 1284 | */ | 
|---|
| 1285 | struct device *dev; | 
|---|
| 1286 | u16 domain_id; | 
|---|
| 1287 | ioasid_t pasid; | 
|---|
| 1288 | unsigned int users; | 
|---|
| 1289 | }; | 
|---|
| 1290 |  | 
|---|
| 1291 | int cache_tag_assign(struct dmar_domain *domain, u16 did, struct device *dev, | 
|---|
| 1292 | ioasid_t pasid, enum cache_tag_type type); | 
|---|
| 1293 | int cache_tag_assign_domain(struct dmar_domain *domain, | 
|---|
| 1294 | struct device *dev, ioasid_t pasid); | 
|---|
| 1295 | void cache_tag_unassign_domain(struct dmar_domain *domain, | 
|---|
| 1296 | struct device *dev, ioasid_t pasid); | 
|---|
| 1297 | void cache_tag_flush_range(struct dmar_domain *domain, unsigned long start, | 
|---|
| 1298 | unsigned long end, int ih); | 
|---|
| 1299 | void cache_tag_flush_all(struct dmar_domain *domain); | 
|---|
| 1300 | void cache_tag_flush_range_np(struct dmar_domain *domain, unsigned long start, | 
|---|
| 1301 | unsigned long end); | 
|---|
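|  | /* | 
|---|
|  |  * Usage sketch (editor's illustration, assuming @domain is attached | 
|---|
|  |  * to @dev): a tag is assigned at attach time so later unmap paths | 
|---|
|  |  * know which IOTLBs and DevTLBs to invalidate for a given range. | 
|---|
|  |  */ | 
|---|
|  | static inline int example_cache_tag_lifecycle(struct dmar_domain *domain, | 
|---|
|  | struct device *dev) | 
|---|
|  | { | 
|---|
|  | int ret = cache_tag_assign_domain(domain, dev, IOMMU_NO_PASID); | 
|---|
|  |  | 
|---|
|  | if (ret) | 
|---|
|  | return ret; | 
|---|
|  | /* flush the first 2MiB after unmapping it; ih == 0 */ | 
|---|
|  | cache_tag_flush_range(domain, 0, (2UL << 20) - 1, 0); | 
|---|
|  | cache_tag_unassign_domain(domain, dev, IOMMU_NO_PASID); | 
|---|
|  | return 0; | 
|---|
|  | } | 
|---|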
| 1302 |  | 
|---|
| 1303 | void intel_context_flush_no_pasid(struct device_domain_info *info, | 
|---|
| 1304 | struct context_entry *context, u16 did); | 
|---|
| 1305 |  | 
|---|
| 1306 | int intel_iommu_enable_prq(struct intel_iommu *iommu); | 
|---|
| 1307 | int intel_iommu_finish_prq(struct intel_iommu *iommu); | 
|---|
| 1308 | void intel_iommu_page_response(struct device *dev, struct iopf_fault *evt, | 
|---|
| 1309 | struct iommu_page_response *msg); | 
|---|
| 1310 | void intel_iommu_drain_pasid_prq(struct device *dev, u32 pasid); | 
|---|
| 1311 |  | 
|---|
| 1312 | int intel_iommu_enable_iopf(struct device *dev); | 
|---|
| 1313 | void intel_iommu_disable_iopf(struct device *dev); | 
|---|
| 1314 |  | 
|---|
| 1315 | static inline int iopf_for_domain_set(struct iommu_domain *domain, | 
|---|
| 1316 | struct device *dev) | 
|---|
| 1317 | { | 
|---|
| 1318 | if (!domain || !domain->iopf_handler) | 
|---|
| 1319 | return 0; | 
|---|
| 1320 |  | 
|---|
| 1321 | return intel_iommu_enable_iopf(dev); | 
|---|
| 1322 | } | 
|---|
| 1323 |  | 
|---|
| 1324 | static inline void iopf_for_domain_remove(struct iommu_domain *domain, | 
|---|
| 1325 | struct device *dev) | 
|---|
| 1326 | { | 
|---|
| 1327 | if (!domain || !domain->iopf_handler) | 
|---|
| 1328 | return; | 
|---|
| 1329 |  | 
|---|
| 1330 | intel_iommu_disable_iopf(dev); | 
|---|
| 1331 | } | 
|---|
| 1332 |  | 
|---|
| 1333 | static inline int iopf_for_domain_replace(struct iommu_domain *new, | 
|---|
| 1334 | struct iommu_domain *old, | 
|---|
| 1335 | struct device *dev) | 
|---|
| 1336 | { | 
|---|
| 1337 | int ret; | 
|---|
| 1338 |  | 
|---|
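|  | /* enable IOPF for the new domain before disabling it for the old one */ | 
|---|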
| 1339 | ret = iopf_for_domain_set(new, dev); | 
|---|
| 1340 | if (ret) | 
|---|
| 1341 | return ret; | 
|---|
| 1342 |  | 
|---|
| 1343 | iopf_for_domain_remove(old, dev); | 
|---|
| 1344 |  | 
|---|
| 1345 | return 0; | 
|---|
| 1346 | } | 
|---|
| 1347 |  | 
|---|
| 1348 | #ifdef CONFIG_INTEL_IOMMU_SVM | 
|---|
| 1349 | void intel_svm_check(struct intel_iommu *iommu); | 
|---|
| 1350 | struct iommu_domain *intel_svm_domain_alloc(struct device *dev, | 
|---|
| 1351 | struct mm_struct *mm); | 
|---|
| 1352 | #else | 
|---|
| 1353 | static inline void intel_svm_check(struct intel_iommu *iommu) {} | 
|---|
| 1354 | static inline struct iommu_domain *intel_svm_domain_alloc(struct device *dev, | 
|---|
| 1355 | struct mm_struct *mm) | 
|---|
| 1356 | { | 
|---|
| 1357 | return ERR_PTR(-ENODEV); | 
|---|
| 1358 | } | 
|---|
| 1359 | #endif | 
|---|
| 1360 |  | 
|---|
| 1361 | #ifdef CONFIG_INTEL_IOMMU_DEBUGFS | 
|---|
| 1362 | void intel_iommu_debugfs_init(void); | 
|---|
| 1363 | void intel_iommu_debugfs_create_dev(struct device_domain_info *info); | 
|---|
| 1364 | void intel_iommu_debugfs_remove_dev(struct device_domain_info *info); | 
|---|
| 1365 | void intel_iommu_debugfs_create_dev_pasid(struct dev_pasid_info *dev_pasid); | 
|---|
| 1366 | void intel_iommu_debugfs_remove_dev_pasid(struct dev_pasid_info *dev_pasid); | 
|---|
| 1367 | #else | 
|---|
| 1368 | static inline void intel_iommu_debugfs_init(void) {} | 
|---|
| 1369 | static inline void intel_iommu_debugfs_create_dev(struct device_domain_info *info) {} | 
|---|
| 1370 | static inline void intel_iommu_debugfs_remove_dev(struct device_domain_info *info) {} | 
|---|
| 1371 | static inline void intel_iommu_debugfs_create_dev_pasid(struct dev_pasid_info *dev_pasid) {} | 
|---|
| 1372 | static inline void intel_iommu_debugfs_remove_dev_pasid(struct dev_pasid_info *dev_pasid) {} | 
|---|
| 1373 | #endif /* CONFIG_INTEL_IOMMU_DEBUGFS */ | 
|---|
| 1374 |  | 
|---|
| 1375 | extern const struct attribute_group *intel_iommu_groups[]; | 
|---|
| 1376 | struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus, | 
|---|
| 1377 | u8 devfn, int alloc); | 
|---|
| 1378 |  | 
|---|
| 1379 | extern const struct iommu_ops intel_iommu_ops; | 
|---|
| 1380 | extern const struct iommu_domain_ops intel_fs_paging_domain_ops; | 
|---|
| 1381 | extern const struct iommu_domain_ops intel_ss_paging_domain_ops; | 
|---|
| 1382 |  | 
|---|
| 1383 | static inline bool intel_domain_is_fs_paging(struct dmar_domain *domain) | 
|---|
| 1384 | { | 
|---|
| 1385 | return domain->domain.ops == &intel_fs_paging_domain_ops; | 
|---|
| 1386 | } | 
|---|
| 1387 |  | 
|---|
| 1388 | static inline bool intel_domain_is_ss_paging(struct dmar_domain *domain) | 
|---|
| 1389 | { | 
|---|
| 1390 | return domain->domain.ops == &intel_ss_paging_domain_ops; | 
|---|
| 1391 | } | 
|---|
| 1392 |  | 
|---|
| 1393 | #ifdef CONFIG_INTEL_IOMMU | 
|---|
| 1394 | extern int intel_iommu_sm; | 
|---|
| 1395 | int iommu_calculate_agaw(struct intel_iommu *iommu); | 
|---|
| 1396 | int iommu_calculate_max_sagaw(struct intel_iommu *iommu); | 
|---|
| 1397 | int ecmd_submit_sync(struct intel_iommu *iommu, u8 ecmd, u64 oa, u64 ob); | 
|---|
| 1398 |  | 
|---|
| 1399 | static inline bool ecmd_has_pmu_essential(struct intel_iommu *iommu) | 
|---|
| 1400 | { | 
|---|
| 1401 | return (iommu->ecmdcap[DMA_ECMD_ECCAP3] & DMA_ECMD_ECCAP3_ESSENTIAL) == | 
|---|
| 1402 | DMA_ECMD_ECCAP3_ESSENTIAL; | 
|---|
| 1403 | } | 
|---|
| 1404 |  | 
|---|
| 1405 | extern int dmar_disabled; | 
|---|
| 1406 | extern int intel_iommu_enabled; | 
|---|
| 1407 | #else | 
|---|
| 1408 | static inline int iommu_calculate_agaw(struct intel_iommu *iommu) | 
|---|
| 1409 | { | 
|---|
| 1410 | return 0; | 
|---|
| 1411 | } | 
|---|
| 1412 | static inline int iommu_calculate_max_sagaw(struct intel_iommu *iommu) | 
|---|
| 1413 | { | 
|---|
| 1414 | return 0; | 
|---|
| 1415 | } | 
|---|
| 1416 | #define dmar_disabled	(1) | 
|---|
| 1417 | #define intel_iommu_enabled (0) | 
|---|
| 1418 | #define intel_iommu_sm (0) | 
|---|
| 1419 | #endif | 
|---|
| 1420 |  | 
|---|
| 1421 | static inline const char *decode_prq_descriptor(char *str, size_t size, | 
|---|
| 1422 | u64 dw0, u64 dw1, u64 dw2, u64 dw3) | 
|---|
| 1423 | { | 
|---|
| 1424 | char *buf = str; | 
|---|
| 1425 | int bytes; | 
|---|
| 1426 |  | 
|---|
| 1427 | bytes = snprintf(buf, size, | 
|---|
| 1428 | "rid=0x%llx addr=0x%llx %c%c%c%c%c pasid=0x%llx index=0x%llx", | 
|---|
| 1429 | FIELD_GET(GENMASK_ULL(31, 16), dw0), | 
|---|
| 1430 | FIELD_GET(GENMASK_ULL(63, 12), dw1), | 
|---|
| 1431 | dw1 & BIT_ULL(0) ? 'r' : '-', | 
|---|
| 1432 | dw1 & BIT_ULL(1) ? 'w' : '-', | 
|---|
| 1433 | dw0 & BIT_ULL(52) ? 'x' : '-', | 
|---|
| 1434 | dw0 & BIT_ULL(53) ? 'p' : '-', | 
|---|
| 1435 | dw1 & BIT_ULL(2) ? 'l' : '-', | 
|---|
| 1436 | FIELD_GET(GENMASK_ULL(51, 32), dw0), | 
|---|
| 1437 | FIELD_GET(GENMASK_ULL(11, 3), dw1)); | 
|---|
| 1438 |  | 
|---|
| 1439 | /* Private Data */ | 
|---|
| 1440 | if (dw0 & BIT_ULL(9)) { | 
|---|
| 1441 | size -= bytes; | 
|---|
| 1442 | buf += bytes; | 
|---|
| 1443 | snprintf(buf, size, " private=0x%llx/0x%llx\n", dw2, dw3); | 
|---|
| 1444 | } | 
|---|
| 1445 |  | 
|---|
| 1446 | return str; | 
|---|
| 1447 | } | 
|---|
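|  | /* | 
|---|
|  |  * Usage sketch (editor's illustration): format a raw page request for | 
|---|
|  |  * logging; dw0-dw3 come straight off the PRQ ring, and the buffer | 
|---|
|  |  * size here is an arbitrary choice for the example. | 
|---|
|  |  */ | 
|---|
|  | static inline void example_log_prq(u64 dw0, u64 dw1, u64 dw2, u64 dw3) | 
|---|
|  | { | 
|---|
|  | char buf[256]; | 
|---|
|  |  | 
|---|
|  | pr_info("prq: %s\n", | 
|---|
|  | decode_prq_descriptor(buf, sizeof(buf), dw0, dw1, dw2, dw3)); | 
|---|
|  | } | 
|---|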
| 1448 |  | 
|---|
| 1449 | #endif | 
|---|
| 1450 |  | 
|---|