/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2020 Intel Corporation
 *
 * Please try to maintain the following order within this file unless it makes
 * sense to do otherwise. From top to bottom:
 * 1. typedefs
 * 2. #defines, and macros
 * 3. structure definitions
 * 4. function prototypes
 *
 * Within each section, please try to order by generation in ascending order,
 * from top to bottom (i.e. gen6 on the top, gen8 on the bottom).
 */

#ifndef __INTEL_GTT_H__
#define __INTEL_GTT_H__

#include <linux/io-mapping.h>
#include <linux/kref.h>
#include <linux/mm.h>
#include <linux/pagevec.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>

#include <drm/drm_mm.h>

#include "gt/intel_reset.h"
#include "i915_selftest.h"
#include "i915_vma_resource.h"
#include "i915_vma_types.h"
#include "i915_params.h"
#include "intel_memory_region.h"

#define I915_GFP_ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)

#if IS_ENABLED(CONFIG_DRM_I915_TRACE_GTT)
#define GTT_TRACE(...) trace_printk(__VA_ARGS__)
#else
#define GTT_TRACE(...)
#endif

#define NALLOC 3 /* 1 normal, 1 for concurrent threads, 1 for preallocation */

#define I915_GTT_PAGE_SIZE_4K	BIT_ULL(12)
#define I915_GTT_PAGE_SIZE_64K	BIT_ULL(16)
#define I915_GTT_PAGE_SIZE_2M	BIT_ULL(21)

#define I915_GTT_PAGE_SIZE I915_GTT_PAGE_SIZE_4K
#define I915_GTT_MAX_PAGE_SIZE I915_GTT_PAGE_SIZE_2M

#define I915_GTT_PAGE_MASK -I915_GTT_PAGE_SIZE

#define I915_GTT_MIN_ALIGNMENT I915_GTT_PAGE_SIZE

#define I915_FENCE_REG_NONE -1
#define I915_MAX_NUM_FENCES 32
/* 32 fences + sign bit for FENCE_REG_NONE */
#define I915_MAX_NUM_FENCE_BITS 6

typedef u32 gen6_pte_t;
typedef u64 gen8_pte_t;

#define ggtt_total_entries(ggtt) ((ggtt)->vm.total >> PAGE_SHIFT)

#define I915_PTES(pte_len)		((unsigned int)(PAGE_SIZE / (pte_len)))
#define I915_PTE_MASK(pte_len)		(I915_PTES(pte_len) - 1)
#define I915_PDES			512
#define I915_PDE_MASK			(I915_PDES - 1)

/* gen6-hsw has bits 11:4 for physical addr bits 39:32 */
#define GEN6_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0xff0))
#define GEN6_PTE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
#define GEN6_PDE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
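
/*
 * Worked example (added for illustration, not part of the original header):
 * for a hypothetical 4K-aligned physical address 0x1234567000, the encode
 * helper folds addr[39:32] into pte bits 11:4, which are otherwise unused
 * for a page-aligned address:
 *
 *   GEN6_GTT_ADDR_ENCODE(0x1234567000ULL)
 *     == 0x1234567000 | ((0x1234567000 >> 28) & 0xff0)
 *     == 0x1234567000 | 0x120
 *
 * Only the low 32 bits (0x34567120) fit in a gen6_pte_t, which is why the
 * address bits above bit 31 have to ride in pte bits 11:4.
 */
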
#define GEN6_PTE_CACHE_LLC		(2 << 1)
#define GEN6_PTE_UNCACHED		(1 << 1)
#define GEN6_PTE_VALID			REG_BIT(0)

#define GEN6_PTES			I915_PTES(sizeof(gen6_pte_t))
#define GEN6_PD_SIZE		        (I915_PDES * PAGE_SIZE)
#define GEN6_PD_ALIGN			(PAGE_SIZE * 16)
#define GEN6_PDE_SHIFT			22
#define GEN6_PDE_VALID			REG_BIT(0)
#define NUM_PTE(pde_shift)     (1 << (pde_shift - PAGE_SHIFT))

#define GEN7_PTE_CACHE_L3_LLC		(3 << 1)

#define BYT_PTE_SNOOPED_BY_CPU_CACHES	REG_BIT(2)
#define BYT_PTE_WRITEABLE		REG_BIT(1)

#define MTL_PPGTT_PTE_PAT3	BIT_ULL(62)
#define GEN12_PPGTT_PTE_LM	BIT_ULL(11)
#define GEN12_PPGTT_PTE_PAT2	BIT_ULL(7)
#define GEN12_PPGTT_PTE_PAT1	BIT_ULL(4)
#define GEN12_PPGTT_PTE_PAT0	BIT_ULL(3)

#define GEN12_GGTT_PTE_LM		BIT_ULL(1)
#define MTL_GGTT_PTE_PAT0		BIT_ULL(52)
#define MTL_GGTT_PTE_PAT1		BIT_ULL(53)
#define GEN12_GGTT_PTE_ADDR_MASK	GENMASK_ULL(45, 12)
#define MTL_GGTT_PTE_PAT_MASK		GENMASK_ULL(53, 52)

#define GEN12_PDE_64K BIT(6)
#define GEN12_PTE_PS64 BIT(8)

/*
 * Cacheability Control is a 4-bit value. The low three bits are stored in bits
 * 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE.
 */
#define HSW_CACHEABILITY_CONTROL(bits)	((((bits) & 0x7) << 1) | \
					 (((bits) & 0x8) << (11 - 3)))
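
/*
 * Illustrative expansion (added, not from the original header): for a
 * cacheability value of 0xb, the low three bits land in PTE bits 3:1 and
 * the top bit moves up to PTE bit 11:
 *
 *   HSW_CACHEABILITY_CONTROL(0xb)
 *     == ((0xb & 0x7) << 1) | ((0xb & 0x8) << 8)
 *     == 0x6 | 0x800
 *     == 0x806
 */
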
#define HSW_WB_LLC_AGE3			HSW_CACHEABILITY_CONTROL(0x2)
#define HSW_WB_LLC_AGE0			HSW_CACHEABILITY_CONTROL(0x3)
#define HSW_WB_ELLC_LLC_AGE3		HSW_CACHEABILITY_CONTROL(0x8)
#define HSW_WB_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0xb)
#define HSW_WT_ELLC_LLC_AGE3		HSW_CACHEABILITY_CONTROL(0x7)
#define HSW_WT_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0x6)
#define HSW_PTE_UNCACHED		(0)
#define HSW_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0x7f0))
#define HSW_PTE_ADDR_ENCODE(addr)	HSW_GTT_ADDR_ENCODE(addr)

/*
 * GEN8 32b style address is defined as a 3 level page table:
 * 31:30 | 29:21 | 20:12 |  11:0
 * PDPE  |  PDE  |  PTE  | offset
 * The difference as compared to normal x86 3 level page table is the PDPEs are
 * programmed via register.
 *
 * GEN8 48b style address is defined as a 4 level page table:
 * 47:39 | 38:30 | 29:21 | 20:12 |  11:0
 * PML4E | PDPE  |  PDE  |  PTE  | offset
 */
#define GEN8_3LVL_PDPES			4
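
/*
 * For clarity (added note): with the 48b layout above, the index for each
 * level of the page-table walk is a 9-bit field, so every level addresses
 * one of 512 entries and the final 12 bits select the byte within a 4K page:
 *
 *   pml4e  = (addr >> 39) & 0x1ff;
 *   pdpe   = (addr >> 30) & 0x1ff;
 *   pde    = (addr >> 21) & 0x1ff;
 *   pte    = (addr >> 12) & 0x1ff;
 *   offset =  addr        & 0xfff;
 */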

#define PPAT_UNCACHED			(_PAGE_PWT | _PAGE_PCD)
#define PPAT_CACHED_PDE			0 /* WB LLC */
#define PPAT_CACHED			_PAGE_PAT /* WB LLCeLLC */
#define PPAT_DISPLAY_ELLC		_PAGE_PCD /* WT eLLC */

#define CHV_PPAT_SNOOP			REG_BIT(6)
#define GEN8_PPAT_AGE(x)		((x)<<4)
#define GEN8_PPAT_LLCeLLC		(3<<2)
#define GEN8_PPAT_LLCELLC		(2<<2)
#define GEN8_PPAT_LLC			(1<<2)
#define GEN8_PPAT_WB			(3<<0)
#define GEN8_PPAT_WT			(2<<0)
#define GEN8_PPAT_WC			(1<<0)
#define GEN8_PPAT_UC			(0<<0)
#define GEN8_PPAT_ELLC_OVERRIDE		(0<<2)
#define GEN8_PPAT(i, x)			((u64)(x) << ((i) * 8))
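
/*
 * Sketch of how these combine (modelled on typical private-PAT setup code,
 * not a definition from this header): each PPAT entry is an 8-bit
 * target-cache/age/caching-mode combination shifted into its slot, e.g.
 *
 *   u64 pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) |
 *             GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);
 *
 * places entry 0 in bits 7:0 and entry 1 in bits 15:8 of the PAT value
 * programmed during setup_private_pat().
 */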

#define GEN8_PAGE_PRESENT		BIT_ULL(0)
#define GEN8_PAGE_RW			BIT_ULL(1)

#define GEN8_PDE_IPS_64K BIT(11)
#define GEN8_PDE_PS_2M   BIT(7)

#define MTL_PPAT_L4_CACHE_POLICY_MASK	REG_GENMASK(3, 2)
#define MTL_PAT_INDEX_COH_MODE_MASK	REG_GENMASK(1, 0)
#define MTL_PPAT_L4_3_UC	REG_FIELD_PREP(MTL_PPAT_L4_CACHE_POLICY_MASK, 3)
#define MTL_PPAT_L4_1_WT	REG_FIELD_PREP(MTL_PPAT_L4_CACHE_POLICY_MASK, 1)
#define MTL_PPAT_L4_0_WB	REG_FIELD_PREP(MTL_PPAT_L4_CACHE_POLICY_MASK, 0)
#define MTL_3_COH_2W	REG_FIELD_PREP(MTL_PAT_INDEX_COH_MODE_MASK, 3)
#define MTL_2_COH_1W	REG_FIELD_PREP(MTL_PAT_INDEX_COH_MODE_MASK, 2)

struct drm_i915_gem_object;
struct i915_fence_reg;
struct i915_vma;
struct intel_gt;

#define for_each_sgt_daddr(__dp, __iter, __sgt) \
	__for_each_sgt_daddr(__dp, __iter, __sgt, I915_GTT_PAGE_SIZE)

#define for_each_sgt_daddr_next(__dp, __iter) \
	__for_each_daddr_next(__dp, __iter, I915_GTT_PAGE_SIZE)

struct i915_page_table {
	struct drm_i915_gem_object *base;
	union {
		atomic_t used;
		struct i915_page_table *stash;
	};
	bool is_compact;
};

struct i915_page_directory {
	struct i915_page_table pt;
	spinlock_t lock;
	void **entry;
};

#define __px_choose_expr(x, type, expr, other) \
	__builtin_choose_expr( \
	__builtin_types_compatible_p(typeof(x), type) || \
	__builtin_types_compatible_p(typeof(x), const type), \
	({ type __x = (type)(x); expr; }), \
	other)

#define px_base(px) \
	__px_choose_expr(px, struct drm_i915_gem_object *, __x, \
	__px_choose_expr(px, struct i915_page_table *, __x->base, \
	__px_choose_expr(px, struct i915_page_directory *, __x->pt.base, \
	(void)0)))
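
/*
 * Usage note (added for clarity): px_base() is a compile-time type switch.
 * Given a struct drm_i915_gem_object *, a struct i915_page_table * or a
 * struct i915_page_directory *, it evaluates to the GEM object backing that
 * paging structure, e.g.
 *
 *   struct i915_page_directory *pd;
 *   struct drm_i915_gem_object *obj = px_base(pd);  // picks pd->pt.base
 *
 * Any other pointer type hits the (void)0 fallback and fails to compile.
 */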

struct page *__px_page(struct drm_i915_gem_object *p);
dma_addr_t __px_dma(struct drm_i915_gem_object *p);
#define px_dma(px) (__px_dma(px_base(px)))

void *__px_vaddr(struct drm_i915_gem_object *p);
#define px_vaddr(px) (__px_vaddr(px_base(px)))

#define px_pt(px) \
	__px_choose_expr(px, struct i915_page_table *, __x, \
	__px_choose_expr(px, struct i915_page_directory *, &__x->pt, \
	(void)0))
#define px_used(px) (&px_pt(px)->used)

struct i915_vm_pt_stash {
	/* preallocated chains of page tables/directories */
	struct i915_page_table *pt[2];
	/*
	 * Optionally override the alignment/size of the physical page that
	 * contains each PT. If not set, this defaults back to the usual
	 * I915_GTT_PAGE_SIZE_4K. This does not influence the other paging
	 * structures. MUST be a power-of-two. ONLY applicable on discrete
	 * platforms.
	 */
	int pt_sz;
};

struct i915_vma_ops {
	/* Map an object into an address space with the given cache flags. */
	void (*bind_vma)(struct i915_address_space *vm,
			 struct i915_vm_pt_stash *stash,
			 struct i915_vma_resource *vma_res,
			 unsigned int pat_index,
			 u32 flags);
	/*
	 * Unmap an object from an address space. This usually consists of
	 * setting the valid PTE entries to a reserved scratch page.
	 */
	void (*unbind_vma)(struct i915_address_space *vm,
			   struct i915_vma_resource *vma_res);
};

struct i915_address_space {
	struct kref ref;
	struct work_struct release_work;

	struct drm_mm mm;
	struct {
		struct drm_i915_gem_object *obj;
		struct i915_vma *vma;
	} rsvd;
	struct intel_gt *gt;
	struct drm_i915_private *i915;
	struct drm_i915_file_private *fpriv;
	struct device *dma;
	u64 total;		/* size addr space maps (e.g. 2GB for ggtt) */
	u64 reserved;		/* size addr space reserved */
	u64 min_alignment[INTEL_MEMORY_STOLEN_LOCAL + 1];

	unsigned int bind_async_flags;

	struct mutex mutex; /* protects vma and our lists */

	struct kref resv_ref; /* kref to keep the reservation lock alive. */
	struct dma_resv _resv; /* reservation lock for all pd objects, and buffer pool */
#define VM_CLASS_GGTT 0
#define VM_CLASS_PPGTT 1
#define VM_CLASS_DPT 2

	struct drm_i915_gem_object *scratch[4];
	/**
	 * List of vmas currently bound.
	 */
	struct list_head bound_list;

	/**
	 * List of vmas not yet bound or evicted.
	 */
	struct list_head unbound_list;

	/* Global GTT */
	bool is_ggtt:1;

	/* Display page table */
	bool is_dpt:1;

	/* Some systems support read-only mappings for GGTT and/or PPGTT */
	bool has_read_only:1;

	/* Skip pte rewrite on unbind for suspend. Protected by @mutex */
	bool skip_pte_rewrite:1;

	u8 top;
	u8 pd_shift;
	u8 scratch_order;

	/* Flags used when creating page-table objects for this vm */
	unsigned long lmem_pt_obj_flags;

	/* Interval tree for pending unbind vma resources */
	struct rb_root_cached pending_unbind;

	struct drm_i915_gem_object *
		(*alloc_pt_dma)(struct i915_address_space *vm, int sz);
	struct drm_i915_gem_object *
		(*alloc_scratch_dma)(struct i915_address_space *vm, int sz);

	u64 (*pte_encode)(dma_addr_t addr,
			  unsigned int pat_index,
			  u32 flags); /* Create a valid PTE */
	dma_addr_t (*pte_decode)(u64 pte, bool *is_present, bool *is_local);
#define PTE_READ_ONLY	BIT(0)
#define PTE_LM		BIT(1)

	void (*allocate_va_range)(struct i915_address_space *vm,
				  struct i915_vm_pt_stash *stash,
				  u64 start, u64 length);
	void (*clear_range)(struct i915_address_space *vm,
			    u64 start, u64 length);
	void (*scratch_range)(struct i915_address_space *vm,
			      u64 start, u64 length);
	void (*insert_page)(struct i915_address_space *vm,
			    dma_addr_t addr,
			    u64 offset,
			    unsigned int pat_index,
			    u32 flags);
	void (*insert_entries)(struct i915_address_space *vm,
			       struct i915_vma_resource *vma_res,
			       unsigned int pat_index,
			       u32 flags);
	void (*raw_insert_page)(struct i915_address_space *vm,
				dma_addr_t addr,
				u64 offset,
				unsigned int pat_index,
				u32 flags);
	void (*raw_insert_entries)(struct i915_address_space *vm,
				   struct i915_vma_resource *vma_res,
				   unsigned int pat_index,
				   u32 flags);
	dma_addr_t (*read_entry)(struct i915_address_space *vm,
				 u64 offset, bool *is_present, bool *is_local);
	void (*cleanup)(struct i915_address_space *vm);

	void (*foreach)(struct i915_address_space *vm,
			u64 start, u64 length,
			void (*fn)(struct i915_address_space *vm,
				   struct i915_page_table *pt,
				   void *data),
			void *data);

	struct i915_vma_ops vma_ops;

	I915_SELFTEST_DECLARE(struct fault_attr fault_attr);
	I915_SELFTEST_DECLARE(bool scrub_64K);
};

/*
 * The Graphics Translation Table is the way in which GEN hardware translates a
 * Graphics Virtual Address into a Physical Address. In addition to the normal
 * collateral associated with any va->pa translations GEN hardware also has a
 * portion of the GTT which can be mapped by the CPU and remain both coherent
 * and correct (in cases like swizzling). That region is referred to as GMADR in
 * the spec.
 */
struct i915_ggtt {
	struct i915_address_space vm;

	struct io_mapping iomap;	/* Mapping to our CPU mappable region */
	struct resource gmadr;          /* GMADR resource */
	resource_size_t mappable_end;	/* End offset that we can CPU map */

	/** "Graphics Stolen Memory" holds the global PTEs */
	void __iomem *gsm;
	void (*invalidate)(struct i915_ggtt *ggtt);

	/** PPGTT used for aliasing the PPGTT with the GTT */
	struct i915_ppgtt *alias;

	bool do_idle_maps;

	int mtrr;

	/** Bit 6 swizzling required for X tiling */
	u32 bit_6_swizzle_x;
	/** Bit 6 swizzling required for Y tiling */
	u32 bit_6_swizzle_y;

	u32 pin_bias;

	unsigned int num_fences;
	struct i915_fence_reg *fence_regs;
	struct list_head fence_list;

	/**
	 * List of all objects in gtt_space, currently mmapped by userspace.
	 * All objects within this list must also be on bound_list.
	 */
	struct list_head userfault_list;

	struct mutex error_mutex;
	struct drm_mm_node error_capture;
	struct drm_mm_node uc_fw;

	/** List of GTs mapping this GGTT */
	struct list_head gt_list;
};

struct i915_ppgtt {
	struct i915_address_space vm;

	struct i915_page_directory *pd;
};

#define i915_is_ggtt(vm) ((vm)->is_ggtt)
#define i915_is_dpt(vm) ((vm)->is_dpt)
#define i915_is_ggtt_or_dpt(vm) (i915_is_ggtt(vm) || i915_is_dpt(vm))

bool intel_vm_no_concurrent_access_wa(struct drm_i915_private *i915);

int __must_check
i915_vm_lock_objects(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww);

static inline bool
i915_vm_is_4lvl(const struct i915_address_space *vm)
{
	return (vm->total - 1) >> 32;
}

static inline bool
i915_vm_has_scratch_64K(struct i915_address_space *vm)
{
	return vm->scratch_order == get_order(I915_GTT_PAGE_SIZE_64K);
}

static inline u64 i915_vm_min_alignment(struct i915_address_space *vm,
					enum intel_memory_type type)
{
	/* avoid INTEL_MEMORY_MOCK overflow */
	if ((int)type >= ARRAY_SIZE(vm->min_alignment))
		type = INTEL_MEMORY_SYSTEM;

	return vm->min_alignment[type];
}

static inline u64 i915_vm_obj_min_alignment(struct i915_address_space *vm,
					    struct drm_i915_gem_object *obj)
{
	struct intel_memory_region *mr = READ_ONCE(obj->mm.region);
	enum intel_memory_type type = mr ? mr->type : INTEL_MEMORY_SYSTEM;

	return i915_vm_min_alignment(vm, type);
}

static inline bool
i915_vm_has_cache_coloring(struct i915_address_space *vm)
{
	return i915_is_ggtt(vm) && vm->mm.color_adjust;
}

static inline struct i915_ggtt *
i915_vm_to_ggtt(struct i915_address_space *vm)
{
	BUILD_BUG_ON(offsetof(struct i915_ggtt, vm));
	GEM_BUG_ON(!i915_is_ggtt(vm));
	return container_of(vm, struct i915_ggtt, vm);
}

static inline struct i915_ppgtt *
i915_vm_to_ppgtt(struct i915_address_space *vm)
{
	BUILD_BUG_ON(offsetof(struct i915_ppgtt, vm));
	GEM_BUG_ON(i915_is_ggtt_or_dpt(vm));
	return container_of(vm, struct i915_ppgtt, vm);
}

static inline struct i915_address_space *
i915_vm_get(struct i915_address_space *vm)
{
	kref_get(&vm->ref);
	return vm;
}

static inline struct i915_address_space *
i915_vm_tryget(struct i915_address_space *vm)
{
	return kref_get_unless_zero(&vm->ref) ? vm : NULL;
}

static inline void assert_vm_alive(struct i915_address_space *vm)
{
	GEM_BUG_ON(!kref_read(&vm->ref));
}

/**
 * i915_vm_resv_get - Obtain a reference on the vm's reservation lock
 * @vm: The vm whose reservation lock we want to share.
 *
 * Return: A pointer to the vm's reservation lock.
 */
static inline struct dma_resv *i915_vm_resv_get(struct i915_address_space *vm)
{
	kref_get(&vm->resv_ref);
	return &vm->_resv;
}

void i915_vm_release(struct kref *kref);

void i915_vm_resv_release(struct kref *kref);

static inline void i915_vm_put(struct i915_address_space *vm)
{
	kref_put(&vm->ref, i915_vm_release);
}

/**
 * i915_vm_resv_put - Release a reference on the vm's reservation lock
 * @vm: The vm whose reservation lock reference we want to release
 */
static inline void i915_vm_resv_put(struct i915_address_space *vm)
{
	kref_put(&vm->resv_ref, i915_vm_resv_release);
}

void i915_address_space_init(struct i915_address_space *vm, int subclass);
void i915_address_space_fini(struct i915_address_space *vm);

static inline u32 i915_pte_index(u64 address, unsigned int pde_shift)
{
	const u32 mask = NUM_PTE(pde_shift) - 1;

	return (address >> PAGE_SHIFT) & mask;
}

/*
 * Helper to count the number of PTEs within the given length. This count
 * does not cross a page table boundary, so the max value would be
 * GEN6_PTES for GEN6, and GEN8_PTES for GEN8.
 */
static inline u32 i915_pte_count(u64 addr, u64 length, unsigned int pde_shift)
{
	const u64 mask = ~((1ULL << pde_shift) - 1);
	u64 end;

	GEM_BUG_ON(length == 0);
	GEM_BUG_ON(offset_in_page(addr | length));

	end = addr + length;

	if ((addr & mask) != (end & mask))
		return NUM_PTE(pde_shift) - i915_pte_index(addr, pde_shift);

	return i915_pte_index(end, pde_shift) - i915_pte_index(addr, pde_shift);
}
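
/*
 * Worked example (added for illustration, assuming 4K pages): with
 * GEN6_PDE_SHIFT (22) each page table covers 4M. For addr = 0x3ff000 and
 * length = 0x2000 the range crosses that 4M boundary, so only the PTEs left
 * in the first table are counted:
 *
 *   i915_pte_index(0x3ff000, 22) == 1023
 *   i915_pte_count(0x3ff000, 0x2000, 22) == 1024 - 1023 == 1
 *
 * The rest of the range is expected to be handled against the next page
 * table by the caller.
 */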

static inline u32 i915_pde_index(u64 addr, u32 shift)
{
	return (addr >> shift) & I915_PDE_MASK;
}

static inline struct i915_page_table *
i915_pt_entry(const struct i915_page_directory * const pd,
	      const unsigned short n)
{
	return pd->entry[n];
}

static inline struct i915_page_directory *
i915_pd_entry(const struct i915_page_directory * const pdp,
	      const unsigned short n)
{
	return pdp->entry[n];
}

static inline dma_addr_t
i915_page_dir_dma_addr(const struct i915_ppgtt *ppgtt, const unsigned int n)
{
	struct i915_page_table *pt = ppgtt->pd->entry[n];

	return __px_dma(pt ? px_base(pt) : ppgtt->vm.scratch[ppgtt->vm.top]);
}

void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt,
		unsigned long lmem_pt_obj_flags);
void intel_ggtt_bind_vma(struct i915_address_space *vm,
			 struct i915_vm_pt_stash *stash,
			 struct i915_vma_resource *vma_res,
			 unsigned int pat_index,
			 u32 flags);
void intel_ggtt_unbind_vma(struct i915_address_space *vm,
			   struct i915_vma_resource *vma_res);

dma_addr_t intel_ggtt_read_entry(struct i915_address_space *vm,
				 u64 offset, bool *is_present, bool *is_local);

int i915_ggtt_probe_hw(struct drm_i915_private *i915);
int i915_ggtt_init_hw(struct drm_i915_private *i915);
int i915_ggtt_enable_hw(struct drm_i915_private *i915);
int i915_init_ggtt(struct drm_i915_private *i915);
void i915_ggtt_driver_release(struct drm_i915_private *i915);
void i915_ggtt_driver_late_release(struct drm_i915_private *i915);
struct i915_ggtt *i915_ggtt_create(struct drm_i915_private *i915);

static inline bool i915_ggtt_has_aperture(const struct i915_ggtt *ggtt)
{
	return ggtt->mappable_end > 0;
}

int i915_ppgtt_init_hw(struct intel_gt *gt);

struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt,
				     unsigned long lmem_pt_obj_flags);

void i915_ggtt_suspend_vm(struct i915_address_space *vm, bool evict_all);
bool i915_ggtt_resume_vm(struct i915_address_space *vm, bool all_evicted);
void i915_ggtt_suspend(struct i915_ggtt *gtt);
void i915_ggtt_resume(struct i915_ggtt *ggtt);

void
fill_page_dma(struct drm_i915_gem_object *p, const u64 val, unsigned int count);

#define fill_px(px, v) fill_page_dma(px_base(px), (v), PAGE_SIZE / sizeof(u64))
#define fill32_px(px, v) do {						\
	u64 v__ = lower_32_bits(v);					\
	fill_px((px), v__ << 32 | v__);					\
} while (0)
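
/*
 * Note (added for clarity): fill32_px() replicates a 32-bit value into both
 * halves of every 64-bit slot of the page, e.g. fill32_px(pt, 0xdeadbeef)
 * writes 0xdeadbeefdeadbeef into each of the PAGE_SIZE / sizeof(u64) qwords.
 */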

int setup_scratch_page(struct i915_address_space *vm);
void free_scratch(struct i915_address_space *vm);

struct drm_i915_gem_object *alloc_pt_dma(struct i915_address_space *vm, int sz);
struct drm_i915_gem_object *alloc_pt_lmem(struct i915_address_space *vm, int sz);
struct i915_page_table *alloc_pt(struct i915_address_space *vm, int sz);
struct i915_page_directory *alloc_pd(struct i915_address_space *vm);
struct i915_page_directory *__alloc_pd(int npde);

int map_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj);
int map_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object *obj);

void free_px(struct i915_address_space *vm,
	     struct i915_page_table *pt, int lvl);
#define free_pt(vm, px) free_px(vm, px, 0)
#define free_pd(vm, px) free_px(vm, px_pt(px), 1)

void
__set_pd_entry(struct i915_page_directory * const pd,
	       const unsigned short idx,
	       struct i915_page_table *pt,
	       u64 (*encode)(const dma_addr_t, const enum i915_cache_level));

#define set_pd_entry(pd, idx, to) \
	__set_pd_entry((pd), (idx), px_pt(to), gen8_pde_encode)

void
clear_pd_entry(struct i915_page_directory * const pd,
	       const unsigned short idx,
	       const struct drm_i915_gem_object * const scratch);

bool
release_pd_entry(struct i915_page_directory * const pd,
		 const unsigned short idx,
		 struct i915_page_table * const pt,
		 const struct drm_i915_gem_object * const scratch);
void gen6_ggtt_invalidate(struct i915_ggtt *ggtt);

void ppgtt_bind_vma(struct i915_address_space *vm,
		    struct i915_vm_pt_stash *stash,
		    struct i915_vma_resource *vma_res,
		    unsigned int pat_index,
		    u32 flags);
void ppgtt_unbind_vma(struct i915_address_space *vm,
		      struct i915_vma_resource *vma_res);

void gtt_write_workarounds(struct intel_gt *gt);

void setup_private_pat(struct intel_gt *gt);

int i915_vm_alloc_pt_stash(struct i915_address_space *vm,
			   struct i915_vm_pt_stash *stash,
			   u64 size);
int i915_vm_map_pt_stash(struct i915_address_space *vm,
			 struct i915_vm_pt_stash *stash);
void i915_vm_free_pt_stash(struct i915_address_space *vm,
			   struct i915_vm_pt_stash *stash);

struct i915_vma *
__vm_create_scratch_for_read(struct i915_address_space *vm, unsigned long size);

struct i915_vma *
__vm_create_scratch_for_read_pinned(struct i915_address_space *vm, unsigned long size);

static inline struct sgt_dma {
	struct scatterlist *sg;
	dma_addr_t dma, max;
} sgt_dma(struct i915_vma_resource *vma_res) {
	struct scatterlist *sg = vma_res->bi.pages->sgl;
	dma_addr_t addr = sg_dma_address(sg);

	return (struct sgt_dma){ sg, addr, addr + sg_dma_len(sg) };
}
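
/*
 * Typical use (a sketch only; write_pte() and more_ptes_to_write are
 * placeholders, not real helpers): walk a vma's backing store one dma
 * address at a time, advancing to the next scatterlist entry when the
 * current one is exhausted:
 *
 *   struct sgt_dma iter = sgt_dma(vma_res);
 *
 *   while (more_ptes_to_write) {
 *           write_pte(iter.dma);
 *           iter.dma += I915_GTT_PAGE_SIZE;
 *           if (iter.dma >= iter.max) {
 *                   iter.sg = sg_next(iter.sg);
 *                   if (!iter.sg)
 *                           break;
 *                   iter.dma = sg_dma_address(iter.sg);
 *                   iter.max = iter.dma + sg_dma_len(iter.sg);
 *           }
 *   }
 */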

bool i915_ggtt_require_binder(struct drm_i915_private *i915);

#endif
|---|