| 1 | /* SPDX-License-Identifier: MIT */ | 
|---|
| 2 | /* | 
|---|
| 3 | * Copyright © 2019 Intel Corporation | 
|---|
| 4 | */ | 
|---|
| 5 |  | 
|---|
| 6 | #ifndef __INTEL_GT__ | 
|---|
| 7 | #define __INTEL_GT__ | 
|---|
| 8 |  | 
|---|
| 9 | #include "intel_engine_types.h" | 
|---|
| 10 | #include "intel_gt_types.h" | 
|---|
| 11 | #include "intel_reset.h" | 
|---|
| 12 |  | 
|---|
| 13 | struct drm_i915_private; | 
|---|
| 14 | struct drm_printer; | 
|---|
| 15 |  | 
|---|
/*
 * Check that the GT is a graphics GT and has an IP version within the
 * specified range (inclusive).
 *
 * The BUILD_BUG_ON_ZERO() terms reject, at compile time, bounds that are
 * impossible (a graphics IP older than 2.0, or an empty range); they
 * evaluate to 0 and do not affect the result.
 */
#define IS_GFX_GT_IP_RANGE(gt, from, until) ( \
	BUILD_BUG_ON_ZERO((from) < IP_VER(2, 0)) + \
	BUILD_BUG_ON_ZERO((until) < (from)) + \
	((gt)->type != GT_MEDIA && \
	 GRAPHICS_VER_FULL((gt)->i915) >= (from) && \
	 GRAPHICS_VER_FULL((gt)->i915) <= (until)))
|---|
| 26 |  | 
|---|
/*
 * Check that the GT is a media GT and has an IP version within the
 * specified range (inclusive).
 *
 * Only usable on platforms with a standalone media design (i.e., IP version 13
 * and higher).
 *
 * Unlike IS_GFX_GT_IP_RANGE(), this also tolerates a NULL @gt, since
 * platforms without a standalone media unit have no media GT at all.
 */
#define IS_MEDIA_GT_IP_RANGE(gt, from, until) ( \
	BUILD_BUG_ON_ZERO((from) < IP_VER(13, 0)) + \
	BUILD_BUG_ON_ZERO((until) < (from)) + \
	((gt) && (gt)->type == GT_MEDIA && \
	 MEDIA_VER_FULL((gt)->i915) >= (from) && \
	 MEDIA_VER_FULL((gt)->i915) <= (until)))
|---|
| 40 |  | 
|---|
/*
 * Check that the GT is a graphics GT with a specific IP version and has
 * a stepping in the range [from, until).  The lower stepping bound is
 * inclusive, the upper bound is exclusive.  The most common use-case of this
 * macro is for checking bounds for workarounds, which usually have a stepping
 * ("from") at which the hardware issue is first present and another stepping
 * ("until") at which a hardware fix is present and the software workaround is
 * no longer necessary.  E.g.,
 *
 *    IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0)
 *    IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_B1, STEP_FOREVER)
 *
 * "STEP_FOREVER" can be passed as "until" for workarounds that have no upper
 * stepping bound for the specified IP version.
 *
 * An empty stepping range is rejected at compile time via
 * BUILD_BUG_ON_ZERO(), which otherwise contributes 0 to the expression.
 */
#define IS_GFX_GT_IP_STEP(gt, ipver, from, until) ( \
	BUILD_BUG_ON_ZERO((until) <= (from)) + \
	(IS_GFX_GT_IP_RANGE((gt), (ipver), (ipver)) && \
	 IS_GRAPHICS_STEP((gt)->i915, (from), (until))))
|---|
| 60 |  | 
|---|
/*
 * Check that the GT is a media GT with a specific IP version and has
 * a stepping in the range [from, until).  The lower stepping bound is
 * inclusive, the upper bound is exclusive.  The most common use-case of this
 * macro is for checking bounds for workarounds, which usually have a stepping
 * ("from") at which the hardware issue is first present and another stepping
 * ("until") at which a hardware fix is present and the software workaround is
 * no longer necessary.  "STEP_FOREVER" can be passed as "until" for
 * workarounds that have no upper stepping bound for the specified IP version.
 *
 * This macro may only be used to match on platforms that have a standalone
 * media design (i.e., media version 13 or higher).
 *
 * An empty stepping range is rejected at compile time via
 * BUILD_BUG_ON_ZERO(), which otherwise contributes 0 to the expression.
 */
#define IS_MEDIA_GT_IP_STEP(gt, ipver, from, until) ( \
	BUILD_BUG_ON_ZERO((until) <= (from)) + \
	(IS_MEDIA_GT_IP_RANGE((gt), (ipver), (ipver)) && \
	 IS_MEDIA_STEP((gt)->i915, (from), (until))))
|---|
| 78 |  | 
|---|
/*
 * Emit a GEM trace message prefixed with the name of the GT's parent
 * device.  The local gt__ alias ensures @gt is evaluated exactly once
 * (and keeps the expansion valid even when GEM_TRACE() compiles away,
 * hence __maybe_unused).
 */
#define GT_TRACE(gt, fmt, ...) do {					\
	const struct intel_gt *gt__ __maybe_unused = (gt);		\
	GEM_TRACE("%s " fmt, dev_name(gt__->i915->drm.dev),		\
		  ##__VA_ARGS__);					\
} while (0)
|---|
| 84 |  | 
|---|
| 85 | static inline bool gt_is_root(struct intel_gt *gt) | 
|---|
| 86 | { | 
|---|
| 87 | return !gt->info.id; | 
|---|
| 88 | } | 
|---|
| 89 |  | 
|---|
| 90 | bool intel_gt_needs_wa_16018031267(struct intel_gt *gt); | 
|---|
| 91 | bool intel_gt_needs_wa_22016122933(struct intel_gt *gt); | 
|---|
| 92 |  | 
|---|
/*
 * Wa_16018031267: the fast-color blit WABB is only required on the first
 * instance of the copy (blitter) engine of an affected GT.
 *
 * Parenthesize the macro argument so expressions (not just plain
 * identifiers) may be passed as @engine.
 */
#define NEEDS_FASTCOLOR_BLT_WABB(engine) ( \
	intel_gt_needs_wa_16018031267((engine)->gt) && \
	(engine)->class == COPY_ENGINE_CLASS && (engine)->instance == 0)
|---|
| 96 |  | 
|---|
/* Resolve the GT that embeds this intel_uc (gt->uc). */
static inline struct intel_gt *uc_to_gt(struct intel_uc *uc)
{
	return container_of(uc, struct intel_gt, uc);
}
|---|
| 101 |  | 
|---|
/* Resolve the GT that embeds this GuC instance (gt->uc.guc). */
static inline struct intel_gt *guc_to_gt(struct intel_guc *guc)
{
	return container_of(guc, struct intel_gt, uc.guc);
}
|---|
| 106 |  | 
|---|
/* Resolve the GT that embeds this HuC instance (gt->uc.huc). */
static inline struct intel_gt *huc_to_gt(struct intel_huc *huc)
{
	return container_of(huc, struct intel_gt, uc.huc);
}
|---|
| 111 |  | 
|---|
/* Resolve the GT that embeds this GSC uC instance (gt->uc.gsc). */
static inline struct intel_gt *gsc_uc_to_gt(struct intel_gsc_uc *gsc_uc)
{
	return container_of(gsc_uc, struct intel_gt, uc.gsc);
}
|---|
| 116 |  | 
|---|
/* Resolve the GT that embeds this GSC device (gt->gsc). */
static inline struct intel_gt *gsc_to_gt(struct intel_gsc *gsc)
{
	return container_of(gsc, struct intel_gt, gsc);
}
|---|
| 121 |  | 
|---|
/* Resolve the i915 device that (indirectly) owns this GuC instance. */
static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
{
	return guc_to_gt(guc)->i915;
}
|---|
| 126 |  | 
|---|
| 127 | static inline struct intel_guc *gt_to_guc(struct intel_gt *gt) | 
|---|
| 128 | { | 
|---|
| 129 | return >->uc.guc; | 
|---|
| 130 | } | 
|---|
| 131 |  | 
|---|
| 132 | void intel_gt_common_init_early(struct intel_gt *gt); | 
|---|
| 133 | int intel_root_gt_init_early(struct drm_i915_private *i915); | 
|---|
| 134 | int intel_gt_assign_ggtt(struct intel_gt *gt); | 
|---|
| 135 | int intel_gt_init_mmio(struct intel_gt *gt); | 
|---|
| 136 | int __must_check intel_gt_init_hw(struct intel_gt *gt); | 
|---|
| 137 | int intel_gt_init(struct intel_gt *gt); | 
|---|
| 138 | void intel_gt_driver_register(struct intel_gt *gt); | 
|---|
| 139 |  | 
|---|
| 140 | void intel_gt_driver_unregister(struct intel_gt *gt); | 
|---|
| 141 | void intel_gt_driver_remove(struct intel_gt *gt); | 
|---|
| 142 | void intel_gt_driver_release(struct intel_gt *gt); | 
|---|
| 143 | void intel_gt_driver_late_release_all(struct drm_i915_private *i915); | 
|---|
| 144 |  | 
|---|
| 145 | int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout); | 
|---|
| 146 |  | 
|---|
| 147 | void intel_gt_check_and_clear_faults(struct intel_gt *gt); | 
|---|
| 148 | i915_reg_t intel_gt_perf_limit_reasons_reg(struct intel_gt *gt); | 
|---|
| 149 | void intel_gt_clear_error_registers(struct intel_gt *gt, | 
|---|
| 150 | intel_engine_mask_t engine_mask); | 
|---|
| 151 |  | 
|---|
| 152 | void intel_gt_flush_ggtt_writes(struct intel_gt *gt); | 
|---|
| 153 | void intel_gt_chipset_flush(struct intel_gt *gt); | 
|---|
| 154 |  | 
|---|
| 155 | static inline u32 intel_gt_scratch_offset(const struct intel_gt *gt, | 
|---|
| 156 | enum intel_gt_scratch_field field) | 
|---|
| 157 | { | 
|---|
| 158 | return i915_ggtt_offset(vma: gt->scratch) + field; | 
|---|
| 159 | } | 
|---|
| 160 |  | 
|---|
| 161 | static inline bool intel_gt_has_unrecoverable_error(const struct intel_gt *gt) | 
|---|
| 162 | { | 
|---|
| 163 | return test_bit(I915_WEDGED_ON_INIT, >->reset.flags) || | 
|---|
| 164 | test_bit(I915_WEDGED_ON_FINI, >->reset.flags); | 
|---|
| 165 | } | 
|---|
| 166 |  | 
|---|
| 167 | static inline bool intel_gt_is_wedged(const struct intel_gt *gt) | 
|---|
| 168 | { | 
|---|
| 169 | GEM_BUG_ON(intel_gt_has_unrecoverable_error(gt) && | 
|---|
| 170 | !test_bit(I915_WEDGED, >->reset.flags)); | 
|---|
| 171 |  | 
|---|
| 172 | return unlikely(test_bit(I915_WEDGED, >->reset.flags)); | 
|---|
| 173 | } | 
|---|
| 174 |  | 
|---|
| 175 | int intel_gt_probe_all(struct drm_i915_private *i915); | 
|---|
| 176 | int intel_gt_tiles_init(struct drm_i915_private *i915); | 
|---|
| 177 |  | 
|---|
/* Iterate over every possible GT id of @i915__, skipping absent GTs. */
#define for_each_gt(gt__, i915__, id__) \
	for ((id__) = 0; \
	     (id__) < I915_MAX_GT; \
	     (id__)++) \
		for_each_if(((gt__) = (i915__)->gt[(id__)]))
|---|
| 183 |  | 
|---|
/* Simple iterator over all initialised engines (NULL slots are skipped) */
#define for_each_engine(engine__, gt__, id__) \
	for ((id__) = 0; \
	     (id__) < I915_NUM_ENGINES; \
	     (id__)++) \
		for_each_if ((engine__) = (gt__)->engine[(id__)])
|---|
| 190 |  | 
|---|
/*
 * Iterator over subset of engines selected by mask.
 * tmp__ is consumed bit by bit (__mask_next_bit clears the lowest set
 * bit each iteration), so the caller's mask__ itself is left untouched.
 */
#define for_each_engine_masked(engine__, gt__, mask__, tmp__) \
	for ((tmp__) = (mask__) & (gt__)->info.engine_mask; \
	     (tmp__) ? \
	     ((engine__) = (gt__)->engine[__mask_next_bit(tmp__)]), 1 : \
	     0;)
|---|
| 197 |  | 
|---|
| 198 | void intel_gt_info_print(const struct intel_gt_info *info, | 
|---|
| 199 | struct drm_printer *p); | 
|---|
| 200 |  | 
|---|
| 201 | void intel_gt_watchdog_work(struct work_struct *work); | 
|---|
| 202 |  | 
|---|
| 203 | enum i915_map_type intel_gt_coherent_map_type(struct intel_gt *gt, | 
|---|
| 204 | struct drm_i915_gem_object *obj, | 
|---|
| 205 | bool always_coherent); | 
|---|
| 206 |  | 
|---|
| 207 | void intel_gt_bind_context_set_ready(struct intel_gt *gt); | 
|---|
| 208 | void intel_gt_bind_context_set_unready(struct intel_gt *gt); | 
|---|
| 209 | bool intel_gt_is_bind_context_ready(struct intel_gt *gt); | 
|---|
| 210 |  | 
|---|
| 211 | static inline void intel_gt_set_wedged_async(struct intel_gt *gt) | 
|---|
| 212 | { | 
|---|
| 213 | queue_work(wq: system_highpri_wq, work: >->wedge); | 
|---|
| 214 | } | 
|---|
| 215 |  | 
|---|
#endif /* __INTEL_GT__ */
|---|