// SPDX-License-Identifier: MIT
/*
 * Copyright © 2014-2019 Intel Corporation
 */

#include "gem/i915_gem_lmem.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_irq.h"
#include "gt/intel_gt_pm_irq.h"
#include "gt/intel_gt_regs.h"

#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_reg.h"
#include "i915_wait_util.h"
#include "intel_guc.h"
#include "intel_guc_ads.h"
#include "intel_guc_capture.h"
#include "intel_guc_print.h"
#include "intel_guc_slpc.h"
#include "intel_guc_submission.h"

/**
 * DOC: GuC
 *
 * The GuC is a microcontroller inside the GT HW, introduced in gen9. The GuC is
 * designed to offload some of the functionality usually performed by the host
 * driver; currently the main operations it can take care of are:
 *
 * - Authentication of the HuC, which is required to fully enable HuC usage.
 * - Low latency graphics context scheduling (a.k.a. GuC submission).
 * - GT Power management.
 *
 * The enable_guc module parameter can be used to select which of those
 * operations to enable within GuC. Note that not all the operations are
 * supported on all gen9+ platforms.
 *
 * Enabling the GuC is not mandatory and therefore the firmware is only loaded
 * if at least one of the operations is selected. However, not loading the GuC
 * might result in the loss of some features that do require the GuC (currently
 * just the HuC, but more are expected to land in the future).
 */

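/*
 * Illustrative usage of the module parameter described above (a sketch, not
 * part of the original file): enable_guc is a bitmask, with bit 0 requesting
 * GuC submission and bit 1 requesting HuC loading, e.g.:
 *
 *	modprobe i915 enable_guc=2	# HuC load/authentication only
 *	modprobe i915 enable_guc=3	# GuC submission + HuC load
 *
 * The default of -1 lets the driver pick a per-platform setting.
 */
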
void intel_guc_notify(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	/*
	 * On Gen11+, the value written to the register is passed as a payload
	 * to the FW. However, the FW currently treats all values the same way
	 * (H2G interrupt), so we can just write the value that the HW expects
	 * on older gens.
	 */
	intel_uncore_write(gt->uncore, guc->notify_reg, GUC_SEND_TRIGGER);
}

static inline i915_reg_t guc_send_reg(struct intel_guc *guc, u32 i)
{
	GEM_BUG_ON(!guc->send_regs.base);
	GEM_BUG_ON(!guc->send_regs.count);
	GEM_BUG_ON(i >= guc->send_regs.count);

	return _MMIO(guc->send_regs.base + 4 * i);
}

void intel_guc_init_send_regs(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	enum forcewake_domains fw_domains = 0;
	unsigned int i;

	GEM_BUG_ON(!guc->send_regs.base);
	GEM_BUG_ON(!guc->send_regs.count);

	for (i = 0; i < guc->send_regs.count; i++) {
		fw_domains |= intel_uncore_forcewake_for_reg(gt->uncore,
						guc_send_reg(guc, i),
						FW_REG_READ | FW_REG_WRITE);
	}
	guc->send_regs.fw_domains = fw_domains;
}

static void gen9_reset_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	assert_rpm_wakelock_held(&gt->i915->runtime_pm);

	spin_lock_irq(gt->irq_lock);
	gen6_gt_pm_reset_iir(gt, gt->pm_guc_events);
	spin_unlock_irq(gt->irq_lock);
}

static void gen9_enable_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	assert_rpm_wakelock_held(&gt->i915->runtime_pm);

	spin_lock_irq(gt->irq_lock);
	guc_WARN_ON_ONCE(guc, intel_uncore_read(gt->uncore, GEN8_GT_IIR(2)) &
			 gt->pm_guc_events);
	gen6_gt_pm_enable_irq(gt, gt->pm_guc_events);
	spin_unlock_irq(gt->irq_lock);

	guc->interrupts.enabled = true;
}

static void gen9_disable_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	assert_rpm_wakelock_held(&gt->i915->runtime_pm);
	guc->interrupts.enabled = false;

	spin_lock_irq(gt->irq_lock);

	gen6_gt_pm_disable_irq(gt, gt->pm_guc_events);

	spin_unlock_irq(gt->irq_lock);
	intel_synchronize_irq(gt->i915);

	gen9_reset_guc_interrupts(guc);
}

static bool __gen11_reset_guc_interrupts(struct intel_gt *gt)
{
	u32 irq = gt->type == GT_MEDIA ? MTL_MGUC : GEN11_GUC;

	lockdep_assert_held(gt->irq_lock);
	return gen11_gt_reset_one_iir(gt, 0, irq);
}

static void gen11_reset_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	spin_lock_irq(gt->irq_lock);
	__gen11_reset_guc_interrupts(gt);
	spin_unlock_irq(gt->irq_lock);
}

static void gen11_enable_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	spin_lock_irq(gt->irq_lock);
	__gen11_reset_guc_interrupts(gt);
	spin_unlock_irq(gt->irq_lock);

	guc->interrupts.enabled = true;
}

static void gen11_disable_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	guc->interrupts.enabled = false;
	intel_synchronize_irq(gt->i915);

	gen11_reset_guc_interrupts(guc);
}

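/*
 * A dead-GuC notification triggers a full GT reset with error capture. If a
 * second notification arrives within 500ms of the previous one, the reset
 * evidently did not recover the GuC, so the GT is wedged instead of looping
 * on failed resets.
 */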
static void guc_dead_worker_func(struct work_struct *w)
{
	struct intel_guc *guc = container_of(w, struct intel_guc, dead_guc_worker);
	struct intel_gt *gt = guc_to_gt(guc);
	unsigned long last = guc->last_dead_guc_jiffies;
	unsigned long delta = jiffies_to_msecs(jiffies - last);

	if (delta < 500) {
		intel_gt_set_wedged(gt);
	} else {
		intel_gt_handle_error(gt, ALL_ENGINES, I915_ERROR_CAPTURE, "dead GuC");
		guc->last_dead_guc_jiffies = jiffies;
	}
}

void intel_guc_init_early(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	struct drm_i915_private *i915 = gt->i915;

	intel_uc_fw_init_early(&guc->fw, INTEL_UC_FW_TYPE_GUC, true);
	intel_guc_ct_init_early(&guc->ct);
	intel_guc_log_init_early(&guc->log);
	intel_guc_submission_init_early(guc);
	intel_guc_slpc_init_early(&guc->slpc);
	intel_guc_rc_init_early(guc);

	INIT_WORK(&guc->dead_guc_worker, guc_dead_worker_func);

	mutex_init(&guc->send_mutex);
	spin_lock_init(&guc->irq_lock);
	if (GRAPHICS_VER(i915) >= 11) {
		guc->interrupts.reset = gen11_reset_guc_interrupts;
		guc->interrupts.enable = gen11_enable_guc_interrupts;
		guc->interrupts.disable = gen11_disable_guc_interrupts;
		if (gt->type == GT_MEDIA) {
			guc->notify_reg = MEDIA_GUC_HOST_INTERRUPT;
			guc->send_regs.base = i915_mmio_reg_offset(MEDIA_SOFT_SCRATCH(0));
		} else {
			guc->notify_reg = GEN11_GUC_HOST_INTERRUPT;
			guc->send_regs.base = i915_mmio_reg_offset(GEN11_SOFT_SCRATCH(0));
		}

		guc->send_regs.count = GEN11_SOFT_SCRATCH_COUNT;
	} else {
		guc->notify_reg = GUC_SEND_INTERRUPT;
		guc->interrupts.reset = gen9_reset_guc_interrupts;
		guc->interrupts.enable = gen9_enable_guc_interrupts;
		guc->interrupts.disable = gen9_disable_guc_interrupts;
		guc->send_regs.base = i915_mmio_reg_offset(SOFT_SCRATCH(0));
		guc->send_regs.count = GUC_MAX_MMIO_MSG_LEN;
		BUILD_BUG_ON(GUC_MAX_MMIO_MSG_LEN > SOFT_SCRATCH_COUNT);
	}

	intel_guc_enable_msg(guc, INTEL_GUC_RECV_MSG_EXCEPTION |
			     INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED);
}

void intel_guc_init_late(struct intel_guc *guc)
{
	intel_guc_ads_init_late(guc);
}

static u32 guc_ctl_debug_flags(struct intel_guc *guc)
{
	u32 level = intel_guc_log_get_level(&guc->log);
	u32 flags = 0;

	if (!GUC_LOG_LEVEL_IS_VERBOSE(level))
		flags |= GUC_LOG_DISABLED;
	else
		flags |= GUC_LOG_LEVEL_TO_VERBOSITY(level) <<
			 GUC_LOG_VERBOSITY_SHIFT;

	return flags;
}

static u32 guc_ctl_feature_flags(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	u32 flags = 0;

	/*
	 * Enable PXP GuC autoteardown flow.
	 * NB: MTL does things differently.
	 */
	if (HAS_PXP(gt->i915) && !IS_METEORLAKE(gt->i915))
		flags |= GUC_CTL_ENABLE_GUC_PXP_CTL;

	if (!intel_guc_submission_is_used(guc))
		flags |= GUC_CTL_DISABLE_SCHEDULER;

	if (intel_guc_slpc_is_used(guc))
		flags |= GUC_CTL_ENABLE_SLPC;

	return flags;
}

static u32 guc_ctl_log_params_flags(struct intel_guc *guc)
{
	struct intel_guc_log *log = &guc->log;
	u32 offset, flags;

	GEM_BUG_ON(!log->sizes_initialised);

	offset = intel_guc_ggtt_offset(guc, log->vma) >> PAGE_SHIFT;

	flags = GUC_LOG_VALID |
		GUC_LOG_NOTIFY_ON_HALF_FULL |
		log->sizes[GUC_LOG_SECTIONS_DEBUG].flag |
		log->sizes[GUC_LOG_SECTIONS_CAPTURE].flag |
		(log->sizes[GUC_LOG_SECTIONS_CRASH].count << GUC_LOG_CRASH_SHIFT) |
		(log->sizes[GUC_LOG_SECTIONS_DEBUG].count << GUC_LOG_DEBUG_SHIFT) |
		(log->sizes[GUC_LOG_SECTIONS_CAPTURE].count << GUC_LOG_CAPTURE_SHIFT) |
		(offset << GUC_LOG_BUF_ADDR_SHIFT);

	return flags;
}

static u32 guc_ctl_ads_flags(struct intel_guc *guc)
{
	u32 ads = intel_guc_ggtt_offset(guc, guc->ads_vma) >> PAGE_SHIFT;
	u32 flags = ads << GUC_ADS_ADDR_SHIFT;

	return flags;
}

static u32 guc_ctl_wa_flags(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	u32 flags = 0;

	/* Wa_22012773006:gen11,gen12 < XeHP */
	if (GRAPHICS_VER(gt->i915) >= 11 &&
	    GRAPHICS_VER_FULL(gt->i915) < IP_VER(12, 55))
		flags |= GUC_WA_POLLCS;

	/* Wa_14014475959 */
	if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
	    IS_DG2(gt->i915))
		flags |= GUC_WA_HOLD_CCS_SWITCHOUT;

	/* Wa_16019325821 */
	/* Wa_14019159160 */
	if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74)))
		flags |= GUC_WA_RCS_CCS_SWITCHOUT;

	/*
	 * Wa_14012197797
	 * Wa_22011391025
	 *
	 * The same WA bit is used for both and 22011391025 is applicable to
	 * all DG2.
	 *
	 * Platforms post DG2 prevent this issue in hardware by stalling
	 * submissions. With this flag GuC will schedule so as to avoid such
	 * stalls.
	 */
	if (IS_DG2(gt->i915) ||
	    (CCS_MASK(gt) && GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 70)))
		flags |= GUC_WA_DUAL_QUEUE;

	/* Wa_22011802037: graphics version 11/12 */
	if (intel_engine_reset_needs_wa_22011802037(gt))
		flags |= GUC_WA_PRE_PARSER;

	/*
	 * Wa_22012727170
	 * Wa_22012727685
	 */
	if (IS_DG2_G11(gt->i915))
		flags |= GUC_WA_CONTEXT_ISOLATION;

	/*
	 * Wa_14018913170: Applicable to all platforms supported by i915 so
	 * don't bother testing for all X/Y/Z platforms explicitly.
	 */
	if (GUC_FIRMWARE_VER(guc) >= MAKE_GUC_VER(70, 7, 0))
		flags |= GUC_WA_ENABLE_TSC_CHECK_ON_RC6;

	return flags;
}

static u32 guc_ctl_devid(struct intel_guc *guc)
{
	struct drm_i915_private *i915 = guc_to_i915(guc);

	return (INTEL_DEVID(i915) << 16) | INTEL_REVID(i915);
}

/*
 * Initialise the GuC parameter block before starting the firmware
 * transfer. These parameters are read by the firmware on startup
 * and cannot be changed thereafter.
 */
static void guc_init_params(struct intel_guc *guc)
{
	u32 *params = guc->params;
	int i;

	BUILD_BUG_ON(sizeof(guc->params) != GUC_CTL_MAX_DWORDS * sizeof(u32));

	params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc);
	params[GUC_CTL_FEATURE] = guc_ctl_feature_flags(guc);
	params[GUC_CTL_DEBUG] = guc_ctl_debug_flags(guc);
	params[GUC_CTL_ADS] = guc_ctl_ads_flags(guc);
	params[GUC_CTL_WA] = guc_ctl_wa_flags(guc);
	params[GUC_CTL_DEVID] = guc_ctl_devid(guc);

	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
		guc_dbg(guc, "param[%2d] = %#x\n", i, params[i]);
}

/*
 * Write the GuC parameter block to the SOFT_SCRATCH registers before
 * starting the firmware transfer. These parameters are read by the
 * firmware on startup and cannot be changed thereafter.
 */
void intel_guc_write_params(struct intel_guc *guc)
{
	struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
	int i;

	/*
	 * All SOFT_SCRATCH registers are in FORCEWAKE_GT domain and
	 * they are power context saved so it's ok to release forcewake
	 * when we are done here and take it again at xfer time.
	 */
	intel_uncore_forcewake_get(uncore, FORCEWAKE_GT);

	intel_uncore_write(uncore, SOFT_SCRATCH(0), 0);

	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
		intel_uncore_write(uncore, SOFT_SCRATCH(1 + i), guc->params[i]);

	intel_uncore_forcewake_put(uncore, FORCEWAKE_GT);
}

void intel_guc_dump_time_info(struct intel_guc *guc, struct drm_printer *p)
{
	struct intel_gt *gt = guc_to_gt(guc);
	intel_wakeref_t wakeref;
	u32 stamp = 0;
	u64 ktime;

	with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref)
		stamp = intel_uncore_read(gt->uncore, GUCPMTIMESTAMP);
	ktime = ktime_get_boottime_ns();

	drm_printf(p, "Kernel timestamp: 0x%08llX [%llu]\n", ktime, ktime);
	drm_printf(p, "GuC timestamp: 0x%08X [%u]\n", stamp, stamp);
	drm_printf(p, "CS timestamp frequency: %u Hz, %u ns\n",
		   gt->clock_frequency, gt->clock_period_ns);
}

int intel_guc_init(struct intel_guc *guc)
{
	int ret;

	ret = intel_uc_fw_init(&guc->fw);
	if (ret)
		goto out;

	ret = intel_guc_log_create(&guc->log);
	if (ret)
		goto err_fw;

	ret = intel_guc_capture_init(guc);
	if (ret)
		goto err_log;

	ret = intel_guc_ads_create(guc);
	if (ret)
		goto err_capture;

	GEM_BUG_ON(!guc->ads_vma);

	ret = intel_guc_ct_init(&guc->ct);
	if (ret)
		goto err_ads;

	if (intel_guc_submission_is_used(guc)) {
		/*
		 * This is stuff we need to have available at fw load time
		 * if we are planning to enable submission later
		 */
		ret = intel_guc_submission_init(guc);
		if (ret)
			goto err_ct;
	}

	if (intel_guc_slpc_is_used(guc)) {
		ret = intel_guc_slpc_init(&guc->slpc);
		if (ret)
			goto err_submission;
	}

	/* now that everything is perma-pinned, initialize the parameters */
	guc_init_params(guc);

	intel_uc_fw_change_status(&guc->fw, INTEL_UC_FIRMWARE_LOADABLE);

	return 0;

err_submission:
	intel_guc_submission_fini(guc);
err_ct:
	intel_guc_ct_fini(&guc->ct);
err_ads:
	intel_guc_ads_destroy(guc);
err_capture:
	intel_guc_capture_destroy(guc);
err_log:
	intel_guc_log_destroy(&guc->log);
err_fw:
	intel_uc_fw_fini(&guc->fw);
out:
	intel_uc_fw_change_status(&guc->fw, INTEL_UC_FIRMWARE_INIT_FAIL);
	guc_probe_error(guc, "failed with %pe\n", ERR_PTR(ret));
	return ret;
}

void intel_guc_fini(struct intel_guc *guc)
{
	if (!intel_uc_fw_is_loadable(&guc->fw))
		return;

	flush_work(&guc->dead_guc_worker);

	if (intel_guc_slpc_is_used(guc))
		intel_guc_slpc_fini(&guc->slpc);

	if (intel_guc_submission_is_used(guc))
		intel_guc_submission_fini(guc);

	intel_guc_ct_fini(&guc->ct);

	intel_guc_ads_destroy(guc);
	intel_guc_capture_destroy(guc);
	intel_guc_log_destroy(&guc->log);
	intel_uc_fw_fini(&guc->fw);
}

/*
 * This function implements the MMIO based host to GuC interface.
 */
int intel_guc_send_mmio(struct intel_guc *guc, const u32 *request, u32 len,
			u32 *response_buf, u32 response_buf_size)
{
	struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
	u32 header;
	int i;
	int ret;

	GEM_BUG_ON(!len);
	GEM_BUG_ON(len > guc->send_regs.count);

	GEM_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, request[0]) != GUC_HXG_ORIGIN_HOST);
	GEM_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_TYPE, request[0]) != GUC_HXG_TYPE_REQUEST);

	mutex_lock(&guc->send_mutex);
	intel_uncore_forcewake_get(uncore, guc->send_regs.fw_domains);

retry:
	for (i = 0; i < len; i++)
		intel_uncore_write(uncore, guc_send_reg(guc, i), request[i]);

	intel_uncore_posting_read(uncore, guc_send_reg(guc, i - 1));

	intel_guc_notify(guc);

	/*
	 * No GuC command should ever take longer than 10ms.
	 * Fast commands should still complete in 10us.
	 */
	ret = __intel_wait_for_register_fw(uncore,
					   guc_send_reg(guc, 0),
					   GUC_HXG_MSG_0_ORIGIN,
					   FIELD_PREP(GUC_HXG_MSG_0_ORIGIN,
						      GUC_HXG_ORIGIN_GUC),
					   10, 10, &header);
	if (unlikely(ret)) {
timeout:
		guc_err(guc, "mmio request %#x: no reply %x\n",
			request[0], header);
		goto out;
	}

	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) == GUC_HXG_TYPE_NO_RESPONSE_BUSY) {
#define done ({ header = intel_uncore_read(uncore, guc_send_reg(guc, 0)); \
		FIELD_GET(GUC_HXG_MSG_0_ORIGIN, header) != GUC_HXG_ORIGIN_GUC || \
		FIELD_GET(GUC_HXG_MSG_0_TYPE, header) != GUC_HXG_TYPE_NO_RESPONSE_BUSY; })

		ret = wait_for(done, 1000);
		if (unlikely(ret))
			goto timeout;
		if (unlikely(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, header) !=
			     GUC_HXG_ORIGIN_GUC))
			goto proto;
#undef done
	}

	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) == GUC_HXG_TYPE_NO_RESPONSE_RETRY) {
		u32 reason = FIELD_GET(GUC_HXG_RETRY_MSG_0_REASON, header);

		guc_dbg(guc, "mmio request %#x: retrying, reason %u\n",
			request[0], reason);
		goto retry;
	}

	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) == GUC_HXG_TYPE_RESPONSE_FAILURE) {
		u32 hint = FIELD_GET(GUC_HXG_FAILURE_MSG_0_HINT, header);
		u32 error = FIELD_GET(GUC_HXG_FAILURE_MSG_0_ERROR, header);

		guc_err(guc, "mmio request %#x: failure %x/%u\n",
			request[0], error, hint);
		ret = -ENXIO;
		goto out;
	}

	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) != GUC_HXG_TYPE_RESPONSE_SUCCESS) {
proto:
		guc_err(guc, "mmio request %#x: unexpected reply %#x\n",
			request[0], header);
		ret = -EPROTO;
		goto out;
	}

	if (response_buf) {
		int count = min(response_buf_size, guc->send_regs.count);

		GEM_BUG_ON(!count);

		response_buf[0] = header;

		for (i = 1; i < count; i++)
			response_buf[i] = intel_uncore_read(uncore,
							    guc_send_reg(guc, i));

		/* Use number of copied dwords as our return value */
		ret = count;
	} else {
		/* Use data from the GuC response as our return value */
		ret = FIELD_GET(GUC_HXG_RESPONSE_MSG_0_DATA0, header);
	}

out:
	intel_uncore_forcewake_put(uncore, guc->send_regs.fw_domains);
	mutex_unlock(&guc->send_mutex);

	return ret;
}

int intel_guc_crash_process_msg(struct intel_guc *guc, u32 action)
{
	if (action == INTEL_GUC_ACTION_NOTIFY_CRASH_DUMP_POSTED)
		guc_err(guc, "Crash dump notification\n");
	else if (action == INTEL_GUC_ACTION_NOTIFY_EXCEPTION)
		guc_err(guc, "Exception notification\n");
	else
		guc_err(guc, "Unknown crash notification: 0x%04X\n", action);

	queue_work(system_unbound_wq, &guc->dead_guc_worker);

	return 0;
}

int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
				       const u32 *payload, u32 len)
{
	u32 msg;

	if (unlikely(!len))
		return -EPROTO;

	/* Make sure to handle only enabled messages */
	msg = payload[0] & guc->msg_enabled_mask;

	if (msg & INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED)
		guc_err(guc, "Received early crash dump notification!\n");
	if (msg & INTEL_GUC_RECV_MSG_EXCEPTION)
		guc_err(guc, "Received early exception notification!\n");

	if (msg & (INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED | INTEL_GUC_RECV_MSG_EXCEPTION))
		queue_work(system_unbound_wq, &guc->dead_guc_worker);

	return 0;
}

/**
 * intel_guc_auth_huc() - Send action to GuC to authenticate HuC ucode
 * @guc: intel_guc structure
 * @rsa_offset: rsa offset w.r.t ggtt base of huc vma
 *
 * Triggers a HuC firmware authentication request to the GuC via intel_guc_send
 * INTEL_GUC_ACTION_AUTHENTICATE_HUC interface. This function is invoked by
 * intel_huc_auth().
 *
 * Return:	non-zero code on error
 */
int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset)
{
	u32 action[] = {
		INTEL_GUC_ACTION_AUTHENTICATE_HUC,
		rsa_offset
	};

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

/**
 * intel_guc_suspend() - notify GuC entering suspend state
 * @guc:	the guc
 */
int intel_guc_suspend(struct intel_guc *guc)
{
	int ret;
	u32 action[] = {
		INTEL_GUC_ACTION_CLIENT_SOFT_RESET,
	};

	if (!intel_guc_is_ready(guc))
		return 0;

	if (intel_guc_submission_is_used(guc)) {
		flush_work(&guc->dead_guc_worker);

		/*
		 * This H2G MMIO command tears down the GuC in two steps. First it will
		 * generate a G2H CTB for every active context indicating a reset. In
		 * practice the i915 shouldn't ever get a G2H as suspend should only be
		 * called when the GPU is idle. Next, it tears down the CTBs and this
		 * H2G MMIO command completes.
		 *
		 * Don't abort on a failure code from the GuC. Keep going and do the
		 * clean up in sanitize() and re-initialisation on resume and hopefully
		 * the error here won't be problematic.
		 */
		ret = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
		if (ret)
			guc_err(guc, "suspend: RESET_CLIENT action failed with %pe\n",
				ERR_PTR(ret));
	}

	/* Signal that the GuC isn't running. */
	intel_guc_sanitize(guc);

	return 0;
}

/**
 * intel_guc_resume() - notify GuC resuming from suspend state
 * @guc:	the guc
 */
int intel_guc_resume(struct intel_guc *guc)
{
	/*
	 * NB: This function can still be called even if GuC submission is
	 * disabled, e.g. if GuC is enabled for HuC authentication only. Thus,
	 * if any code is later added here, it must support doing nothing
	 * if submission is disabled (as per intel_guc_suspend).
	 */
	return 0;
}

/**
 * DOC: GuC Memory Management
 *
 * GuC can't allocate any memory for its own usage, so all the allocations must
 * be handled by the host driver. GuC accesses the memory via the GGTT, with the
 * exception of the top and bottom parts of the 4GB address space, which are
 * instead re-mapped by the GuC HW to the memory location of the FW itself
 * (WOPCM) or other parts of the HW. The driver must take care not to place
 * objects that the GuC is going to access in these reserved ranges. The layout
 * of the GuC address space is shown below:
 *
 * ::
 *
 *     +===========> +====================+ <== FFFF_FFFF
 *     ^             |      Reserved      |
 *     |             +====================+ <== GUC_GGTT_TOP
 *     |             |                    |
 *     |             |        DRAM        |
 *    GuC            |                    |
 *  Address    +===> +====================+ <== GuC ggtt_pin_bias
 *   Space     ^     |                    |
 *     |       |     |                    |
 *     |      GuC    |        GuC         |
 *     |     WOPCM   |       WOPCM        |
 *     |      Size   |                    |
 *     |       |     |                    |
 *     v       v     |                    |
 *     +=======+===> +====================+ <== 0000_0000
 *
 * The lower part of GuC Address Space [0, ggtt_pin_bias) is mapped to GuC WOPCM
 * while upper part of GuC Address Space [ggtt_pin_bias, GUC_GGTT_TOP) is mapped
 * to DRAM. The value of the GuC ggtt_pin_bias is the GuC WOPCM size.
 */

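/*
 * The pin-bias rule above is enforced whenever a GGTT offset is handed to
 * the GuC; roughly, the intel_guc_ggtt_offset() helper in intel_guc.h does
 * the following (a sketch for illustration):
 *
 *	static inline u32 intel_guc_ggtt_offset(struct intel_guc *guc,
 *						struct i915_vma *vma)
 *	{
 *		u32 offset = i915_ggtt_offset(vma);
 *
 *		GEM_BUG_ON(offset < i915_ggtt_pin_bias(vma));
 *		GEM_BUG_ON(range_overflows_t(u64, offset, vma->size,
 *					     GUC_GGTT_TOP));
 *
 *		return offset;
 *	}
 */
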
/**
 * intel_guc_allocate_vma() - Allocate a GGTT VMA for GuC usage
 * @guc:	the guc
 * @size:	size of area to allocate (both virtual space and memory)
 *
 * This is a wrapper to create an object for use with the GuC. In order to
 * use it inside the GuC, an object needs to be pinned for its whole lifetime,
 * so we allocate both some backing storage and a range inside the Global GTT.
 * We must pin it in the GGTT somewhere other than [0, GUC ggtt_pin_bias)
 * because that range is reserved inside GuC.
 *
 * Return:	A i915_vma if successful, otherwise an ERR_PTR.
 */
struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
{
	struct intel_gt *gt = guc_to_gt(guc);
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	u64 flags;
	int ret;

	if (HAS_LMEM(gt->i915))
		obj = i915_gem_object_create_lmem(gt->i915, size,
						  I915_BO_ALLOC_CPU_CLEAR |
						  I915_BO_ALLOC_CONTIGUOUS |
						  I915_BO_ALLOC_PM_EARLY);
	else
		obj = i915_gem_object_create_shmem(gt->i915, size);

	if (IS_ERR(obj))
		return ERR_CAST(obj);

	/*
	 * Wa_22016122933: For Media version 13.0, all Media GT shared
	 * memory needs to be mapped as WC on CPU side and UC (PAT
	 * index 2) on GPU side.
	 */
	if (intel_gt_needs_wa_22016122933(gt))
		i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);

	vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
	if (IS_ERR(vma))
		goto err;

	flags = PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
	ret = i915_ggtt_pin(vma, NULL, 0, flags);
	if (ret) {
		vma = ERR_PTR(ret);
		goto err;
	}

	return i915_vma_make_unshrinkable(vma);

err:
	i915_gem_object_put(obj);
	return vma;
}

/**
 * intel_guc_allocate_and_map_vma() - Allocate and map VMA for GuC usage
 * @guc:	the guc
 * @size:	size of area to allocate (both virtual space and memory)
 * @out_vma:	return variable for the allocated vma pointer
 * @out_vaddr:	return variable for the obj mapping
 *
 * This wrapper calls intel_guc_allocate_vma() and then maps the allocated
 * object with I915_MAP_WB.
 *
 * Return:	0 if successful, a negative errno code otherwise.
 */
int intel_guc_allocate_and_map_vma(struct intel_guc *guc, u32 size,
				   struct i915_vma **out_vma, void **out_vaddr)
{
	struct i915_vma *vma;
	void *vaddr;

	vma = intel_guc_allocate_vma(guc, size);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	vaddr = i915_gem_object_pin_map_unlocked(vma->obj,
						 intel_gt_coherent_map_type(guc_to_gt(guc),
									    vma->obj, true));
	if (IS_ERR(vaddr)) {
		i915_vma_unpin_and_release(&vma, 0);
		return PTR_ERR(vaddr);
	}

	*out_vma = vma;
	*out_vaddr = vaddr;

	return 0;
}

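/*
 * Typical usage of the helper above (an illustrative sketch, not code from
 * this file): allocate a buffer, use the CPU mapping, then drop the pin and
 * the mapping in one call:
 *
 *	struct i915_vma *vma;
 *	void *vaddr;
 *	int err;
 *
 *	err = intel_guc_allocate_and_map_vma(guc, SZ_4K, &vma, &vaddr);
 *	if (err)
 *		return err;
 *
 *	memset(vaddr, 0, SZ_4K);
 *	...
 *	i915_vma_unpin_and_release(&vma, I915_VMA_RELEASE_MAP);
 */
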
static int __guc_action_self_cfg(struct intel_guc *guc, u16 key, u16 len, u64 value)
{
	u32 request[HOST2GUC_SELF_CFG_REQUEST_MSG_LEN] = {
		FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_ACTION_HOST2GUC_SELF_CFG),
		FIELD_PREP(HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_KEY, key) |
		FIELD_PREP(HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_LEN, len),
		FIELD_PREP(HOST2GUC_SELF_CFG_REQUEST_MSG_2_VALUE32, lower_32_bits(value)),
		FIELD_PREP(HOST2GUC_SELF_CFG_REQUEST_MSG_3_VALUE64, upper_32_bits(value)),
	};
	int ret;

	GEM_BUG_ON(len > 2);
	GEM_BUG_ON(len == 1 && upper_32_bits(value));

	/* Self config must go over MMIO */
	ret = intel_guc_send_mmio(guc, request, ARRAY_SIZE(request), NULL, 0);

	if (unlikely(ret < 0))
		return ret;
	if (unlikely(ret > 1))
		return -EPROTO;
	if (unlikely(!ret))
		return -ENOKEY;

	return 0;
}

static int __guc_self_cfg(struct intel_guc *guc, u16 key, u16 len, u64 value)
{
	int err = __guc_action_self_cfg(guc, key, len, value);

	if (unlikely(err))
		guc_probe_error(guc, "Unsuccessful self-config (%pe) key %#hx value %#llx\n",
				ERR_PTR(err), key, value);
	return err;
}

int intel_guc_self_cfg32(struct intel_guc *guc, u16 key, u32 value)
{
	return __guc_self_cfg(guc, key, 1, value);
}

int intel_guc_self_cfg64(struct intel_guc *guc, u16 key, u64 value)
{
	return __guc_self_cfg(guc, key, 2, value);
}

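/*
 * Example caller (an illustrative sketch; the actual KLV keys live in
 * abi/guc_klvs_abi.h): the CT code uses these helpers to tell the GuC where
 * its CT buffers are, along the lines of:
 *
 *	err = intel_guc_self_cfg64(guc, GUC_KLV_SELF_CFG_H2G_CTB_ADDR_KEY,
 *				   ctb_addr);
 *	if (err)
 *		return err;
 */
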
/**
 * intel_guc_load_status - dump information about GuC load status
 * @guc: the GuC
 * @p: the &drm_printer
 *
 * Pretty printer for GuC load status.
 */
void intel_guc_load_status(struct intel_guc *guc, struct drm_printer *p)
{
	struct intel_gt *gt = guc_to_gt(guc);
	struct intel_uncore *uncore = gt->uncore;
	intel_wakeref_t wakeref;

	if (!intel_guc_is_supported(guc)) {
		drm_printf(p, "GuC not supported\n");
		return;
	}

	if (!intel_guc_is_wanted(guc)) {
		drm_printf(p, "GuC disabled\n");
		return;
	}

	intel_uc_fw_dump(&guc->fw, p);

	with_intel_runtime_pm(uncore->rpm, wakeref) {
		u32 status = intel_uncore_read(uncore, GUC_STATUS);
		u32 i;

		drm_printf(p, "GuC status 0x%08x:\n", status);
		drm_printf(p, "\tBootrom status = 0x%x\n",
			   (status & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
		drm_printf(p, "\tuKernel status = 0x%x\n",
			   (status & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
		drm_printf(p, "\tMIA Core status = 0x%x\n",
			   (status & GS_MIA_MASK) >> GS_MIA_SHIFT);
		drm_puts(p, "Scratch registers:\n");
		for (i = 0; i < 16; i++) {
			drm_printf(p, "\t%2d: \t0x%x\n",
				   i, intel_uncore_read(uncore, SOFT_SCRATCH(i)));
		}
	}
}

void intel_guc_write_barrier(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	if (i915_gem_object_is_lmem(guc->ct.vma->obj)) {
		/*
		 * Ensure intel_uncore_write_fw can be used rather than
		 * intel_uncore_write.
		 */
		GEM_BUG_ON(guc->send_regs.fw_domains);

		/*
		 * This register is used by the i915 and GuC for MMIO based
		 * communication. Once we are in this code CTBs are the only
		 * method the i915 uses to communicate with the GuC so it is
		 * safe to write to this register (a value of 0 is NOP for MMIO
		 * communication). If we ever start mixing CTBs and MMIOs a new
		 * register will have to be chosen. This function is also used
		 * to enforce ordering of a work queue item write and an update
		 * to the process descriptor. When a work queue is being used,
		 * CTBs are also the only mechanism of communication.
		 */
		intel_uncore_write_fw(gt->uncore, GEN11_SOFT_SCRATCH(0), 0);
	} else {
		/* wmb() sufficient for a barrier if in smem */
		wmb();
	}
}