// SPDX-License-Identifier: MIT
/*
 * Copyright © 2016-2019 Intel Corporation
 */

#include <linux/string_helpers.h>

#include "gt/intel_gt.h"
#include "gt/intel_gt_print.h"
#include "gt/intel_reset.h"
#include "intel_gsc_fw.h"
#include "intel_gsc_uc.h"
#include "intel_guc.h"
#include "intel_guc_ads.h"
#include "intel_guc_print.h"
#include "intel_guc_submission.h"
#include "gt/intel_rps.h"
#include "intel_uc.h"

#include "i915_drv.h"
#include "i915_hwmon.h"

static const struct intel_uc_ops uc_ops_off;
static const struct intel_uc_ops uc_ops_on;

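/*
 * Pick a default when the enable_guc modparam is left at -1 (auto): no
 * GuC/HuC before Gen12 or on the early Gen12 platforms, HuC loading only on
 * ADL-S (excluding RPL-S), and HuC loading plus GuC submission everywhere
 * else. An explicit user setting is left untouched.
 */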
static void uc_expand_default_options(struct intel_uc *uc)
{
	struct drm_i915_private *i915 = uc_to_gt(uc)->i915;

	if (i915->params.enable_guc != -1)
		return;

	/* Don't enable GuC/HuC on pre-Gen12 */
	if (GRAPHICS_VER(i915) < 12) {
		i915->params.enable_guc = 0;
		return;
	}

	/* Don't enable GuC/HuC on older Gen12 platforms */
	if (IS_TIGERLAKE(i915) || IS_ROCKETLAKE(i915)) {
		i915->params.enable_guc = 0;
		return;
	}

	/* Intermediate platforms are HuC authentication only */
	if (IS_ALDERLAKE_S(i915) && !IS_RAPTORLAKE_S(i915)) {
		i915->params.enable_guc = ENABLE_GUC_LOAD_HUC;
		return;
	}

	/* Default: enable HuC authentication and GuC submission */
	i915->params.enable_guc = ENABLE_GUC_LOAD_HUC | ENABLE_GUC_SUBMISSION;
}

/*
 * Reset GuC, providing us with fresh state for both GuC and HuC.
 */
static int __intel_uc_reset_hw(struct intel_uc *uc)
{
	struct intel_gt *gt = uc_to_gt(uc);
	int ret;
	u32 guc_status;

	ret = i915_inject_probe_error(gt->i915, -ENXIO);
	if (ret)
		return ret;

	ret = intel_reset_guc(gt);
	if (ret) {
		gt_err(gt, "Failed to reset GuC, ret = %d\n", ret);
		return ret;
	}

	guc_status = intel_uncore_read(gt->uncore, GUC_STATUS);
	gt_WARN(gt, !(guc_status & GS_MIA_IN_RESET),
		"GuC status: 0x%x, MIA core expected to be in reset\n",
		guc_status);

	return ret;
}

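/*
 * Cross-check the expanded enable_guc value against what the platform can
 * actually do. Mismatches are logged but deliberately non-fatal: the
 * wants/supports helpers have already masked off anything unusable, so an
 * incompatible request is simply ignored.
 */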
static void __confirm_options(struct intel_uc *uc)
{
	struct intel_gt *gt = uc_to_gt(uc);
	struct drm_i915_private *i915 = gt->i915;

	gt_dbg(gt, "enable_guc=%d (guc:%s submission:%s huc:%s slpc:%s)\n",
	       i915->params.enable_guc,
	       str_yes_no(intel_uc_wants_guc(uc)),
	       str_yes_no(intel_uc_wants_guc_submission(uc)),
	       str_yes_no(intel_uc_wants_huc(uc)),
	       str_yes_no(intel_uc_wants_guc_slpc(uc)));

	if (i915->params.enable_guc == 0) {
		GEM_BUG_ON(intel_uc_wants_guc(uc));
		GEM_BUG_ON(intel_uc_wants_guc_submission(uc));
		GEM_BUG_ON(intel_uc_wants_huc(uc));
		GEM_BUG_ON(intel_uc_wants_guc_slpc(uc));
		return;
	}

	if (!intel_uc_supports_guc(uc))
		gt_info(gt, "Incompatible option enable_guc=%d - %s\n",
			i915->params.enable_guc, "GuC is not supported!");

	if (i915->params.enable_guc & ENABLE_GUC_SUBMISSION &&
	    !intel_uc_supports_guc_submission(uc))
		gt_info(gt, "Incompatible option enable_guc=%d - %s\n",
			i915->params.enable_guc, "GuC submission is N/A");

	if (i915->params.enable_guc & ~ENABLE_GUC_MASK)
		gt_info(gt, "Incompatible option enable_guc=%d - %s\n",
			i915->params.enable_guc, "undocumented flag");
}

void intel_uc_init_early(struct intel_uc *uc)
{
	uc_expand_default_options(uc);

	intel_guc_init_early(&uc->guc);
	intel_huc_init_early(&uc->huc);
	intel_gsc_uc_init_early(&uc->gsc);

	__confirm_options(uc);

	if (intel_uc_wants_guc(uc))
		uc->ops = &uc_ops_on;
	else
		uc->ops = &uc_ops_off;
}

void intel_uc_init_late(struct intel_uc *uc)
{
	intel_guc_init_late(&uc->guc);
	intel_gsc_uc_load_start(&uc->gsc);
}

void intel_uc_driver_late_release(struct intel_uc *uc)
{
	intel_huc_fini_late(&uc->huc);
}

/**
 * intel_uc_init_mmio - set up uC MMIO access
 * @uc: the intel_uc structure
 *
 * Set up the minimal state necessary for MMIO accesses later in the
 * initialization sequence.
 */
void intel_uc_init_mmio(struct intel_uc *uc)
{
	intel_guc_init_send_regs(&uc->guc);
}

static void __uc_capture_load_err_log(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;

	if (guc->log.vma && !uc->load_err_log)
		uc->load_err_log = i915_gem_object_get(guc->log.vma->obj);
}

static void __uc_free_load_err_log(struct intel_uc *uc)
{
	struct drm_i915_gem_object *log = fetch_and_zero(&uc->load_err_log);

	if (log)
		i915_gem_object_put(log);
}

void intel_uc_driver_remove(struct intel_uc *uc)
{
	intel_uc_fini_hw(uc);
	intel_uc_fini(uc);
	__uc_free_load_err_log(uc);
}

/*
 * Events triggered while CT buffers are disabled are logged in the SCRATCH_15
 * register using the same bits used in the CT message payload. Since our
 * communication channel with GuC is turned off at this point, we can save the
 * message and handle it after we turn it back on.
 */
static void guc_clear_mmio_msg(struct intel_guc *guc)
{
	intel_uncore_write(guc_to_gt(guc)->uncore, SOFT_SCRATCH(15), 0);
}

static void guc_get_mmio_msg(struct intel_guc *guc)
{
	u32 val;

	spin_lock_irq(&guc->irq_lock);

	val = intel_uncore_read(guc_to_gt(guc)->uncore, SOFT_SCRATCH(15));
	guc->mmio_msg |= val & guc->msg_enabled_mask;

	/*
	 * Clear all events, including the ones we're not currently servicing,
	 * to make sure we don't try to process a stale message if we enable
	 * handling of more events later.
	 */
	guc_clear_mmio_msg(guc);

	spin_unlock_irq(&guc->irq_lock);
}

static void guc_handle_mmio_msg(struct intel_guc *guc)
{
	/* we need communication to be enabled to reply to GuC */
	GEM_BUG_ON(!intel_guc_ct_enabled(&guc->ct));

	spin_lock_irq(&guc->irq_lock);
	if (guc->mmio_msg) {
		intel_guc_to_host_process_recv_msg(guc, &guc->mmio_msg, 1);
		guc->mmio_msg = 0;
	}
	spin_unlock_irq(&guc->irq_lock);
}

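/*
 * Bring up the GuC communication channels in order: CT buffers first, then a
 * drain of any MMIO-logged messages that raced with the CT enable, and only
 * then interrupts, followed by a manual poll of the CT for anything that
 * arrived before the interrupt was unmasked.
 */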
static int guc_enable_communication(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	struct drm_i915_private *i915 = gt->i915;
	int ret;

	GEM_BUG_ON(intel_guc_ct_enabled(&guc->ct));

	ret = i915_inject_probe_error(i915, -ENXIO);
	if (ret)
		return ret;

	ret = intel_guc_ct_enable(&guc->ct);
	if (ret)
		return ret;

	/* check for mmio messages received before/during the CT enable */
	guc_get_mmio_msg(guc);
	guc_handle_mmio_msg(guc);

	intel_guc_enable_interrupts(guc);

	/* check for CT messages received before we enabled interrupts */
	spin_lock_irq(gt->irq_lock);
	intel_guc_ct_event_handler(&guc->ct);
	spin_unlock_irq(gt->irq_lock);

	guc_dbg(guc, "communication enabled\n");

	return 0;
}

static void guc_disable_communication(struct intel_guc *guc)
{
	/*
	 * Events generated during or after CT disable are logged by GuC via
	 * MMIO. Make sure the register is clear before disabling CT since
	 * all events we cared about have already been processed via CT.
	 */
	guc_clear_mmio_msg(guc);

	intel_guc_disable_interrupts(guc);

	intel_guc_ct_disable(&guc->ct);

	/*
	 * Check for messages received during/after the CT disable. We do not
	 * expect any messages to have arrived via CT between the interrupt
	 * disable and the CT disable because GuC should've been idle until we
	 * triggered the CT disable protocol.
	 */
	guc_get_mmio_msg(guc);

	guc_dbg(guc, "communication disabled\n");
}

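/*
 * Fetch order matters: GuC firmware comes first because HuC and GSC are only
 * usable behind a working GuC, so a GuC fetch failure moves their firmware
 * state straight to ERROR instead of leaving it in the transient SELECTED
 * state.
 */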
static void __uc_fetch_firmwares(struct intel_uc *uc)
{
	struct intel_gt *gt = uc_to_gt(uc);
	int err;

	GEM_BUG_ON(!intel_uc_wants_guc(uc));

	err = intel_uc_fw_fetch(&uc->guc.fw);
	if (err) {
		/* Make sure we transition out of transient "SELECTED" state */
		if (intel_uc_wants_huc(uc)) {
			gt_dbg(gt, "Failed to fetch GuC fw (%pe) disabling HuC\n", ERR_PTR(err));
			intel_uc_fw_change_status(&uc->huc.fw,
						  INTEL_UC_FIRMWARE_ERROR);
		}

		if (intel_uc_wants_gsc_uc(uc)) {
			gt_dbg(gt, "Failed to fetch GuC fw (%pe) disabling GSC\n", ERR_PTR(err));
			intel_uc_fw_change_status(&uc->gsc.fw,
						  INTEL_UC_FIRMWARE_ERROR);
		}

		return;
	}

	if (intel_uc_wants_huc(uc))
		intel_uc_fw_fetch(&uc->huc.fw);

	if (intel_uc_wants_gsc_uc(uc))
		intel_uc_fw_fetch(&uc->gsc.fw);
}

static void __uc_cleanup_firmwares(struct intel_uc *uc)
{
	intel_uc_fw_cleanup_fetch(&uc->gsc.fw);
	intel_uc_fw_cleanup_fetch(&uc->huc.fw);
	intel_uc_fw_cleanup_fetch(&uc->guc.fw);
}

static int __uc_init(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;
	struct intel_huc *huc = &uc->huc;
	int ret;

	GEM_BUG_ON(!intel_uc_wants_guc(uc));

	if (!intel_uc_uses_guc(uc))
		return 0;

	if (i915_inject_probe_failure(uc_to_gt(uc)->i915))
		return -ENOMEM;

	ret = intel_guc_init(guc);
	if (ret)
		return ret;

	if (intel_uc_uses_huc(uc))
		intel_huc_init(huc);

	if (intel_uc_uses_gsc_uc(uc))
		intel_gsc_uc_init(&uc->gsc);

	return 0;
}

static void __uc_fini(struct intel_uc *uc)
{
	intel_gsc_uc_fini(&uc->gsc);
	intel_huc_fini(&uc->huc);
	intel_guc_fini(&uc->guc);
}

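/*
 * Drop the software state for both uCs and reset the GuC hardware, so that
 * any subsequent (re)load starts from a known-clean microcontroller state.
 */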
static int __uc_sanitize(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;
	struct intel_huc *huc = &uc->huc;

	GEM_BUG_ON(!intel_uc_supports_guc(uc));

	intel_huc_sanitize(huc);
	intel_guc_sanitize(guc);

	return __intel_uc_reset_hw(uc);
}

/* Initialize and verify the uC regs related to uC positioning in WOPCM */
static int uc_init_wopcm(struct intel_uc *uc)
{
	struct intel_gt *gt = uc_to_gt(uc);
	struct intel_uncore *uncore = gt->uncore;
	u32 base = intel_wopcm_guc_base(&gt->wopcm);
	u32 size = intel_wopcm_guc_size(&gt->wopcm);
	u32 huc_agent = intel_uc_uses_huc(uc) ? HUC_LOADING_AGENT_GUC : 0;
	u32 mask;
	int err;

	if (unlikely(!base || !size)) {
		gt_probe_error(gt, "Unsuccessful WOPCM partitioning\n");
		return -E2BIG;
	}

	GEM_BUG_ON(!intel_uc_supports_guc(uc));
	GEM_BUG_ON(!(base & GUC_WOPCM_OFFSET_MASK));
	GEM_BUG_ON(base & ~GUC_WOPCM_OFFSET_MASK);
	GEM_BUG_ON(!(size & GUC_WOPCM_SIZE_MASK));
	GEM_BUG_ON(size & ~GUC_WOPCM_SIZE_MASK);

	err = i915_inject_probe_error(gt->i915, -ENXIO);
	if (err)
		return err;

	mask = GUC_WOPCM_SIZE_MASK | GUC_WOPCM_SIZE_LOCKED;
	err = intel_uncore_write_and_verify(uncore, GUC_WOPCM_SIZE, size, mask,
					    size | GUC_WOPCM_SIZE_LOCKED);
	if (err)
		goto err_out;

	mask = GUC_WOPCM_OFFSET_MASK | GUC_WOPCM_OFFSET_VALID | huc_agent;
	err = intel_uncore_write_and_verify(uncore, DMA_GUC_WOPCM_OFFSET,
					    base | huc_agent, mask,
					    base | huc_agent |
					    GUC_WOPCM_OFFSET_VALID);
	if (err)
		goto err_out;

	return 0;

err_out:
	gt_probe_error(gt, "Failed to init uC WOPCM registers!\n");
	gt_probe_error(gt, "%s(%#x)=%#x\n", "DMA_GUC_WOPCM_OFFSET",
		       i915_mmio_reg_offset(DMA_GUC_WOPCM_OFFSET),
		       intel_uncore_read(uncore, DMA_GUC_WOPCM_OFFSET));
	gt_probe_error(gt, "%s(%#x)=%#x\n", "GUC_WOPCM_SIZE",
		       i915_mmio_reg_offset(GUC_WOPCM_SIZE),
		       intel_uncore_read(uncore, GUC_WOPCM_SIZE));

	return err;
}

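/*
 * The WOPCM registers can only be written once after reset: seeing the size
 * locked or the offset marked valid therefore means a GuC has already been
 * loaded on this GT since the last reboot (see __uc_check_hw()).
 */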
static bool uc_is_wopcm_locked(struct intel_uc *uc)
{
	struct intel_gt *gt = uc_to_gt(uc);
	struct intel_uncore *uncore = gt->uncore;

	return (intel_uncore_read(uncore, GUC_WOPCM_SIZE) & GUC_WOPCM_SIZE_LOCKED) ||
	       (intel_uncore_read(uncore, DMA_GUC_WOPCM_OFFSET) & GUC_WOPCM_OFFSET_VALID);
}

static int __uc_check_hw(struct intel_uc *uc)
{
	if (uc->fw_table_invalid)
		return -EIO;

	if (!intel_uc_supports_guc(uc))
		return 0;

	/*
	 * We can silently continue without GuC only if it was never enabled
	 * before on this system after reboot, otherwise we risk GPU hangs.
	 * To check if GuC was ever loaded, we look at the WOPCM registers.
	 */
	if (uc_is_wopcm_locked(uc))
		return -EIO;

	return 0;
}

static void print_fw_ver(struct intel_gt *gt, struct intel_uc_fw *fw)
{
	gt_info(gt, "%s firmware %s version %u.%u.%u\n",
		intel_uc_fw_type_repr(fw->type), fw->file_selected.path,
		fw->file_selected.ver.major,
		fw->file_selected.ver.minor,
		fw->file_selected.ver.patch);
}

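/*
 * Main uC load sequence: partition WOPCM, lift a possibly-low PL1 power cap
 * and raise the GT frequency so the firmware upload doesn't run at a
 * throttled clock, then upload HuC and GuC, retrying with a fresh GuC reset
 * on Gen9 per the listed workarounds. Once GuC is running, communication,
 * HuC authentication, submission and SLPC are brought up on top of it.
 */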
static int __uc_init_hw(struct intel_uc *uc)
{
	struct intel_gt *gt = uc_to_gt(uc);
	struct drm_i915_private *i915 = gt->i915;
	struct intel_guc *guc = &uc->guc;
	struct intel_huc *huc = &uc->huc;
	int ret, attempts;
	bool pl1en = false;

	GEM_BUG_ON(!intel_uc_supports_guc(uc));
	GEM_BUG_ON(!intel_uc_wants_guc(uc));

	print_fw_ver(gt, &guc->fw);

	if (intel_uc_uses_huc(uc))
		print_fw_ver(gt, &huc->fw);

	if (!intel_uc_fw_is_loadable(&guc->fw)) {
		ret = __uc_check_hw(uc) ||
		      intel_uc_fw_is_overridden(&guc->fw) ||
		      intel_uc_wants_guc_submission(uc) ?
		      intel_uc_fw_status_to_error(guc->fw.status) : 0;
		goto err_out;
	}

	ret = uc_init_wopcm(uc);
	if (ret)
		goto err_out;

	intel_guc_reset_interrupts(guc);

	/* WaEnableuKernelHeaderValidFix:skl */
	/* WaEnableGuCBootHashCheckNotSet:skl,bxt,kbl */
	if (GRAPHICS_VER(i915) == 9)
		attempts = 3;
	else
		attempts = 1;

	/* Disable a potentially low PL1 power limit to allow freq to be raised */
	i915_hwmon_power_max_disable(gt->i915, &pl1en);

	intel_rps_raise_unslice(&uc_to_gt(uc)->rps);

	while (attempts--) {
		/*
		 * Always reset the GuC just before (re)loading, so
		 * that the state and timing are fairly predictable
		 */
		ret = __uc_sanitize(uc);
		if (ret)
			goto err_rps;

		intel_huc_fw_upload(huc);
		intel_guc_ads_reset(guc);
		intel_guc_write_params(guc);
		ret = intel_guc_fw_upload(guc);
		if (ret == 0)
			break;

		gt_dbg(gt, "GuC fw load failed (%pe) will reset and retry %d more time(s)\n",
		       ERR_PTR(ret), attempts);
	}

	/* Did we succeed or run out of retries? */
	if (ret)
		goto err_log_capture;

	ret = guc_enable_communication(guc);
	if (ret)
		goto err_log_capture;

	/*
	 * GSC-loaded HuC is authenticated by the GSC, so we don't need to
	 * trigger the auth here. However, given that a HuC loaded this way
	 * survives GT reset, we still need to update our SW bookkeeping to
	 * make sure it reflects the correct HW status.
	 */
	if (intel_huc_is_loaded_by_gsc(huc))
		intel_huc_update_auth_status(huc);
	else
		intel_huc_auth(huc, INTEL_HUC_AUTH_BY_GUC);

	if (intel_uc_uses_guc_submission(uc)) {
		ret = intel_guc_submission_enable(guc);
		if (ret)
			goto err_log_capture;
	}

	if (intel_uc_uses_guc_slpc(uc)) {
		ret = intel_guc_slpc_enable(&guc->slpc);
		if (ret)
			goto err_submission;
	} else {
		/* Restore GT back to RPn for non-SLPC path */
		intel_rps_lower_unslice(&uc_to_gt(uc)->rps);
	}

	i915_hwmon_power_max_restore(gt->i915, pl1en);

	guc_info(guc, "submission %s\n", str_enabled_disabled(intel_uc_uses_guc_submission(uc)));
	guc_info(guc, "SLPC %s\n", str_enabled_disabled(intel_uc_uses_guc_slpc(uc)));

	return 0;

	/*
	 * We've failed to load the firmware :(
	 */
err_submission:
	intel_guc_submission_disable(guc);
err_log_capture:
	__uc_capture_load_err_log(uc);
err_rps:
	/* Return GT back to RPn */
	intel_rps_lower_unslice(&uc_to_gt(uc)->rps);

	i915_hwmon_power_max_restore(gt->i915, pl1en);
err_out:
	__uc_sanitize(uc);

	if (!ret) {
		gt_notice(gt, "GuC is uninitialized\n");
		/* We want to run without GuC submission */
		return 0;
	}

	gt_probe_error(gt, "GuC initialization failed %pe\n", ERR_PTR(ret));

	/* We want to keep KMS alive */
	return -EIO;
}

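/* Disable submission first, then sanitize both uCs back to a reset state */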
static void __uc_fini_hw(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;

	if (!intel_guc_is_fw_running(guc))
		return;

	if (intel_uc_uses_guc_submission(uc))
		intel_guc_submission_disable(guc);

	__uc_sanitize(uc);
}

/**
 * intel_uc_reset_prepare - Prepare for reset
 * @uc: the intel_uc structure
 *
 * Prepare for a full GPU reset.
 */
void intel_uc_reset_prepare(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;

	uc->reset_in_progress = true;

	/* Nothing to do if GuC isn't supported */
	if (!intel_uc_supports_guc(uc))
		return;

	/* Firmware expected to be running when this function is called */
	if (!intel_guc_is_ready(guc))
		goto sanitize;

	if (intel_uc_uses_guc_submission(uc))
		intel_guc_submission_reset_prepare(guc);

sanitize:
	__uc_sanitize(uc);
}

void intel_uc_reset(struct intel_uc *uc, intel_engine_mask_t stalled)
{
	struct intel_guc *guc = &uc->guc;

	/* Firmware cannot be running when this function is called */
	if (intel_uc_uses_guc_submission(uc))
		intel_guc_submission_reset(guc, stalled);
}

void intel_uc_reset_finish(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;

	/*
	 * NB: The wedge code path results in prepare -> prepare -> finish -> finish.
	 * So this function is sometimes called with the in-progress flag not set.
	 */
	uc->reset_in_progress = false;

	/* Firmware expected to be running when this function is called */
	if (intel_uc_uses_guc_submission(uc))
		intel_guc_submission_reset_finish(guc);
}

void intel_uc_cancel_requests(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;

	/* Firmware cannot be running when this function is called */
	if (intel_uc_uses_guc_submission(uc))
		intel_guc_submission_cancel_requests(guc);
}

void intel_uc_runtime_suspend(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;

	if (!intel_guc_is_ready(guc)) {
		guc->interrupts.enabled = false;
		return;
	}

	/*
	 * Wait for any outstanding CTB before tearing down communication with
	 * the GuC.
	 */
#define OUTSTANDING_CTB_TIMEOUT_PERIOD	(HZ / 5)
	intel_guc_wait_for_pending_msg(guc, &guc->outstanding_submission_g2h,
				       false, OUTSTANDING_CTB_TIMEOUT_PERIOD);
	GEM_WARN_ON(atomic_read(&guc->outstanding_submission_g2h));

	guc_disable_communication(guc);
}

void intel_uc_suspend(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;
	intel_wakeref_t wakeref;
	int err;

	/* flush the GSC worker */
	intel_gsc_uc_flush_work(&uc->gsc);

	wake_up_all_tlb_invalidate(guc);

	if (!intel_guc_is_ready(guc)) {
		guc->interrupts.enabled = false;
		return;
	}

	intel_guc_submission_flush_work(guc);

	with_intel_runtime_pm(&uc_to_gt(uc)->i915->runtime_pm, wakeref) {
		err = intel_guc_suspend(guc);
		if (err)
			guc_dbg(guc, "Failed to suspend, %pe", ERR_PTR(err));
	}
}

static void __uc_resume_mappings(struct intel_uc *uc)
{
	intel_uc_fw_resume_mapping(&uc->guc.fw);
	intel_uc_fw_resume_mapping(&uc->huc.fw);
}

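/*
 * Common resume path. @enable_communication distinguishes runtime resume,
 * where CT was torn down and must be re-enabled here, from S3/S4 resume,
 * where the full re-init has already brought communication back up.
 */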
static int __uc_resume(struct intel_uc *uc, bool enable_communication)
{
	struct intel_guc *guc = &uc->guc;
	struct intel_gt *gt = guc_to_gt(guc);
	int err;

	if (!intel_guc_is_fw_running(guc))
		return 0;

	/* Make sure we enable communication if and only if it's disabled */
	GEM_BUG_ON(enable_communication == intel_guc_ct_enabled(&guc->ct));

	if (enable_communication)
		guc_enable_communication(guc);

	/*
	 * If we are only resuming GuC communication but not reloading
	 * GuC, we need to ensure the ARAT timer interrupt is enabled
	 * again. In case of GuC reload, it is enabled during SLPC enable.
	 */
	if (enable_communication && intel_uc_uses_guc_slpc(uc))
		intel_guc_pm_intrmsk_enable(gt);

	err = intel_guc_resume(guc);
	if (err) {
		guc_dbg(guc, "Failed to resume, %pe", ERR_PTR(err));
		return err;
	}

	intel_gsc_uc_resume(&uc->gsc);

	if (intel_guc_tlb_invalidation_is_available(guc)) {
		intel_guc_invalidate_tlb_engines(guc);
		intel_guc_invalidate_tlb_guc(guc);
	}

	return 0;
}

int intel_uc_resume(struct intel_uc *uc)
{
	/*
	 * When coming out of S3/S4 we sanitize and re-init the HW, so
	 * communication is already re-enabled at this point.
	 */
	return __uc_resume(uc, false);
}

int intel_uc_runtime_resume(struct intel_uc *uc)
{
	/*
	 * During runtime resume we don't sanitize, so we need to re-init
	 * communication as well.
	 */
	return __uc_resume(uc, true);
}

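/*
 * One of these two ops tables is selected in intel_uc_init_early(). Even
 * uc_ops_off needs an init_hw hook (__uc_check_hw) to refuse running on
 * hardware where a previously loaded GuC was left behind, plus fini to undo
 * the early initialization.
 */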
static const struct intel_uc_ops uc_ops_off = {
	.init_hw = __uc_check_hw,
	.fini = __uc_fini, /* to clean up the init_early initialization */
};

static const struct intel_uc_ops uc_ops_on = {
	.sanitize = __uc_sanitize,

	.init_fw = __uc_fetch_firmwares,
	.fini_fw = __uc_cleanup_firmwares,

	.init = __uc_init,
	.fini = __uc_fini,

	.init_hw = __uc_init_hw,
	.fini_hw = __uc_fini_hw,

	.resume_mappings = __uc_resume_mappings,
};