// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "gem/i915_gem_lmem.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_print.h"
#include "gt/intel_ring.h"
#include "intel_gsc_binary_headers.h"
#include "intel_gsc_fw.h"
#include "intel_gsc_uc_heci_cmd_submit.h"
#include "i915_reg.h"

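/*
 * The GSC FW state is reported in the HECI1 FWSTS1 register; the FW must be
 * in the RESET state before a new load can be started.
 */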
static bool gsc_is_in_reset(struct intel_uncore *uncore)
{
	u32 fw_status = intel_uncore_read(uncore, HECI_FWSTS(MTL_GSC_HECI1_BASE, 1));

	return REG_FIELD_GET(HECI1_FWSTS1_CURRENT_STATE, fw_status) ==
	       HECI1_FWSTS1_CURRENT_STATE_RESET;
}

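/* Read the HECI1 FWSTS1 register, optionally taking a runtime PM wakeref around the access */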
static u32 gsc_uc_get_fw_status(struct intel_uncore *uncore, bool needs_wakeref)
{
	intel_wakeref_t wakeref;
	u32 fw_status = 0;

	if (needs_wakeref)
		wakeref = intel_runtime_pm_get(uncore->rpm);

	fw_status = intel_uncore_read(uncore, HECI_FWSTS(MTL_GSC_HECI1_BASE, 1));

	if (needs_wakeref)
		intel_runtime_pm_put(uncore->rpm, wakeref);
	return fw_status;
}

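/*
 * Proxy init is complete once the FWSTS1 current-state field reports
 * PROXY_STATE_NORMAL; only then is the GSC FW fully operational.
 */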
bool intel_gsc_uc_fw_proxy_init_done(struct intel_gsc_uc *gsc, bool needs_wakeref)
{
	return REG_FIELD_GET(HECI1_FWSTS1_CURRENT_STATE,
			     gsc_uc_get_fw_status(gsc_uc_to_gt(gsc)->uncore,
						  needs_wakeref)) ==
	       HECI1_FWSTS1_PROXY_STATE_NORMAL;
}

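/*
 * Map the current GSC FW/proxy state to an errno: -ENODEV if proxy support or
 * a loadable blob is missing, -ENOLINK if the FW load failed, -EAGAIN if the
 * proxy init hasn't completed yet, 0 once everything is up.
 */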
int intel_gsc_uc_fw_proxy_get_status(struct intel_gsc_uc *gsc)
{
	if (!(IS_ENABLED(CONFIG_INTEL_MEI_GSC_PROXY)))
		return -ENODEV;
	if (!intel_uc_fw_is_loadable(&gsc->fw))
		return -ENODEV;
	if (__intel_uc_fw_status(&gsc->fw) == INTEL_UC_FIRMWARE_LOAD_FAIL)
		return -ENOLINK;
	if (!intel_gsc_uc_fw_proxy_init_done(gsc, true))
		return -EAGAIN;

	return 0;
}

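/* GSC FW init is complete once the INIT_COMPLETE bit is set in FWSTS1 */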
bool intel_gsc_uc_fw_init_done(struct intel_gsc_uc *gsc)
{
	return gsc_uc_get_fw_status(gsc_uc_to_gt(gsc)->uncore, false) &
	       HECI1_FWSTS1_INIT_COMPLETE;
}

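/* Only part of the CPD entry offset field encodes the offset itself, hence the masking */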
static inline u32 cpd_entry_offset(const struct intel_gsc_cpd_entry *entry)
{
	return entry->offset & INTEL_GSC_CPD_ENTRY_OFFSET_MASK;
}

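/*
 * Walk the GSC binary layout to locate the RBEP manifest and extract the FW
 * release and security versions from it (see the diagram below), then check
 * them against the minimum versions required on MTL/ARL.
 */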
int intel_gsc_fw_get_binary_info(struct intel_uc_fw *gsc_fw, const void *data, size_t size)
{
	struct intel_gsc_uc *gsc = container_of(gsc_fw, struct intel_gsc_uc, fw);
	struct intel_gt *gt = gsc_uc_to_gt(gsc);
	const struct intel_gsc_layout_pointers *layout = data;
	const struct intel_gsc_bpdt_header *bpdt_header = NULL;
	const struct intel_gsc_bpdt_entry *bpdt_entry = NULL;
	const struct intel_gsc_cpd_header_v2 *cpd_header = NULL;
	const struct intel_gsc_cpd_entry *cpd_entry = NULL;
	const struct intel_gsc_manifest_header *manifest;
	struct intel_uc_fw_ver min_ver = { 0 };
	size_t min_size = sizeof(*layout);
	int i;

	if (size < min_size) {
		gt_err(gt, "GSC FW too small! %zu < %zu\n", size, min_size);
		return -ENODATA;
	}

	/*
	 * The GSC binary starts with the pointer layout, which contains the
	 * locations of the various partitions of the binary. The one we're
	 * interested in to get the version is the boot1 partition, where we can
	 * find a BPDT header followed by entries, one of which points to the
	 * RBE sub-section of the partition. From here, we can parse the CPD
	 * header and the following entries to find the manifest location
	 * (entry identified by the "RBEP.man" name), from which we can finally
	 * extract the version.
	 *
	 * --------------------------------------------------
	 * [  intel_gsc_layout_pointers                     ]
	 * [      ...                                       ]
	 * [      boot1.offset  >---------------------------]------o
	 * [      ...                                       ]      |
	 * --------------------------------------------------      |
	 *                                                          |
	 * --------------------------------------------------      |
	 * [  intel_gsc_bpdt_header                         ]<-----o
	 * --------------------------------------------------
	 * [  intel_gsc_bpdt_entry[]                        ]
	 * [      entry1                                    ]
	 * [      ...                                       ]
	 * [      entryX                                    ]
	 * [          type == GSC_RBE                       ]
	 * [          offset  >-----------------------------]------o
	 * [      ...                                       ]      |
	 * --------------------------------------------------      |
	 *                                                          |
	 * --------------------------------------------------      |
	 * [  intel_gsc_cpd_header_v2                       ]<-----o
	 * --------------------------------------------------
	 * [  intel_gsc_cpd_entry[]                         ]
	 * [      entry1                                    ]
	 * [      ...                                       ]
	 * [      entryX                                    ]
	 * [          "RBEP.man"                            ]
	 * [           ...                                  ]
	 * [           offset  >----------------------------]------o
	 * [      ...                                       ]      |
	 * --------------------------------------------------      |
	 *                                                          |
	 * --------------------------------------------------      |
	 * [ intel_gsc_manifest_header                      ]<-----o
	 * [  ...                                           ]
	 * [  intel_gsc_version     fw_version              ]
	 * [  ...                                           ]
	 * --------------------------------------------------
	 */
| 141 | |
| 142 | min_size = layout->boot1.offset + layout->boot1.size; |
| 143 | if (size < min_size) { |
| 144 | gt_err(gt, "GSC FW too small for boot section! %zu < %zu\n" , |
| 145 | size, min_size); |
| 146 | return -ENODATA; |
| 147 | } |
| 148 | |
| 149 | min_size = sizeof(*bpdt_header); |
| 150 | if (layout->boot1.size < min_size) { |
| 151 | gt_err(gt, "GSC FW boot section too small for BPDT header: %u < %zu\n" , |
| 152 | layout->boot1.size, min_size); |
| 153 | return -ENODATA; |
| 154 | } |
| 155 | |
| 156 | bpdt_header = data + layout->boot1.offset; |
| 157 | if (bpdt_header->signature != INTEL_GSC_BPDT_HEADER_SIGNATURE) { |
| 158 | gt_err(gt, "invalid signature for BPDT header: 0x%08x!\n" , |
| 159 | bpdt_header->signature); |
| 160 | return -EINVAL; |
| 161 | } |
| 162 | |
| 163 | min_size += sizeof(*bpdt_entry) * bpdt_header->descriptor_count; |
| 164 | if (layout->boot1.size < min_size) { |
| 165 | gt_err(gt, "GSC FW boot section too small for BPDT entries: %u < %zu\n" , |
| 166 | layout->boot1.size, min_size); |
| 167 | return -ENODATA; |
| 168 | } |
| 169 | |
| 170 | bpdt_entry = (void *)bpdt_header + sizeof(*bpdt_header); |
| 171 | for (i = 0; i < bpdt_header->descriptor_count; i++, bpdt_entry++) { |
| 172 | if ((bpdt_entry->type & INTEL_GSC_BPDT_ENTRY_TYPE_MASK) != |
| 173 | INTEL_GSC_BPDT_ENTRY_TYPE_GSC_RBE) |
| 174 | continue; |
| 175 | |
| 176 | cpd_header = (void *)bpdt_header + bpdt_entry->sub_partition_offset; |
| 177 | min_size = bpdt_entry->sub_partition_offset + sizeof(*cpd_header); |
| 178 | break; |
| 179 | } |
| 180 | |
| 181 | if (!cpd_header) { |
| 182 | gt_err(gt, "couldn't find CPD header in GSC binary!\n" ); |
| 183 | return -ENODATA; |
| 184 | } |
| 185 | |
| 186 | if (layout->boot1.size < min_size) { |
| 187 | gt_err(gt, "GSC FW boot section too small for CPD header: %u < %zu\n" , |
| 188 | layout->boot1.size, min_size); |
| 189 | return -ENODATA; |
| 190 | } |
| 191 | |
| 192 | if (cpd_header->header_marker != INTEL_GSC_CPD_HEADER_MARKER) { |
| 193 | gt_err(gt, "invalid marker for CPD header in GSC bin: 0x%08x!\n" , |
| 194 | cpd_header->header_marker); |
| 195 | return -EINVAL; |
| 196 | } |
| 197 | |
| 198 | min_size += sizeof(*cpd_entry) * cpd_header->num_of_entries; |
| 199 | if (layout->boot1.size < min_size) { |
| 200 | gt_err(gt, "GSC FW boot section too small for CPD entries: %u < %zu\n" , |
| 201 | layout->boot1.size, min_size); |
| 202 | return -ENODATA; |
| 203 | } |
| 204 | |
| 205 | cpd_entry = (void *)cpd_header + cpd_header->header_length; |
| 206 | for (i = 0; i < cpd_header->num_of_entries; i++, cpd_entry++) { |
| 207 | if (strcmp(cpd_entry->name, "RBEP.man" ) == 0) { |
| 208 | manifest = (void *)cpd_header + cpd_entry_offset(entry: cpd_entry); |
| 209 | intel_uc_fw_version_from_gsc_manifest(ver: &gsc->release, |
| 210 | data: manifest); |
| 211 | gsc->security_version = manifest->security_version; |
| 212 | break; |
| 213 | } |
| 214 | } |

	/*
	 * ARL SKUs require newer firmwares, but the blob is actually common
	 * across all MTL and ARL SKUs, so we need to do an explicit version check
	 * here rather than using a separate table entry. If a too old version
	 * is found, then just don't use GSC rather than aborting the driver load.
	 * Note that the major number in the GSC FW version is used to indicate
	 * the platform, so we expect it to always be 102 for MTL/ARL binaries.
	 */
	if (IS_ARROWLAKE_S(gt->i915))
		min_ver = (struct intel_uc_fw_ver){ 102, 0, 10, 1878 };
	else if (IS_ARROWLAKE_H(gt->i915) || IS_ARROWLAKE_U(gt->i915))
		min_ver = (struct intel_uc_fw_ver){ 102, 1, 15, 1926 };

	if (IS_METEORLAKE(gt->i915) && gsc->release.major != 102) {
		gt_info(gt, "Invalid GSC firmware for MTL/ARL, got %d.%d.%d.%d but need 102.x.x.x",
			gsc->release.major, gsc->release.minor,
			gsc->release.patch, gsc->release.build);
		return -EINVAL;
	}

	if (min_ver.major) {
		bool too_old = false;

		if (gsc->release.minor < min_ver.minor) {
			too_old = true;
		} else if (gsc->release.minor == min_ver.minor) {
			if (gsc->release.patch < min_ver.patch) {
				too_old = true;
			} else if (gsc->release.patch == min_ver.patch) {
				if (gsc->release.build < min_ver.build)
					too_old = true;
			}
		}

		if (too_old) {
			gt_info(gt, "GSC firmware too old for ARL, got %d.%d.%d.%d but need at least %d.%d.%d.%d",
				gsc->release.major, gsc->release.minor,
				gsc->release.patch, gsc->release.build,
				min_ver.major, min_ver.minor,
				min_ver.patch, min_ver.build);
			return -EINVAL;
		}
	}

	return 0;
}

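/*
 * Emit the GSC_FW_LOAD command, passing the GGTT offset and size (in 4K pages)
 * of the GSC-reserved memory the FW image was copied into.
 */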
static int emit_gsc_fw_load(struct i915_request *rq, struct intel_gsc_uc *gsc)
{
	u32 offset = i915_ggtt_offset(gsc->local);
	u32 *cs;

	cs = intel_ring_begin(rq, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = GSC_FW_LOAD;
	*cs++ = lower_32_bits(offset);
	*cs++ = upper_32_bits(offset);
	*cs++ = (gsc->local->size / SZ_4K) | HECI1_FW_LIMIT_VALID;

	intel_ring_advance(rq, cs);

	return 0;
}

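/*
 * Submit a request on the GSC context to emit the FW load command and wait up
 * to 500ms for it to complete.
 */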
static int gsc_fw_load(struct intel_gsc_uc *gsc)
{
	struct intel_context *ce = gsc->ce;
	struct i915_request *rq;
	int err;

	if (!ce)
		return -ENODEV;

	rq = i915_request_create(ce);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	if (ce->engine->emit_init_breadcrumb) {
		err = ce->engine->emit_init_breadcrumb(rq);
		if (err)
			goto out_rq;
	}

	err = emit_gsc_fw_load(rq, gsc);
	if (err)
		goto out_rq;

	err = ce->engine->emit_flush(rq, 0);

out_rq:
	i915_request_get(rq);

	if (unlikely(err))
		i915_request_set_error_once(rq, err);

	i915_request_add(rq);

	if (!err && i915_request_wait(rq, 0, msecs_to_jiffies(500)) < 0)
		err = -ETIME;

	i915_request_put(rq);

	if (err)
		gt_err(gsc_uc_to_gt(gsc), "Request submission for GSC load failed %pe\n",
		       ERR_PTR(err));

	return err;
}

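/* Copy the FW image into the GSC local memory and zero out the remainder of the allocation */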
static int gsc_fw_load_prepare(struct intel_gsc_uc *gsc)
{
	struct intel_gt *gt = gsc_uc_to_gt(gsc);
	void *src;

	if (!gsc->local)
		return -ENODEV;

	if (gsc->local->size < gsc->fw.size)
		return -ENOSPC;

	src = i915_gem_object_pin_map_unlocked(gsc->fw.obj,
					       intel_gt_coherent_map_type(gt, gsc->fw.obj, true));
	if (IS_ERR(src))
		return PTR_ERR(src);

	memcpy_toio(gsc->local_vaddr, src, gsc->fw.size);
	memset_io(gsc->local_vaddr + gsc->fw.size, 0, gsc->local->size - gsc->fw.size);

	intel_guc_write_barrier(gt_to_guc(gt));

	i915_gem_object_unpin_map(gsc->fw.obj);

	return 0;
}

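/* Wait up to 500ms for the GSC FW to set INIT_COMPLETE in FWSTS1 */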
static int gsc_fw_wait(struct intel_gt *gt)
{
	return intel_wait_for_register(gt->uncore,
				       HECI_FWSTS(MTL_GSC_HECI1_BASE, 1),
				       HECI1_FWSTS1_INIT_COMPLETE,
				       HECI1_FWSTS1_INIT_COMPLETE,
				       500);
}

struct intel_gsc_mkhi_header {
	u8  group_id;
#define MKHI_GROUP_ID_GFX_SRV 0x30

	u8  command;
#define MKHI_GFX_SRV_GET_HOST_COMPATIBILITY_VERSION (0x42)

	u8  reserved;
	u8  result;
} __packed;

struct mtl_gsc_ver_msg_in {
	struct intel_gsc_mtl_header header;
	struct intel_gsc_mkhi_header mkhi;
} __packed;

struct mtl_gsc_ver_msg_out {
	struct intel_gsc_mtl_header header;
	struct intel_gsc_mkhi_header mkhi;
	u16 proj_major;
	u16 compat_major;
	u16 compat_minor;
	u16 reserved[5];
} __packed;

#define GSC_VER_PKT_SZ SZ_4K

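/*
 * Query the GSC FW for its interface compatibility version via an MKHI HECI
 * command, using a temporary GGTT-mapped buffer for the request and reply
 * packets.
 */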
static int gsc_fw_query_compatibility_version(struct intel_gsc_uc *gsc)
{
	struct intel_gt *gt = gsc_uc_to_gt(gsc);
	struct mtl_gsc_ver_msg_in *msg_in;
	struct mtl_gsc_ver_msg_out *msg_out;
	struct i915_vma *vma;
	u64 offset;
	void *vaddr;
	int err;

	err = intel_guc_allocate_and_map_vma(gt_to_guc(gt), GSC_VER_PKT_SZ * 2,
					     &vma, &vaddr);
	if (err) {
		gt_err(gt, "failed to allocate vma for GSC version query\n");
		return err;
	}

	offset = i915_ggtt_offset(vma);
	msg_in = vaddr;
	msg_out = vaddr + GSC_VER_PKT_SZ;

	intel_gsc_uc_heci_cmd_emit_mtl_header(&msg_in->header,
					      HECI_MEADDRESS_MKHI,
					      sizeof(*msg_in), 0);
	msg_in->mkhi.group_id = MKHI_GROUP_ID_GFX_SRV;
	msg_in->mkhi.command = MKHI_GFX_SRV_GET_HOST_COMPATIBILITY_VERSION;

	err = intel_gsc_uc_heci_cmd_submit_packet(&gt->uc.gsc,
						  offset,
						  sizeof(*msg_in),
						  offset + GSC_VER_PKT_SZ,
						  GSC_VER_PKT_SZ);
	if (err) {
		gt_err(gt,
		       "failed to submit GSC request for compatibility version: %d\n",
		       err);
		goto out_vma;
	}

	if (msg_out->header.message_size != sizeof(*msg_out)) {
		gt_err(gt, "invalid GSC reply length %u [expected %zu], s=0x%x, f=0x%x, r=0x%x\n",
		       msg_out->header.message_size, sizeof(*msg_out),
		       msg_out->header.status, msg_out->header.flags, msg_out->mkhi.result);
		err = -EPROTO;
		goto out_vma;
	}

	gsc->fw.file_selected.ver.major = msg_out->compat_major;
	gsc->fw.file_selected.ver.minor = msg_out->compat_minor;

out_vma:
	i915_vma_unpin_and_release(&vma, I915_VMA_RELEASE_MAP);
	return err;
}

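/*
 * Load the GSC FW: copy the image into its reserved memory, submit the load
 * command, wait for init to complete and then query and validate the
 * compatibility version. The FW is only marked as TRANSFERRED here because it
 * is not fully operational until the SW proxy is enabled.
 */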
int intel_gsc_uc_fw_upload(struct intel_gsc_uc *gsc)
{
	struct intel_gt *gt = gsc_uc_to_gt(gsc);
	struct intel_uc_fw *gsc_fw = &gsc->fw;
	int err;

	/* check current fw status */
	if (intel_gsc_uc_fw_init_done(gsc)) {
		if (GEM_WARN_ON(!intel_uc_fw_is_loaded(gsc_fw)))
			intel_uc_fw_change_status(gsc_fw, INTEL_UC_FIRMWARE_TRANSFERRED);
		return -EEXIST;
	}

	if (!intel_uc_fw_is_loadable(gsc_fw))
		return -ENOEXEC;

	/* FW blob is ok, so clean the status */
	intel_uc_fw_sanitize(&gsc->fw);

	if (!gsc_is_in_reset(gt->uncore))
		return -EIO;

	err = gsc_fw_load_prepare(gsc);
	if (err)
		goto fail;

	/*
	 * GSC is only killed by an FLR, so we need to trigger one on unload to
	 * make sure we stop it. This is because we assign a chunk of memory to
	 * the GSC as part of the FW load, so we need to make sure it stops
	 * using it when we release it to the system on driver unload. Note that
	 * this is not a problem of the unload per-se, because the GSC will not
	 * touch that memory unless there are requests for it coming from the
	 * driver; therefore, no accesses will happen while i915 is not loaded,
	 * but if we re-load the driver then the GSC might wake up and try to
	 * access that old memory location again.
	 * Given that an FLR is a very disruptive action (see the FLR function
	 * for details), we want to do it as the last action before releasing
	 * the access to the MMIO bar, which means we need to do it as part of
	 * the primary uncore cleanup.
	 * An alternative approach to the FLR would be to use a memory location
	 * that survives driver unload, like e.g. stolen memory, and keep the
	 * GSC loaded across reloads. However, this requires us to make sure we
	 * preserve that memory location on unload and then determine and
	 * reserve its offset on each subsequent load, which is not trivial, so
	 * it is easier to just kill everything and start fresh.
	 */
	intel_uncore_set_flr_on_fini(&gt->i915->uncore);

	err = gsc_fw_load(gsc);
	if (err)
		goto fail;

	err = gsc_fw_wait(gt);
	if (err)
		goto fail;

	err = gsc_fw_query_compatibility_version(gsc);
	if (err)
		goto fail;

	/* we only support compatibility version 1.0 at the moment */
	err = intel_uc_check_file_version(gsc_fw, NULL);
	if (err)
		goto fail;

	/* FW is not fully operational until we enable SW proxy */
	intel_uc_fw_change_status(gsc_fw, INTEL_UC_FIRMWARE_TRANSFERRED);

	gt_info(gt, "Loaded GSC firmware %s (cv%u.%u, r%u.%u.%u.%u, svn %u)\n",
		gsc_fw->file_selected.path,
		gsc_fw->file_selected.ver.major, gsc_fw->file_selected.ver.minor,
		gsc->release.major, gsc->release.minor,
		gsc->release.patch, gsc->release.build,
		gsc->security_version);

	return 0;

fail:
	return intel_uc_fw_mark_load_failed(gsc_fw, err);
}