// SPDX-License-Identifier: MIT
/*
 * Copyright © 2014-2019 Intel Corporation
 */

#include "gt/intel_gsc.h"
#include "gt/intel_gt.h"
#include "intel_gsc_binary_headers.h"
#include "intel_gsc_uc_heci_cmd_submit.h"
#include "intel_huc.h"
#include "intel_huc_fw.h"
#include "intel_huc_print.h"
#include "i915_drv.h"
#include "pxp/intel_pxp_huc.h"
#include "pxp/intel_pxp_cmd_interface_43.h"

struct mtl_huc_auth_msg_in {
	struct intel_gsc_mtl_header header;
	struct pxp43_new_huc_auth_in huc_in;
} __packed;

struct mtl_huc_auth_msg_out {
	struct intel_gsc_mtl_header header;
	struct pxp43_huc_auth_out huc_out;
} __packed;

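/*
 * On platforms that use the 2-step HuC load (DMA transfer of the binary
 * followed by authentication through the GSC, e.g. MTL), the authentication
 * is requested by submitting a PXP43_CMDID_NEW_HUC_AUTH HECI command to the
 * GSC FW via the GSC CS, using the pre-allocated heci_pkt buffer.
 */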
int intel_huc_fw_auth_via_gsccs(struct intel_huc *huc)
{
	struct intel_gt *gt = huc_to_gt(huc);
	struct drm_i915_gem_object *obj;
	struct mtl_huc_auth_msg_in *msg_in;
	struct mtl_huc_auth_msg_out *msg_out;
	void *pkt_vaddr;
	u64 pkt_offset;
	int retry = 5;
	int err = 0;

	if (!huc->heci_pkt)
		return -ENODEV;

	obj = huc->heci_pkt->obj;
	pkt_offset = i915_ggtt_offset(huc->heci_pkt);

	pkt_vaddr = i915_gem_object_pin_map_unlocked(obj,
						     intel_gt_coherent_map_type(gt, obj, true));
	if (IS_ERR(pkt_vaddr))
		return PTR_ERR(pkt_vaddr);

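	/*
	 * The request is placed at the start of the heci_pkt buffer; the GSC
	 * writes its reply into the second half of the same buffer, at
	 * PXP43_HUC_AUTH_INOUT_SIZE bytes from the start.
	 */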
	msg_in = pkt_vaddr;
	msg_out = pkt_vaddr + PXP43_HUC_AUTH_INOUT_SIZE;

	intel_gsc_uc_heci_cmd_emit_mtl_header(&msg_in->header,
					      HECI_MEADDRESS_PXP,
					      sizeof(*msg_in), 0);

	msg_in->huc_in.header.api_version = PXP_APIVER(4, 3);
	msg_in->huc_in.header.command_id = PXP43_CMDID_NEW_HUC_AUTH;
	msg_in->huc_in.header.status = 0;
	msg_in->huc_in.header.buffer_len = sizeof(msg_in->huc_in) -
					   sizeof(msg_in->huc_in.header);
	msg_in->huc_in.huc_base_address = huc->fw.vma_res.start;
	msg_in->huc_in.huc_size = huc->fw.obj->base.size;

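	/*
	 * The GSC replies with GSC_OUTFLAG_MSG_PENDING if it needs more time
	 * to process the request. In that case, resubmit the same message
	 * (carrying over the gsc_message_handle from the reply) after a short
	 * wait, up to 5 times, before giving up with -EBUSY.
	 */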
	do {
		err = intel_gsc_uc_heci_cmd_submit_packet(&gt->uc.gsc,
							  pkt_offset, sizeof(*msg_in),
							  pkt_offset + PXP43_HUC_AUTH_INOUT_SIZE,
							  PXP43_HUC_AUTH_INOUT_SIZE);
		if (err) {
			huc_err(huc, "failed to submit GSC request to auth: %d\n", err);
			goto out_unpin;
		}

		if (msg_out->header.flags & GSC_OUTFLAG_MSG_PENDING) {
			msg_in->header.gsc_message_handle = msg_out->header.gsc_message_handle;
			err = -EBUSY;
			msleep(50);
		}
	} while (--retry && err == -EBUSY);

	if (err)
		goto out_unpin;

	if (msg_out->header.message_size != sizeof(*msg_out)) {
		huc_err(huc, "invalid GSC reply length %u [expected %zu]\n",
			msg_out->header.message_size, sizeof(*msg_out));
		err = -EPROTO;
		goto out_unpin;
	}

	/*
	 * The GSC will return PXP_STATUS_OP_NOT_PERMITTED if the HuC is already
	 * loaded. If the same error is ever returned with HuC not loaded we'll
	 * still catch it when we check the authentication bit later.
	 */
	if (msg_out->huc_out.header.status != PXP_STATUS_SUCCESS &&
	    msg_out->huc_out.header.status != PXP_STATUS_OP_NOT_PERMITTED) {
		huc_err(huc, "auth failed with GSC error = 0x%x\n",
			msg_out->huc_out.header.status);
		err = -EIO;
		goto out_unpin;
	}

out_unpin:
	i915_gem_object_unpin_map(obj);
	return err;
}

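/*
 * A valid "huc_fw" CPD entry points to a blob that starts with a legacy CSS
 * header; check that it is large enough and carries the expected module type
 * and Intel as the module vendor before trusting it for the DMA load.
 */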
static bool css_valid(const void *data, size_t size)
{
	const struct uc_css_header *css = data;

	if (unlikely(size < sizeof(struct uc_css_header)))
		return false;

	if (css->module_type != 0x6)
		return false;

	if (css->module_vendor != PCI_VENDOR_ID_INTEL)
		return false;

	return true;
}

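/* only the low bits of the CPD entry offset field contain the actual offset */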
static inline u32 entry_offset(const struct intel_gsc_cpd_entry *entry)
{
	return entry->offset & INTEL_GSC_CPD_ENTRY_OFFSET_MASK;
}

int intel_huc_fw_get_binary_info(struct intel_uc_fw *huc_fw, const void *data, size_t size)
{
	struct intel_huc *huc = container_of(huc_fw, struct intel_huc, fw);
	const struct intel_gsc_cpd_header_v2 *header = data;
	const struct intel_gsc_cpd_entry *entry;
	size_t min_size = sizeof(*header);
	int i;

	if (!huc_fw->has_gsc_headers) {
		huc_err(huc, "Invalid FW type for GSC header parsing!\n");
		return -EINVAL;
	}

	if (size < sizeof(*header)) {
		huc_err(huc, "FW too small! %zu < %zu\n", size, min_size);
		return -ENODATA;
	}

	/*
	 * The GSC-enabled HuC binary starts with a directory header, followed
	 * by a series of entries. Each entry is identified by a name and
	 * points to a specific section of the binary containing the relevant
	 * data. The entries we're interested in are:
	 * - "HUCP.man": points to the GSC manifest header for the HuC, which
	 *               contains the version info.
	 * - "huc_fw": points to the legacy-style binary that can be used for
	 *             load via the DMA. This entry only contains a valid CSS
	 *             on binaries for platforms that support 2-step HuC load
	 *             via dma and auth via GSC (like MTL).
	 *
	 * --------------------------------------------------
	 * [  intel_gsc_cpd_header_v2                       ]
	 * --------------------------------------------------
	 * [  intel_gsc_cpd_entry[]                         ]
	 * [      entry1                                    ]
	 * [      ...                                       ]
	 * [      entryX                                    ]
	 * [          "HUCP.man"                            ]
	 * [           ...                                  ]
	 * [           offset  >----------------------------]------o
	 * [      ...                                       ]      |
	 * [      entryY                                    ]      |
	 * [          "huc_fw"                              ]      |
	 * [           ...                                  ]      |
	 * [           offset  >----------------------------]----------o
	 * --------------------------------------------------      |   |
	 *                                                          |   |
	 * --------------------------------------------------      |   |
	 * [ intel_gsc_manifest_header                      ]<-----o   |
	 * [  ...                                           ]          |
	 * [  intel_gsc_version     fw_version              ]          |
	 * [  ...                                           ]          |
	 * --------------------------------------------------          |
	 *                                                              |
	 * --------------------------------------------------          |
	 * [ data[]                                         ]<---------o
	 * [  ...                                           ]
	 * [  ...                                           ]
	 * --------------------------------------------------
	 */

	if (header->header_marker != INTEL_GSC_CPD_HEADER_MARKER) {
		huc_err(huc, "invalid marker for CPD header: 0x%08x!\n",
			header->header_marker);
		return -EINVAL;
	}

	/* we only have binaries with header v2 and entry v1 for now */
	if (header->header_version != 2 || header->entry_version != 1) {
		huc_err(huc, "invalid CPD header/entry version %u:%u!\n",
			header->header_version, header->entry_version);
		return -EINVAL;
	}

	if (header->header_length < sizeof(struct intel_gsc_cpd_header_v2)) {
		huc_err(huc, "invalid CPD header length %u!\n",
			header->header_length);
		return -EINVAL;
	}

	min_size = header->header_length + sizeof(*entry) * header->num_of_entries;
	if (size < min_size) {
		huc_err(huc, "FW too small! %zu < %zu\n", size, min_size);
		return -ENODATA;
	}

	entry = data + header->header_length;

	for (i = 0; i < header->num_of_entries; i++, entry++) {
		if (strcmp(entry->name, "HUCP.man") == 0)
			intel_uc_fw_version_from_gsc_manifest(&huc_fw->file_selected.ver,
							      data + entry_offset(entry));

		if (strcmp(entry->name, "huc_fw") == 0) {
			u32 offset = entry_offset(entry);

			if (offset < size && css_valid(data + offset, size - offset))
				huc_fw->dma_start_offset = offset;
		}
	}

	return 0;
}

int intel_huc_fw_load_and_auth_via_gsc(struct intel_huc *huc)
{
	int ret;

	if (!intel_huc_is_loaded_by_gsc(huc))
		return -ENODEV;

	if (!intel_uc_fw_is_loadable(&huc->fw))
		return -ENOEXEC;

	/*
	 * If we abort a suspend, HuC might still be loaded when the mei
	 * component gets re-bound and this function called again. If so, just
	 * mark the HuC as loaded.
	 */
	if (intel_huc_is_authenticated(huc, INTEL_HUC_AUTH_BY_GSC)) {
		intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_RUNNING);
		return 0;
	}

	GEM_WARN_ON(intel_uc_fw_is_loaded(&huc->fw));

	ret = intel_pxp_huc_load_and_auth(huc_to_gt(huc)->i915->pxp);
	if (ret)
		return ret;

	intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_TRANSFERRED);

	return intel_huc_wait_for_auth_complete(huc, INTEL_HUC_AUTH_BY_GSC);
}

/**
 * intel_huc_fw_upload() - load HuC uCode to device via DMA transfer
 * @huc: intel_huc structure
 *
 * Called from intel_uc_init_hw() during driver load, resume from sleep and
 * after a GPU reset. Note that HuC must be loaded before GuC.
 *
 * The firmware image should have already been fetched into memory, so only
 * check that fetch succeeded, and then transfer the image to the h/w.
 *
 * Return: non-zero code on error
 */
int intel_huc_fw_upload(struct intel_huc *huc)
{
	if (intel_huc_is_loaded_by_gsc(huc))
		return -ENODEV;

	/* HW doesn't look at destination address for HuC, so set it to 0 */
	return intel_uc_fw_upload(&huc->fw, 0, HUC_UKERNEL);
}