// SPDX-License-Identifier: MIT
/*
 * Copyright © 2013-2021 Intel Corporation
 */

#include "i915_drv.h"
#include "i915_reg.h"
#include "i915_wait_util.h"
#include "intel_pcode.h"

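/*
 * Translate the error field of a completed GEN6_PCODE_MAILBOX value into an
 * errno, using the pre-gen7 error encoding.
 */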
static int gen6_check_mailbox_status(u32 mbox)
{
	switch (mbox & GEN6_PCODE_ERROR_MASK) {
	case GEN6_PCODE_SUCCESS:
		return 0;
	case GEN6_PCODE_UNIMPLEMENTED_CMD:
		return -ENODEV;
	case GEN6_PCODE_ILLEGAL_CMD:
		return -ENXIO;
	case GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
	case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
		return -EOVERFLOW;
	case GEN6_PCODE_TIMEOUT:
		return -ETIMEDOUT;
	default:
		MISSING_CASE(mbox & GEN6_PCODE_ERROR_MASK);
		return 0;
	}
}

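/*
 * Same translation for the gen7+ error encoding, which also covers the
 * gen11 locked/rejected status codes.
 */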
static int gen7_check_mailbox_status(u32 mbox)
{
	switch (mbox & GEN6_PCODE_ERROR_MASK) {
	case GEN6_PCODE_SUCCESS:
		return 0;
	case GEN6_PCODE_ILLEGAL_CMD:
		return -ENXIO;
	case GEN7_PCODE_TIMEOUT:
		return -ETIMEDOUT;
	case GEN7_PCODE_ILLEGAL_DATA:
		return -EINVAL;
	case GEN11_PCODE_ILLEGAL_SUBCOMMAND:
		return -ENXIO;
	case GEN11_PCODE_LOCKED:
		return -EBUSY;
	case GEN11_PCODE_REJECTED:
		return -EACCES;
	case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
		return -EOVERFLOW;
	default:
		MISSING_CASE(mbox & GEN6_PCODE_ERROR_MASK);
		return 0;
	}
}

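/*
 * Single mailbox transaction, with the caller holding sb_lock: bail out if a
 * previous command is still pending, write the data dword(s), kick off the
 * command by setting GEN6_PCODE_READY, wait for READY to clear, read back the
 * data for read-type commands and decode the error field of the mailbox.
 */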
static int __snb_pcode_rw(struct intel_uncore *uncore, u32 mbox,
			  u32 *val, u32 *val1,
			  int fast_timeout_us, int slow_timeout_ms,
			  bool is_read)
{
	lockdep_assert_held(&uncore->i915->sb_lock);

	/*
	 * GEN6_PCODE_* are outside of the forcewake domain, so we can use
	 * the intel_uncore_read/write_fw variants to reduce the amount of
	 * work required when reading/writing.
	 */

	if (intel_uncore_read_fw(uncore, GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY)
		return -EAGAIN;

	intel_uncore_write_fw(uncore, GEN6_PCODE_DATA, *val);
	intel_uncore_write_fw(uncore, GEN6_PCODE_DATA1, val1 ? *val1 : 0);
	intel_uncore_write_fw(uncore,
			      GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	if (__intel_wait_for_register_fw(uncore,
					 GEN6_PCODE_MAILBOX,
					 GEN6_PCODE_READY, 0,
					 fast_timeout_us,
					 slow_timeout_ms,
					 &mbox))
		return -ETIMEDOUT;

	if (is_read)
		*val = intel_uncore_read_fw(uncore, GEN6_PCODE_DATA);
	if (is_read && val1)
		*val1 = intel_uncore_read_fw(uncore, GEN6_PCODE_DATA1);

	if (GRAPHICS_VER(uncore->i915) > 6)
		return gen7_check_mailbox_status(mbox);
	else
		return gen6_check_mailbox_status(mbox);
}

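/*
 * snb_pcode_read - read from a pcode mailbox under sb_lock, with a 500us
 * fast / 20ms slow wait for completion. @val1 may be NULL when the command
 * returns only a single data dword.
 */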
int snb_pcode_read(struct intel_uncore *uncore, u32 mbox, u32 *val, u32 *val1)
{
	int err;

	mutex_lock(&uncore->i915->sb_lock);
	err = __snb_pcode_rw(uncore, mbox, val, val1, 500, 20, true);
	mutex_unlock(&uncore->i915->sb_lock);

	if (err) {
		drm_dbg(&uncore->i915->drm,
			"warning: pcode (read from mbox %x) mailbox access failed for %ps: %d\n",
			mbox, __builtin_return_address(0), err);
	}

	return err;
}

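/*
 * snb_pcode_write_timeout - write @val to a pcode mailbox under sb_lock,
 * with a 250us fast wait and a caller-supplied slow timeout for completion.
 */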
int snb_pcode_write_timeout(struct intel_uncore *uncore, u32 mbox, u32 val,
			    int timeout_ms)
{
	int err;

	mutex_lock(&uncore->i915->sb_lock);
	err = __snb_pcode_rw(uncore, mbox, &val, NULL, 250, timeout_ms, false);
	mutex_unlock(&uncore->i915->sb_lock);

	if (err) {
		drm_dbg(&uncore->i915->drm,
			"warning: pcode (write of 0x%08x to mbox %x) mailbox access failed for %ps: %d\n",
			val, mbox, __builtin_return_address(0), err);
	}

	return err;
}

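/*
 * One non-blocking request/poll cycle: issue @request as a read and report
 * whether pcode accepted it and the masked reply matches the expected value.
 */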
static bool skl_pcode_try_request(struct intel_uncore *uncore, u32 mbox,
				  u32 request, u32 reply_mask, u32 reply,
				  u32 *status)
{
	*status = __snb_pcode_rw(uncore, mbox, &request, NULL, 500, 0, true);

	return (*status == 0) && ((request & reply_mask) == reply);
}

/**
 * skl_pcode_request - send PCODE request until acknowledgment
 * @uncore: uncore
 * @mbox: PCODE mailbox ID the request is targeted for
 * @request: request ID
 * @reply_mask: mask used to check for request acknowledgment
 * @reply: value used to check for request acknowledgment
 * @timeout_base_ms: timeout for polling with preemption enabled
 *
 * Keep resending the @request to @mbox until PCODE acknowledges it, PCODE
 * reports an error, or an overall timeout of @timeout_base_ms+50 ms expires.
 * The request is acknowledged once the PCODE reply dword equals @reply after
 * applying @reply_mask. Polling is first attempted with preemption enabled
 * for @timeout_base_ms and, if that times out, for another 50 ms with
 * preemption disabled.
 *
 * Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some
 * other error as reported by PCODE.
 */
int skl_pcode_request(struct intel_uncore *uncore, u32 mbox, u32 request,
		      u32 reply_mask, u32 reply, int timeout_base_ms)
{
	u32 status;
	int ret;

	mutex_lock(&uncore->i915->sb_lock);

#define COND \
	skl_pcode_try_request(uncore, mbox, request, reply_mask, reply, &status)

	/*
	 * Prime the PCODE by doing a request first. Normally it guarantees
	 * that a subsequent request, at most @timeout_base_ms later, succeeds.
	 * _wait_for() doesn't guarantee how soon its passed condition is first
	 * evaluated, so send the first request explicitly.
	 */
	if (COND) {
		ret = 0;
		goto out;
	}
	ret = _wait_for(COND, timeout_base_ms * 1000, 10, 10);
	if (!ret)
		goto out;

	/*
	 * The above can time out if the number of requests was low (2 in the
	 * worst case) _and_ PCODE was busy for some reason even after a
	 * (queued) request and @timeout_base_ms delay. As a workaround retry
	 * the poll with preemption disabled to maximize the number of
	 * requests. Increase the timeout from @timeout_base_ms to 50 ms to
	 * account for interrupts that could reduce the number of these
	 * requests, and for any quirks of the PCODE firmware that delays
	 * the request completion.
	 */
	drm_dbg_kms(&uncore->i915->drm,
		    "PCODE timeout, retrying with preemption disabled\n");
	drm_WARN_ON_ONCE(&uncore->i915->drm, timeout_base_ms > 3);
	preempt_disable();
	ret = wait_for_atomic(COND, 50);
	preempt_enable();

out:
	mutex_unlock(&uncore->i915->sb_lock);
	return status ? status : ret;
#undef COND
}

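/*
 * Wait for any in-flight mailbox command to complete (returning -EPROBE_DEFER
 * if the mailbox does not go idle in time), then poll the DG1 uncore init
 * status until pcode reports that initialization has completed.
 */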
static int pcode_init_wait(struct intel_uncore *uncore, int timeout_ms)
{
	if (__intel_wait_for_register_fw(uncore,
					 GEN6_PCODE_MAILBOX,
					 GEN6_PCODE_READY, 0,
					 500, timeout_ms,
					 NULL))
		return -EPROBE_DEFER;

	return skl_pcode_request(uncore,
				 DG1_PCODE_STATUS,
				 DG1_UNCORE_GET_INIT_STATUS,
				 DG1_UNCORE_INIT_STATUS_COMPLETE,
				 DG1_UNCORE_INIT_STATUS_COMPLETE, timeout_ms);
}

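/*
 * On discrete GPUs the pcode/punit may still be busy initialising when the
 * driver loads, so give it up to 10 seconds to go idle and, failing that,
 * up to a further 3 minutes before giving up.
 */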
int intel_pcode_init(struct intel_uncore *uncore)
{
	int err;

	if (!IS_DGFX(uncore->i915))
		return 0;

	/*
	 * Wait 10 seconds for the punit to settle and complete
	 * any outstanding transactions upon module load.
	 */
	err = pcode_init_wait(uncore, 10000);

	if (err) {
		drm_notice(&uncore->i915->drm,
			   "Waiting for HW initialisation...\n");
		err = pcode_init_wait(uncore, 180000);
	}

	return err;
}

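/*
 * Variant of snb_pcode_read() that builds the mailbox command word from a
 * command and two parameter fields and holds a runtime PM wakeref around the
 * access.
 */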
int snb_pcode_read_p(struct intel_uncore *uncore, u32 mbcmd, u32 p1, u32 p2, u32 *val)
{
	intel_wakeref_t wakeref;
	u32 mbox;
	int err;

	mbox = REG_FIELD_PREP(GEN6_PCODE_MB_COMMAND, mbcmd)
		| REG_FIELD_PREP(GEN6_PCODE_MB_PARAM1, p1)
		| REG_FIELD_PREP(GEN6_PCODE_MB_PARAM2, p2);

	with_intel_runtime_pm(uncore->rpm, wakeref)
		err = snb_pcode_read(uncore, mbox, val, NULL);

	return err;
}

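/* Write counterpart of snb_pcode_read_p(). */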
int snb_pcode_write_p(struct intel_uncore *uncore, u32 mbcmd, u32 p1, u32 p2, u32 val)
{
	intel_wakeref_t wakeref;
	u32 mbox;
	int err;

	mbox = REG_FIELD_PREP(GEN6_PCODE_MB_COMMAND, mbcmd)
		| REG_FIELD_PREP(GEN6_PCODE_MB_PARAM1, p1)
		| REG_FIELD_PREP(GEN6_PCODE_MB_PARAM2, p2);

	with_intel_runtime_pm(uncore->rpm, wakeref)
		err = snb_pcode_write(uncore, mbox, val);

	return err;
}

/* Helpers with drm device */
int intel_pcode_read(struct drm_device *drm, u32 mbox, u32 *val, u32 *val1)
{
	struct drm_i915_private *i915 = to_i915(drm);

	return snb_pcode_read(&i915->uncore, mbox, val, val1);
}

int intel_pcode_write_timeout(struct drm_device *drm, u32 mbox, u32 val, int timeout_ms)
{
	struct drm_i915_private *i915 = to_i915(drm);

	return snb_pcode_write_timeout(&i915->uncore, mbox, val, timeout_ms);
}

int intel_pcode_request(struct drm_device *drm, u32 mbox, u32 request,
			u32 reply_mask, u32 reply, int timeout_base_ms)
{
	struct drm_i915_private *i915 = to_i915(drm);

	return skl_pcode_request(&i915->uncore, mbox, request, reply_mask, reply,
				 timeout_base_ms);
}