/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/debugfs.h>
#include <linux/kernel.h>

#include <drm/drm_probe_helper.h>

#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_utils.h"
#include "intel_connector.h"
#include "intel_display_power.h"
#include "intel_display_core.h"
#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_hdcp.h"
#include "intel_hotplug.h"
#include "intel_hotplug_irq.h"

/**
 * DOC: Hotplug
 *
 * Simply put, hotplug occurs when a display is connected to or disconnected
 * from the system. However, there may be adapters and docking stations and
 * Display Port short pulses and MST devices involved, complicating matters.
 *
 * Hotplug in i915 is handled in many different levels of abstraction.
 *
 * The platform dependent interrupt handling code in i915_irq.c enables,
 * disables, and does preliminary handling of the interrupts. The interrupt
 * handlers gather the hotplug detect (HPD) information from relevant registers
 * into a platform independent mask of hotplug pins that have fired.
 *
 * The platform independent interrupt handler intel_hpd_irq_handler() in
 * intel_hotplug.c does hotplug irq storm detection and mitigation, and passes
 * further processing to appropriate bottom halves (Display Port specific and
 * regular hotplug).
 *
 * The Display Port work function i915_digport_work_func() calls into
 * intel_dp_hpd_pulse() via hooks, which handles DP short pulses and DP MST long
 * pulses, with failures and non-MST long pulses triggering regular hotplug
 * processing on the connector.
 *
 * The regular hotplug work function i915_hotplug_work_func() calls connector
 * detect hooks, and, if connector status changes, triggers sending of hotplug
 * uevent to userspace via drm_kms_helper_hotplug_event().
 *
 * Finally, the userspace is responsible for triggering a modeset upon receiving
 * the hotplug uevent, disabling or enabling the crtc as needed.
 *
 * The hotplug interrupt storm detection and mitigation code keeps track of the
 * number of interrupts per hotplug pin over a period of time, and if the
 * number of interrupts exceeds a certain threshold, the interrupt is disabled
 * for a while before being re-enabled. The intention is to mitigate issues
 * arising from broken hardware triggering massive amounts of interrupts and
 * grinding the system to a halt.
 *
 * The current implementation expects that a hotplug interrupt storm will not
 * be seen when a display port sink is connected; hence, on platforms whose DP
 * callback is handled by i915_digport_work_func(), re-enabling of HPD is not
 * performed (it was never expected to be disabled in the first place ;) ).
 * This is specific to DP sinks handled by that routine; any other display,
 * such as HDMI or DVI, enabled on the same port will have proper logic, since
 * it will use i915_hotplug_work_func(), where this logic is handled.
 */

/**
 * intel_hpd_pin_default - return the default pin associated with a certain port
 * @port: the hpd port to get the associated pin for
 *
 * It is only valid for and used by digital port encoders.
 *
 * Returns the pin that is associated with @port.
 */
enum hpd_pin intel_hpd_pin_default(enum port port)
{
	return HPD_PORT_A + port - PORT_A;
}

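/*
 * Worked illustration of the mapping above (not extra behavior): assuming the
 * hpd_pin and port enums stay in lockstep, which this helper relies on, a
 * request for PORT_C yields HPD_PORT_A + (PORT_C - PORT_A) == HPD_PORT_C.
 */
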
/* Threshold == 5 for long IRQs, 50 for short */
#define HPD_STORM_DEFAULT_THRESHOLD	50

#define HPD_STORM_DETECT_PERIOD		1000
#define HPD_STORM_REENABLE_DELAY	(2 * 60 * 1000)
#define HPD_RETRY_DELAY			1000

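/*
 * Worked example of the defaults above (illustrative only): a long IRQ adds
 * 10 to a pin's count and a short IRQ adds 1, so with a threshold of 50 a
 * storm is declared on the 6th long IRQ (6 * 10 = 60 > 50) or on the 51st
 * short IRQ within a 1000 ms window -- hence "5 for long IRQs, 50 for short".
 */
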
static enum hpd_pin
intel_connector_hpd_pin(struct intel_connector *connector)
{
	struct intel_encoder *encoder = intel_attached_encoder(connector);

	/*
	 * MST connectors get their encoder attached dynamically
	 * so need to make sure we have an encoder here. But since
	 * MST encoders have their hpd_pin set to HPD_NONE we don't
	 * have to special case them beyond that.
	 */
	return encoder ? encoder->hpd_pin : HPD_NONE;
}

/**
 * intel_hpd_irq_storm_detect - gather stats and detect HPD IRQ storm on a pin
 * @display: display device
 * @pin: the pin to gather stats on
 * @long_hpd: whether the HPD IRQ was long or short
 *
 * Gather stats about HPD IRQs from the specified @pin, and detect IRQ
 * storms. Only the pin specific stats and state are changed, the caller is
 * responsible for further action.
 *
 * The number of IRQs that are allowed within @HPD_STORM_DETECT_PERIOD is
 * stored in @display->hotplug.hpd_storm_threshold which defaults to
 * @HPD_STORM_DEFAULT_THRESHOLD. Long IRQs count as +10 to this threshold, and
 * short IRQs count as +1. If this threshold is exceeded, it's considered an
 * IRQ storm and the IRQ state is set to @HPD_MARK_DISABLED.
 *
 * By default, most systems will only count long IRQs towards
 * &display->hotplug.hpd_storm_threshold. However, some older systems also
 * suffer from short IRQ storms and must also track these. Because short IRQ
 * storms are naturally caused by sideband interactions with DP MST devices,
 * short IRQ detection is only enabled for systems without DP MST support.
 * Systems which are new enough to support DP MST are far less likely to
 * suffer from IRQ storms at all, so this is fine.
 *
 * The HPD threshold can be controlled through i915_hpd_storm_ctl in debugfs,
 * and should only be adjusted for automated hotplug testing.
 *
 * Return true if an IRQ storm was detected on @pin.
 */
static bool intel_hpd_irq_storm_detect(struct intel_display *display,
				       enum hpd_pin pin, bool long_hpd)
{
	struct intel_hotplug *hpd = &display->hotplug;
	unsigned long start = hpd->stats[pin].last_jiffies;
	unsigned long end = start + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD);
	const int increment = long_hpd ? 10 : 1;
	const int threshold = hpd->hpd_storm_threshold;
	bool storm = false;

	if (!threshold ||
	    (!long_hpd && !display->hotplug.hpd_short_storm_enabled))
		return false;

	if (!time_in_range(jiffies, start, end)) {
		hpd->stats[pin].last_jiffies = jiffies;
		hpd->stats[pin].count = 0;
	}

	hpd->stats[pin].count += increment;
	if (hpd->stats[pin].count > threshold) {
		hpd->stats[pin].state = HPD_MARK_DISABLED;
		drm_dbg_kms(display->drm,
			    "HPD interrupt storm detected on PIN %d\n", pin);
		storm = true;
	} else {
		drm_dbg_kms(display->drm,
			    "Received HPD interrupt on PIN %d - cnt: %d\n",
			    pin,
			    hpd->stats[pin].count);
	}

	return storm;
}

static bool detection_work_enabled(struct intel_display *display)
{
	lockdep_assert_held(&display->irq.lock);

	return display->hotplug.detection_work_enabled;
}

static bool
mod_delayed_detection_work(struct intel_display *display, struct delayed_work *work, int delay)
{
	lockdep_assert_held(&display->irq.lock);

	if (!detection_work_enabled(display))
		return false;

	return mod_delayed_work(display->wq.unordered, work, delay);
}

static bool
queue_delayed_detection_work(struct intel_display *display, struct delayed_work *work, int delay)
{
	lockdep_assert_held(&display->irq.lock);

	if (!detection_work_enabled(display))
		return false;

	return queue_delayed_work(display->wq.unordered, work, delay);
}

static bool
queue_detection_work(struct intel_display *display, struct work_struct *work)
{
	lockdep_assert_held(&display->irq.lock);

	if (!detection_work_enabled(display))
		return false;

	return queue_work(display->wq.unordered, work);
}

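/*
 * Callers are expected to hold display->irq.lock around the helpers above;
 * a minimal usage sketch (mirroring the call sites further down, not
 * additional code):
 *
 *	spin_lock_irq(&display->irq.lock);
 *	queue_delayed_detection_work(display, &display->hotplug.hotplug_work, 0);
 *	spin_unlock_irq(&display->irq.lock);
 */
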
static void
intel_hpd_irq_storm_switch_to_polling(struct intel_display *display)
{
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	bool hpd_disabled = false;

	lockdep_assert_held(&display->irq.lock);

	drm_connector_list_iter_begin(display->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		enum hpd_pin pin;

		if (connector->base.polled != DRM_CONNECTOR_POLL_HPD)
			continue;

		pin = intel_connector_hpd_pin(connector);
		if (pin == HPD_NONE ||
		    display->hotplug.stats[pin].state != HPD_MARK_DISABLED)
			continue;

		drm_info(display->drm,
			 "HPD interrupt storm detected on connector %s: "
			 "switching from hotplug detection to polling\n",
			 connector->base.name);

		display->hotplug.stats[pin].state = HPD_DISABLED;
		connector->base.polled = DRM_CONNECTOR_POLL_CONNECT |
			DRM_CONNECTOR_POLL_DISCONNECT;
		hpd_disabled = true;
	}
	drm_connector_list_iter_end(&conn_iter);

	/* Enable polling and queue hotplug re-enabling. */
	if (hpd_disabled) {
		drm_kms_helper_poll_reschedule(display->drm);
		mod_delayed_detection_work(display,
					   &display->hotplug.reenable_work,
					   msecs_to_jiffies(HPD_STORM_REENABLE_DELAY));
	}
}

static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
{
	struct intel_display *display =
		container_of(work, typeof(*display), hotplug.reenable_work.work);
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	struct ref_tracker *wakeref;
	enum hpd_pin pin;

	wakeref = intel_display_rpm_get(display);

	spin_lock_irq(&display->irq.lock);

	drm_connector_list_iter_begin(display->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		pin = intel_connector_hpd_pin(connector);
		if (pin == HPD_NONE ||
		    display->hotplug.stats[pin].state != HPD_DISABLED)
			continue;

		if (connector->base.polled != connector->polled)
			drm_dbg(display->drm,
				"Reenabling HPD on connector %s\n",
				connector->base.name);
		connector->base.polled = connector->polled;
	}
	drm_connector_list_iter_end(&conn_iter);

	for_each_hpd_pin(pin) {
		if (display->hotplug.stats[pin].state == HPD_DISABLED)
			display->hotplug.stats[pin].state = HPD_ENABLED;
	}

	intel_hpd_irq_setup(display);

	spin_unlock_irq(&display->irq.lock);

	intel_display_rpm_put(display, wakeref);
}

static enum intel_hotplug_state
intel_hotplug_detect_connector(struct intel_connector *connector)
{
	struct drm_device *dev = connector->base.dev;
	enum drm_connector_status old_status;
	u64 old_epoch_counter;
	int status;
	bool ret = false;

	drm_WARN_ON(dev, !mutex_is_locked(&dev->mode_config.mutex));
	old_status = connector->base.status;
	old_epoch_counter = connector->base.epoch_counter;

	status = drm_helper_probe_detect(&connector->base, NULL, false);
	if (!connector->base.force)
		connector->base.status = status;

	if (old_epoch_counter != connector->base.epoch_counter)
		ret = true;

	if (ret) {
		drm_dbg_kms(dev, "[CONNECTOR:%d:%s] status updated from %s to %s (epoch counter %llu->%llu)\n",
			    connector->base.base.id,
			    connector->base.name,
			    drm_get_connector_status_name(old_status),
			    drm_get_connector_status_name(connector->base.status),
			    old_epoch_counter,
			    connector->base.epoch_counter);
		return INTEL_HOTPLUG_CHANGED;
	}
	return INTEL_HOTPLUG_UNCHANGED;
}

enum intel_hotplug_state
intel_encoder_hotplug(struct intel_encoder *encoder,
		      struct intel_connector *connector)
{
	return intel_hotplug_detect_connector(connector);
}

static bool intel_encoder_has_hpd_pulse(struct intel_encoder *encoder)
{
	return intel_encoder_is_dig_port(encoder) &&
		enc_to_dig_port(encoder)->hpd_pulse != NULL;
}

static bool hpd_pin_has_pulse(struct intel_display *display, enum hpd_pin pin)
{
	struct intel_encoder *encoder;

	for_each_intel_encoder(display->drm, encoder) {
		if (encoder->hpd_pin != pin)
			continue;

		if (intel_encoder_has_hpd_pulse(encoder))
			return true;
	}

	return false;
}

static bool hpd_pin_is_blocked(struct intel_display *display, enum hpd_pin pin)
{
	lockdep_assert_held(&display->irq.lock);

	return display->hotplug.stats[pin].blocked_count;
}

static u32 get_blocked_hpd_pin_mask(struct intel_display *display)
{
	enum hpd_pin pin;
	u32 hpd_pin_mask = 0;

	for_each_hpd_pin(pin) {
		if (hpd_pin_is_blocked(display, pin))
			hpd_pin_mask |= BIT(pin);
	}

	return hpd_pin_mask;
}

static void i915_digport_work_func(struct work_struct *work)
{
	struct intel_display *display =
		container_of(work, struct intel_display, hotplug.dig_port_work);
	struct intel_hotplug *hotplug = &display->hotplug;
	u32 long_hpd_pin_mask, short_hpd_pin_mask;
	struct intel_encoder *encoder;
	u32 blocked_hpd_pin_mask;
	u32 old_bits = 0;

	spin_lock_irq(&display->irq.lock);

	blocked_hpd_pin_mask = get_blocked_hpd_pin_mask(display);
	long_hpd_pin_mask = hotplug->long_hpd_pin_mask & ~blocked_hpd_pin_mask;
	hotplug->long_hpd_pin_mask &= ~long_hpd_pin_mask;
	short_hpd_pin_mask = hotplug->short_hpd_pin_mask & ~blocked_hpd_pin_mask;
	hotplug->short_hpd_pin_mask &= ~short_hpd_pin_mask;

	spin_unlock_irq(&display->irq.lock);

	for_each_intel_encoder(display->drm, encoder) {
		struct intel_digital_port *dig_port;
		enum hpd_pin pin = encoder->hpd_pin;
		bool long_hpd, short_hpd;
		enum irqreturn ret;

		if (!intel_encoder_has_hpd_pulse(encoder))
			continue;

		long_hpd = long_hpd_pin_mask & BIT(pin);
		short_hpd = short_hpd_pin_mask & BIT(pin);

		if (!long_hpd && !short_hpd)
			continue;

		dig_port = enc_to_dig_port(encoder);

		ret = dig_port->hpd_pulse(dig_port, long_hpd);
		if (ret == IRQ_NONE) {
			/* fall back to old school hpd */
			old_bits |= BIT(pin);
		}
	}

	if (old_bits) {
		spin_lock_irq(&display->irq.lock);
		display->hotplug.event_bits |= old_bits;
		queue_delayed_detection_work(display,
					     &display->hotplug.hotplug_work, 0);
		spin_unlock_irq(&display->irq.lock);
	}
}

/**
 * intel_hpd_trigger_irq - trigger an hpd irq event for a port
 * @dig_port: digital port
 *
 * Trigger an HPD interrupt event for the given port, emulating a short pulse
 * generated by the sink, and schedule the dig port work to handle it.
 */
void intel_hpd_trigger_irq(struct intel_digital_port *dig_port)
{
	struct intel_display *display = to_intel_display(dig_port);
	struct intel_hotplug *hotplug = &display->hotplug;
	struct intel_encoder *encoder = &dig_port->base;

	spin_lock_irq(&display->irq.lock);

	hotplug->short_hpd_pin_mask |= BIT(encoder->hpd_pin);
	if (!hpd_pin_is_blocked(display, encoder->hpd_pin))
		queue_work(hotplug->dp_wq, &hotplug->dig_port_work);

	spin_unlock_irq(&display->irq.lock);
}

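/*
 * Illustrative call site (a sketch; in the driver this is used, e.g., by the
 * DP MST code to emulate a sink-generated short pulse):
 *
 *	intel_hpd_trigger_irq(dp_to_dig_port(intel_dp));
 */
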
/*
 * Handle hotplug events outside the interrupt handler proper.
 */
static void i915_hotplug_work_func(struct work_struct *work)
{
	struct intel_display *display =
		container_of(work, struct intel_display, hotplug.hotplug_work.work);
	struct intel_hotplug *hotplug = &display->hotplug;
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	u32 changed = 0, retry = 0;
	u32 hpd_event_bits;
	u32 hpd_retry_bits;
	struct drm_connector *first_changed_connector = NULL;
	int changed_connectors = 0;
	u32 blocked_hpd_pin_mask;

	mutex_lock(&display->drm->mode_config.mutex);
	drm_dbg_kms(display->drm, "running encoder hotplug functions\n");

	spin_lock_irq(&display->irq.lock);

	blocked_hpd_pin_mask = get_blocked_hpd_pin_mask(display);
	hpd_event_bits = hotplug->event_bits & ~blocked_hpd_pin_mask;
	hotplug->event_bits &= ~hpd_event_bits;
	hpd_retry_bits = hotplug->retry_bits & ~blocked_hpd_pin_mask;
	hotplug->retry_bits &= ~hpd_retry_bits;

	/* Enable polling for connectors which had HPD IRQ storms */
	intel_hpd_irq_storm_switch_to_polling(display);

	spin_unlock_irq(&display->irq.lock);

	/* Skip calling encoder hotplug handlers if the ignore-long-HPD flag is set */
	if (display->hotplug.ignore_long_hpd) {
		drm_dbg_kms(display->drm, "Ignore HPD flag on - skip encoder hotplug handlers\n");
		mutex_unlock(&display->drm->mode_config.mutex);
		return;
	}

	drm_connector_list_iter_begin(display->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		enum hpd_pin pin;
		u32 hpd_bit;

		pin = intel_connector_hpd_pin(connector);
		if (pin == HPD_NONE)
			continue;

		hpd_bit = BIT(pin);
		if ((hpd_event_bits | hpd_retry_bits) & hpd_bit) {
			struct intel_encoder *encoder =
				intel_attached_encoder(connector);

			if (hpd_event_bits & hpd_bit)
				connector->hotplug_retries = 0;
			else
				connector->hotplug_retries++;

			drm_dbg_kms(display->drm,
				    "Connector %s (pin %i) received hotplug event. (retry %d)\n",
				    connector->base.name, pin,
				    connector->hotplug_retries);

			switch (encoder->hotplug(encoder, connector)) {
			case INTEL_HOTPLUG_UNCHANGED:
				break;
			case INTEL_HOTPLUG_CHANGED:
				changed |= hpd_bit;
				changed_connectors++;
				if (!first_changed_connector) {
					drm_connector_get(&connector->base);
					first_changed_connector = &connector->base;
				}
				break;
			case INTEL_HOTPLUG_RETRY:
				retry |= hpd_bit;
				break;
			}
		}
	}
	drm_connector_list_iter_end(&conn_iter);
	mutex_unlock(&display->drm->mode_config.mutex);

	if (changed_connectors == 1)
		drm_kms_helper_connector_hotplug_event(first_changed_connector);
	else if (changed_connectors > 0)
		drm_kms_helper_hotplug_event(display->drm);

	if (first_changed_connector)
		drm_connector_put(first_changed_connector);

	/* Remove shared HPD pins that have changed */
	retry &= ~changed;
	if (retry) {
		spin_lock_irq(&display->irq.lock);
		display->hotplug.retry_bits |= retry;

		mod_delayed_detection_work(display,
					   &display->hotplug.hotplug_work,
					   msecs_to_jiffies(HPD_RETRY_DELAY));
		spin_unlock_irq(&display->irq.lock);
	}
}

/**
 * intel_hpd_irq_handler - main hotplug irq handler
 * @display: display device
 * @pin_mask: a mask of hpd pins that have triggered the irq
 * @long_mask: a mask of hpd pins that may be long hpd pulses
 *
 * This is the main hotplug irq handler for all platforms. The platform specific
 * irq handlers call the platform specific hotplug irq handlers, which read and
 * decode the appropriate registers into bitmasks about hpd pins that have
 * triggered (@pin_mask), and which of those pins may be long pulses
 * (@long_mask). The @long_mask is ignored if the port corresponding to the pin
 * is not a digital port.
 *
 * Here, we do hotplug irq storm detection and mitigation, and pass further
 * processing to appropriate bottom halves.
 */
void intel_hpd_irq_handler(struct intel_display *display,
			   u32 pin_mask, u32 long_mask)
{
	struct intel_encoder *encoder;
	bool storm_detected = false;
	bool queue_dig = false, queue_hp = false;
	u32 long_hpd_pulse_mask = 0;
	u32 short_hpd_pulse_mask = 0;
	enum hpd_pin pin;

	if (!pin_mask)
		return;

	spin_lock(&display->irq.lock);

	/*
	 * Determine whether ->hpd_pulse() exists for each pin, and
	 * whether we have a short or a long pulse. This is needed
	 * as each pin may have up to two encoders (HDMI and DP) and
	 * only the one of them (DP) will have ->hpd_pulse().
	 */
	for_each_intel_encoder(display->drm, encoder) {
		bool long_hpd;

		pin = encoder->hpd_pin;
		if (!(BIT(pin) & pin_mask))
			continue;

		if (!intel_encoder_has_hpd_pulse(encoder))
			continue;

		long_hpd = long_mask & BIT(pin);

		drm_dbg(display->drm,
			"digital hpd on [ENCODER:%d:%s] - %s\n",
			encoder->base.base.id, encoder->base.name,
			long_hpd ? "long" : "short");

		if (!hpd_pin_is_blocked(display, pin))
			queue_dig = true;

		if (long_hpd) {
			long_hpd_pulse_mask |= BIT(pin);
			display->hotplug.long_hpd_pin_mask |= BIT(pin);
		} else {
			short_hpd_pulse_mask |= BIT(pin);
			display->hotplug.short_hpd_pin_mask |= BIT(pin);
		}
	}

	/* Now process each pin just once */
	for_each_hpd_pin(pin) {
		bool long_hpd;

		if (!(BIT(pin) & pin_mask))
			continue;

		if (display->hotplug.stats[pin].state == HPD_DISABLED) {
			/*
			 * On GMCH platforms the interrupt mask bits only
			 * prevent irq generation, not the setting of the
			 * hotplug bits itself. So only WARN about unexpected
			 * interrupts on saner platforms.
			 */
			drm_WARN_ONCE(display->drm, !HAS_GMCH(display),
				      "Received HPD interrupt on pin %d although disabled\n",
				      pin);
			continue;
		}

		if (display->hotplug.stats[pin].state != HPD_ENABLED)
			continue;

		/*
		 * Delegate to ->hpd_pulse() if one of the encoders for this
		 * pin has it, otherwise let the hotplug_work deal with this
		 * pin directly.
		 */
		if (((short_hpd_pulse_mask | long_hpd_pulse_mask) & BIT(pin))) {
			long_hpd = long_hpd_pulse_mask & BIT(pin);
		} else {
			display->hotplug.event_bits |= BIT(pin);
			long_hpd = true;

			if (!hpd_pin_is_blocked(display, pin))
				queue_hp = true;
		}

		if (intel_hpd_irq_storm_detect(display, pin, long_hpd)) {
			display->hotplug.event_bits &= ~BIT(pin);
			storm_detected = true;
			queue_hp = true;
		}
	}

	/*
	 * Disable any IRQs that storms were detected on. Polling enablement
	 * happens later in our hotplug work.
	 */
	if (storm_detected)
		intel_hpd_irq_setup(display);

	/*
	 * Our hotplug handler can grab modeset locks (by calling down into the
	 * fb helpers). Hence it must not be run on our own dev-priv->wq work
	 * queue for otherwise the flush_work in the pageflip code will
	 * deadlock.
	 */
	if (queue_dig)
		queue_work(display->hotplug.dp_wq, &display->hotplug.dig_port_work);
	if (queue_hp)
		queue_delayed_detection_work(display,
					     &display->hotplug.hotplug_work, 0);

	spin_unlock(&display->irq.lock);
}

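/*
 * Sketch of a platform interrupt handler feeding the function above
 * (illustrative pseudocode only; the real register decoding lives in
 * intel_hotplug_irq.c and is platform specific):
 *
 *	u32 pin_mask = 0, long_mask = 0;
 *
 *	// decode hardware hotplug status bits into HPD pins
 *	if (hw_status & HW_PORT_B_HOTPLUG) {
 *		pin_mask |= BIT(HPD_PORT_B);
 *		if (hw_status & HW_PORT_B_LONG_PULSE)
 *			long_mask |= BIT(HPD_PORT_B);
 *	}
 *
 *	intel_hpd_irq_handler(display, pin_mask, long_mask);
 *
 * HW_PORT_B_HOTPLUG and HW_PORT_B_LONG_PULSE are made-up names standing in
 * for the per-platform register bits.
 */
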
/**
 * intel_hpd_init - initializes and enables hpd support
 * @display: display device instance
 *
 * This function enables the hotplug support. It requires that interrupts have
 * already been enabled with intel_irq_init_hw(). From this point on hotplug and
 * poll request can run concurrently to other code, so locking rules must be
 * obeyed.
 *
 * This is a separate step from interrupt enabling to simplify the locking rules
 * in the driver load and resume code.
 *
 * Also see: intel_hpd_poll_enable() and intel_hpd_poll_disable().
 */
void intel_hpd_init(struct intel_display *display)
{
	int i;

	if (!HAS_DISPLAY(display))
		return;

	for_each_hpd_pin(i) {
		display->hotplug.stats[i].count = 0;
		display->hotplug.stats[i].state = HPD_ENABLED;
	}

	/*
	 * Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked checks happy.
	 */
	spin_lock_irq(&display->irq.lock);
	intel_hpd_irq_setup(display);
	spin_unlock_irq(&display->irq.lock);
}

static void i915_hpd_poll_detect_connectors(struct intel_display *display)
{
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	struct intel_connector *first_changed_connector = NULL;
	int changed = 0;

	mutex_lock(&display->drm->mode_config.mutex);

	if (!display->drm->mode_config.poll_enabled)
		goto out;

	drm_connector_list_iter_begin(display->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		if (!(connector->base.polled & DRM_CONNECTOR_POLL_HPD))
			continue;

		if (intel_hotplug_detect_connector(connector) != INTEL_HOTPLUG_CHANGED)
			continue;

		changed++;

		if (changed == 1) {
			drm_connector_get(&connector->base);
			first_changed_connector = connector;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

out:
	mutex_unlock(&display->drm->mode_config.mutex);

	if (!changed)
		return;

	if (changed == 1)
		drm_kms_helper_connector_hotplug_event(&first_changed_connector->base);
	else
		drm_kms_helper_hotplug_event(display->drm);

	drm_connector_put(&first_changed_connector->base);
}

static void i915_hpd_poll_init_work(struct work_struct *work)
{
	struct intel_display *display =
		container_of(work, typeof(*display), hotplug.poll_init_work);
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	intel_wakeref_t wakeref;
	bool enabled;

	mutex_lock(&display->drm->mode_config.mutex);

	enabled = READ_ONCE(display->hotplug.poll_enabled);
	/*
	 * Prevent taking a power reference from this sequence of
	 * i915_hpd_poll_init_work() -> drm_helper_hpd_irq_event() ->
	 * connector detect which would requeue i915_hpd_poll_init_work()
	 * and so risk an endless loop of this same sequence.
	 */
	if (!enabled) {
		wakeref = intel_display_power_get(display,
						  POWER_DOMAIN_DISPLAY_CORE);
		drm_WARN_ON(display->drm,
			    READ_ONCE(display->hotplug.poll_enabled));
		cancel_work(&display->hotplug.poll_init_work);
	}

	spin_lock_irq(&display->irq.lock);

	drm_connector_list_iter_begin(display->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		enum hpd_pin pin;

		pin = intel_connector_hpd_pin(connector);
		if (pin == HPD_NONE)
			continue;

		if (display->hotplug.stats[pin].state == HPD_DISABLED)
			continue;

		connector->base.polled = connector->polled;

		if (enabled && connector->base.polled == DRM_CONNECTOR_POLL_HPD)
			connector->base.polled = DRM_CONNECTOR_POLL_CONNECT |
				DRM_CONNECTOR_POLL_DISCONNECT;
	}
	drm_connector_list_iter_end(&conn_iter);

	spin_unlock_irq(&display->irq.lock);

	if (enabled)
		drm_kms_helper_poll_reschedule(display->drm);

	mutex_unlock(&display->drm->mode_config.mutex);

	/*
	 * We might have missed any hotplugs that happened while we were
	 * in the middle of disabling polling
	 */
	if (!enabled) {
		i915_hpd_poll_detect_connectors(display);

		intel_display_power_put(display,
					POWER_DOMAIN_DISPLAY_CORE,
					wakeref);
	}
}

/**
 * intel_hpd_poll_enable - enable polling for connectors with hpd
 * @display: display device instance
 *
 * This function enables polling for all connectors which support HPD.
 * Under certain conditions HPD may not be functional. On most Intel GPUs,
 * this happens when we enter runtime suspend.
 * On Valleyview and Cherryview systems, this also happens when we shut off all
 * of the powerwells.
 *
 * Since this function can get called in contexts where we're already holding
 * dev->mode_config.mutex, we do the actual hotplug enabling in a separate
 * worker.
 *
 * Also see: intel_hpd_init() and intel_hpd_poll_disable().
 */
void intel_hpd_poll_enable(struct intel_display *display)
{
	if (!HAS_DISPLAY(display) || !intel_display_device_enabled(display))
		return;

	WRITE_ONCE(display->hotplug.poll_enabled, true);

	/*
	 * We might already be holding dev->mode_config.mutex, so do this in a
	 * separate worker
	 * As well, there's no issue if we race here since we always reschedule
	 * this worker anyway
	 */
	spin_lock_irq(&display->irq.lock);
	queue_detection_work(display,
			     &display->hotplug.poll_init_work);
	spin_unlock_irq(&display->irq.lock);
}

| 883 |  | 
|---|
| 884 | /** | 
|---|
| 885 | * intel_hpd_poll_disable - disable polling for connectors with hpd | 
|---|
| 886 | * @display: display device instance | 
|---|
| 887 | * | 
|---|
| 888 | * This function disables polling for all connectors which support HPD. | 
|---|
| 889 | * Under certain conditions HPD may not be functional. On most Intel GPUs, | 
|---|
| 890 | * this happens when we enter runtime suspend. | 
|---|
| 891 | * On Valleyview and Cherryview systems, this also happens when we shut off all | 
|---|
| 892 | * of the powerwells. | 
|---|
| 893 | * | 
|---|
| 894 | * Since this function can get called in contexts where we're already holding | 
|---|
| 895 | * dev->mode_config.mutex, we do the actual hotplug enabling in a separate | 
|---|
| 896 | * worker. | 
|---|
| 897 | * | 
|---|
| 898 | * Also used during driver init to initialize connector->polled | 
|---|
| 899 | * appropriately for all connectors. | 
|---|
| 900 | * | 
|---|
| 901 | * Also see: intel_hpd_init() and intel_hpd_poll_enable(). | 
|---|
| 902 | */ | 
|---|
| 903 | void intel_hpd_poll_disable(struct intel_display *display) | 
|---|
| 904 | { | 
|---|
| 905 | struct intel_encoder *encoder; | 
|---|
| 906 |  | 
|---|
| 907 | if (!HAS_DISPLAY(display)) | 
|---|
| 908 | return; | 
|---|
| 909 |  | 
|---|
| 910 | for_each_intel_dp(display->drm, encoder) | 
|---|
| 911 | intel_dp_dpcd_set_probe(intel_dp: enc_to_intel_dp(encoder), force_on_external: true); | 
|---|
| 912 |  | 
|---|
| 913 | WRITE_ONCE(display->hotplug.poll_enabled, false); | 
|---|
| 914 |  | 
|---|
| 915 | spin_lock_irq(lock: &display->irq.lock); | 
|---|
| 916 | queue_detection_work(display, | 
|---|
| 917 | work: &display->hotplug.poll_init_work); | 
|---|
| 918 | spin_unlock_irq(lock: &display->irq.lock); | 
|---|
| 919 | } | 
|---|
| 920 |  | 
|---|
| 921 | void intel_hpd_poll_fini(struct intel_display *display) | 
|---|
| 922 | { | 
|---|
| 923 | struct intel_connector *connector; | 
|---|
| 924 | struct drm_connector_list_iter conn_iter; | 
|---|
| 925 |  | 
|---|
| 926 | /* Kill all the work that may have been queued by hpd. */ | 
|---|
| 927 | drm_connector_list_iter_begin(dev: display->drm, iter: &conn_iter); | 
|---|
| 928 | for_each_intel_connector_iter(connector, &conn_iter) { | 
|---|
| 929 | intel_connector_cancel_modeset_retry_work(connector); | 
|---|
| 930 | intel_hdcp_cancel_works(connector); | 
|---|
| 931 | } | 
|---|
| 932 | drm_connector_list_iter_end(iter: &conn_iter); | 
|---|
| 933 | } | 
|---|
| 934 |  | 
|---|
| 935 | void intel_hpd_init_early(struct intel_display *display) | 
|---|
| 936 | { | 
|---|
| 937 | INIT_DELAYED_WORK(&display->hotplug.hotplug_work, | 
|---|
| 938 | i915_hotplug_work_func); | 
|---|
| 939 | INIT_WORK(&display->hotplug.dig_port_work, i915_digport_work_func); | 
|---|
| 940 | INIT_WORK(&display->hotplug.poll_init_work, i915_hpd_poll_init_work); | 
|---|
| 941 | INIT_DELAYED_WORK(&display->hotplug.reenable_work, | 
|---|
| 942 | intel_hpd_irq_storm_reenable_work); | 
|---|
| 943 |  | 
|---|
| 944 | display->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD; | 
|---|
| 945 | /* If we have MST support, we want to avoid doing short HPD IRQ storm | 
|---|
| 946 | * detection, as short HPD storms will occur as a natural part of | 
|---|
| 947 | * sideband messaging with MST. | 
|---|
| 948 | * On older platforms however, IRQ storms can occur with both long and | 
|---|
| 949 | * short pulses, as seen on some G4x systems. | 
|---|
| 950 | */ | 
|---|
| 951 | display->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(display); | 
|---|
| 952 | } | 
|---|
| 953 |  | 
|---|
| 954 | static bool cancel_all_detection_work(struct intel_display *display) | 
|---|
| 955 | { | 
|---|
| 956 | bool was_pending = false; | 
|---|
| 957 |  | 
|---|
| 958 | if (cancel_delayed_work_sync(dwork: &display->hotplug.hotplug_work)) | 
|---|
| 959 | was_pending = true; | 
|---|
| 960 | if (cancel_work_sync(work: &display->hotplug.poll_init_work)) | 
|---|
| 961 | was_pending = true; | 
|---|
| 962 | if (cancel_delayed_work_sync(dwork: &display->hotplug.reenable_work)) | 
|---|
| 963 | was_pending = true; | 
|---|
| 964 |  | 
|---|
| 965 | return was_pending; | 
|---|
| 966 | } | 
|---|
| 967 |  | 
|---|
| 968 | void intel_hpd_cancel_work(struct intel_display *display) | 
|---|
| 969 | { | 
|---|
| 970 | if (!HAS_DISPLAY(display)) | 
|---|
| 971 | return; | 
|---|
| 972 |  | 
|---|
| 973 | spin_lock_irq(lock: &display->irq.lock); | 
|---|
| 974 |  | 
|---|
| 975 | display->hotplug.long_hpd_pin_mask = 0; | 
|---|
| 976 | display->hotplug.short_hpd_pin_mask = 0; | 
|---|
| 977 | display->hotplug.event_bits = 0; | 
|---|
| 978 | display->hotplug.retry_bits = 0; | 
|---|
| 979 |  | 
|---|
| 980 | spin_unlock_irq(lock: &display->irq.lock); | 
|---|
| 981 |  | 
|---|
| 982 | cancel_work_sync(work: &display->hotplug.dig_port_work); | 
|---|
| 983 |  | 
|---|
| 984 | /* | 
|---|
| 985 | * All other work triggered by hotplug events should be canceled by | 
|---|
| 986 | * now. | 
|---|
| 987 | */ | 
|---|
| 988 | if (cancel_all_detection_work(display)) | 
|---|
| 989 | drm_dbg_kms(display->drm, "Hotplug detection work still active\n"); | 
|---|
| 990 | } | 
|---|
| 991 |  | 
|---|
| 992 | static void queue_work_for_missed_irqs(struct intel_display *display) | 
|---|
| 993 | { | 
|---|
| 994 | struct intel_hotplug *hotplug = &display->hotplug; | 
|---|
| 995 | bool queue_hp_work = false; | 
|---|
| 996 | u32 blocked_hpd_pin_mask; | 
|---|
| 997 | enum hpd_pin pin; | 
|---|
| 998 |  | 
|---|
| 999 | lockdep_assert_held(&display->irq.lock); | 
|---|
| 1000 |  | 
|---|
| 1001 | blocked_hpd_pin_mask = get_blocked_hpd_pin_mask(display); | 
|---|
| 1002 | if ((hotplug->event_bits | hotplug->retry_bits) & ~blocked_hpd_pin_mask) | 
|---|
| 1003 | queue_hp_work = true; | 
|---|
| 1004 |  | 
|---|
| 1005 | for_each_hpd_pin(pin) { | 
|---|
| 1006 | switch (display->hotplug.stats[pin].state) { | 
|---|
| 1007 | case HPD_MARK_DISABLED: | 
|---|
| 1008 | queue_hp_work = true; | 
|---|
| 1009 | break; | 
|---|
| 1010 | case HPD_DISABLED: | 
|---|
| 1011 | case HPD_ENABLED: | 
|---|
| 1012 | break; | 
|---|
| 1013 | default: | 
|---|
| 1014 | MISSING_CASE(display->hotplug.stats[pin].state); | 
|---|
| 1015 | } | 
|---|
| 1016 | } | 
|---|
| 1017 |  | 
|---|
| 1018 | if ((hotplug->long_hpd_pin_mask | hotplug->short_hpd_pin_mask) & ~blocked_hpd_pin_mask) | 
|---|
| 1019 | queue_work(wq: hotplug->dp_wq, work: &hotplug->dig_port_work); | 
|---|
| 1020 |  | 
|---|
| 1021 | if (queue_hp_work) | 
|---|
| 1022 | queue_delayed_detection_work(display, work: &display->hotplug.hotplug_work, delay: 0); | 
|---|
| 1023 | } | 
|---|
| 1024 |  | 
|---|
static bool block_hpd_pin(struct intel_display *display, enum hpd_pin pin)
{
	struct intel_hotplug *hotplug = &display->hotplug;

	lockdep_assert_held(&display->irq.lock);

	hotplug->stats[pin].blocked_count++;

	return hotplug->stats[pin].blocked_count == 1;
}

static bool unblock_hpd_pin(struct intel_display *display, enum hpd_pin pin)
{
	struct intel_hotplug *hotplug = &display->hotplug;

	lockdep_assert_held(&display->irq.lock);

	if (drm_WARN_ON(display->drm, hotplug->stats[pin].blocked_count == 0))
		return true;

	hotplug->stats[pin].blocked_count--;

	return hotplug->stats[pin].blocked_count == 0;
}

/**
 * intel_hpd_block - Block handling of HPD IRQs on an HPD pin
 * @encoder: Encoder to block the HPD handling for
 *
 * Blocks the handling of HPD IRQs on the HPD pin of @encoder.
 *
 * On return:
 *
 * - It's guaranteed that the blocked encoders' HPD pulse handler
 *   (via intel_digital_port::hpd_pulse()) is not running.
 * - The hotplug event handling (via intel_encoder::hotplug()) of an
 *   HPD IRQ pending at the time this function is called may be still
 *   running.
 * - Detection on the encoder's connector (via
 *   drm_connector_helper_funcs::detect_ctx(),
 *   drm_connector_funcs::detect()) remains allowed, for instance as part of
 *   userspace connector probing, or DRM core's connector polling.
 *
 * The call must be followed by calling intel_hpd_unblock(), or
 * intel_hpd_clear_and_unblock().
 *
 * Note that the handling of HPD IRQs for another encoder using the same HPD
 * pin as that of @encoder will be also blocked.
 */
void intel_hpd_block(struct intel_encoder *encoder)
{
	struct intel_display *display = to_intel_display(encoder);
	struct intel_hotplug *hotplug = &display->hotplug;
	bool do_flush = false;

	if (encoder->hpd_pin == HPD_NONE)
		return;

	spin_lock_irq(&display->irq.lock);

	if (block_hpd_pin(display, encoder->hpd_pin))
		do_flush = true;

	spin_unlock_irq(&display->irq.lock);

	if (do_flush && hpd_pin_has_pulse(display, encoder->hpd_pin))
		flush_work(&hotplug->dig_port_work);
}

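/*
 * Illustrative pairing (a sketch, not lifted from an actual caller):
 *
 *	intel_hpd_block(encoder);
 *	... do work that must not race with the HPD pulse handler ...
 *	intel_hpd_clear_and_unblock(encoder);	// or intel_hpd_unblock()
 *
 * Blocking nests via blocked_count, so concurrent blockers are allowed.
 */
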
/**
 * intel_hpd_unblock - Unblock handling of HPD IRQs on an HPD pin
 * @encoder: Encoder to unblock the HPD handling for
 *
 * Unblock the handling of HPD IRQs on the HPD pin of @encoder, which was
 * previously blocked by intel_hpd_block(). Any HPD IRQ raised on the
 * HPD pin while it was blocked will be handled for @encoder and for any
 * other encoder sharing the same HPD pin.
 */
void intel_hpd_unblock(struct intel_encoder *encoder)
{
	struct intel_display *display = to_intel_display(encoder);

	if (encoder->hpd_pin == HPD_NONE)
		return;

	spin_lock_irq(&display->irq.lock);

	if (unblock_hpd_pin(display, encoder->hpd_pin))
		queue_work_for_missed_irqs(display);

	spin_unlock_irq(&display->irq.lock);
}

/**
 * intel_hpd_clear_and_unblock - Unblock handling of new HPD IRQs on an HPD pin
 * @encoder: Encoder to unblock the HPD handling for
 *
 * Unblock the handling of HPD IRQs on the HPD pin of @encoder, which was
 * previously blocked by intel_hpd_block(). Any HPD IRQ raised on the
 * HPD pin while it was blocked will be cleared, handling only new IRQs.
 */
void intel_hpd_clear_and_unblock(struct intel_encoder *encoder)
{
	struct intel_display *display = to_intel_display(encoder);
	struct intel_hotplug *hotplug = &display->hotplug;
	enum hpd_pin pin = encoder->hpd_pin;

	if (pin == HPD_NONE)
		return;

	spin_lock_irq(&display->irq.lock);

	if (unblock_hpd_pin(display, pin)) {
		hotplug->event_bits &= ~BIT(pin);
		hotplug->retry_bits &= ~BIT(pin);
		hotplug->short_hpd_pin_mask &= ~BIT(pin);
		hotplug->long_hpd_pin_mask &= ~BIT(pin);
	}

	spin_unlock_irq(&display->irq.lock);
}

void intel_hpd_enable_detection_work(struct intel_display *display)
{
	spin_lock_irq(&display->irq.lock);
	display->hotplug.detection_work_enabled = true;
	queue_work_for_missed_irqs(display);
	spin_unlock_irq(&display->irq.lock);
}

void intel_hpd_disable_detection_work(struct intel_display *display)
{
	spin_lock_irq(&display->irq.lock);
	display->hotplug.detection_work_enabled = false;
	spin_unlock_irq(&display->irq.lock);

	cancel_all_detection_work(display);
}

bool intel_hpd_schedule_detection(struct intel_display *display)
{
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&display->irq.lock, flags);
	ret = queue_delayed_detection_work(display, &display->hotplug.hotplug_work, 0);
	spin_unlock_irqrestore(&display->irq.lock, flags);

	return ret;
}

static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
{
	struct intel_display *display = m->private;
	struct drm_i915_private *dev_priv = to_i915(display->drm);
	struct intel_hotplug *hotplug = &display->hotplug;

	/* Synchronize with everything first in case there's been an HPD
	 * storm, but we haven't finished handling it in the kernel yet
	 */
	intel_synchronize_irq(dev_priv);
	flush_work(&display->hotplug.dig_port_work);
	flush_delayed_work(&display->hotplug.hotplug_work);

	seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
	seq_printf(m, "Detected: %s\n",
		   str_yes_no(delayed_work_pending(&hotplug->reenable_work)));

	return 0;
}

static ssize_t i915_hpd_storm_ctl_write(struct file *file,
					const char __user *ubuf, size_t len,
					loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct intel_display *display = m->private;
	struct intel_hotplug *hotplug = &display->hotplug;
	unsigned int new_threshold;
	int i;
	char *newline;
	char tmp[16];

	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	/* Strip newline, if any */
	newline = strchr(tmp, '\n');
	if (newline)
		*newline = '\0';

	if (strcmp(tmp, "reset") == 0)
		new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
	else if (kstrtouint(tmp, 10, &new_threshold) != 0)
		return -EINVAL;

	if (new_threshold > 0)
		drm_dbg_kms(display->drm,
			    "Setting HPD storm detection threshold to %d\n",
			    new_threshold);
	else
		drm_dbg_kms(display->drm, "Disabling HPD storm detection\n");

	spin_lock_irq(&display->irq.lock);
	hotplug->hpd_storm_threshold = new_threshold;
	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
	for_each_hpd_pin(i)
		hotplug->stats[i].count = 0;
	spin_unlock_irq(&display->irq.lock);

	/* Re-enable hpd immediately if we were in an irq storm */
	flush_delayed_work(&display->hotplug.reenable_work);

	return len;
}

static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
}

static const struct file_operations i915_hpd_storm_ctl_fops = {
	.owner = THIS_MODULE,
	.open = i915_hpd_storm_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_hpd_storm_ctl_write
};

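/*
 * Typical use of the interface above from userspace (illustrative shell
 * session; the debugfs path assumes DRM card 0):
 *
 *	# echo 2 > /sys/kernel/debug/dri/0/i915_hpd_storm_ctl
 *	# cat /sys/kernel/debug/dri/0/i915_hpd_storm_ctl
 *	Threshold: 2
 *	Detected: no
 *	# echo reset > /sys/kernel/debug/dri/0/i915_hpd_storm_ctl
 */
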
static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
{
	struct intel_display *display = m->private;

	seq_printf(m, "Enabled: %s\n",
		   str_yes_no(display->hotplug.hpd_short_storm_enabled));

	return 0;
}

static int
i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_hpd_short_storm_ctl_show,
			   inode->i_private);
}

static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
					      const char __user *ubuf,
					      size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct intel_display *display = m->private;
	struct intel_hotplug *hotplug = &display->hotplug;
	char *newline;
	char tmp[16];
	int i;
	bool new_state;

	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	/* Strip newline, if any */
	newline = strchr(tmp, '\n');
	if (newline)
		*newline = '\0';

	/* Reset to the "default" state for this system */
	if (strcmp(tmp, "reset") == 0)
		new_state = !HAS_DP_MST(display);
	else if (kstrtobool(tmp, &new_state) != 0)
		return -EINVAL;

	drm_dbg_kms(display->drm, "%sabling HPD short storm detection\n",
		    new_state ? "En" : "Dis");

	spin_lock_irq(&display->irq.lock);
	hotplug->hpd_short_storm_enabled = new_state;
	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
	for_each_hpd_pin(i)
		hotplug->stats[i].count = 0;
	spin_unlock_irq(&display->irq.lock);

	/* Re-enable hpd immediately if we were in an irq storm */
	flush_delayed_work(&display->hotplug.reenable_work);

	return len;
}

static const struct file_operations i915_hpd_short_storm_ctl_fops = {
	.owner = THIS_MODULE,
	.open = i915_hpd_short_storm_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_hpd_short_storm_ctl_write,
};

void intel_hpd_debugfs_register(struct intel_display *display)
{
	struct dentry *debugfs_root = display->drm->debugfs_root;

	debugfs_create_file("i915_hpd_storm_ctl", 0644, debugfs_root,
			    display, &i915_hpd_storm_ctl_fops);
	debugfs_create_file("i915_hpd_short_storm_ctl", 0644, debugfs_root,
			    display, &i915_hpd_short_storm_ctl_fops);
	debugfs_create_bool("i915_ignore_long_hpd", 0644, debugfs_root,
			    &display->hotplug.ignore_long_hpd);
}