// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code. Detailed
 * information is available in Documentation/core-api/genericirq.rst
 *
 */

#include <linux/irq.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>

#include <asm/irq_regs.h>

#include <trace/events/irq.h>

#include "internals.h"

#ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
void (*handle_arch_irq)(struct pt_regs *) __ro_after_init;
#endif

/**
 * handle_bad_irq - handle spurious and unhandled irqs
 * @desc:      description of the interrupt
 *
 * Handles spurious and unhandled IRQs. It also prints a debug message.
 */
void handle_bad_irq(struct irq_desc *desc)
{
	unsigned int irq = irq_desc_get_irq(desc);

	print_irq_desc(irq, desc);
	kstat_incr_irqs_this_cpu(desc);
	ack_bad_irq(irq);
}
EXPORT_SYMBOL_GPL(handle_bad_irq);

/*
 * Special, empty irq handler:
 */
irqreturn_t no_action(int cpl, void *dev_id)
{
	return IRQ_NONE;
}
EXPORT_SYMBOL_GPL(no_action);

static void warn_no_thread(unsigned int irq, struct irqaction *action)
{
	if (test_and_set_bit(IRQTF_WARNED, &action->thread_flags))
		return;

	printk(KERN_WARNING "IRQ %d device %s returned IRQ_WAKE_THREAD "
	       "but no thread function available.", irq, action->name);
}
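
/*
 * Example (illustrative sketch; the my_dev_* names are hypothetical):
 * the usual way a driver avoids the warning above is to register both a
 * hard irq handler and a thread function via request_threaded_irq(), so
 * that returning IRQ_WAKE_THREAD always has a thread_fn to wake:
 *
 *	static irqreturn_t my_dev_hardirq(int irq, void *dev_id)
 *	{
 *		struct my_dev *dev = dev_id;
 *
 *		if (!my_dev_irq_pending(dev))
 *			return IRQ_NONE;	// shared line, not our device
 *		my_dev_mask_irq(dev);		// quiesce the device
 *		return IRQ_WAKE_THREAD;		// defer the real work
 *	}
 *
 *	static irqreturn_t my_dev_thread_fn(int irq, void *dev_id)
 *	{
 *		// runs in process context, may sleep
 *		return IRQ_HANDLED;
 *	}
 *
 *	ret = request_threaded_irq(irq, my_dev_hardirq, my_dev_thread_fn,
 *				   IRQF_SHARED, "my_dev", dev);
 */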

void __irq_wake_thread(struct irq_desc *desc, struct irqaction *action)
{
	/*
	 * In case the thread crashed and was killed we just pretend that
	 * we handled the interrupt. The hardirq handler has disabled the
	 * device interrupt, so no irq storm is lurking.
	 */
	if (action->thread->flags & PF_EXITING)
		return;

	/*
	 * Wake up the handler thread for this action. If the
	 * RUNTHREAD bit is already set, nothing to do.
	 */
	if (test_and_set_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		return;

	/*
	 * It's safe to OR the mask lockless here. We have only two
	 * places which write to threads_oneshot: This code and the
	 * irq thread.
	 *
	 * This code is the hard irq context and can never run on two
	 * cpus in parallel. If it ever does we have more serious
	 * problems than this bitmask.
	 *
	 * The irq threads of this irq which clear their "running" bit
	 * in threads_oneshot are serialized via desc->lock against
	 * each other and they are serialized against this code by
	 * IRQS_INPROGRESS.
	 *
	 * Hard irq handler:
	 *
	 *	spin_lock(desc->lock);
	 *	desc->state |= IRQS_INPROGRESS;
	 *	spin_unlock(desc->lock);
	 *	set_bit(IRQTF_RUNTHREAD, &action->thread_flags);
	 *	desc->threads_oneshot |= mask;
	 *	spin_lock(desc->lock);
	 *	desc->state &= ~IRQS_INPROGRESS;
	 *	spin_unlock(desc->lock);
	 *
	 * irq thread:
	 *
	 * again:
	 *	spin_lock(desc->lock);
	 *	if (desc->state & IRQS_INPROGRESS) {
	 *		spin_unlock(desc->lock);
	 *		while(desc->state & IRQS_INPROGRESS)
	 *			cpu_relax();
	 *		goto again;
	 *	}
	 *	if (!test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
	 *		desc->threads_oneshot &= ~mask;
	 *	spin_unlock(desc->lock);
	 *
	 * So either the thread waits for us to clear IRQS_INPROGRESS
	 * or we are waiting in the flow handler for desc->lock to be
	 * released before we reach this point. The thread also checks
	 * IRQTF_RUNTHREAD under desc->lock. If set it leaves
	 * threads_oneshot untouched and runs the thread another time.
	 */
	desc->threads_oneshot |= action->thread_mask;

	/*
	 * We increment the threads_active counter in case we wake up
	 * the irq thread. The irq thread decrements the counter when
	 * it returns from the handler or in the exit path and wakes
	 * up waiters which are stuck in synchronize_irq() when the
	 * active count becomes zero. synchronize_irq() is serialized
	 * against this code (hard irq handler) via IRQS_INPROGRESS
	 * like the finalize_oneshot() code. See comment above.
	 */
	atomic_inc(&desc->threads_active);

	wake_up_process(action->thread);
}

static DEFINE_STATIC_KEY_FALSE(irqhandler_duration_check_enabled);
static u64 irqhandler_duration_threshold_ns __ro_after_init;

static int __init irqhandler_duration_check_setup(char *arg)
{
	unsigned long val;
	int ret;

	ret = kstrtoul(arg, 0, &val);
	if (ret) {
		pr_err("Unable to parse irqhandler.duration_warn_us setting: ret=%d\n", ret);
		return 0;
	}

	if (!val) {
		pr_err("Invalid irqhandler.duration_warn_us setting, must be > 0\n");
		return 0;
	}

	irqhandler_duration_threshold_ns = val * 1000;
	static_branch_enable(&irqhandler_duration_check_enabled);

	return 1;
}
__setup("irqhandler.duration_warn_us=", irqhandler_duration_check_setup);
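
/*
 * Example (derived from the setup code above): booting with
 *
 *	irqhandler.duration_warn_us=500
 *
 * stores 500 * 1000 ns as the threshold, arms the static branch, and
 * results in a ratelimited warning whenever a single hard irq handler
 * invocation takes longer than 500 microseconds.
 */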

static inline void irqhandler_duration_check(u64 ts_start, unsigned int irq,
					     const struct irqaction *action)
{
	u64 delta_ns = local_clock() - ts_start;

	if (unlikely(delta_ns > irqhandler_duration_threshold_ns)) {
		pr_warn_ratelimited("[CPU%u] long duration of IRQ[%u:%ps], took: %llu us\n",
				    smp_processor_id(), irq, action->handler,
				    div_u64(delta_ns, NSEC_PER_USEC));
	}
}

irqreturn_t __handle_irq_event_percpu(struct irq_desc *desc)
{
	irqreturn_t retval = IRQ_NONE;
	unsigned int irq = desc->irq_data.irq;
	struct irqaction *action;

	record_irq_time(desc);

	for_each_action_of_desc(desc, action) {
		irqreturn_t res;

		/*
		 * If this IRQ would be threaded under force_irqthreads,
		 * mark it so.
		 */
		if (irq_settings_can_thread(desc) &&
		    !(action->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT)))
			lockdep_hardirq_threaded();

		trace_irq_handler_entry(irq, action);

		if (static_branch_unlikely(&irqhandler_duration_check_enabled)) {
			u64 ts_start = local_clock();

			res = action->handler(irq, action->dev_id);
			irqhandler_duration_check(ts_start, irq, action);
		} else {
			res = action->handler(irq, action->dev_id);
		}

		trace_irq_handler_exit(irq, action, res);

		if (WARN_ONCE(!irqs_disabled(), "irq %u handler %pS enabled interrupts\n",
			      irq, action->handler))
			local_irq_disable();

		switch (res) {
		case IRQ_WAKE_THREAD:
			/*
			 * Catch drivers which return WAKE_THREAD but
			 * did not set up a thread function
			 */
			if (unlikely(!action->thread_fn)) {
				warn_no_thread(irq, action);
				break;
			}

			__irq_wake_thread(desc, action);
			break;

		default:
			break;
		}

		retval |= res;
	}

	return retval;
}

irqreturn_t handle_irq_event_percpu(struct irq_desc *desc)
{
	irqreturn_t retval;

	retval = __handle_irq_event_percpu(desc);

	add_interrupt_randomness(desc->irq_data.irq);

	if (!irq_settings_no_debug(desc))
		note_interrupt(desc, retval);
	return retval;
}
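
/*
 * Example (illustrative sketch; the my_dev_* and MY_DEV_* names are
 * hypothetical): a plain, non-threaded handler on a shared line should
 * return IRQ_NONE when its device did not raise the interrupt, so the
 * OR-ed return value computed above stays IRQ_NONE and note_interrupt()
 * can detect spurious or stuck lines:
 *
 *	static irqreturn_t my_dev_irq(int irq, void *dev_id)
 *	{
 *		struct my_dev *dev = dev_id;
 *		u32 stat = readl(dev->regs + MY_DEV_IRQ_STATUS);
 *
 *		if (!stat)
 *			return IRQ_NONE;	// not our device
 *		writel(stat, dev->regs + MY_DEV_IRQ_ACK);
 *		return IRQ_HANDLED;
 *	}
 */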

irqreturn_t handle_irq_event(struct irq_desc *desc)
{
	irqreturn_t ret;

	desc->istate &= ~IRQS_PENDING;
	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	raw_spin_unlock(&desc->lock);

	ret = handle_irq_event_percpu(desc);

	raw_spin_lock(&desc->lock);
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	return ret;
}

#ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
int __init set_handle_irq(void (*handle_irq)(struct pt_regs *))
{
	if (handle_arch_irq)
		return -EBUSY;

	handle_arch_irq = handle_irq;
	return 0;
}

/**
 * generic_handle_arch_irq - root irq handler for architectures which do no
 *                           entry accounting themselves
 * @regs:	Register file coming from the low-level handling code
 */
asmlinkage void noinstr generic_handle_arch_irq(struct pt_regs *regs)
{
	struct pt_regs *old_regs;

	irq_enter();
	old_regs = set_irq_regs(regs);
	handle_arch_irq(regs);
	set_irq_regs(old_regs);
	irq_exit();
}
#endif
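
/*
 * Example (illustrative sketch; the my_plat_* names are hypothetical):
 * on a CONFIG_GENERIC_IRQ_MULTI_HANDLER architecture, an irqchip driver
 * registers the root handler once at init time, and the architecture's
 * low-level entry code then funnels every interrupt through
 * generic_handle_arch_irq(), which wraps handle_arch_irq() with the
 * irq_enter()/irq_exit() accounting and set_irq_regs():
 *
 *	static void my_plat_handle_irq(struct pt_regs *regs)
 *	{
 *		u32 hwirq = my_plat_read_pending();	// hypothetical register read
 *
 *		generic_handle_domain_irq(my_plat_domain, hwirq);
 *	}
 *
 *	static int __init my_plat_irq_init(void)
 *	{
 *		return set_handle_irq(my_plat_handle_irq);
 *	}
 */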