// SPDX-License-Identifier: GPL-2.0
/*
 * Generic CPU hot-unplug interrupt migration code copied from the
 * arch/arm implementation
 *
 * Copyright (C) Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/interrupt.h>
#include <linux/ratelimit.h>
#include <linux/irq.h>
#include <linux/sched/isolation.h>

#include "internals.h"

/* For !GENERIC_IRQ_EFFECTIVE_AFF_MASK this looks at the general affinity mask */
static inline bool irq_needs_fixup(struct irq_data *d)
{
	const struct cpumask *m = irq_data_get_effective_affinity_mask(d);
	unsigned int cpu = smp_processor_id();

#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	/*
	 * The cpumask_empty() check is a workaround for interrupt chips
	 * which do not implement effective affinity even though the
	 * architecture has enabled the config switch. Use the general
	 * affinity mask instead.
	 */
	if (cpumask_empty(m))
		m = irq_data_get_affinity_mask(d);

	/*
	 * Sanity check. If the mask is not empty when excluding the outgoing
	 * CPU then it must contain at least one online CPU. The outgoing CPU
	 * has been removed from the online mask already.
	 */
	if (cpumask_any_but(m, cpu) < nr_cpu_ids &&
	    !cpumask_intersects(m, cpu_online_mask)) {
		/*
		 * If this happens then there was a missed IRQ fixup at some
		 * point. Warn about it and enforce fixup.
		 */
		pr_warn("Eff. affinity %*pbl of IRQ %u contains only offline CPUs after offlining CPU %u\n",
			cpumask_pr_args(m), d->irq, cpu);
		return true;
	}
#endif
	return cpumask_test_cpu(cpu, m);
}

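/*
 * migrate_one_irq - Move one interrupt off the outgoing CPU.
 *
 * Called with desc->lock held. Returns true when the interrupt's
 * affinity had to be broken, i.e. it could not stay within its original
 * affinity mask and was forced onto some other online CPU, so the
 * caller can emit a diagnostic.
 */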
static bool migrate_one_irq(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	struct irq_chip *chip = irq_data_get_irq_chip(d);
	bool maskchip = !irq_can_move_pcntxt(d) && !irqd_irq_masked(d);
	const struct cpumask *affinity;
	bool brokeaff = false;
	int err;

	/*
	 * The IRQ chip might already be torn down, but the irq descriptor
	 * is still in the radix tree. Also, if the chip has no affinity
	 * setter, nothing can be done here.
	 */
	if (!chip || !chip->irq_set_affinity) {
		pr_debug("IRQ %u: Unable to migrate away\n", d->irq);
		return false;
	}

	/*
	 * Complete a possibly pending irq move cleanup. If this interrupt
	 * was moved in hard irq context, then the vectors need to be
	 * cleaned up here; the cleanup cannot be deferred until the
	 * interrupt next fires on this CPU, as this CPU is going away.
	 */
	irq_force_complete_move(desc);

	/*
	 * No move required if:
	 * - Interrupt is per CPU
	 * - Interrupt is not started
	 * - Affinity mask does not include this CPU.
	 *
	 * Note: Do not check desc->action as this might be a chained
	 * interrupt.
	 */
	if (irqd_is_per_cpu(d) || !irqd_is_started(d) || !irq_needs_fixup(d)) {
		/*
		 * If an irq move is pending, abort it if the dying CPU is
		 * the sole target.
		 */
		irq_fixup_move_pending(desc, false);
		return false;
	}

	/*
	 * If there is a setaffinity pending, then try to reuse the pending
	 * mask, so the last change of the affinity does not get lost. If
	 * there is no move pending or the pending mask does not contain
	 * any online CPU, use the current affinity mask.
	 */
	if (irq_fixup_move_pending(desc, true))
		affinity = irq_desc_get_pending_mask(desc);
	else
		affinity = irq_data_get_affinity_mask(d);

	/* Mask the chip for interrupts which cannot move in process context */
	if (maskchip && chip->irq_mask)
		chip->irq_mask(d);

	if (!cpumask_intersects(affinity, cpu_online_mask)) {
		/*
		 * If the interrupt is managed, then shut it down and leave
		 * the affinity untouched.
		 */
		if (irqd_affinity_is_managed(d)) {
			irqd_set_managed_shutdown(d);
			irq_shutdown_and_deactivate(desc);
			return false;
		}
		affinity = cpu_online_mask;
		brokeaff = true;
	}
	/*
	 * Do not set the force argument of irq_do_set_affinity() as this
	 * disables the masking of offline CPUs from the supplied affinity
	 * mask and therefore might keep/reassign the irq to the outgoing
	 * CPU.
	 */
	err = irq_do_set_affinity(d, affinity, false);

	/*
	 * If there are online CPUs in the affinity mask, but they have no
	 * vectors left to make the migration work, try to break the
	 * affinity by migrating to any online CPU.
	 */
	if (err == -ENOSPC && !irqd_affinity_is_managed(d) && affinity != cpu_online_mask) {
		pr_debug("IRQ%u: set affinity failed for %*pbl, re-try with online CPUs\n",
			 d->irq, cpumask_pr_args(affinity));

		affinity = cpu_online_mask;
		brokeaff = true;

		err = irq_do_set_affinity(d, affinity, false);
	}

	if (err) {
		pr_warn_ratelimited("IRQ%u: set affinity failed(%d).\n",
				    d->irq, err);
		brokeaff = false;
	}

	if (maskchip && chip->irq_unmask)
		chip->irq_unmask(d);

	return brokeaff;
}

/**
 * irq_migrate_all_off_this_cpu - Migrate IRQs away from an offline CPU
 *
 * The current CPU has been marked offline. Migrate IRQs off this CPU.
 * If the affinity settings do not allow other CPUs, force them onto any
 * available CPU.
 *
 * Note: we must iterate over all IRQs, whether they have an attached
 * action structure or not, as we need to get chained interrupts too.
 */
void irq_migrate_all_off_this_cpu(void)
{
	struct irq_desc *desc;
	unsigned int irq;

	for_each_active_irq(irq) {
		bool affinity_broken;

		desc = irq_to_desc(irq);
		scoped_guard(raw_spinlock, &desc->lock)
			affinity_broken = migrate_one_irq(desc);

		if (affinity_broken) {
			pr_debug_ratelimited("IRQ %u: no longer affine to CPU%u\n",
					     irq, smp_processor_id());
		}
	}
}
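
/*
 * Usage sketch (illustrative, not part of this file): an architecture's
 * CPU offline path is expected to invoke this with interrupts disabled,
 * after the dying CPU has been removed from cpu_online_mask, roughly
 * along the lines of:
 *
 *	int __cpu_disable(void)
 *	{
 *		unsigned int cpu = smp_processor_id();
 *
 *		set_cpu_online(cpu, false);
 *		irq_migrate_all_off_this_cpu();
 *		return 0;
 *	}
 */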
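/*
 * Returns true when the managed-IRQ housekeeping mask is in effect, the
 * upcoming @cpu is a housekeeping CPU, and the interrupt's effective
 * affinity currently includes isolated (non-housekeeping) CPUs, i.e.
 * the interrupt should be pulled over to the housekeeping set.
 */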
static bool hk_should_isolate(struct irq_data *data, unsigned int cpu)
{
	const struct cpumask *hk_mask;

	if (!housekeeping_enabled(HK_TYPE_MANAGED_IRQ))
		return false;

	hk_mask = housekeeping_cpumask(HK_TYPE_MANAGED_IRQ);
	if (cpumask_subset(irq_data_get_effective_affinity_mask(data), hk_mask))
		return false;

	return cpumask_test_cpu(cpu, hk_mask);
}
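/*
 * Restore the affinity of one managed interrupt when @cpu comes back
 * online: restart an interrupt which was shut down because all of its
 * target CPUs went offline, and re-evaluate the affinity unless the
 * interrupt is single-target and no housekeeping move is required.
 */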
static void irq_restore_affinity_of_irq(struct irq_desc *desc, unsigned int cpu)
{
	struct irq_data *data = irq_desc_get_irq_data(desc);
	const struct cpumask *affinity = irq_data_get_affinity_mask(data);

	if (!irqd_affinity_is_managed(data) || !desc->action ||
	    !irq_data_get_irq_chip(data) || !cpumask_test_cpu(cpu, affinity))
		return;

	if (irqd_is_managed_and_shutdown(data))
		irq_startup_managed(desc);

	/*
	 * If the interrupt can only be directed to a single target
	 * CPU then it is already assigned to a CPU in the affinity
	 * mask. No point in trying to move it around unless the
	 * isolation mechanism requests to move it to an upcoming
	 * housekeeping CPU.
	 */
	if (!irqd_is_single_target(data) || hk_should_isolate(data, cpu))
		irq_set_affinity_locked(data, affinity, false);
}

/**
 * irq_affinity_online_cpu - Restore affinity for managed interrupts
 * @cpu:	Upcoming CPU for which interrupts should be restored
 */
int irq_affinity_online_cpu(unsigned int cpu)
{
	struct irq_desc *desc;
	unsigned int irq;

	irq_lock_sparse();
	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		scoped_guard(raw_spinlock_irq, &desc->lock)
			irq_restore_affinity_of_irq(desc, cpu);
	}
	irq_unlock_sparse();

	return 0;
}
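
/*
 * Note: this callback is hooked into the CPU hotplug state machine as
 * the CPUHP_AP_IRQ_AFFINITY_ONLINE startup step (see kernel/cpu.c), so
 * it runs on the upcoming CPU early in the onlining sequence.
 */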