// SPDX-License-Identifier: GPL-2.0

#include <linux/irq.h>
#include <linux/interrupt.h>

#include "internals.h"

/**
 * irq_fixup_move_pending - Cleanup irq move pending from a dying CPU
 * @desc:		Interrupt descriptor to clean up
 * @force_clear:	If set, clear the move pending bit unconditionally.
 *			If not set, clear it only when the dying CPU is the
 *			last one in the pending mask.
 *
 * Returns true if the pending bit was set and the pending mask contains an
 * online CPU other than the dying CPU.
 */
bool irq_fixup_move_pending(struct irq_desc *desc, bool force_clear)
{
	struct irq_data *data = irq_desc_get_irq_data(desc);

	if (!irqd_is_setaffinity_pending(data))
		return false;

	/*
	 * The outgoing CPU might be the last online target in a pending
	 * interrupt move. If that's the case, clear the pending move bit.
	 */
	if (!cpumask_intersects(desc->pending_mask, cpu_online_mask)) {
		irqd_clr_move_pending(data);
		return false;
	}
	if (force_clear)
		irqd_clr_move_pending(data);
	return true;
}

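/**
 * irq_force_complete_move - Force complete a pending interrupt move
 * @desc:	Interrupt descriptor of the interrupt to complete
 *
 * Walks the irq_data hierarchy from the top level towards the parents and
 * invokes the first irq_force_complete_move() chip callback it finds.
 * Intended for the CPU hotplug path, where a pending move has to be
 * completed even though the interrupt will not be serviced on the
 * outgoing CPU again.
 */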
void irq_force_complete_move(struct irq_desc *desc)
{
	for (struct irq_data *d = irq_desc_get_irq_data(desc); d; d = irqd_get_parent_data(d)) {
		if (d->chip && d->chip->irq_force_complete_move) {
			d->chip->irq_force_complete_move(d);
			return;
		}
	}
}

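/**
 * irq_move_masked_irq - Perform a pending affinity change
 * @idata:	irq_data of the interrupt to move
 *
 * Carries out an affinity change which was deferred into
 * desc->pending_mask. Requires desc->lock to be held and the interrupt
 * to be masked by the caller. If the underlying vector management
 * reports -EBUSY, the move is rescheduled for the next interrupt.
 */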
void irq_move_masked_irq(struct irq_data *idata)
{
	struct irq_desc *desc = irq_data_to_desc(idata);
	struct irq_data *data = &desc->irq_data;
	struct irq_chip *chip = data->chip;

	if (likely(!irqd_is_setaffinity_pending(data)))
		return;

	irqd_clr_move_pending(data);

	/*
	 * Paranoia: cpu-local interrupts shouldn't be calling in here anyway.
	 */
	if (irqd_is_per_cpu(data)) {
		WARN_ON(1);
		return;
	}

	if (unlikely(cpumask_empty(desc->pending_mask)))
		return;

	if (!chip->irq_set_affinity)
		return;

	assert_raw_spin_locked(&desc->lock);

	/*
	 * If there is a valid mask to work with, do the disable,
	 * reprogram, enable sequence. This is not particularly important
	 * for level-triggered interrupts, but in the edge-triggered case
	 * the RTE might be written while an active trigger is coming in,
	 * which could make some IO-APICs malfunction. Being paranoid,
	 * I guess!
	 *
	 * For correct operation this depends on the caller masking the
	 * interrupt.
	 */
	if (cpumask_intersects(desc->pending_mask, cpu_online_mask)) {
		int ret;

		ret = irq_do_set_affinity(data, desc->pending_mask, false);
		/*
		 * If there is a cleanup pending in the underlying vector
		 * management, reschedule the move for the next interrupt.
		 * Leave desc->pending_mask intact.
		 */
		if (ret == -EBUSY) {
			irqd_set_move_pending(data);
			return;
		}
	}
	cpumask_clear(desc->pending_mask);
}

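/*
 * Move helper, typically reached via the irq_move_irq() wrapper in
 * <linux/irq.h>, which only calls in here when an affinity change is
 * pending. The interrupt is masked around the move unless it is already
 * masked; see the comment below about ONESHOT threaded handlers.
 */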
void __irq_move_irq(struct irq_data *idata)
{
	bool masked;

	/*
	 * Get the top level irq_data when CONFIG_IRQ_DOMAIN_HIERARCHY is
	 * enabled. This should be optimized away when
	 * CONFIG_IRQ_DOMAIN_HIERARCHY is disabled, which avoids an
	 * "#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY" here.
	 */
	idata = irq_desc_get_irq_data(irq_data_to_desc(idata));

	if (unlikely(irqd_irq_disabled(idata)))
		return;

	/*
	 * Be careful vs. already masked interrupts. If this is a
	 * threaded interrupt with ONESHOT set, we can end up with an
	 * interrupt storm.
	 */
	masked = irqd_irq_masked(idata);
	if (!masked)
		idata->chip->irq_mask(idata);
	irq_move_masked_irq(idata);
	if (!masked)
		idata->chip->irq_unmask(idata);
}

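/*
 * Returns true if the affinity of @data can be changed safely from
 * process context, i.e. without deferring the change to the next
 * interrupt via the pending mask.
 */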
bool irq_can_move_in_process_context(struct irq_data *data)
{
	/*
	 * Get the top level irq_data in the hierarchy, which is optimized
	 * away when CONFIG_IRQ_DOMAIN_HIERARCHY is disabled.
	 */
	data = irq_desc_get_irq_data(irq_data_to_desc(data));
	return irq_can_move_pcntxt(data);
}