// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the interrupt descriptor management code. Detailed
 * information is available in Documentation/core-api/genericirq.rst
 *
 */
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/maple_tree.h>
#include <linux/irqdomain.h>
#include <linux/sysfs.h>
#include <linux/string_choices.h>

#include "internals.h"

/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
static struct lock_class_key irq_desc_lock_class;

#if defined(CONFIG_SMP)
static int __init irq_affinity_setup(char *str)
{
	alloc_bootmem_cpumask_var(&irq_default_affinity);
	cpulist_parse(str, irq_default_affinity);
	/*
	 * Set at least the boot CPU. We don't want to end up with
	 * bug reports caused by random command-line masks.
	 */
	cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
	return 1;
}
__setup("irqaffinity=", irq_affinity_setup);
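
/*
 * Usage sketch (illustrative, not part of this file): the mask parsed above
 * comes from the "irqaffinity=" boot parameter, e.g. on the kernel command
 * line of a hypothetical 8-CPU machine:
 *
 *	irqaffinity=0-3
 *
 * which restricts the default affinity of newly allocated interrupts to
 * CPUs 0-3 (the boot CPU is always added back, as enforced above).
 */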

static void __init init_irq_default_affinity(void)
{
	if (!cpumask_available(irq_default_affinity))
		zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
	if (cpumask_empty(irq_default_affinity))
		cpumask_setall(irq_default_affinity);
}
#else
static void __init init_irq_default_affinity(void)
{
}
#endif

#ifdef CONFIG_SMP
static int alloc_masks(struct irq_desc *desc, int node)
{
	if (!zalloc_cpumask_var_node(&desc->irq_common_data.affinity,
				     GFP_KERNEL, node))
		return -ENOMEM;

#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	if (!zalloc_cpumask_var_node(&desc->irq_common_data.effective_affinity,
				     GFP_KERNEL, node)) {
		free_cpumask_var(desc->irq_common_data.affinity);
		return -ENOMEM;
	}
#endif

#ifdef CONFIG_GENERIC_PENDING_IRQ
	if (!zalloc_cpumask_var_node(&desc->pending_mask, GFP_KERNEL, node)) {
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
		free_cpumask_var(desc->irq_common_data.effective_affinity);
#endif
		free_cpumask_var(desc->irq_common_data.affinity);
		return -ENOMEM;
	}
#endif
	return 0;
}

static void desc_smp_init(struct irq_desc *desc, int node,
			  const struct cpumask *affinity)
{
	if (!affinity)
		affinity = irq_default_affinity;
	cpumask_copy(desc->irq_common_data.affinity, affinity);

#ifdef CONFIG_GENERIC_PENDING_IRQ
	cpumask_clear(desc->pending_mask);
#endif
#ifdef CONFIG_NUMA
	desc->irq_common_data.node = node;
#endif
}

static void free_masks(struct irq_desc *desc)
{
#ifdef CONFIG_GENERIC_PENDING_IRQ
	free_cpumask_var(desc->pending_mask);
#endif
	free_cpumask_var(desc->irq_common_data.affinity);
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	free_cpumask_var(desc->irq_common_data.effective_affinity);
#endif
}

#else
static inline int
alloc_masks(struct irq_desc *desc, int node) { return 0; }
static inline void
desc_smp_init(struct irq_desc *desc, int node, const struct cpumask *affinity) { }
static inline void free_masks(struct irq_desc *desc) { }
#endif

static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
			      const struct cpumask *affinity, struct module *owner)
{
	int cpu;

	desc->irq_common_data.handler_data = NULL;
	desc->irq_common_data.msi_desc = NULL;

	desc->irq_data.common = &desc->irq_common_data;
	desc->irq_data.irq = irq;
	desc->irq_data.chip = &no_irq_chip;
	desc->irq_data.chip_data = NULL;
	irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS);
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
	irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
	desc->handle_irq = handle_bad_irq;
	desc->depth = 1;
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;
	desc->tot_count = 0;
	desc->name = NULL;
	desc->owner = owner;
	for_each_possible_cpu(cpu)
		*per_cpu_ptr(desc->kstat_irqs, cpu) = (struct irqstat) { };
	desc_smp_init(desc, node, affinity);
}

static unsigned int nr_irqs = NR_IRQS;

/**
 * irq_get_nr_irqs() - Number of interrupts supported by the system.
 */
unsigned int irq_get_nr_irqs(void)
{
	return nr_irqs;
}
EXPORT_SYMBOL_GPL(irq_get_nr_irqs);

/**
 * irq_set_nr_irqs() - Set the number of interrupts supported by the system.
 * @nr: New number of interrupts.
 *
 * Return: @nr.
 */
unsigned int irq_set_nr_irqs(unsigned int nr)
{
	nr_irqs = nr;

	return nr;
}
EXPORT_SYMBOL_GPL(irq_set_nr_irqs);

static DEFINE_MUTEX(sparse_irq_lock);
static struct maple_tree sparse_irqs = MTREE_INIT_EXT(sparse_irqs,
					MT_FLAGS_ALLOC_RANGE |
					MT_FLAGS_LOCK_EXTERN |
					MT_FLAGS_USE_RCU,
					sparse_irq_lock);
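
/*
 * Descriptor storage: the maple tree above maps each Linux irq number to its
 * struct irq_desc. Insertions and removals are serialized by sparse_irq_lock
 * (MT_FLAGS_LOCK_EXTERN), while lookups such as irq_to_desc() can run
 * locklessly under RCU thanks to MT_FLAGS_USE_RCU.
 */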

static int irq_find_free_area(unsigned int from, unsigned int cnt)
{
	MA_STATE(mas, &sparse_irqs, 0, 0);

	if (mas_empty_area(&mas, from, MAX_SPARSE_IRQS, cnt))
		return -ENOSPC;
	return mas.index;
}

static unsigned int irq_find_at_or_after(unsigned int offset)
{
	unsigned long index = offset;
	struct irq_desc *desc;

	guard(rcu)();
	desc = mt_find(&sparse_irqs, &index, nr_irqs);

	return desc ? irq_desc_get_irq(desc) : nr_irqs;
}

static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
{
	MA_STATE(mas, &sparse_irqs, irq, irq);
	WARN_ON(mas_store_gfp(&mas, desc, GFP_KERNEL) != 0);
}

static void delete_irq_desc(unsigned int irq)
{
	MA_STATE(mas, &sparse_irqs, irq, irq);
	mas_erase(&mas);
}

#ifdef CONFIG_SPARSE_IRQ
static const struct kobj_type irq_kobj_type;
#endif

static int init_desc(struct irq_desc *desc, int irq, int node,
		     unsigned int flags,
		     const struct cpumask *affinity,
		     struct module *owner)
{
	desc->kstat_irqs = alloc_percpu(struct irqstat);
	if (!desc->kstat_irqs)
		return -ENOMEM;

	if (alloc_masks(desc, node)) {
		free_percpu(desc->kstat_irqs);
		return -ENOMEM;
	}

	raw_spin_lock_init(&desc->lock);
	lockdep_set_class(&desc->lock, &irq_desc_lock_class);
	mutex_init(&desc->request_mutex);
	init_waitqueue_head(&desc->wait_for_threads);
	desc_set_defaults(irq, desc, node, affinity, owner);
	irqd_set(&desc->irq_data, flags);
	irq_resend_init(desc);
#ifdef CONFIG_SPARSE_IRQ
	kobject_init(&desc->kobj, &irq_kobj_type);
	init_rcu_head(&desc->rcu);
#endif

	return 0;
}

#ifdef CONFIG_SPARSE_IRQ

static void irq_kobj_release(struct kobject *kobj);

#ifdef CONFIG_SYSFS
static struct kobject *irq_kobj_base;

#define IRQ_ATTR_RO(_name) \
static struct kobj_attribute _name##_attr = __ATTR_RO(_name)

static ssize_t per_cpu_count_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
	ssize_t ret = 0;
	char *p = "";
	int cpu;

	for_each_possible_cpu(cpu) {
		unsigned int c = irq_desc_kstat_cpu(desc, cpu);

		ret += sysfs_emit_at(buf, ret, "%s%u", p, c);
		p = ",";
	}

	ret += sysfs_emit_at(buf, ret, "\n");
	return ret;
}
IRQ_ATTR_RO(per_cpu_count);

static ssize_t chip_name_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);

	guard(raw_spinlock_irq)(&desc->lock);
	if (desc->irq_data.chip && desc->irq_data.chip->name)
		return sysfs_emit(buf, "%s\n", desc->irq_data.chip->name);
	return 0;
}
IRQ_ATTR_RO(chip_name);

static ssize_t hwirq_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);

	guard(raw_spinlock_irq)(&desc->lock);
	if (desc->irq_data.domain)
		return sysfs_emit(buf, "%lu\n", desc->irq_data.hwirq);
	return 0;
}
IRQ_ATTR_RO(hwirq);

static ssize_t type_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);

	guard(raw_spinlock_irq)(&desc->lock);
	return sysfs_emit(buf, "%s\n", irqd_is_level_type(&desc->irq_data) ? "level" : "edge");
}
IRQ_ATTR_RO(type);

static ssize_t wakeup_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);

	guard(raw_spinlock_irq)(&desc->lock);
	return sysfs_emit(buf, "%s\n", str_enabled_disabled(irqd_is_wakeup_set(&desc->irq_data)));
}
IRQ_ATTR_RO(wakeup);

static ssize_t name_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);

	guard(raw_spinlock_irq)(&desc->lock);
	if (desc->name)
		return sysfs_emit(buf, "%s\n", desc->name);
	return 0;
}
IRQ_ATTR_RO(name);

static ssize_t actions_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
	struct irqaction *action;
	ssize_t ret = 0;
	char *p = "";

	scoped_guard(raw_spinlock_irq, &desc->lock) {
		for_each_action_of_desc(desc, action) {
			ret += sysfs_emit_at(buf, ret, "%s%s", p, action->name);
			p = ",";
		}
	}

	if (ret)
		ret += sysfs_emit_at(buf, ret, "\n");
	return ret;
}
IRQ_ATTR_RO(actions);

static struct attribute *irq_attrs[] = {
	&per_cpu_count_attr.attr,
	&chip_name_attr.attr,
	&hwirq_attr.attr,
	&type_attr.attr,
	&wakeup_attr.attr,
	&name_attr.attr,
	&actions_attr.attr,
	NULL
};
ATTRIBUTE_GROUPS(irq);
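
/*
 * Sysfs layout sketch (for orientation, derived from the attributes above and
 * irq_sysfs_init() below): each allocated descriptor shows up as
 * /sys/kernel/irq/<irq>/ containing the read-only files actions, chip_name,
 * hwirq, name, per_cpu_count, type and wakeup.
 */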

static const struct kobj_type irq_kobj_type = {
	.release	= irq_kobj_release,
	.sysfs_ops	= &kobj_sysfs_ops,
	.default_groups = irq_groups,
};

static void irq_sysfs_add(int irq, struct irq_desc *desc)
{
	if (irq_kobj_base) {
		/*
		 * Continue even in case of failure as this is nothing
		 * crucial and failures in the late irq_sysfs_init()
		 * cannot be rolled back.
		 */
		if (kobject_add(&desc->kobj, irq_kobj_base, "%d", irq))
			pr_warn("Failed to add kobject for irq %d\n", irq);
		else
			desc->istate |= IRQS_SYSFS;
	}
}

static void irq_sysfs_del(struct irq_desc *desc)
{
	/*
	 * Only invoke kobject_del() when kobject_add() was successfully
	 * invoked for the descriptor. This covers both early boot, where
	 * sysfs is not initialized yet, and the case of a failed
	 * kobject_add() invocation.
	 */
	if (desc->istate & IRQS_SYSFS)
		kobject_del(&desc->kobj);
}

static int __init irq_sysfs_init(void)
{
	struct irq_desc *desc;
	int irq;

	/* Prevent concurrent irq alloc/free */
	guard(mutex)(&sparse_irq_lock);
	irq_kobj_base = kobject_create_and_add("irq", kernel_kobj);
	if (!irq_kobj_base)
		return -ENOMEM;

	/* Add the already allocated interrupts */
	for_each_irq_desc(irq, desc)
		irq_sysfs_add(irq, desc);
	return 0;
}
postcore_initcall(irq_sysfs_init);

#else /* !CONFIG_SYSFS */

static const struct kobj_type irq_kobj_type = {
	.release	= irq_kobj_release,
};

static void irq_sysfs_add(int irq, struct irq_desc *desc) {}
static void irq_sysfs_del(struct irq_desc *desc) {}

#endif /* CONFIG_SYSFS */

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return mtree_load(&sparse_irqs, irq);
}
#ifdef CONFIG_KVM_BOOK3S_64_HV_MODULE
EXPORT_SYMBOL_GPL(irq_to_desc);
#endif

void irq_lock_sparse(void)
{
	mutex_lock(&sparse_irq_lock);
}

void irq_unlock_sparse(void)
{
	mutex_unlock(&sparse_irq_lock);
}

static struct irq_desc *alloc_desc(int irq, int node, unsigned int flags,
				   const struct cpumask *affinity,
				   struct module *owner)
{
	struct irq_desc *desc;
	int ret;

	desc = kzalloc_node(sizeof(*desc), GFP_KERNEL, node);
	if (!desc)
		return NULL;

	ret = init_desc(desc, irq, node, flags, affinity, owner);
	if (unlikely(ret)) {
		kfree(desc);
		return NULL;
	}

	return desc;
}

static void irq_kobj_release(struct kobject *kobj)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);

	free_masks(desc);
	free_percpu(desc->kstat_irqs);
	kfree(desc);
}

static void delayed_free_desc(struct rcu_head *rhp)
{
	struct irq_desc *desc = container_of(rhp, struct irq_desc, rcu);

	kobject_put(&desc->kobj);
}

static void free_desc(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	irq_remove_debugfs_entry(desc);
	unregister_irq_proc(irq, desc);

	/*
	 * sparse_irq_lock also protects show_interrupts() and
	 * kstat_irqs_usr(). Once the descriptor has been deleted from the
	 * sparse tree it can be freed; lookups from proc will simply fail
	 * to find it.
	 *
	 * The sysfs entry must be serialized against a concurrent
	 * irq_sysfs_init() as well.
	 */
	irq_sysfs_del(desc);
	delete_irq_desc(irq);

	/*
	 * We free the descriptor, masks and stat fields via RCU. That
	 * allows demultiplexing interrupts to do RCU based management of
	 * the child interrupts.
	 * This also allows us to use RCU in kstat_irqs_usr().
	 */
	call_rcu(&desc->rcu, delayed_free_desc);
}

static int alloc_descs(unsigned int start, unsigned int cnt, int node,
		       const struct irq_affinity_desc *affinity,
		       struct module *owner)
{
	struct irq_desc *desc;
	int i;

	/* Validate affinity mask(s) */
	if (affinity) {
		for (i = 0; i < cnt; i++) {
			if (cpumask_empty(&affinity[i].mask))
				return -EINVAL;
		}
	}

	for (i = 0; i < cnt; i++) {
		const struct cpumask *mask = NULL;
		unsigned int flags = 0;

		if (affinity) {
			if (affinity->is_managed) {
				flags = IRQD_AFFINITY_MANAGED |
					IRQD_MANAGED_SHUTDOWN;
			}
			flags |= IRQD_AFFINITY_SET;
			mask = &affinity->mask;
			node = cpu_to_node(cpumask_first(mask));
			affinity++;
		}

		desc = alloc_desc(start + i, node, flags, mask, owner);
		if (!desc)
			goto err;
		irq_insert_desc(start + i, desc);
		irq_sysfs_add(start + i, desc);
		irq_add_debugfs_entry(start + i, desc);
	}
	return start;

err:
	for (i--; i >= 0; i--)
		free_desc(start + i);
	return -ENOMEM;
}

static bool irq_expand_nr_irqs(unsigned int nr)
{
	if (nr > MAX_SPARSE_IRQS)
		return false;
	nr_irqs = nr;
	return true;
}

int __init early_irq_init(void)
{
	int i, initcnt, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	/* Let arch update nr_irqs and return the nr of preallocated irqs */
	initcnt = arch_probe_nr_irqs();
	printk(KERN_INFO "NR_IRQS: %d, nr_irqs: %d, preallocated irqs: %d\n",
	       NR_IRQS, nr_irqs, initcnt);

	if (WARN_ON(nr_irqs > MAX_SPARSE_IRQS))
		nr_irqs = MAX_SPARSE_IRQS;

	if (WARN_ON(initcnt > MAX_SPARSE_IRQS))
		initcnt = MAX_SPARSE_IRQS;

	if (initcnt > nr_irqs)
		nr_irqs = initcnt;

	for (i = 0; i < initcnt; i++) {
		desc = alloc_desc(i, node, 0, NULL, NULL);
		irq_insert_desc(i, desc);
	}
	return arch_early_irq_init();
}

#else /* !CONFIG_SPARSE_IRQ */

struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS-1] = {
		.handle_irq	= handle_bad_irq,
		.depth		= 1,
		.lock		= __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
	}
};

int __init early_irq_init(void)
{
	int count, i, node = first_online_node;
	int ret;

	init_irq_default_affinity();

	printk(KERN_INFO "NR_IRQS: %d\n", NR_IRQS);

	count = ARRAY_SIZE(irq_desc);

	for (i = 0; i < count; i++) {
		ret = init_desc(irq_desc + i, i, node, 0, NULL, NULL);
		if (unlikely(ret))
			goto __free_desc_res;
	}

	return arch_early_irq_init();

__free_desc_res:
	while (--i >= 0) {
		free_masks(irq_desc + i);
		free_percpu(irq_desc[i].kstat_irqs);
	}

	return ret;
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}
EXPORT_SYMBOL(irq_to_desc);

static void free_desc(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	scoped_guard(raw_spinlock_irqsave, &desc->lock)
		desc_set_defaults(irq, desc, irq_desc_get_node(desc), NULL, NULL);
	delete_irq_desc(irq);
}

static inline int alloc_descs(unsigned int start, unsigned int cnt, int node,
			      const struct irq_affinity_desc *affinity,
			      struct module *owner)
{
	u32 i;

	for (i = 0; i < cnt; i++) {
		struct irq_desc *desc = irq_to_desc(start + i);

		desc->owner = owner;
		irq_insert_desc(start + i, desc);
	}
	return start;
}

static inline bool irq_expand_nr_irqs(unsigned int nr)
{
	return false;
}

void irq_mark_irq(unsigned int irq)
{
	guard(mutex)(&sparse_irq_lock);
	irq_insert_desc(irq, irq_desc + irq);
}

#endif /* !CONFIG_SPARSE_IRQ */

int handle_irq_desc(struct irq_desc *desc)
{
	struct irq_data *data;

	if (!desc)
		return -EINVAL;

	data = irq_desc_get_irq_data(desc);
	if (WARN_ON_ONCE(!in_hardirq() && irqd_is_handle_enforce_irqctx(data)))
		return -EPERM;

	generic_handle_irq_desc(desc);
	return 0;
}

/**
 * generic_handle_irq - Invoke the handler for a particular irq
 * @irq:	The irq number to handle
 *
 * Returns:	0 on success, or -EINVAL if conversion has failed
 *
 *		This function must be called from an IRQ context with irq regs
 *		initialized.
 */
int generic_handle_irq(unsigned int irq)
{
	return handle_irq_desc(irq_to_desc(irq));
}
EXPORT_SYMBOL_GPL(generic_handle_irq);

/**
 * generic_handle_irq_safe - Invoke the handler for a particular irq from any
 *			     context.
 * @irq:	The irq number to handle
 *
 * Returns:	0 on success, a negative value on error.
 *
 * This function can be called from any context (IRQ or process context). It
 * will report an error if not invoked from IRQ context and the irq has been
 * marked to enforce IRQ-context only.
 */
int generic_handle_irq_safe(unsigned int irq)
{
	unsigned long flags;
	int ret;

	local_irq_save(flags);
	ret = handle_irq_desc(irq_to_desc(irq));
	local_irq_restore(flags);
	return ret;
}
EXPORT_SYMBOL_GPL(generic_handle_irq_safe);
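
/*
 * Usage sketch (illustrative only, not part of this file): a demultiplexing
 * driver whose parent interrupt runs as a threaded handler, e.g. behind a
 * slow bus, can forward child interrupts from process context with
 * generic_handle_irq_safe(). The names below are made up for the example.
 *
 *	static irqreturn_t expander_thread_fn(int irq, void *data)
 *	{
 *		struct expander *chip = data;
 *		unsigned long pending = expander_read_status(chip);
 *		int bit;
 *
 *		for_each_set_bit(bit, &pending, chip->nr_irqs)
 *			generic_handle_irq_safe(chip->child_irq[bit]);
 *		return IRQ_HANDLED;
 *	}
 */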

#ifdef CONFIG_IRQ_DOMAIN
/**
 * generic_handle_domain_irq - Invoke the handler for a HW irq belonging
 *                             to a domain.
 * @domain:	The domain where to perform the lookup
 * @hwirq:	The HW irq number to convert to a logical one
 *
 * Returns:	0 on success, or -EINVAL if conversion has failed
 *
 *		This function must be called from an IRQ context with irq regs
 *		initialized.
 */
int generic_handle_domain_irq(struct irq_domain *domain, unsigned int hwirq)
{
	return handle_irq_desc(irq_resolve_mapping(domain, hwirq));
}
EXPORT_SYMBOL_GPL(generic_handle_domain_irq);
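
/*
 * Usage sketch (illustrative only): a typical chained interrupt controller
 * driver reads its pending register in the parent flow handler and forwards
 * each hardware irq number through its domain. Register layout and names
 * below are invented for the example.
 *
 *	static void foo_irq_handler(struct irq_desc *desc)
 *	{
 *		struct foo_intc *intc = irq_desc_get_handler_data(desc);
 *		struct irq_chip *chip = irq_desc_get_chip(desc);
 *		unsigned long pending;
 *		int hwirq;
 *
 *		chained_irq_enter(chip, desc);
 *		pending = readl(intc->base + FOO_PENDING);
 *		for_each_set_bit(hwirq, &pending, FOO_NR_IRQS)
 *			generic_handle_domain_irq(intc->domain, hwirq);
 *		chained_irq_exit(chip, desc);
 *	}
 */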

/**
 * generic_handle_domain_irq_safe - Invoke the handler for a HW irq belonging
 *				     to a domain from any context.
 * @domain:	The domain where to perform the lookup
 * @hwirq:	The HW irq number to convert to a logical one
 *
 * Returns:	0 on success, a negative value on error.
 *
 * This function can be called from any context (IRQ or process
 * context). If the interrupt is marked as 'enforce IRQ-context only' then
 * the function must be invoked from hard interrupt context.
 */
int generic_handle_domain_irq_safe(struct irq_domain *domain, unsigned int hwirq)
{
	unsigned long flags;
	int ret;

	local_irq_save(flags);
	ret = handle_irq_desc(irq_resolve_mapping(domain, hwirq));
	local_irq_restore(flags);
	return ret;
}
EXPORT_SYMBOL_GPL(generic_handle_domain_irq_safe);

/**
 * generic_handle_domain_nmi - Invoke the handler for a HW nmi belonging
 *                             to a domain.
 * @domain:	The domain where to perform the lookup
 * @hwirq:	The HW irq number to convert to a logical one
 *
 * Returns:	0 on success, or -EINVAL if conversion has failed
 *
 *		This function must be called from an NMI context with irq regs
 *		initialized.
 **/
int generic_handle_domain_nmi(struct irq_domain *domain, unsigned int hwirq)
{
	WARN_ON_ONCE(!in_nmi());
	return handle_irq_desc(irq_resolve_mapping(domain, hwirq));
}
#endif

/* Dynamic interrupt handling */

/**
 * irq_free_descs - free irq descriptors
 * @from:	Start of descriptor range
 * @cnt:	Number of consecutive irqs to free
 */
void irq_free_descs(unsigned int from, unsigned int cnt)
{
	int i;

	if (from >= nr_irqs || (from + cnt) > nr_irqs)
		return;

	guard(mutex)(&sparse_irq_lock);
	for (i = 0; i < cnt; i++)
		free_desc(from + i);
}
EXPORT_SYMBOL_GPL(irq_free_descs);

/**
 * __irq_alloc_descs - allocate and initialize a range of irq descriptors
 * @irq:	Allocate for specific irq number if irq >= 0
 * @from:	Start the search from this irq number
 * @cnt:	Number of consecutive irqs to allocate.
 * @node:	Preferred node on which the irq descriptor should be allocated
 * @owner:	Owning module (can be NULL)
 * @affinity:	Optional pointer to an affinity mask array of size @cnt which
 *		hints where the irq descriptors should be allocated and which
 *		default affinities to use
 *
 * Returns the first irq number or error code
 */
int __ref __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
			    struct module *owner, const struct irq_affinity_desc *affinity)
{
	int start;

	if (!cnt)
		return -EINVAL;

	if (irq >= 0) {
		if (from > irq)
			return -EINVAL;
		from = irq;
	} else {
		/*
		 * For interrupts which are freely allocated the
		 * architecture can force a lower bound to the @from
		 * argument. x86 uses this to exclude the GSI space.
		 */
		from = arch_dynirq_lower_bound(from);
	}

	guard(mutex)(&sparse_irq_lock);

	start = irq_find_free_area(from, cnt);
	if (irq >= 0 && start != irq)
		return -EEXIST;

	if (start + cnt > nr_irqs) {
		if (!irq_expand_nr_irqs(start + cnt))
			return -ENOMEM;
	}
	return alloc_descs(start, cnt, node, affinity, owner);
}
EXPORT_SYMBOL_GPL(__irq_alloc_descs);
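
/*
 * Usage sketch (illustrative only): callers normally go through the wrapper
 * macros in <linux/irq.h> (e.g. irq_alloc_descs()/irq_alloc_desc()) rather
 * than calling __irq_alloc_descs() directly. A hypothetical driver needing a
 * block of four dynamically numbered interrupts might do:
 *
 *	int virq = irq_alloc_descs(-1, 0, 4, numa_node_id());
 *
 *	if (virq < 0)
 *		return virq;
 *	...set up the descriptors and request handlers...
 *	irq_free_descs(virq, 4);
 */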

/**
 * irq_get_next_irq - get next allocated irq number
 * @offset:	where to start the search
 *
 * Returns next irq number after offset or nr_irqs if none is found.
 */
unsigned int irq_get_next_irq(unsigned int offset)
{
	return irq_find_at_or_after(offset);
}
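
/*
 * Iteration sketch (illustrative only): irq_get_next_irq() is what the
 * for_each_active_irq() helper in <linux/irqnr.h> is built on, roughly:
 *
 *	for (irq = irq_get_next_irq(0); irq < nr_irqs;
 *	     irq = irq_get_next_irq(irq + 1))
 *		...visit each allocated irq number...
 */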

struct irq_desc *__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus,
				     unsigned int check)
{
	struct irq_desc *desc;

	desc = irq_to_desc(irq);
	if (!desc)
		return NULL;

	if (check & _IRQ_DESC_CHECK) {
		if ((check & _IRQ_DESC_PERCPU) && !irq_settings_is_per_cpu_devid(desc))
			return NULL;

		if (!(check & _IRQ_DESC_PERCPU) && irq_settings_is_per_cpu_devid(desc))
			return NULL;
	}

	if (bus)
		chip_bus_lock(desc);
	raw_spin_lock_irqsave(&desc->lock, *flags);

	return desc;
}

void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus)
	__releases(&desc->lock)
{
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	if (bus)
		chip_bus_sync_unlock(desc);
}

int irq_set_percpu_devid_partition(unsigned int irq,
				   const struct cpumask *affinity)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || desc->percpu_enabled)
		return -EINVAL;

	desc->percpu_enabled = kzalloc(sizeof(*desc->percpu_enabled), GFP_KERNEL);

	if (!desc->percpu_enabled)
		return -ENOMEM;

	desc->percpu_affinity = affinity ? : cpu_possible_mask;

	irq_set_percpu_devid_flags(irq);
	return 0;
}

int irq_set_percpu_devid(unsigned int irq)
{
	return irq_set_percpu_devid_partition(irq, NULL);
}

int irq_get_percpu_devid_partition(unsigned int irq, struct cpumask *affinity)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || !desc->percpu_enabled)
		return -EINVAL;

	if (affinity)
		cpumask_copy(affinity, desc->percpu_affinity);

	return 0;
}
EXPORT_SYMBOL_GPL(irq_get_percpu_devid_partition);
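
/*
 * Usage sketch (illustrative only): drivers for genuinely per-CPU interrupt
 * sources (arch timers, PMUs, IPIs) mark the irq as per-CPU-devid and then
 * request it with a per-CPU cookie. The names below are made up:
 *
 *	err = irq_set_percpu_devid(irq);
 *	if (!err)
 *		err = request_percpu_irq(irq, mytimer_handler,
 *					 "mytimer", &mytimer_percpu_dev);
 */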

void kstat_incr_irq_this_cpu(unsigned int irq)
{
	kstat_incr_irqs_this_cpu(irq_to_desc(irq));
}

/**
 * kstat_irqs_cpu - Get the statistics for an interrupt on a cpu
 * @irq:	The interrupt number
 * @cpu:	The cpu number
 *
 * Returns the sum of interrupt counts on @cpu since boot for
 * @irq. The caller must ensure that the interrupt is not removed
 * concurrently.
 */
unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc && desc->kstat_irqs ? per_cpu(desc->kstat_irqs->cnt, cpu) : 0;
}
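
/*
 * Usage sketch (illustrative only): the same per-CPU counters back the
 * columns printed by /proc/interrupts. A caller holding a reference to the
 * irq can build a system-wide total like this:
 *
 *	unsigned int cpu, total = 0;
 *
 *	for_each_online_cpu(cpu)
 *		total += kstat_irqs_cpu(irq, cpu);
 */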

static unsigned int kstat_irqs_desc(struct irq_desc *desc, const struct cpumask *cpumask)
{
	unsigned int sum = 0;
	int cpu;

	if (!irq_settings_is_per_cpu_devid(desc) &&
	    !irq_settings_is_per_cpu(desc) &&
	    !irq_is_nmi(desc))
		return data_race(desc->tot_count);

	for_each_cpu(cpu, cpumask)
		sum += data_race(per_cpu(desc->kstat_irqs->cnt, cpu));
	return sum;
}

static unsigned int kstat_irqs(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || !desc->kstat_irqs)
		return 0;
	return kstat_irqs_desc(desc, cpu_possible_mask);
}

#ifdef CONFIG_GENERIC_IRQ_STAT_SNAPSHOT

void kstat_snapshot_irqs(void)
{
	struct irq_desc *desc;
	unsigned int irq;

	for_each_irq_desc(irq, desc) {
		if (!desc->kstat_irqs)
			continue;
		this_cpu_write(desc->kstat_irqs->ref, this_cpu_read(desc->kstat_irqs->cnt));
	}
}

unsigned int kstat_get_irq_since_snapshot(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || !desc->kstat_irqs)
		return 0;
	return this_cpu_read(desc->kstat_irqs->cnt) - this_cpu_read(desc->kstat_irqs->ref);
}

#endif

/**
 * kstat_irqs_usr - Get the statistics for an interrupt from thread context
 * @irq:	The interrupt number
 *
 * Returns the sum of interrupt counts on all cpus since boot for @irq.
 *
 * It uses rcu to protect the access since a concurrent removal of an
 * interrupt descriptor is observing an rcu grace period before
 * delayed_free_desc()/irq_kobj_release().
 */
unsigned int kstat_irqs_usr(unsigned int irq)
{
	unsigned int sum;

	rcu_read_lock();
	sum = kstat_irqs(irq);
	rcu_read_unlock();
	return sum;
}

#ifdef CONFIG_LOCKDEP
void __irq_set_lockdep_class(unsigned int irq, struct lock_class_key *lock_class,
			     struct lock_class_key *request_class)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		lockdep_set_class(&desc->lock, lock_class);
		lockdep_set_class(&desc->request_mutex, request_class);
	}
}
EXPORT_SYMBOL_GPL(__irq_set_lockdep_class);
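
/*
 * Usage sketch (illustrative only): an irqchip driver that creates nested
 * interrupts (e.g. a GPIO expander) typically gives each child irq its own
 * lockdep classes via the irq_set_lockdep_class() wrapper so that nested
 * chip bus locking does not produce false-positive lockdep reports:
 *
 *	static struct lock_class_key my_irq_lock_class;
 *	static struct lock_class_key my_irq_request_class;
 *	...
 *	irq_set_lockdep_class(child_irq, &my_irq_lock_class,
 *			      &my_irq_request_class);
 */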
#endif
|---|