// SPDX-License-Identifier: GPL-2.0-only
/*
 * Local APIC related interfaces to support IOAPIC, MSI, etc.
 *
 * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
 *	Moved from arch/x86/kernel/apic/io_apic.c.
 * Jiang Liu <jiang.liu@linux.intel.com>
 *	Enable support of hierarchical irqdomains
 */
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <asm/irqdomain.h>
#include <asm/hw_irq.h>
#include <asm/traps.h>
#include <asm/apic.h>
#include <asm/i8259.h>
#include <asm/desc.h>
#include <asm/irq_remapping.h>

#include <asm/trace/irq_vectors.h>

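/*
 * Per-interrupt allocation state. @vector/@cpu is the current target;
 * @prev_vector/@prev_cpu preserve the old target while an affinity move
 * is in flight, until the stale vector is released via @clist cleanup.
 */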
struct apic_chip_data {
	struct irq_cfg		hw_irq_cfg;
	unsigned int		vector;
	unsigned int		prev_vector;
	unsigned int		cpu;
	unsigned int		prev_cpu;
	unsigned int		irq;
	struct hlist_node	clist;
	unsigned int		move_in_progress	: 1,
				is_managed		: 1,
				can_reserve		: 1,
				has_reserved		: 1;
};

struct irq_domain *x86_vector_domain;
EXPORT_SYMBOL_GPL(x86_vector_domain);
static DEFINE_RAW_SPINLOCK(vector_lock);
static cpumask_var_t vector_searchmask;
static struct irq_chip lapic_controller;
static struct irq_matrix *vector_matrix;
#ifdef CONFIG_SMP

static void vector_cleanup_callback(struct timer_list *tmr);

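/*
 * After an affinity change, the vector on the previous target CPU cannot
 * be reused immediately: the device might still raise the interrupt there.
 * Such vectors are queued on a per-CPU list and released from a pinned
 * timer once the previous target's IRR shows no pending interrupt.
 */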
struct vector_cleanup {
	struct hlist_head	head;
	struct timer_list	timer;
};

static DEFINE_PER_CPU(struct vector_cleanup, vector_cleanup) = {
	.head	= HLIST_HEAD_INIT,
	.timer	= __TIMER_INITIALIZER(vector_cleanup_callback, TIMER_PINNED),
};
#endif

void lock_vector_lock(void)
{
	/*
	 * Used so that the set of online CPUs does not change
	 * during assign_irq_vector().
	 */
	raw_spin_lock(&vector_lock);
}

void unlock_vector_lock(void)
{
	raw_spin_unlock(&vector_lock);
}

void init_irq_alloc_info(struct irq_alloc_info *info,
			 const struct cpumask *mask)
{
	memset(info, 0, sizeof(*info));
	info->mask = mask;
}

void copy_irq_alloc_info(struct irq_alloc_info *dst, struct irq_alloc_info *src)
{
	if (src)
		*dst = *src;
	else
		memset(dst, 0, sizeof(*dst));
}

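/*
 * The vector domain is the root of the hierarchical irq domain, so walk
 * up via parent_data to the outermost irq_data to find the chip data.
 */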
static struct apic_chip_data *apic_chip_data(struct irq_data *irqd)
{
	if (!irqd)
		return NULL;

	while (irqd->parent_data)
		irqd = irqd->parent_data;

	return irqd->chip_data;
}

struct irq_cfg *irqd_cfg(struct irq_data *irqd)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);

	return apicd ? &apicd->hw_irq_cfg : NULL;
}
EXPORT_SYMBOL_GPL(irqd_cfg);

struct irq_cfg *irq_cfg(unsigned int irq)
{
	return irqd_cfg(irq_get_irq_data(irq));
}

static struct apic_chip_data *alloc_apic_chip_data(int node)
{
	struct apic_chip_data *apicd;

	apicd = kzalloc_node(sizeof(*apicd), GFP_KERNEL, node);
	if (apicd)
		INIT_HLIST_NODE(&apicd->clist);
	return apicd;
}

static void free_apic_chip_data(struct apic_chip_data *apicd)
{
	kfree(apicd);
}

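/*
 * Commit a new (vector, CPU) target to the hardware facing irq_cfg and
 * record it as the effective affinity of the interrupt.
 */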
static void apic_update_irq_cfg(struct irq_data *irqd, unsigned int vector,
				unsigned int cpu)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);

	lockdep_assert_held(&vector_lock);

	apicd->hw_irq_cfg.vector = vector;
	apicd->hw_irq_cfg.dest_apicid = apic->calc_dest_apicid(cpu);

	apic_update_vector(cpu, vector, true);

	irq_data_update_effective_affinity(irqd, cpumask_of(cpu));
	trace_vector_config(irqd->irq, vector, cpu, apicd->hw_irq_cfg.dest_apicid);
}

static void apic_free_vector(unsigned int cpu, unsigned int vector, bool managed)
{
	apic_update_vector(cpu, vector, false);
	irq_matrix_free(vector_matrix, cpu, vector, managed);
}

static void chip_data_update(struct irq_data *irqd, unsigned int newvec, unsigned int newcpu)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	struct irq_desc *desc = irq_data_to_desc(irqd);
	bool managed = irqd_affinity_is_managed(irqd);

	lockdep_assert_held(&vector_lock);

	trace_vector_update(irqd->irq, newvec, newcpu, apicd->vector,
			    apicd->cpu);

	/*
	 * If there is no vector associated or if the associated vector is
	 * the shutdown vector, which is associated to make PCI/MSI
	 * shutdown mode work, then there is nothing to release. Clear out
	 * prev_vector for this and the offlined target case.
	 */
	apicd->prev_vector = 0;
	if (!apicd->vector || apicd->vector == MANAGED_IRQ_SHUTDOWN_VECTOR)
		goto setnew;
	/*
	 * If the target CPU of the previous vector is online, then mark
	 * the vector as move in progress and store it for cleanup when the
	 * first interrupt on the new vector arrives. If the target CPU is
	 * offline then the regular release mechanism via the cleanup
	 * vector is not possible and the vector can be immediately freed
	 * in the underlying matrix allocator.
	 */
	if (cpu_online(apicd->cpu)) {
		apicd->move_in_progress = true;
		apicd->prev_vector = apicd->vector;
		apicd->prev_cpu = apicd->cpu;
		WARN_ON_ONCE(apicd->cpu == newcpu);
	} else {
		apic_free_vector(apicd->cpu, apicd->vector, managed);
	}

setnew:
	apicd->vector = newvec;
	apicd->cpu = newcpu;
	BUG_ON(!IS_ERR_OR_NULL(per_cpu(vector_irq, newcpu)[newvec]));
	per_cpu(vector_irq, newcpu)[newvec] = desc;
	apic_update_irq_cfg(irqd, newvec, newcpu);
}

static void vector_assign_managed_shutdown(struct irq_data *irqd)
{
	unsigned int cpu = cpumask_first(cpu_online_mask);

	apic_update_irq_cfg(irqd, MANAGED_IRQ_SHUTDOWN_VECTOR, cpu);
}

static int reserve_managed_vector(struct irq_data *irqd)
{
	const struct cpumask *affmsk = irq_data_get_affinity_mask(irqd);
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&vector_lock, flags);
	apicd->is_managed = true;
	ret = irq_matrix_reserve_managed(vector_matrix, affmsk);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	trace_vector_reserve_managed(irqd->irq, ret);
	return ret;
}

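/*
 * Reservation mode: account only a global reservation in the matrix and
 * park the interrupt on the managed shutdown vector. A real vector is
 * allocated when the interrupt is activated.
 */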
static void reserve_irq_vector_locked(struct irq_data *irqd)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);

	irq_matrix_reserve(vector_matrix);
	apicd->can_reserve = true;
	apicd->has_reserved = true;
	irqd_set_can_reserve(irqd);
	trace_vector_reserve(irqd->irq, 0);
	vector_assign_managed_shutdown(irqd);
}

static int reserve_irq_vector(struct irq_data *irqd)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&vector_lock, flags);
	reserve_irq_vector_locked(irqd);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	return 0;
}

static int
assign_vector_locked(struct irq_data *irqd, const struct cpumask *dest)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	bool resvd = apicd->has_reserved;
	unsigned int cpu = apicd->cpu;
	int vector = apicd->vector;

	lockdep_assert_held(&vector_lock);

	/*
	 * If the current target CPU is online and in the new requested
	 * affinity mask, there is no point in moving the interrupt from
	 * one CPU to another.
	 */
	if (vector && cpu_online(cpu) && cpumask_test_cpu(cpu, dest))
		return 0;

	/*
	 * Careful here. @apicd might either have move_in_progress set or
	 * be enqueued for cleanup. Assigning a new vector would either
	 * leave a stale vector on some CPU around or in case of a pending
	 * cleanup corrupt the hlist.
	 */
	if (apicd->move_in_progress || !hlist_unhashed(&apicd->clist))
		return -EBUSY;

	vector = irq_matrix_alloc(vector_matrix, dest, resvd, &cpu);
	trace_vector_alloc(irqd->irq, vector, resvd, vector);
	if (vector < 0)
		return vector;
	chip_data_update(irqd, vector, cpu);

	return 0;
}

static int assign_irq_vector(struct irq_data *irqd, const struct cpumask *dest)
{
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&vector_lock, flags);
	cpumask_and(vector_searchmask, dest, cpu_online_mask);
	ret = assign_vector_locked(irqd, vector_searchmask);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	return ret;
}

static int assign_irq_vector_any_locked(struct irq_data *irqd)
{
	/* Get the affinity mask - either irq_default_affinity or (user) set */
	const struct cpumask *affmsk = irq_data_get_affinity_mask(irqd);
	int node = irq_data_get_node(irqd);

	if (node != NUMA_NO_NODE) {
		/* Try the intersection of @affmsk and node mask */
		cpumask_and(vector_searchmask, cpumask_of_node(node), affmsk);
		if (!assign_vector_locked(irqd, vector_searchmask))
			return 0;
	}

	/* Try the full affinity mask */
	cpumask_and(vector_searchmask, affmsk, cpu_online_mask);
	if (!assign_vector_locked(irqd, vector_searchmask))
		return 0;

	if (node != NUMA_NO_NODE) {
		/* Try the node mask */
		if (!assign_vector_locked(irqd, cpumask_of_node(node)))
			return 0;
	}

	/* Try the full online mask */
	return assign_vector_locked(irqd, cpu_online_mask);
}

static int
assign_irq_vector_policy(struct irq_data *irqd, struct irq_alloc_info *info)
{
	if (irqd_affinity_is_managed(irqd))
		return reserve_managed_vector(irqd);
	if (info->mask)
		return assign_irq_vector(irqd, info->mask);
	/*
	 * Make only a global reservation with no guarantee. A real vector
	 * is associated at activation time.
	 */
	return reserve_irq_vector(irqd);
}

static int
assign_managed_vector(struct irq_data *irqd, const struct cpumask *dest)
{
	const struct cpumask *affmsk = irq_data_get_affinity_mask(irqd);
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	int vector, cpu;

	cpumask_and(vector_searchmask, dest, affmsk);

	/* set_affinity might call here for nothing */
	if (apicd->vector && cpumask_test_cpu(apicd->cpu, vector_searchmask))
		return 0;
	vector = irq_matrix_alloc_managed(vector_matrix, vector_searchmask,
					  &cpu);
	trace_vector_alloc_managed(irqd->irq, vector, vector);
	if (vector < 0)
		return vector;
	chip_data_update(irqd, vector, cpu);

	return 0;
}

static void clear_irq_vector(struct irq_data *irqd)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	bool managed = irqd_affinity_is_managed(irqd);
	unsigned int vector = apicd->vector;

	lockdep_assert_held(&vector_lock);

	if (!vector)
		return;

	trace_vector_clear(irqd->irq, vector, apicd->cpu, apicd->prev_vector,
			   apicd->prev_cpu);

	per_cpu(vector_irq, apicd->cpu)[vector] = VECTOR_SHUTDOWN;
	apic_free_vector(apicd->cpu, vector, managed);
	apicd->vector = 0;

	/* Clean up move in progress */
	vector = apicd->prev_vector;
	if (!vector)
		return;

	per_cpu(vector_irq, apicd->prev_cpu)[vector] = VECTOR_SHUTDOWN;
	apic_free_vector(apicd->prev_cpu, vector, managed);
	apicd->prev_vector = 0;
	apicd->move_in_progress = 0;
	hlist_del_init(&apicd->clist);
}

static void x86_vector_deactivate(struct irq_domain *dom, struct irq_data *irqd)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	unsigned long flags;

	trace_vector_deactivate(irqd->irq, apicd->is_managed,
				apicd->can_reserve, false);

	/* Regular fixed assigned interrupt */
	if (!apicd->is_managed && !apicd->can_reserve)
		return;
	/* If the interrupt has a global reservation, nothing to do */
	if (apicd->has_reserved)
		return;

	raw_spin_lock_irqsave(&vector_lock, flags);
	clear_irq_vector(irqd);
	if (apicd->can_reserve)
		reserve_irq_vector_locked(irqd);
	else
		vector_assign_managed_shutdown(irqd);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
}

static int activate_reserved(struct irq_data *irqd)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	int ret;

	ret = assign_irq_vector_any_locked(irqd);
	if (!ret) {
		apicd->has_reserved = false;
		/*
		 * Core might have disabled reservation mode after
		 * allocating the irq descriptor. Ideally this should
		 * happen before allocation time, but that would require
		 * completely convoluted ways of transporting that
		 * information.
		 */
		if (!irqd_can_reserve(irqd))
			apicd->can_reserve = false;
	}

	/*
	 * Check to ensure that the effective affinity mask is a subset of
	 * the user-supplied affinity mask, and warn the user if it is not.
	 */
	if (!cpumask_subset(irq_data_get_effective_affinity_mask(irqd),
			    irq_data_get_affinity_mask(irqd))) {
		pr_warn("irq %u: Affinity broken due to vector space exhaustion.\n",
			irqd->irq);
	}

	return ret;
}

static int activate_managed(struct irq_data *irqd)
{
	const struct cpumask *dest = irq_data_get_affinity_mask(irqd);
	int ret;

	cpumask_and(vector_searchmask, dest, cpu_online_mask);
	if (WARN_ON_ONCE(cpumask_empty(vector_searchmask))) {
		/* Something in the core code broke! Survive gracefully */
		pr_err("Managed startup for irq %u, but no CPU\n", irqd->irq);
		return -EINVAL;
	}

	ret = assign_managed_vector(irqd, vector_searchmask);
	/*
	 * This should not happen. The vector reservation got buggered.
	 * Handle it gracefully.
	 */
	if (WARN_ON_ONCE(ret < 0)) {
		pr_err("Managed startup irq %u, no vector available\n",
		       irqd->irq);
	}
	return ret;
}

static int x86_vector_activate(struct irq_domain *dom, struct irq_data *irqd,
			       bool reserve)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	unsigned long flags;
	int ret = 0;

	trace_vector_activate(irqd->irq, apicd->is_managed,
			      apicd->can_reserve, reserve);

	raw_spin_lock_irqsave(&vector_lock, flags);
	if (!apicd->can_reserve && !apicd->is_managed)
		assign_irq_vector_any_locked(irqd);
	else if (reserve || irqd_is_managed_and_shutdown(irqd))
		vector_assign_managed_shutdown(irqd);
	else if (apicd->is_managed)
		ret = activate_managed(irqd);
	else if (apicd->has_reserved)
		ret = activate_reserved(irqd);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	return ret;
}

static void vector_free_reserved_and_managed(struct irq_data *irqd)
{
	const struct cpumask *dest = irq_data_get_affinity_mask(irqd);
	struct apic_chip_data *apicd = apic_chip_data(irqd);

	trace_vector_teardown(irqd->irq, apicd->is_managed,
			      apicd->has_reserved);

	if (apicd->has_reserved)
		irq_matrix_remove_reserved(vector_matrix);
	if (apicd->is_managed)
		irq_matrix_remove_managed(vector_matrix, dest);
}

static void x86_vector_free_irqs(struct irq_domain *domain,
				 unsigned int virq, unsigned int nr_irqs)
{
	struct apic_chip_data *apicd;
	struct irq_data *irqd;
	unsigned long flags;
	int i;

	for (i = 0; i < nr_irqs; i++) {
		irqd = irq_domain_get_irq_data(x86_vector_domain, virq + i);
		if (irqd && irqd->chip_data) {
			raw_spin_lock_irqsave(&vector_lock, flags);
			clear_irq_vector(irqd);
			vector_free_reserved_and_managed(irqd);
			apicd = irqd->chip_data;
			irq_domain_reset_irq_data(irqd);
			raw_spin_unlock_irqrestore(&vector_lock, flags);
			free_apic_chip_data(apicd);
		}
	}
}

static bool vector_configure_legacy(unsigned int virq, struct irq_data *irqd,
				    struct apic_chip_data *apicd)
{
	unsigned long flags;
	bool realloc = false;

	apicd->vector = ISA_IRQ_VECTOR(virq);
	apicd->cpu = 0;

	raw_spin_lock_irqsave(&vector_lock, flags);
	/*
	 * If the interrupt is activated, then it must stay at this vector
	 * position. That's usually the timer interrupt (0).
	 */
	if (irqd_is_activated(irqd)) {
		trace_vector_setup(virq, true, 0);
		apic_update_irq_cfg(irqd, apicd->vector, apicd->cpu);
	} else {
		/* Release the vector */
		apicd->can_reserve = true;
		irqd_set_can_reserve(irqd);
		clear_irq_vector(irqd);
		realloc = true;
	}
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	return realloc;
}

static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs, void *arg)
{
	struct irq_alloc_info *info = arg;
	struct apic_chip_data *apicd;
	struct irq_data *irqd;
	int i, err, node;

	if (apic_is_disabled)
		return -ENXIO;

	/*
	 * Catch any attempt to touch the cascade interrupt on a PIC
	 * equipped system.
	 */
	if (WARN_ON_ONCE(info->flags & X86_IRQ_ALLOC_LEGACY &&
			 virq == PIC_CASCADE_IR))
		return -EINVAL;

	for (i = 0; i < nr_irqs; i++) {
		irqd = irq_domain_get_irq_data(domain, virq + i);
		BUG_ON(!irqd);
		node = irq_data_get_node(irqd);
		WARN_ON_ONCE(irqd->chip_data);
		apicd = alloc_apic_chip_data(node);
		if (!apicd) {
			err = -ENOMEM;
			goto error;
		}

		apicd->irq = virq + i;
		irqd->chip = &lapic_controller;
		irqd->chip_data = apicd;
		irqd->hwirq = virq + i;
		irqd_set_single_target(irqd);
		/*
		 * Prevent any of these interrupts from being invoked in
		 * non-interrupt context via e.g. generic_handle_irq(),
		 * as that can corrupt the affinity move state.
		 */
		irqd_set_handle_enforce_irqctx(irqd);

		/* Don't invoke affinity setter on deactivated interrupts */
		irqd_set_affinity_on_activate(irqd);

		/*
		 * Legacy vectors are already assigned when the IOAPIC
		 * takes them over. They stay on the same vector. This is
		 * required for check_timer() to work correctly as it might
		 * switch back to legacy mode. Only update the hardware
		 * config.
		 */
		if (info->flags & X86_IRQ_ALLOC_LEGACY) {
			if (!vector_configure_legacy(virq + i, irqd, apicd))
				continue;
		}

		err = assign_irq_vector_policy(irqd, info);
		trace_vector_setup(virq + i, false, err);
		if (err) {
			irqd->chip_data = NULL;
			free_apic_chip_data(apicd);
			goto error;
		}
	}

	return 0;

error:
	x86_vector_free_irqs(domain, virq, i);
	return err;
}

#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
static void x86_vector_debug_show(struct seq_file *m, struct irq_domain *d,
				  struct irq_data *irqd, int ind)
{
	struct apic_chip_data apicd;
	unsigned long flags;
	int irq;

	if (!irqd) {
		irq_matrix_debug_show(m, vector_matrix, ind);
		return;
	}

	irq = irqd->irq;
	if (irq < nr_legacy_irqs() && !test_bit(irq, &io_apic_irqs)) {
		seq_printf(m, "%*sVector: %5d\n", ind, "", ISA_IRQ_VECTOR(irq));
		seq_printf(m, "%*sTarget: Legacy PIC all CPUs\n", ind, "");
		return;
	}

	if (!irqd->chip_data) {
		seq_printf(m, "%*sVector: Not assigned\n", ind, "");
		return;
	}

	raw_spin_lock_irqsave(&vector_lock, flags);
	memcpy(&apicd, irqd->chip_data, sizeof(apicd));
	raw_spin_unlock_irqrestore(&vector_lock, flags);

	seq_printf(m, "%*sVector: %5u\n", ind, "", apicd.vector);
	seq_printf(m, "%*sTarget: %5u\n", ind, "", apicd.cpu);
	if (apicd.prev_vector) {
		seq_printf(m, "%*sPrevious vector: %5u\n", ind, "", apicd.prev_vector);
		seq_printf(m, "%*sPrevious target: %5u\n", ind, "", apicd.prev_cpu);
	}
	seq_printf(m, "%*smove_in_progress: %u\n", ind, "", apicd.move_in_progress ? 1 : 0);
	seq_printf(m, "%*sis_managed:       %u\n", ind, "", apicd.is_managed ? 1 : 0);
	seq_printf(m, "%*scan_reserve:      %u\n", ind, "", apicd.can_reserve ? 1 : 0);
	seq_printf(m, "%*shas_reserved:     %u\n", ind, "", apicd.has_reserved ? 1 : 0);
	seq_printf(m, "%*scleanup_pending:  %u\n", ind, "", !hlist_unhashed(&apicd.clist));
}
#endif

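/*
 * Match a firmware node named "IO-APIC-<index>" (or the CE4100 devicetree
 * node) against the I/OAPIC instance requested in the fwspec parameter.
 */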
int x86_fwspec_is_ioapic(struct irq_fwspec *fwspec)
{
	if (fwspec->param_count != 1)
		return 0;

	if (is_fwnode_irqchip(fwspec->fwnode)) {
		const char *fwname = fwnode_get_name(fwspec->fwnode);

		return fwname && !strncmp(fwname, "IO-APIC-", 8) &&
			simple_strtol(fwname+8, NULL, 10) == fwspec->param[0];
	}
	return to_of_node(fwspec->fwnode) &&
	       of_device_is_compatible(to_of_node(fwspec->fwnode),
				       "intel,ce4100-ioapic");
}

int x86_fwspec_is_hpet(struct irq_fwspec *fwspec)
{
	if (fwspec->param_count != 1)
		return 0;

	if (is_fwnode_irqchip(fwspec->fwnode)) {
		const char *fwname = fwnode_get_name(fwspec->fwnode);

		return fwname && !strncmp(fwname, "HPET-MSI-", 9) &&
			simple_strtol(fwname+9, NULL, 10) == fwspec->param[0];
	}
	return 0;
}

static int x86_vector_select(struct irq_domain *d, struct irq_fwspec *fwspec,
			     enum irq_domain_bus_token bus_token)
{
	/*
	 * HPET and I/OAPIC cannot be parented in the vector domain
	 * if IRQ remapping is enabled. APIC IDs above 15 bits are
	 * only permitted if IRQ remapping is enabled, so check that.
	 */
	if (apic_id_valid(32768))
		return 0;

	return x86_fwspec_is_ioapic(fwspec) || x86_fwspec_is_hpet(fwspec);
}

static const struct irq_domain_ops x86_vector_domain_ops = {
	.select		= x86_vector_select,
	.alloc		= x86_vector_alloc_irqs,
	.free		= x86_vector_free_irqs,
	.activate	= x86_vector_activate,
	.deactivate	= x86_vector_deactivate,
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
	.debug_show	= x86_vector_debug_show,
#endif
};

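/*
 * Scale nr_irqs to the platform: the GSI range plus headroom for MSI
 * allocations, capped at NR_VECTORS per possible CPU.
 */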
int __init arch_probe_nr_irqs(void)
{
	int nr;

	if (irq_get_nr_irqs() > NR_VECTORS * nr_cpu_ids)
		irq_set_nr_irqs(NR_VECTORS * nr_cpu_ids);

	nr = (gsi_top + nr_legacy_irqs()) + 8 * nr_cpu_ids;
#if defined(CONFIG_PCI_MSI)
	/*
	 * for MSI and HT dyn irq
	 */
	if (gsi_top <= NR_IRQS_LEGACY)
		nr += 8 * nr_cpu_ids;
	else
		nr += gsi_top * 16;
#endif
	if (nr < irq_get_nr_irqs())
		irq_set_nr_irqs(nr);

	/*
	 * We don't know if PIC is present at this point so we need to do
	 * probe() to get the right number of legacy IRQs.
	 */
	return legacy_pic->probe();
}

void lapic_assign_legacy_vector(unsigned int irq, bool replace)
{
	/*
	 * Use assign system here so it won't get accounted as allocated
	 * and movable in the cpu hotplug check and it prevents managed
	 * irq reservation from touching it.
	 */
	irq_matrix_assign_system(vector_matrix, ISA_IRQ_VECTOR(irq), replace);
}

void __init lapic_update_legacy_vectors(void)
{
	unsigned int i;

	if (IS_ENABLED(CONFIG_X86_IO_APIC) && nr_ioapics > 0)
		return;

	/*
	 * If the IO/APIC is disabled via config, kernel command line or
	 * lack of enumeration then all legacy interrupts are routed
	 * through the PIC. Make sure that they are marked as legacy
	 * vectors. PIC_CASCADE_IR has already been marked in
	 * lapic_assign_system_vectors().
	 */
	for (i = 0; i < nr_legacy_irqs(); i++) {
		if (i != PIC_CASCADE_IR)
			lapic_assign_legacy_vector(i, true);
	}
}

void __init lapic_assign_system_vectors(void)
{
	unsigned int i, vector;

	for_each_set_bit(vector, system_vectors, NR_VECTORS)
		irq_matrix_assign_system(vector_matrix, vector, false);

	if (nr_legacy_irqs() > 1)
		lapic_assign_legacy_vector(PIC_CASCADE_IR, false);

	/* System vectors are reserved, so bring the matrix online */
	irq_matrix_online(vector_matrix);

	/* Mark the preallocated legacy interrupts */
	for (i = 0; i < nr_legacy_irqs(); i++) {
		/*
		 * Don't touch the cascade interrupt. It's unusable
		 * on PIC equipped machines. See the large comment
		 * in the IO/APIC code.
		 */
		if (i != PIC_CASCADE_IR)
			irq_matrix_assign(vector_matrix, ISA_IRQ_VECTOR(i));
	}
}

int __init arch_early_irq_init(void)
{
	struct fwnode_handle *fn;

	fn = irq_domain_alloc_named_fwnode("VECTOR");
	BUG_ON(!fn);
	x86_vector_domain = irq_domain_create_tree(fn, &x86_vector_domain_ops,
						   NULL);
	BUG_ON(x86_vector_domain == NULL);
	irq_set_default_domain(x86_vector_domain);

	BUG_ON(!alloc_cpumask_var(&vector_searchmask, GFP_KERNEL));

	/*
	 * Allocate the vector matrix allocator data structure and limit the
	 * search area.
	 */
	vector_matrix = irq_alloc_matrix(NR_VECTORS, FIRST_EXTERNAL_VECTOR,
					 FIRST_SYSTEM_VECTOR);
	BUG_ON(!vector_matrix);

	return arch_early_ioapic_init();
}

#ifdef CONFIG_SMP

static struct irq_desc *__setup_vector_irq(int vector)
{
	int isairq = vector - ISA_IRQ_VECTOR(0);

	/* Check whether the irq is in the legacy space */
	if (isairq < 0 || isairq >= nr_legacy_irqs())
		return VECTOR_UNUSED;
	/* Check whether the irq is handled by the IOAPIC */
	if (test_bit(isairq, &io_apic_irqs))
		return VECTOR_UNUSED;
	return irq_to_desc(isairq);
}

/* Online the local APIC infrastructure and initialize the vectors */
void lapic_online(void)
{
	unsigned int vector;

	lockdep_assert_held(&vector_lock);

	/* Online the vector matrix array for this CPU */
	irq_matrix_online(vector_matrix);

	/*
	 * The interrupt affinity logic never targets interrupts to offline
	 * CPUs. The exceptions are the legacy PIC interrupts. In general
	 * they are only targeted to CPU0, but depending on the platform
	 * they can be distributed to any online CPU in hardware. The
	 * kernel has no influence on that. So all active legacy vectors
	 * must be installed on all CPUs. All non legacy interrupts can be
	 * cleared.
	 */
	for (vector = 0; vector < NR_VECTORS; vector++)
		this_cpu_write(vector_irq[vector], __setup_vector_irq(vector));
}

static void __vector_cleanup(struct vector_cleanup *cl, bool check_irr);

void lapic_offline(void)
{
	struct vector_cleanup *cl = this_cpu_ptr(&vector_cleanup);

	lock_vector_lock();

	/* In case the vector cleanup timer has not expired */
	__vector_cleanup(cl, false);

	irq_matrix_offline(vector_matrix);
	WARN_ON_ONCE(timer_delete_sync_try(&cl->timer) < 0);
	WARN_ON_ONCE(!hlist_empty(&cl->head));

	unlock_vector_lock();
}

static int apic_set_affinity(struct irq_data *irqd,
			     const struct cpumask *dest, bool force)
{
	int err;

	if (WARN_ON_ONCE(!irqd_is_activated(irqd)))
		return -EIO;

	raw_spin_lock(&vector_lock);
	cpumask_and(vector_searchmask, dest, cpu_online_mask);
	if (irqd_affinity_is_managed(irqd))
		err = assign_managed_vector(irqd, vector_searchmask);
	else
		err = assign_vector_locked(irqd, vector_searchmask);
	raw_spin_unlock(&vector_lock);
	return err ? err : IRQ_SET_MASK_OK;
}

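/*
 * Release the vector left behind on the previous target CPU once an
 * affinity move has completed (or has been forcibly completed).
 */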
static void free_moved_vector(struct apic_chip_data *apicd)
{
	unsigned int vector = apicd->prev_vector;
	unsigned int cpu = apicd->prev_cpu;
	bool managed = apicd->is_managed;

	/*
	 * Managed interrupts are usually not migrated away
	 * from an online CPU, but CPU isolation 'managed_irq'
	 * can make that happen.
	 * 1) Activation does not take the isolation into account
	 *    to keep the code simple
	 * 2) Migration away from an isolated CPU can happen when
	 *    a non-isolated CPU which is in the calculated
	 *    affinity mask comes online.
	 */
	trace_vector_free_moved(apicd->irq, cpu, vector, managed);
	apic_free_vector(cpu, vector, managed);
	per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;
	hlist_del_init(&apicd->clist);
	apicd->prev_vector = 0;
	apicd->move_in_progress = 0;
}

/*
 * Called from fixup_irqs() with @desc->lock held and interrupts disabled.
 */
static void apic_force_complete_move(struct irq_data *irqd)
{
	unsigned int cpu = smp_processor_id();
	struct apic_chip_data *apicd;
	unsigned int vector;

	guard(raw_spinlock)(&vector_lock);
	apicd = apic_chip_data(irqd);
	if (!apicd)
		return;

	/*
	 * If prev_vector is empty or the descriptor is neither currently
	 * nor previously on the outgoing CPU, no action is required.
	 */
	vector = apicd->prev_vector;
	if (!vector || (apicd->cpu != cpu && apicd->prev_cpu != cpu))
		return;

	/*
	 * This is tricky. If the cleanup of the old vector has not been
	 * done yet, then the following setaffinity call will fail with
	 * -EBUSY. This can leave the interrupt in a stale state.
	 *
	 * All CPUs are stuck in stop machine with interrupts disabled so
	 * calling __irq_complete_move() would be completely pointless.
	 *
	 * 1) The interrupt is in move_in_progress state. That means that we
	 *    have not seen an interrupt since the io_apic was reprogrammed to
	 *    the new vector.
	 *
	 * 2) The interrupt has fired on the new vector, but the cleanup IPIs
	 *    have not been processed yet.
	 */
	if (apicd->move_in_progress) {
		/*
		 * In theory there is a race:
		 *
		 * set_ioapic(new_vector) <-- Interrupt is raised before update
		 *			      is effective, i.e. it's raised on
		 *			      the old vector.
		 *
		 * So if the target cpu cannot handle that interrupt before
		 * the old vector is cleaned up, we get a spurious interrupt
		 * and in the worst case the ioapic irq line becomes stale.
		 *
		 * But in case of cpu hotplug this should be a non issue
		 * because if the affinity update happens right before all
		 * cpus rendezvous in stop machine, there is no way that the
		 * interrupt can be blocked on the target cpu because all
		 * CPUs loop first with interrupts enabled in stop machine,
		 * so the old vector is not yet cleaned up when the
		 * interrupt fires.
		 *
		 * So the only way to run into this issue is if the delivery
		 * of the interrupt on the apic/system bus would be delayed
		 * beyond the point where the target cpu disables interrupts
		 * in stop machine. I doubt that it can happen, but at least
		 * there is a theoretical chance. Virtualization might be
		 * able to expose this, but AFAICT the IOAPIC emulation is not
		 * as stupid as the real hardware.
		 *
		 * Anyway, there is nothing we can do about that at this point
		 * w/o refactoring the whole fixup_irq() business completely.
		 * We print at least the irq number and the old vector number,
		 * so we have the necessary information when a problem in that
		 * area arises.
		 */
		pr_warn("IRQ fixup: irq %d move in progress, old vector %d\n",
			irqd->irq, vector);
	}
	free_moved_vector(apicd);
}

#else
# define apic_set_affinity		NULL
# define apic_force_complete_move	NULL
#endif

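/*
 * Retrigger the interrupt by sending an IPI to the current target CPU on
 * the currently assigned vector.
 */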
static int apic_retrigger_irq(struct irq_data *irqd)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	unsigned long flags;

	raw_spin_lock_irqsave(&vector_lock, flags);
	__apic_send_IPI(apicd->cpu, apicd->vector);
	raw_spin_unlock_irqrestore(&vector_lock, flags);

	return 1;
}

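/*
 * Ack: handle a pending affinity move from a safe context first, then
 * signal EOI to the local APIC.
 */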
void apic_ack_irq(struct irq_data *irqd)
{
	irq_move_irq(irqd);
	apic_eoi();
}

void apic_ack_edge(struct irq_data *irqd)
{
	irq_complete_move(irqd_cfg(irqd));
	apic_ack_irq(irqd);
}

static void x86_vector_msi_compose_msg(struct irq_data *data,
				       struct msi_msg *msg)
{
	__irq_msi_compose_msg(irqd_cfg(data), msg, false);
}

static struct irq_chip lapic_controller = {
	.name				= "APIC",
	.irq_ack			= apic_ack_edge,
	.irq_set_affinity		= apic_set_affinity,
	.irq_compose_msi_msg		= x86_vector_msi_compose_msg,
	.irq_force_complete_move	= apic_force_complete_move,
	.irq_retrigger			= apic_retrigger_irq,
};

#ifdef CONFIG_SMP

static void __vector_cleanup(struct vector_cleanup *cl, bool check_irr)
{
	struct apic_chip_data *apicd;
	struct hlist_node *tmp;
	bool rearm = false;

	lockdep_assert_held(&vector_lock);

	hlist_for_each_entry_safe(apicd, tmp, &cl->head, clist) {
		unsigned int vector = apicd->prev_vector;

		/*
		 * Paranoia: Check if the vector that needs to be cleaned
		 * up is registered at the APICs IRR. That's clearly a
		 * hardware issue if the vector arrived on the old target
		 * _after_ interrupts were disabled above. Keep @apicd
		 * on the list and schedule the timer again to give the CPU
		 * a chance to handle the pending interrupt.
		 *
		 * Do not check IRR when called from lapic_offline(), because
		 * fixup_irqs() was just called to scan IRR for set bits and
		 * forward them to new destination CPUs via IPIs.
		 */
		if (check_irr && is_vector_pending(vector)) {
			pr_warn_once("Moved interrupt pending in old target APIC %u\n", apicd->irq);
			rearm = true;
			continue;
		}
		free_moved_vector(apicd);
	}

	/*
	 * Must happen under vector_lock to make the timer_pending() check
	 * in __vector_schedule_cleanup() race free against the rearm here.
	 */
	if (rearm)
		mod_timer(&cl->timer, jiffies + 1);
}

static void vector_cleanup_callback(struct timer_list *tmr)
{
	struct vector_cleanup *cl = container_of(tmr, typeof(*cl), timer);

	/* Prevent vectors vanishing under us */
	raw_spin_lock_irq(&vector_lock);
	__vector_cleanup(cl, true);
	raw_spin_unlock_irq(&vector_lock);
}

static void __vector_schedule_cleanup(struct apic_chip_data *apicd)
{
	unsigned int cpu = apicd->prev_cpu;

	raw_spin_lock(&vector_lock);
	apicd->move_in_progress = 0;
	if (cpu_online(cpu)) {
		struct vector_cleanup *cl = per_cpu_ptr(&vector_cleanup, cpu);

		hlist_add_head(&apicd->clist, &cl->head);

		/*
		 * The lockless timer_pending() check is safe here. If it
		 * returns true, then the callback will observe this new
		 * apic data in the hlist as everything is serialized by
		 * vector lock.
		 *
		 * If it returns false then the timer is either not armed
		 * or the other CPU executes the callback, which again
		 * would be blocked on vector lock. Rearming it in the
		 * latter case makes it fire for nothing.
		 *
		 * This is also safe against the callback rearming the timer
		 * because that's serialized via vector lock too.
		 */
		if (!timer_pending(&cl->timer)) {
			cl->timer.expires = jiffies + 1;
			add_timer_on(&cl->timer, cpu);
		}
	} else {
		pr_warn("IRQ %u schedule cleanup for offline CPU %u\n", apicd->irq, cpu);
		free_moved_vector(apicd);
	}
	raw_spin_unlock(&vector_lock);
}

void vector_schedule_cleanup(struct irq_cfg *cfg)
{
	struct apic_chip_data *apicd;

	apicd = container_of(cfg, struct apic_chip_data, hw_irq_cfg);
	if (apicd->move_in_progress)
		__vector_schedule_cleanup(apicd);
}

void irq_complete_move(struct irq_cfg *cfg)
{
	struct apic_chip_data *apicd;

	apicd = container_of(cfg, struct apic_chip_data, hw_irq_cfg);
	if (likely(!apicd->move_in_progress))
		return;

	/*
	 * If the interrupt arrived on the new target CPU, cleanup the
	 * vector on the old target CPU. A vector check is not required
	 * because an interrupt can never move from one vector to another
	 * on the same CPU.
	 */
	if (apicd->cpu == smp_processor_id())
		__vector_schedule_cleanup(apicd);
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Note, this is not accurate accounting, but at least good enough to
 * prevent the actual interrupt move from running out of vectors.
 */
int lapic_can_unplug_cpu(void)
{
	unsigned int rsvd, avl, tomove, cpu = smp_processor_id();
	int ret = 0;

	raw_spin_lock(&vector_lock);
	tomove = irq_matrix_allocated(vector_matrix);
	avl = irq_matrix_available(vector_matrix, true);
	if (avl < tomove) {
		pr_warn("CPU %u has %u vectors, %u available. Cannot disable CPU\n",
			cpu, tomove, avl);
		ret = -ENOSPC;
		goto out;
	}
	rsvd = irq_matrix_reserved(vector_matrix);
	if (avl < rsvd) {
		pr_warn("Reserved vectors %u > available %u. IRQ request may fail\n",
			rsvd, avl);
	}
out:
	raw_spin_unlock(&vector_lock);
	return ret;
}
#endif /* HOTPLUG_CPU */
#endif /* SMP */

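/*
 * ISR, TMR and IRR are 256-bit APIC registers, exposed as eight
 * consecutive 32-bit words spaced 0x10 apart in the register window.
 */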
static void __init print_APIC_field(int base)
{
	int i;

	printk(KERN_DEBUG);

	for (i = 0; i < 8; i++)
		pr_cont("%08x", apic_read(base + i*0x10));

	pr_cont("\n");
}

static void __init print_local_APIC(void *dummy)
{
	unsigned int i, v, ver, maxlvt;
	u64 icr;

	pr_debug("printing local APIC contents on CPU#%d/%d:\n",
		 smp_processor_id(), read_apic_id());
	v = apic_read(APIC_ID);
	pr_info("... APIC ID:      %08x (%01x)\n", v, read_apic_id());
	v = apic_read(APIC_LVR);
	pr_info("... APIC VERSION: %08x\n", v);
	ver = GET_APIC_VERSION(v);
	maxlvt = lapic_get_maxlvt();

	v = apic_read(APIC_TASKPRI);
	pr_debug("... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);

	/* !82489DX */
	if (APIC_INTEGRATED(ver)) {
		if (!APIC_XAPIC(ver)) {
			v = apic_read(APIC_ARBPRI);
			pr_debug("... APIC ARBPRI: %08x (%02x)\n",
				 v, v & APIC_ARBPRI_MASK);
		}
		v = apic_read(APIC_PROCPRI);
		pr_debug("... APIC PROCPRI: %08x\n", v);
	}

	/*
	 * Remote read supported only in the 82489DX and local APIC for
	 * Pentium processors.
	 */
	if (!APIC_INTEGRATED(ver) || maxlvt == 3) {
		v = apic_read(APIC_RRR);
		pr_debug("... APIC RRR: %08x\n", v);
	}

	v = apic_read(APIC_LDR);
	pr_debug("... APIC LDR: %08x\n", v);
	if (!x2apic_enabled()) {
		v = apic_read(APIC_DFR);
		pr_debug("... APIC DFR: %08x\n", v);
	}
	v = apic_read(APIC_SPIV);
	pr_debug("... APIC SPIV: %08x\n", v);

	pr_debug("... APIC ISR field:\n");
	print_APIC_field(APIC_ISR);
	pr_debug("... APIC TMR field:\n");
	print_APIC_field(APIC_TMR);
	pr_debug("... APIC IRR field:\n");
	print_APIC_field(APIC_IRR);

	/* !82489DX */
	if (APIC_INTEGRATED(ver)) {
		/* Due to the Pentium erratum 3AP. */
		if (maxlvt > 3)
			apic_write(APIC_ESR, 0);

		v = apic_read(APIC_ESR);
		pr_debug("... APIC ESR: %08x\n", v);
	}

	icr = apic_icr_read();
	pr_debug("... APIC ICR: %08x\n", (u32)icr);
	pr_debug("... APIC ICR2: %08x\n", (u32)(icr >> 32));

	v = apic_read(APIC_LVTT);
	pr_debug("... APIC LVTT: %08x\n", v);

	if (maxlvt > 3) {
		/* PC is LVT#4. */
		v = apic_read(APIC_LVTPC);
		pr_debug("... APIC LVTPC: %08x\n", v);
	}
	v = apic_read(APIC_LVT0);
	pr_debug("... APIC LVT0: %08x\n", v);
	v = apic_read(APIC_LVT1);
	pr_debug("... APIC LVT1: %08x\n", v);

	if (maxlvt > 2) {
		/* ERR is LVT#3. */
		v = apic_read(APIC_LVTERR);
		pr_debug("... APIC LVTERR: %08x\n", v);
	}

	v = apic_read(APIC_TMICT);
	pr_debug("... APIC TMICT: %08x\n", v);
	v = apic_read(APIC_TMCCT);
	pr_debug("... APIC TMCCT: %08x\n", v);
	v = apic_read(APIC_TDCR);
	pr_debug("... APIC TDCR: %08x\n", v);

	if (boot_cpu_has(X86_FEATURE_EXTAPIC)) {
		v = apic_read(APIC_EFEAT);
		maxlvt = (v >> 16) & 0xff;
		pr_debug("... APIC EFEAT: %08x\n", v);
		v = apic_read(APIC_ECTRL);
		pr_debug("... APIC ECTRL: %08x\n", v);
		for (i = 0; i < maxlvt; i++) {
			v = apic_read(APIC_EILVTn(i));
			pr_debug("... APIC EILVT%d: %08x\n", i, v);
		}
	}
	pr_cont("\n");
}

static void __init print_local_APICs(int maxcpu)
{
	int cpu;

	if (!maxcpu)
		return;

	preempt_disable();
	for_each_online_cpu(cpu) {
		if (cpu >= maxcpu)
			break;
		smp_call_function_single(cpu, print_local_APIC, NULL, 1);
	}
	preempt_enable();
}

static void __init print_PIC(void)
{
	unsigned int v;
	unsigned long flags;

	if (!nr_legacy_irqs())
		return;

	pr_debug("\nprinting PIC contents\n");

	raw_spin_lock_irqsave(&i8259A_lock, flags);

	v = inb(0xa1) << 8 | inb(0x21);
	pr_debug("... PIC  IMR: %04x\n", v);

	v = inb(0xa0) << 8 | inb(0x20);
	pr_debug("... PIC  IRR: %04x\n", v);

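	/* OCW3 0x0b selects the ISR for the next read; 0x0a restores IRR reads */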
	outb(0x0b, 0xa0);
	outb(0x0b, 0x20);
	v = inb(0xa0) << 8 | inb(0x20);
	outb(0x0a, 0xa0);
	outb(0x0a, 0x20);

	raw_spin_unlock_irqrestore(&i8259A_lock, flags);

	pr_debug("... PIC  ISR: %04x\n", v);

	v = inb(PIC_ELCR2) << 8 | inb(PIC_ELCR1);
	pr_debug("... PIC ELCR: %04x\n", v);
}

static int show_lapic __initdata = 1;
static __init int setup_show_lapic(char *arg)
{
	int num = -1;

	if (strcmp(arg, "all") == 0) {
		show_lapic = CONFIG_NR_CPUS;
	} else {
		get_option(&arg, &num);
		if (num >= 0)
			show_lapic = num;
	}

	return 1;
}
__setup("show_lapic=", setup_show_lapic);

static int __init print_ICs(void)
{
	if (apic_verbosity == APIC_QUIET)
		return 0;

	print_PIC();

	/* don't print out if apic is not there */
	if (!boot_cpu_has(X86_FEATURE_APIC) && !apic_from_smp_config())
		return 0;

	print_local_APICs(show_lapic);
	print_IO_APICs();

	return 0;
}

late_initcall(print_ICs);
|---|