// SPDX-License-Identifier: GPL-2.0-only
/*
 * Common interrupt code for 32 and 64 bit
 */
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/of.h>
#include <linux/seq_file.h>
#include <linux/smp.h>
#include <linux/ftrace.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/irq.h>

#include <asm/irq_stack.h>
#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/irq.h>
#include <asm/mce.h>
#include <asm/hw_irq.h>
#include <asm/desc.h>
#include <asm/traps.h>
#include <asm/thermal.h>

#define CREATE_TRACE_POINTS
#include <asm/trace/irq_vectors.h>

DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

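/*
 * Global count of interrupts that arrived on an unexpected or invalid
 * vector. It shows up as the "ERR" line in /proc/interrupts and is
 * folded into the system-wide total by arch_irq_stat() below.
 */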
atomic_t irq_err_count;

/*
 * 'what should we do if we get a hw irq event on an illegal vector?'
 * Each architecture has to answer this itself.
 */
void ack_bad_irq(unsigned int irq)
{
	if (printk_ratelimit())
		pr_err("unexpected IRQ trap at vector %02x\n", irq);

	/*
	 * Currently unexpected vectors happen only on SMP and APIC.
	 * We _must_ ack these because every local APIC has only N
	 * irq slots per priority level, and a 'hanging, unacked' IRQ
	 * holds up an irq slot - in excessive cases (when multiple
	 * unexpected vectors occur) that might lock up the APIC
	 * completely.
	 * But only ack when the APIC is enabled -AK
	 */
	ack_APIC_irq();
}

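/*
 * irq_stats(x) yields a pointer to the per-CPU interrupt statistics of
 * CPU x. arch_show_interrupts() below walks these counters to emit the
 * architecture-specific rows of /proc/interrupts, roughly of the form
 * (counts purely illustrative):
 *
 *   NMI:        0          0   Non-maskable interrupts
 *   LOC:     3719       3719   Local timer interrupts
 */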
#define irq_stats(x)		(&per_cpu(irq_stat, x))
/*
 * /proc/interrupts printing for arch specific interrupts
 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
	int j;

	seq_printf(p, "%*s: ", prec, "NMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->__nmi_count);
	seq_puts(p, "  Non-maskable interrupts\n");
#ifdef CONFIG_X86_LOCAL_APIC
	seq_printf(p, "%*s: ", prec, "LOC");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->apic_timer_irqs);
	seq_puts(p, "  Local timer interrupts\n");

	seq_printf(p, "%*s: ", prec, "SPU");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_spurious_count);
	seq_puts(p, "  Spurious interrupts\n");
	seq_printf(p, "%*s: ", prec, "PMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->apic_perf_irqs);
	seq_puts(p, "  Performance monitoring interrupts\n");
	seq_printf(p, "%*s: ", prec, "IWI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->apic_irq_work_irqs);
	seq_puts(p, "  IRQ work interrupts\n");
	seq_printf(p, "%*s: ", prec, "RTR");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->icr_read_retry_count);
	seq_puts(p, "  APIC ICR read retries\n");
	if (x86_platform_ipi_callback) {
		seq_printf(p, "%*s: ", prec, "PLT");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", irq_stats(j)->x86_platform_ipis);
		seq_puts(p, "  Platform interrupts\n");
	}
#endif
#ifdef CONFIG_SMP
	seq_printf(p, "%*s: ", prec, "RES");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
	seq_puts(p, "  Rescheduling interrupts\n");
	seq_printf(p, "%*s: ", prec, "CAL");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_call_count);
	seq_puts(p, "  Function call interrupts\n");
	seq_printf(p, "%*s: ", prec, "TLB");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
	seq_puts(p, "  TLB shootdowns\n");
#endif
#ifdef CONFIG_X86_THERMAL_VECTOR
	seq_printf(p, "%*s: ", prec, "TRM");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_thermal_count);
	seq_puts(p, "  Thermal event interrupts\n");
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
	seq_printf(p, "%*s: ", prec, "THR");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_threshold_count);
	seq_puts(p, "  Threshold APIC interrupts\n");
#endif
#ifdef CONFIG_X86_MCE_AMD
	seq_printf(p, "%*s: ", prec, "DFR");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_deferred_error_count);
	seq_puts(p, "  Deferred Error APIC interrupts\n");
#endif
#ifdef CONFIG_X86_MCE
	seq_printf(p, "%*s: ", prec, "MCE");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(mce_exception_count, j));
	seq_puts(p, "  Machine check exceptions\n");
	seq_printf(p, "%*s: ", prec, "MCP");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
	seq_puts(p, "  Machine check polls\n");
#endif
#ifdef CONFIG_X86_HV_CALLBACK_VECTOR
	if (test_bit(HYPERVISOR_CALLBACK_VECTOR, system_vectors)) {
		seq_printf(p, "%*s: ", prec, "HYP");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ",
				   irq_stats(j)->irq_hv_callback_count);
		seq_puts(p, "  Hypervisor callback interrupts\n");
	}
#endif
#if IS_ENABLED(CONFIG_HYPERV)
	if (test_bit(HYPERV_REENLIGHTENMENT_VECTOR, system_vectors)) {
		seq_printf(p, "%*s: ", prec, "HRE");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ",
				   irq_stats(j)->irq_hv_reenlightenment_count);
		seq_puts(p, "  Hyper-V reenlightenment interrupts\n");
	}
	if (test_bit(HYPERV_STIMER0_VECTOR, system_vectors)) {
		seq_printf(p, "%*s: ", prec, "HVS");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ",
				   irq_stats(j)->hyperv_stimer0_count);
		seq_puts(p, "  Hyper-V stimer0 interrupts\n");
	}
#endif
	seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
#if defined(CONFIG_X86_IO_APIC)
	seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
#endif
#ifdef CONFIG_HAVE_KVM
	seq_printf(p, "%*s: ", prec, "PIN");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->kvm_posted_intr_ipis);
	seq_puts(p, "  Posted-interrupt notification event\n");

	seq_printf(p, "%*s: ", prec, "NPI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ",
			   irq_stats(j)->kvm_posted_intr_nested_ipis);
	seq_puts(p, "  Nested posted-interrupt event\n");

	seq_printf(p, "%*s: ", prec, "PIW");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ",
			   irq_stats(j)->kvm_posted_intr_wakeup_ipis);
	seq_puts(p, "  Posted-interrupt wakeup event\n");
#endif
	return 0;
}

/*
 * /proc/stat helpers
 */
u64 arch_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = irq_stats(cpu)->__nmi_count;

#ifdef CONFIG_X86_LOCAL_APIC
	sum += irq_stats(cpu)->apic_timer_irqs;
	sum += irq_stats(cpu)->irq_spurious_count;
	sum += irq_stats(cpu)->apic_perf_irqs;
	sum += irq_stats(cpu)->apic_irq_work_irqs;
	sum += irq_stats(cpu)->icr_read_retry_count;
	if (x86_platform_ipi_callback)
		sum += irq_stats(cpu)->x86_platform_ipis;
#endif
#ifdef CONFIG_SMP
	sum += irq_stats(cpu)->irq_resched_count;
	sum += irq_stats(cpu)->irq_call_count;
#endif
#ifdef CONFIG_X86_THERMAL_VECTOR
	sum += irq_stats(cpu)->irq_thermal_count;
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
	sum += irq_stats(cpu)->irq_threshold_count;
#endif
#ifdef CONFIG_X86_MCE
	sum += per_cpu(mce_exception_count, cpu);
	sum += per_cpu(mce_poll_count, cpu);
#endif
	return sum;
}

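/*
 * Interrupts that are not attributable to any particular CPU; at the
 * moment this is only the global bad-vector count accumulated above.
 */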
u64 arch_irq_stat(void)
{
	u64 sum = atomic_read(&irq_err_count);
	return sum;
}

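/*
 * Invoke the handler for an already-mapped vector. On 64-bit the entry
 * code has already switched to the per-CPU IRQ stack by the time we get
 * here, so the descriptor handler can be called directly; on 32-bit,
 * __handle_irq() still takes care of switching to the hardirq stack
 * when necessary.
 */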
static __always_inline void handle_irq(struct irq_desc *desc,
				       struct pt_regs *regs)
{
	if (IS_ENABLED(CONFIG_X86_64))
		generic_handle_irq_desc(desc);
	else
		__handle_irq(desc, regs);
}

/*
 * common_interrupt() handles all normal device IRQ's (the special SMP
 * cross-CPU interrupts have their own entry points).
 */
DEFINE_IDTENTRY_IRQ(common_interrupt)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	struct irq_desc *desc;

	/* entry code tells RCU that we're not quiescent. Check it. */
	RCU_LOCKDEP_WARN(!rcu_is_watching(), "IRQ failed to wake up RCU");

	desc = __this_cpu_read(vector_irq[vector]);
	if (likely(!IS_ERR_OR_NULL(desc))) {
		handle_irq(desc, regs);
	} else {
		ack_APIC_irq();

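		/*
		 * A NULL descriptor (VECTOR_UNUSED) means the vector was
		 * never claimed, which is worth complaining about. The
		 * error-pointer states (VECTOR_SHUTDOWN/VECTOR_RETRIGGERED)
		 * mean the interrupt raced against vector teardown, so the
		 * slot is quietly reset to VECTOR_UNUSED instead.
		 */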
		if (desc == VECTOR_UNUSED) {
			pr_emerg_ratelimited("%s: %d.%u No irq handler for vector\n",
					     __func__, smp_processor_id(),
					     vector);
		} else {
			__this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
		}
	}

	set_irq_regs(old_regs);
}

#ifdef CONFIG_X86_LOCAL_APIC
/* Function pointer for generic interrupt vector handling */
void (*x86_platform_ipi_callback)(void) = NULL;
/*
 * Handler for X86_PLATFORM_IPI_VECTOR.
 */
DEFINE_IDTENTRY_SYSVEC(sysvec_x86_platform_ipi)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	ack_APIC_irq();
	trace_x86_platform_ipi_entry(X86_PLATFORM_IPI_VECTOR);
	inc_irq_stat(x86_platform_ipis);
	if (x86_platform_ipi_callback)
		x86_platform_ipi_callback();
	trace_x86_platform_ipi_exit(X86_PLATFORM_IPI_VECTOR);
	set_irq_regs(old_regs);
}
#endif

#ifdef CONFIG_HAVE_KVM
static void dummy_handler(void) {}
static void (*kvm_posted_intr_wakeup_handler)(void) = dummy_handler;

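/*
 * KVM installs a real handler while posted interrupts are in use and
 * restores the no-op default on teardown; defaulting to dummy_handler
 * rather than NULL keeps the wakeup IPI path free of a NULL check.
 */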
void kvm_set_posted_intr_wakeup_handler(void (*handler)(void))
{
	if (handler)
		kvm_posted_intr_wakeup_handler = handler;
	else
		kvm_posted_intr_wakeup_handler = dummy_handler;
}
EXPORT_SYMBOL_GPL(kvm_set_posted_intr_wakeup_handler);

/*
 * Handler for POSTED_INTERRUPT_VECTOR.
 */
DEFINE_IDTENTRY_SYSVEC_SIMPLE(sysvec_kvm_posted_intr_ipi)
{
	ack_APIC_irq();
	inc_irq_stat(kvm_posted_intr_ipis);
}

/*
 * Handler for POSTED_INTERRUPT_WAKEUP_VECTOR.
 */
DEFINE_IDTENTRY_SYSVEC(sysvec_kvm_posted_intr_wakeup_ipi)
{
	ack_APIC_irq();
	inc_irq_stat(kvm_posted_intr_wakeup_ipis);
	kvm_posted_intr_wakeup_handler();
}

/*
 * Handler for POSTED_INTERRUPT_NESTED_VECTOR.
 */
DEFINE_IDTENTRY_SYSVEC_SIMPLE(sysvec_kvm_posted_intr_nested_ipi)
{
	ack_APIC_irq();
	inc_irq_stat(kvm_posted_intr_nested_ipis);
}
#endif


#ifdef CONFIG_HOTPLUG_CPU
/* A cpu has been removed from cpu_online_mask. Reset irq affinities. */
void fixup_irqs(void)
{
	unsigned int irr, vector;
	struct irq_desc *desc;
	struct irq_data *data;
	struct irq_chip *chip;

	irq_migrate_all_off_this_cpu();

	/*
	 * We can remove mdelay() and then send spurious interrupts to
	 * new cpu targets for all the irqs that were handled previously by
	 * this cpu. While it works, I have seen spurious interrupt messages
	 * (nothing wrong but still...).
	 *
	 * So for now, retain mdelay(1) and check the IRR and then send those
	 * interrupts to new targets as this cpu is already offlined...
	 */
	mdelay(1);

	/*
	 * We can walk the vector array of this cpu without holding
	 * vector_lock because the cpu is already marked !online, so
	 * nothing else will touch it.
	 */
	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
		if (IS_ERR_OR_NULL(__this_cpu_read(vector_irq[vector])))
			continue;

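		/*
		 * The local APIC IRR is a 256-bit bitmap exposed as eight
		 * 32-bit registers spaced 0x10 apart, so vector / 32 picks
		 * the register and vector % 32 picks the bit within it.
		 */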
		irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
		if (irr & (1 << (vector % 32))) {
			desc = __this_cpu_read(vector_irq[vector]);

			raw_spin_lock(&desc->lock);
			data = irq_desc_get_irq_data(desc);
			chip = irq_data_get_irq_chip(data);
			if (chip->irq_retrigger) {
				chip->irq_retrigger(data);
				__this_cpu_write(vector_irq[vector], VECTOR_RETRIGGERED);
			}
			raw_spin_unlock(&desc->lock);
		}
		if (__this_cpu_read(vector_irq[vector]) != VECTOR_RETRIGGERED)
			__this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
	}
}
#endif

#ifdef CONFIG_X86_THERMAL_VECTOR
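/*
 * Route an LVT thermal interrupt to the vendor handler; only the Intel
 * thermal interrupt is wired up here, anything else is unexpected.
 */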
static void smp_thermal_vector(void)
{
	if (x86_thermal_enabled())
		intel_thermal_interrupt();
	else
		pr_err("CPU%d: Unexpected LVT thermal interrupt!\n",
		       smp_processor_id());
}

DEFINE_IDTENTRY_SYSVEC(sysvec_thermal)
{
	trace_thermal_apic_entry(THERMAL_APIC_VECTOR);
	inc_irq_stat(irq_thermal_count);
	smp_thermal_vector();
	trace_thermal_apic_exit(THERMAL_APIC_VECTOR);
	ack_APIC_irq();
}
#endif