Line data Source code
1 : /* SPDX-License-Identifier: GPL-2.0 */
2 : /* interrupt.h */
3 : #ifndef _LINUX_INTERRUPT_H
4 : #define _LINUX_INTERRUPT_H
5 :
6 : #include <linux/kernel.h>
7 : #include <linux/bitops.h>
8 : #include <linux/cpumask.h>
9 : #include <linux/irqreturn.h>
10 : #include <linux/irqnr.h>
11 : #include <linux/hardirq.h>
12 : #include <linux/irqflags.h>
13 : #include <linux/hrtimer.h>
14 : #include <linux/kref.h>
15 : #include <linux/workqueue.h>
16 :
17 : #include <linux/atomic.h>
18 : #include <asm/ptrace.h>
19 : #include <asm/irq.h>
20 : #include <asm/sections.h>
21 :
22 : /*
23 : * These correspond to the IORESOURCE_IRQ_* defines in
24 : * linux/ioport.h to select the interrupt line behaviour. When
25 : * requesting an interrupt without specifying an IRQF_TRIGGER, the
26 : * setting should be assumed to be "as already configured", which
27 : * may be as per machine or firmware initialisation.
28 : */
29 : #define IRQF_TRIGGER_NONE 0x00000000
30 : #define IRQF_TRIGGER_RISING 0x00000001
31 : #define IRQF_TRIGGER_FALLING 0x00000002
32 : #define IRQF_TRIGGER_HIGH 0x00000004
33 : #define IRQF_TRIGGER_LOW 0x00000008
34 : #define IRQF_TRIGGER_MASK (IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW | \
35 : IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)
36 : #define IRQF_TRIGGER_PROBE 0x00000010
37 :
38 : /*
39 : * These flags are used only by the kernel as part of the
40 : * irq handling routines.
41 : *
42 : * IRQF_SHARED - allow sharing the irq among several devices
43 : * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur
44 : * IRQF_TIMER - Flag to mark this interrupt as timer interrupt
45 : * IRQF_PERCPU - Interrupt is per cpu
46 : * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing
47 : * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
48 : * registered first in a shared interrupt is considered for
49 : * performance reasons)
50 : * IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler finished.
51 : * Used by threaded interrupts which need to keep the
52 : * irq line disabled until the threaded handler has been run.
53 : * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend. Does not guarantee
54 : * that this interrupt will wake the system from a suspended
55 : * state. See Documentation/power/suspend-and-interrupts.rst
56 : * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set
57 : * IRQF_NO_THREAD - Interrupt cannot be threaded
58 : * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device
59 : * resume time.
60 : * IRQF_COND_SUSPEND - If the IRQ is shared with a NO_SUSPEND user, execute this
61 : * interrupt handler after suspending interrupts. For system
62 : * wakeup devices users need to implement wakeup detection in
63 : * their interrupt handlers.
64 : */
65 : #define IRQF_SHARED 0x00000080
66 : #define IRQF_PROBE_SHARED 0x00000100
67 : #define __IRQF_TIMER 0x00000200
68 : #define IRQF_PERCPU 0x00000400
69 : #define IRQF_NOBALANCING 0x00000800
70 : #define IRQF_IRQPOLL 0x00001000
71 : #define IRQF_ONESHOT 0x00002000
72 : #define IRQF_NO_SUSPEND 0x00004000
73 : #define IRQF_FORCE_RESUME 0x00008000
74 : #define IRQF_NO_THREAD 0x00010000
75 : #define IRQF_EARLY_RESUME 0x00020000
76 : #define IRQF_COND_SUSPEND 0x00040000
77 :
78 : #define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
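/*
 * Illustrative sketch (not part of the original header): a driver for a
 * level-triggered device might combine a trigger flag with IRQF_ONESHOT
 * when it registers only a threaded handler, so the line stays masked
 * until the thread function has run. The handler name, the "mydev" string
 * and the my_dev cookie below are hypothetical.
 *
 *	err = request_threaded_irq(irq, NULL, mydev_thread_fn,
 *				   IRQF_TRIGGER_LOW | IRQF_ONESHOT,
 *				   "mydev", my_dev);
 *	if (err)
 *		return err;
 */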
79 :
80 : /*
81 : * These values can be returned by request_any_context_irq() and
82 : * describe the context the interrupt will be run in.
83 : *
84 : * IRQC_IS_HARDIRQ - interrupt runs in hardirq context
85 : * IRQC_IS_NESTED - interrupt runs in a nested threaded context
86 : */
87 : enum {
88 : IRQC_IS_HARDIRQ = 0,
89 : IRQC_IS_NESTED,
90 : };
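/*
 * Illustrative sketch: on success request_any_context_irq() (declared
 * further down) returns one of the values above instead of 0, so the
 * caller can tell whether its handler will run in hardirq or nested
 * thread context; a negative return still means failure. The names below
 * are hypothetical.
 *
 *	ret = request_any_context_irq(irq, mydev_handler, 0, "mydev", my_dev);
 *	if (ret < 0)
 *		return ret;
 *	md->handler_is_threaded = (ret == IRQC_IS_NESTED);
 */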
91 :
92 : typedef irqreturn_t (*irq_handler_t)(int, void *);
93 :
94 : /**
95 : * struct irqaction - per interrupt action descriptor
96 : * @handler: interrupt handler function
97 : * @name: name of the device
98 : * @dev_id: cookie to identify the device
99 : * @percpu_dev_id: per-CPU cookie to identify the device
100 : * @next: pointer to the next irqaction for shared interrupts
101 : * @irq: interrupt number
102 : * @flags: flags (see IRQF_* above)
103 : * @thread_fn: interrupt handler function for threaded interrupts
104 : * @thread: thread pointer for threaded interrupts
105 : * @secondary: pointer to secondary irqaction (force threading)
106 : * @thread_flags: flags related to @thread
107 : * @thread_mask: bitmask for keeping track of @thread activity
108 : * @dir: pointer to the proc/irq/NN/name entry
109 : */
110 : struct irqaction {
111 : irq_handler_t handler;
112 : void *dev_id;
113 : void __percpu *percpu_dev_id;
114 : struct irqaction *next;
115 : irq_handler_t thread_fn;
116 : struct task_struct *thread;
117 : struct irqaction *secondary;
118 : unsigned int irq;
119 : unsigned int flags;
120 : unsigned long thread_flags;
121 : unsigned long thread_mask;
122 : const char *name;
123 : struct proc_dir_entry *dir;
124 : } ____cacheline_internodealigned_in_smp;
125 :
126 : extern irqreturn_t no_action(int cpl, void *dev_id);
127 :
128 : /*
129 : * If a (PCI) device interrupt is not connected we set dev->irq to
130 : * IRQ_NOTCONNECTED. This causes request_irq() to fail with -ENOTCONN, so we
131 : * can distinguish that case from other error returns.
132 : *
133 : * 0x80000000 is guaranteed to be outside the available range of interrupts
134 : * and easy to distinguish from other possible incorrect values.
135 : */
136 : #define IRQ_NOTCONNECTED (1U << 31)
137 :
138 : extern int __must_check
139 : request_threaded_irq(unsigned int irq, irq_handler_t handler,
140 : irq_handler_t thread_fn,
141 : unsigned long flags, const char *name, void *dev);
142 :
143 : /**
144 : * request_irq - Add a handler for an interrupt line
145 : * @irq: The interrupt line to allocate
146 : * @handler: Function to be called when the IRQ occurs.
147 : * Primary handler for threaded interrupts
148 : * If NULL, the default primary handler is installed
149 : * @flags: Handling flags
150 : * @name: Name of the device generating this interrupt
151 : * @dev: A cookie passed to the handler function
152 : *
153 : * This call allocates an interrupt and establishes a handler; see
154 : * the documentation for request_threaded_irq() for details.
155 : */
156 : static inline int __must_check
157 4 : request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
158 : const char *name, void *dev)
159 : {
160 4 : return request_threaded_irq(irq, handler, NULL, flags, name, dev);
161 : }
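/*
 * Illustrative sketch: a minimal handler paired with request_irq() and
 * free_irq(). Returning IRQ_NONE tells the core the device did not raise
 * the interrupt, which matters on shared lines. All mydev_* names are
 * hypothetical, and the dev cookie must stay valid until free_irq()
 * returns.
 *
 *	static irqreturn_t mydev_interrupt(int irq, void *dev_id)
 *	{
 *		struct my_dev *md = dev_id;
 *
 *		if (!mydev_irq_pending(md))
 *			return IRQ_NONE;
 *		mydev_ack_irq(md);
 *		return IRQ_HANDLED;
 *	}
 *
 *	err = request_irq(irq, mydev_interrupt, IRQF_SHARED, "mydev", md);
 *	...
 *	free_irq(irq, md);
 */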
162 :
163 : extern int __must_check
164 : request_any_context_irq(unsigned int irq, irq_handler_t handler,
165 : unsigned long flags, const char *name, void *dev_id);
166 :
167 : extern int __must_check
168 : __request_percpu_irq(unsigned int irq, irq_handler_t handler,
169 : unsigned long flags, const char *devname,
170 : void __percpu *percpu_dev_id);
171 :
172 : extern int __must_check
173 : request_nmi(unsigned int irq, irq_handler_t handler, unsigned long flags,
174 : const char *name, void *dev);
175 :
176 : static inline int __must_check
177 : request_percpu_irq(unsigned int irq, irq_handler_t handler,
178 : const char *devname, void __percpu *percpu_dev_id)
179 : {
180 : return __request_percpu_irq(irq, handler, 0,
181 : devname, percpu_dev_id);
182 : }
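/*
 * Illustrative sketch: per-CPU interrupts take a __percpu cookie and, once
 * requested, must also be enabled on every CPU that should receive them
 * (see enable_percpu_irq()/disable_percpu_irq() below). The names are
 * hypothetical; IRQ_TYPE_NONE comes from <linux/irq.h>.
 *
 *	static DEFINE_PER_CPU(struct mydev_pcpu, mydev_pcpu_data);
 *
 *	err = request_percpu_irq(irq, mydev_percpu_handler, "mydev",
 *				 &mydev_pcpu_data);
 *	if (err)
 *		return err;
 *	Then, on each CPU (e.g. from a CPU hotplug callback):
 *	enable_percpu_irq(irq, IRQ_TYPE_NONE);
 *	and on teardown:
 *	disable_percpu_irq(irq);
 *	free_percpu_irq(irq, &mydev_pcpu_data);
 */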
183 :
184 : extern int __must_check
185 : request_percpu_nmi(unsigned int irq, irq_handler_t handler,
186 : const char *devname, void __percpu *dev);
187 :
188 : extern const void *free_irq(unsigned int, void *);
189 : extern void free_percpu_irq(unsigned int, void __percpu *);
190 :
191 : extern const void *free_nmi(unsigned int irq, void *dev_id);
192 : extern void free_percpu_nmi(unsigned int irq, void __percpu *percpu_dev_id);
193 :
194 : struct device;
195 :
196 : extern int __must_check
197 : devm_request_threaded_irq(struct device *dev, unsigned int irq,
198 : irq_handler_t handler, irq_handler_t thread_fn,
199 : unsigned long irqflags, const char *devname,
200 : void *dev_id);
201 :
202 : static inline int __must_check
203 : devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler,
204 : unsigned long irqflags, const char *devname, void *dev_id)
205 : {
206 : return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags,
207 : devname, dev_id);
208 : }
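/*
 * Illustrative sketch: the devm_* variants tie the interrupt's lifetime to
 * a struct device, so no explicit free_irq() is needed on the error or
 * remove paths. mydev_interrupt and "mydev" are hypothetical;
 * platform_get_irq() comes from <linux/platform_device.h>.
 *
 *	static int mydev_probe(struct platform_device *pdev)
 *	{
 *		int irq = platform_get_irq(pdev, 0);
 *
 *		if (irq < 0)
 *			return irq;
 *		return devm_request_irq(&pdev->dev, irq, mydev_interrupt,
 *					0, "mydev", pdev);
 *	}
 */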
209 :
210 : extern int __must_check
211 : devm_request_any_context_irq(struct device *dev, unsigned int irq,
212 : irq_handler_t handler, unsigned long irqflags,
213 : const char *devname, void *dev_id);
214 :
215 : extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);
216 :
217 : /*
218 : * On lockdep we don't want to enable hardirqs in hardirq
219 : * context. Use local_irq_enable_in_hardirq() to annotate
220 : * kernel code that has to do this nevertheless (pretty much
221 : * the only valid case is for old/broken hardware that is
222 : * insanely slow).
223 : *
224 : * NOTE: in theory this might break fragile code that relies
225 : * on hardirq delivery - in practice we don't seem to have such
226 : * places left. So the only effect should be slightly increased
227 : * irqs-off latencies.
228 : */
229 : #ifdef CONFIG_LOCKDEP
230 : # define local_irq_enable_in_hardirq() do { } while (0)
231 : #else
232 : # define local_irq_enable_in_hardirq() local_irq_enable()
233 : #endif
234 :
235 : bool irq_has_action(unsigned int irq);
236 : extern void disable_irq_nosync(unsigned int irq);
237 : extern bool disable_hardirq(unsigned int irq);
238 : extern void disable_irq(unsigned int irq);
239 : extern void disable_percpu_irq(unsigned int irq);
240 : extern void enable_irq(unsigned int irq);
241 : extern void enable_percpu_irq(unsigned int irq, unsigned int type);
242 : extern bool irq_percpu_is_enabled(unsigned int irq);
243 : extern void irq_wake_thread(unsigned int irq, void *dev_id);
244 :
245 : extern void disable_nmi_nosync(unsigned int irq);
246 : extern void disable_percpu_nmi(unsigned int irq);
247 : extern void enable_nmi(unsigned int irq);
248 : extern void enable_percpu_nmi(unsigned int irq, unsigned int type);
249 : extern int prepare_percpu_nmi(unsigned int irq);
250 : extern void teardown_percpu_nmi(unsigned int irq);
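/*
 * Illustrative sketch of the per-CPU NMI call order (see kernel/irq/manage.c
 * for the authoritative rules): the line is requested once, then prepared
 * and enabled on each CPU from non-preemptible context, and torn down in
 * the reverse order before being freed. All names are hypothetical.
 *
 *	err = request_percpu_nmi(irq, mydev_nmi_handler, "mydev", &pcpu_data);
 *	Then, on each CPU (non-preemptible context):
 *	err = prepare_percpu_nmi(irq);
 *	if (!err)
 *		enable_percpu_nmi(irq, IRQ_TYPE_NONE);
 *	And on teardown, per CPU:
 *	disable_percpu_nmi(irq);
 *	teardown_percpu_nmi(irq);
 *	followed once by:
 *	free_percpu_nmi(irq, &pcpu_data);
 */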
251 :
252 : extern int irq_inject_interrupt(unsigned int irq);
253 :
254 : /* The following three functions are for the core kernel use only. */
255 : extern void suspend_device_irqs(void);
256 : extern void resume_device_irqs(void);
257 : extern void rearm_wake_irq(unsigned int irq);
258 :
259 : /**
260 : * struct irq_affinity_notify - context for notification of IRQ affinity changes
261 : * @irq: Interrupt to which notification applies
262 : * @kref: Reference count, for internal use
263 : * @work: Work item, for internal use
264 : * @notify: Function to be called on change. This will be
265 : * called in process context.
266 : * @release: Function to be called on release. This will be
267 : * called in process context. Once registered, the
268 : * structure must only be freed when this function is
269 : * called or later.
270 : */
271 : struct irq_affinity_notify {
272 : unsigned int irq;
273 : struct kref kref;
274 : struct work_struct work;
275 : void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
276 : void (*release)(struct kref *ref);
277 : };
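/*
 * Illustrative sketch: a driver that wants to track affinity changes embeds
 * this structure, fills in @notify and @release, and registers it with
 * irq_set_affinity_notifier() (declared below for SMP builds). The my_dev
 * names are hypothetical.
 *
 *	static void mydev_affinity_notify(struct irq_affinity_notify *notify,
 *					  const cpumask_t *mask)
 *	{
 *		struct my_dev *md = container_of(notify, struct my_dev,
 *						 affinity_notify);
 *		... rebalance queues for the new mask, in process context ...
 *	}
 *
 *	static void mydev_affinity_release(struct kref *ref)
 *	{
 *		... nothing to free when the structure is embedded ...
 *	}
 *
 *	md->affinity_notify.notify = mydev_affinity_notify;
 *	md->affinity_notify.release = mydev_affinity_release;
 *	irq_set_affinity_notifier(irq, &md->affinity_notify);
 */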
278 :
279 : #define IRQ_AFFINITY_MAX_SETS 4
280 :
281 : /**
282 : * struct irq_affinity - Description for automatic irq affinity assignments
283 : * @pre_vectors: Don't apply affinity to @pre_vectors at beginning of
284 : * the MSI(-X) vector space
285 : * @post_vectors: Don't apply affinity to @post_vectors at end of
286 : * the MSI(-X) vector space
287 : * @nr_sets: The number of interrupt sets for which affinity
288 : * spreading is required
289 : * @set_size: Array holding the size of each interrupt set
290 : * @calc_sets: Callback for calculating the number and size
291 : * of interrupt sets
292 : * @priv: Private data for usage by @calc_sets, usually a
293 : * pointer to driver/device specific data.
294 : */
295 : struct irq_affinity {
296 : unsigned int pre_vectors;
297 : unsigned int post_vectors;
298 : unsigned int nr_sets;
299 : unsigned int set_size[IRQ_AFFINITY_MAX_SETS];
300 : void (*calc_sets)(struct irq_affinity *, unsigned int nvecs);
301 : void *priv;
302 : };
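/*
 * Illustrative sketch: a typical consumer is PCI MSI-X allocation, where
 * @pre_vectors reserves vectors (e.g. an admin queue interrupt) that are
 * excluded from affinity spreading. pci_alloc_irq_vectors_affinity() lives
 * in <linux/pci.h> and is shown here only as an example caller; the values
 * are hypothetical.
 *
 *	struct irq_affinity affd = {
 *		.pre_vectors = 1,
 *	};
 *	int nvecs;
 *
 *	nvecs = pci_alloc_irq_vectors_affinity(pdev, 2, max_vecs,
 *					       PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
 *					       &affd);
 *	if (nvecs < 0)
 *		return nvecs;
 */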
303 :
304 : /**
305 : * struct irq_affinity_desc - Interrupt affinity descriptor
306 : * @mask: cpumask to hold the affinity assignment
307 : * @is_managed: 1 if the interrupt is managed internally
308 : */
309 : struct irq_affinity_desc {
310 : struct cpumask mask;
311 : unsigned int is_managed : 1;
312 : };
313 :
314 : #if defined(CONFIG_SMP)
315 :
316 : extern cpumask_var_t irq_default_affinity;
317 :
318 : /* Internal implementation. Use the helpers below */
319 : extern int __irq_set_affinity(unsigned int irq, const struct cpumask *cpumask,
320 : bool force);
321 :
322 : /**
323 : * irq_set_affinity - Set the irq affinity of a given irq
324 : * @irq: Interrupt to set affinity
325 : * @cpumask: cpumask
326 : *
327 : * Fails if cpumask does not contain an online CPU
328 : */
329 : static inline int
330 0 : irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
331 : {
332 0 : return __irq_set_affinity(irq, cpumask, false);
333 : }
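/*
 * Illustrative sketch: pinning an interrupt to one online CPU; irq and cpu
 * are hypothetical. Drivers that only want to express a preference should
 * use irq_set_affinity_hint() and leave the final placement to user space
 * (e.g. irqbalance).
 *
 *	err = irq_set_affinity(irq, cpumask_of(cpu));
 *	if (err)
 *		pr_warn("IRQ %u: cannot move to CPU %u\n", irq, cpu);
 */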
334 :
335 : /**
336 : * irq_force_affinity - Force the irq affinity of a given irq
337 : * @irq: Interrupt to set affinity
338 : * @cpumask: cpumask
339 : *
340 : * Same as irq_set_affinity, but without checking the mask against
341 : * online cpus.
342 : *
343 : * Solely for low level cpu hotplug code, where we need to make per
344 : * cpu interrupts affine before the cpu becomes online.
345 : */
346 : static inline int
347 : irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
348 : {
349 : return __irq_set_affinity(irq, cpumask, true);
350 : }
351 :
352 : extern int irq_can_set_affinity(unsigned int irq);
353 : extern int irq_select_affinity(unsigned int irq);
354 :
355 : extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);
356 : extern int irq_update_affinity_desc(unsigned int irq,
357 : struct irq_affinity_desc *affinity);
358 :
359 : extern int
360 : irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);
361 :
362 : struct irq_affinity_desc *
363 : irq_create_affinity_masks(unsigned int nvec, struct irq_affinity *affd);
364 :
365 : unsigned int irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
366 : const struct irq_affinity *affd);
367 :
368 : #else /* CONFIG_SMP */
369 :
370 : static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
371 : {
372 : return -EINVAL;
373 : }
374 :
375 : static inline int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
376 : {
377 : return 0;
378 : }
379 :
380 : static inline int irq_can_set_affinity(unsigned int irq)
381 : {
382 : return 0;
383 : }
384 :
385 : static inline int irq_select_affinity(unsigned int irq) { return 0; }
386 :
387 : static inline int irq_set_affinity_hint(unsigned int irq,
388 : const struct cpumask *m)
389 : {
390 : return -EINVAL;
391 : }
392 :
393 : static inline int irq_update_affinity_desc(unsigned int irq,
394 : struct irq_affinity_desc *affinity)
395 : {
396 : return -EINVAL;
397 : }
398 :
399 : static inline int
400 : irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
401 : {
402 : return 0;
403 : }
404 :
405 : static inline struct irq_affinity_desc *
406 : irq_create_affinity_masks(unsigned int nvec, struct irq_affinity *affd)
407 : {
408 : return NULL;
409 : }
410 :
411 : static inline unsigned int
412 : irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
413 : const struct irq_affinity *affd)
414 : {
415 : return maxvec;
416 : }
417 :
418 : #endif /* CONFIG_SMP */
419 :
420 : /*
421 : * Special lockdep variants of irq disabling/enabling.
422 : * These should be used for locking constructs that
423 : * know that a particular irq context is disabled,
424 : * and that it is the only irq-context user of a lock,
425 : * so that it's safe to take the lock in the irq-disabled
426 : * section without disabling hardirqs.
427 : *
428 : * On !CONFIG_LOCKDEP they are equivalent to the normal
429 : * irq disable/enable methods.
430 : */
431 : static inline void disable_irq_nosync_lockdep(unsigned int irq)
432 : {
433 : disable_irq_nosync(irq);
434 : #ifdef CONFIG_LOCKDEP
435 : local_irq_disable();
436 : #endif
437 : }
438 :
439 : static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags)
440 : {
441 : disable_irq_nosync(irq);
442 : #ifdef CONFIG_LOCKDEP
443 : local_irq_save(*flags);
444 : #endif
445 : }
446 :
447 : static inline void disable_irq_lockdep(unsigned int irq)
448 : {
449 : disable_irq(irq);
450 : #ifdef CONFIG_LOCKDEP
451 : local_irq_disable();
452 : #endif
453 : }
454 :
455 : static inline void enable_irq_lockdep(unsigned int irq)
456 : {
457 : #ifdef CONFIG_LOCKDEP
458 : local_irq_enable();
459 : #endif
460 : enable_irq(irq);
461 : }
462 :
463 : static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags)
464 : {
465 : #ifdef CONFIG_LOCKDEP
466 : local_irq_restore(*flags);
467 : #endif
468 : enable_irq(irq);
469 : }
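/*
 * Illustrative sketch: the _lockdep variants are used in pairs around a
 * critical section whose lock is otherwise taken only by the handler of
 * the interrupt line that has just been disabled; mydev and its lock are
 * hypothetical.
 *
 *	unsigned long flags;
 *
 *	disable_irq_nosync_lockdep_irqsave(irq, &flags);
 *	spin_lock(&mydev->lock);
 *	... touch state shared with the interrupt handler ...
 *	spin_unlock(&mydev->lock);
 *	enable_irq_lockdep_irqrestore(irq, &flags);
 */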
470 :
471 : /* IRQ wakeup (PM) control: */
472 : extern int irq_set_irq_wake(unsigned int irq, unsigned int on);
473 :
474 0 : static inline int enable_irq_wake(unsigned int irq)
475 : {
476 0 : return irq_set_irq_wake(irq, 1);
477 : }
478 :
479 0 : static inline int disable_irq_wake(unsigned int irq)
480 : {
481 0 : return irq_set_irq_wake(irq, 0);
482 : }
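/*
 * Illustrative sketch: a driver usually arms its interrupt as a wakeup
 * source in its suspend callback and disarms it on resume, guarded by
 * device_may_wakeup() from <linux/pm_wakeup.h>. The mydev names are
 * hypothetical.
 *
 *	static int mydev_suspend(struct device *dev)
 *	{
 *		struct my_dev *md = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			enable_irq_wake(md->irq);
 *		return 0;
 *	}
 *
 *	static int mydev_resume(struct device *dev)
 *	{
 *		struct my_dev *md = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			disable_irq_wake(md->irq);
 *		return 0;
 *	}
 */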
483 :
484 : /*
485 : * irq_get_irqchip_state/irq_set_irqchip_state specific flags
486 : */
487 : enum irqchip_irq_state {
488 : IRQCHIP_STATE_PENDING, /* Is interrupt pending? */
489 : IRQCHIP_STATE_ACTIVE, /* Is interrupt in progress? */
490 : IRQCHIP_STATE_MASKED, /* Is interrupt masked? */
491 : IRQCHIP_STATE_LINE_LEVEL, /* Is IRQ line high? */
492 : };
493 :
494 : extern int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
495 : bool *state);
496 : extern int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
497 : bool state);
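/*
 * Illustrative sketch: querying whether an interrupt is still pending at
 * the irqchip before declaring a device idle; irq is hypothetical and the
 * -EBUSY policy is just one possible choice.
 *
 *	bool pending;
 *	int err;
 *
 *	err = irq_get_irqchip_state(irq, IRQCHIP_STATE_PENDING, &pending);
 *	if (err)
 *		return err;
 *	if (pending)
 *		return -EBUSY;
 */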
498 :
499 : #ifdef CONFIG_IRQ_FORCED_THREADING
500 : # ifdef CONFIG_PREEMPT_RT
501 : # define force_irqthreads (true)
502 : # else
503 : extern bool force_irqthreads;
504 : # endif
505 : #else
506 : #define force_irqthreads (0)
507 : #endif
508 :
509 : #ifndef local_softirq_pending
510 :
511 : #ifndef local_softirq_pending_ref
512 : #define local_softirq_pending_ref irq_stat.__softirq_pending
513 : #endif
514 :
515 : #define local_softirq_pending() (__this_cpu_read(local_softirq_pending_ref))
516 : #define set_softirq_pending(x) (__this_cpu_write(local_softirq_pending_ref, (x)))
517 : #define or_softirq_pending(x) (__this_cpu_or(local_softirq_pending_ref, (x)))
518 :
519 : #endif /* local_softirq_pending */
520 :
521 : /* Some architectures might implement lazy enabling/disabling of
522 : * interrupts. In some cases, such as stop_machine, we might want
523 : * to ensure that after a local_irq_disable(), interrupts have
524 : * really been disabled in hardware. Such architectures need to
525 : * implement the following hook.
526 : */
527 : #ifndef hard_irq_disable
528 : #define hard_irq_disable() do { } while(0)
529 : #endif
530 :
531 : /* PLEASE avoid allocating new softirqs unless you really need
532 : high-frequency threaded job scheduling. For almost all purposes
533 : tasklets are more than enough; e.g. all serial device BHs et
534 : al. should be converted to tasklets, not to softirqs.
535 : */
536 :
537 : enum
538 : {
539 : HI_SOFTIRQ=0,
540 : TIMER_SOFTIRQ,
541 : NET_TX_SOFTIRQ,
542 : NET_RX_SOFTIRQ,
543 : BLOCK_SOFTIRQ,
544 : IRQ_POLL_SOFTIRQ,
545 : TASKLET_SOFTIRQ,
546 : SCHED_SOFTIRQ,
547 : HRTIMER_SOFTIRQ,
548 : RCU_SOFTIRQ, /* Preferably RCU should always be the last softirq */
549 :
550 : NR_SOFTIRQS
551 : };
552 :
553 : #define SOFTIRQ_STOP_IDLE_MASK (~(1 << RCU_SOFTIRQ))
554 :
555 : /* map softirq index to softirq name. update 'softirq_to_name' in
556 : * kernel/softirq.c when adding a new softirq.
557 : */
558 : extern const char * const softirq_to_name[NR_SOFTIRQS];
559 :
560 : /* softirq mask and active fields moved to irq_cpustat_t in
561 : * asm/hardirq.h to get better cache usage. KAO
562 : */
563 :
564 : struct softirq_action
565 : {
566 : void (*action)(struct softirq_action *);
567 : };
568 :
569 : asmlinkage void do_softirq(void);
570 : asmlinkage void __do_softirq(void);
571 :
572 : extern void open_softirq(int nr, void (*action)(struct softirq_action *));
573 : extern void softirq_init(void);
574 : extern void __raise_softirq_irqoff(unsigned int nr);
575 :
576 : extern void raise_softirq_irqoff(unsigned int nr);
577 : extern void raise_softirq(unsigned int nr);
578 :
579 : DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
580 :
581 39 : static inline struct task_struct *this_cpu_ksoftirqd(void)
582 : {
583 39 : return this_cpu_read(ksoftirqd);
584 : }
585 :
586 : /* Tasklets --- multithreaded analogue of BHs.
587 :
588 : This API is deprecated. Please consider using threaded IRQs instead:
589 : https://lore.kernel.org/lkml/20200716081538.2sivhkj4hcyrusem@linutronix.de
590 :
591 : The main difference from generic softirqs: a given tasklet
592 : runs on only one CPU at a time.
593 :
594 : The main difference from BHs: different tasklets
595 : may run simultaneously on different CPUs.
596 :
597 : Properties:
598 : * If tasklet_schedule() is called, the tasklet is guaranteed
599 : to be executed on some CPU at least once afterwards.
600 : * If the tasklet is already scheduled but its execution has not
601 : started yet, it will be executed only once.
602 : * If the tasklet is already running on another CPU (or schedule is called
603 : from the tasklet itself), it is rescheduled for later.
604 : * A tasklet is strictly serialized with respect to itself, but not
605 : with respect to other tasklets. If a client needs inter-tasklet
606 : synchronization, it must provide it with spinlocks.
607 : */
608 :
609 : struct tasklet_struct
610 : {
611 : struct tasklet_struct *next;
612 : unsigned long state;
613 : atomic_t count;
614 : bool use_callback;
615 : union {
616 : void (*func)(unsigned long data);
617 : void (*callback)(struct tasklet_struct *t);
618 : };
619 : unsigned long data;
620 : };
621 :
622 : #define DECLARE_TASKLET(name, _callback) \
623 : struct tasklet_struct name = { \
624 : .count = ATOMIC_INIT(0), \
625 : .callback = _callback, \
626 : .use_callback = true, \
627 : }
628 :
629 : #define DECLARE_TASKLET_DISABLED(name, _callback) \
630 : struct tasklet_struct name = { \
631 : .count = ATOMIC_INIT(1), \
632 : .callback = _callback, \
633 : .use_callback = true, \
634 : }
635 :
636 : #define from_tasklet(var, callback_tasklet, tasklet_fieldname) \
637 : container_of(callback_tasklet, typeof(*var), tasklet_fieldname)
638 :
639 : #define DECLARE_TASKLET_OLD(name, _func) \
640 : struct tasklet_struct name = { \
641 : .count = ATOMIC_INIT(0), \
642 : .func = _func, \
643 : }
644 :
645 : #define DECLARE_TASKLET_DISABLED_OLD(name, _func) \
646 : struct tasklet_struct name = { \
647 : .count = ATOMIC_INIT(1), \
648 : .func = _func, \
649 : }
650 :
651 : enum
652 : {
653 : TASKLET_STATE_SCHED, /* Tasklet is scheduled for execution */
654 : TASKLET_STATE_RUN /* Tasklet is running (SMP only) */
655 : };
656 :
657 : #ifdef CONFIG_SMP
658 : static inline int tasklet_trylock(struct tasklet_struct *t)
659 : {
660 : return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
661 : }
662 :
663 : static inline void tasklet_unlock(struct tasklet_struct *t)
664 : {
665 : smp_mb__before_atomic();
666 : clear_bit(TASKLET_STATE_RUN, &(t)->state);
667 : }
668 :
669 0 : static inline void tasklet_unlock_wait(struct tasklet_struct *t)
670 : {
671 0 : while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
672 0 : }
673 : #else
674 : #define tasklet_trylock(t) 1
675 : #define tasklet_unlock_wait(t) do { } while (0)
676 : #define tasklet_unlock(t) do { } while (0)
677 : #endif
678 :
679 : extern void __tasklet_schedule(struct tasklet_struct *t);
680 :
681 59 : static inline void tasklet_schedule(struct tasklet_struct *t)
682 : {
683 59 : if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
684 59 : __tasklet_schedule(t);
685 59 : }
686 :
687 : extern void __tasklet_hi_schedule(struct tasklet_struct *t);
688 :
689 : static inline void tasklet_hi_schedule(struct tasklet_struct *t)
690 : {
691 : if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
692 : __tasklet_hi_schedule(t);
693 : }
694 :
695 0 : static inline void tasklet_disable_nosync(struct tasklet_struct *t)
696 : {
697 0 : atomic_inc(&t->count);
698 0 : smp_mb__after_atomic();
699 0 : }
700 :
701 0 : static inline void tasklet_disable(struct tasklet_struct *t)
702 : {
703 0 : tasklet_disable_nosync(t);
704 0 : tasklet_unlock_wait(t);
705 0 : smp_mb();
706 0 : }
707 :
708 1 : static inline void tasklet_enable(struct tasklet_struct *t)
709 : {
710 1 : smp_mb__before_atomic();
711 1 : atomic_dec(&t->count);
712 1 : }
713 :
714 : extern void tasklet_kill(struct tasklet_struct *t);
715 : extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
716 : extern void tasklet_init(struct tasklet_struct *t,
717 : void (*func)(unsigned long), unsigned long data);
718 : extern void tasklet_setup(struct tasklet_struct *t,
719 : void (*callback)(struct tasklet_struct *));
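/*
 * Illustrative sketch of the callback-style API above (note that the
 * tasklet API as a whole is deprecated in favour of threaded IRQs); the
 * my_dev names are hypothetical.
 *
 *	struct my_dev {
 *		struct tasklet_struct tasklet;
 *		...
 *	};
 *
 *	static void mydev_tasklet_fn(struct tasklet_struct *t)
 *	{
 *		struct my_dev *md = from_tasklet(md, t, tasklet);
 *
 *		... bottom-half work, runs in softirq context ...
 *	}
 *
 *	tasklet_setup(&md->tasklet, mydev_tasklet_fn);
 *	tasklet_schedule(&md->tasklet);
 *	...
 *	tasklet_kill(&md->tasklet);
 */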
720 :
721 : /*
722 : * Autoprobing for irqs:
723 : *
724 : * probe_irq_on() and probe_irq_off() provide robust primitives
725 : * for accurate IRQ probing during kernel initialization. They are
726 : * reasonably simple to use, are not "fooled" by spurious interrupts,
727 : * and, unlike other attempts at IRQ probing, they do not get hung on
728 : * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards).
729 : *
730 : * For reasonably foolproof probing, use them as follows:
731 : *
732 : * 1. clear and/or mask the device's internal interrupt.
733 : * 2. sti();
734 : * 3. irqs = probe_irq_on(); // "take over" all unassigned idle IRQs
735 : * 4. enable the device and cause it to trigger an interrupt.
736 : * 5. wait for the device to interrupt, using non-intrusive polling or a delay.
737 : * 6. irq = probe_irq_off(irqs); // get IRQ number, 0=none, negative=multiple
738 : * 7. service the device to clear its pending interrupt.
739 : * 8. loop again if paranoia is required.
740 : *
741 : * probe_irq_on() returns a mask of allocated irq's.
742 : *
743 : * probe_irq_off() takes the mask as a parameter,
744 : * and returns the irq number which occurred,
745 : * or zero if none occurred, or a negative irq number
746 : * if more than one irq occurred.
747 : */
748 :
749 : #if !defined(CONFIG_GENERIC_IRQ_PROBE)
750 : static inline unsigned long probe_irq_on(void)
751 : {
752 : return 0;
753 : }
754 : static inline int probe_irq_off(unsigned long val)
755 : {
756 : return 0;
757 : }
758 : static inline unsigned int probe_irq_mask(unsigned long val)
759 : {
760 : return 0;
761 : }
762 : #else
763 : extern unsigned long probe_irq_on(void); /* returns 0 on failure */
764 : extern int probe_irq_off(unsigned long); /* returns 0 or negative on failure */
765 : extern unsigned int probe_irq_mask(unsigned long); /* returns mask of ISA interrupts */
766 : #endif
767 :
768 : #ifdef CONFIG_PROC_FS
769 : /* Initialize /proc/irq/ */
770 : extern void init_irq_proc(void);
771 : #else
772 : static inline void init_irq_proc(void)
773 : {
774 : }
775 : #endif
776 :
777 : #ifdef CONFIG_IRQ_TIMINGS
778 : void irq_timings_enable(void);
779 : void irq_timings_disable(void);
780 : u64 irq_timings_next_event(u64 now);
781 : #endif
782 :
783 : struct seq_file;
784 : int show_interrupts(struct seq_file *p, void *v);
785 : int arch_show_interrupts(struct seq_file *p, int prec);
786 :
787 : extern int early_irq_init(void);
788 : extern int arch_probe_nr_irqs(void);
789 : extern int arch_early_irq_init(void);
790 :
791 : /*
792 : * We want to know which function is an entrypoint of a hardirq or a softirq.
793 : */
794 : #ifndef __irq_entry
795 : # define __irq_entry __section(".irqentry.text")
796 : #endif
797 :
798 : #define __softirq_entry __section(".softirqentry.text")
799 :
800 : #endif