LCOV - code coverage report
Current view: top level - include/linux - nmi.h (source / functions)
Test: landlock.info
Date: 2021-04-22 12:43:58

                  Hit    Total    Coverage
Lines:             11       21      52.4 %
Functions:          0        1       0.0 %

          Line data    Source code
       1             : /* SPDX-License-Identifier: GPL-2.0 */
       2             : /*
       3             :  *  linux/include/linux/nmi.h
       4             :  */
       5             : #ifndef LINUX_NMI_H
       6             : #define LINUX_NMI_H
       7             : 
       8             : #include <linux/sched.h>
       9             : #include <asm/irq.h>
      10             : #if defined(CONFIG_HAVE_NMI_WATCHDOG)
      11             : #include <asm/nmi.h>
      12             : #endif
      13             : 
      14             : #ifdef CONFIG_LOCKUP_DETECTOR
      15             : void lockup_detector_init(void);
      16             : void lockup_detector_soft_poweroff(void);
      17             : void lockup_detector_cleanup(void);
      18             : bool is_hardlockup(void);
      19             : 
      20             : extern int watchdog_user_enabled;
      21             : extern int nmi_watchdog_user_enabled;
      22             : extern int soft_watchdog_user_enabled;
      23             : extern int watchdog_thresh;
      24             : extern unsigned long watchdog_enabled;
      25             : 
      26             : extern struct cpumask watchdog_cpumask;
      27             : extern unsigned long *watchdog_cpumask_bits;
      28             : #ifdef CONFIG_SMP
      29             : extern int sysctl_softlockup_all_cpu_backtrace;
      30             : extern int sysctl_hardlockup_all_cpu_backtrace;
      31             : #else
      32             : #define sysctl_softlockup_all_cpu_backtrace 0
      33             : #define sysctl_hardlockup_all_cpu_backtrace 0
      34             : #endif /* !CONFIG_SMP */
      35             : 
      36             : #else /* CONFIG_LOCKUP_DETECTOR */
      37           1 : static inline void lockup_detector_init(void) { }
      38             : static inline void lockup_detector_soft_poweroff(void) { }
      39           0 : static inline void lockup_detector_cleanup(void) { }
      40             : #endif /* !CONFIG_LOCKUP_DETECTOR */
      41             : 
      42             : #ifdef CONFIG_SOFTLOCKUP_DETECTOR
      43             : extern void touch_softlockup_watchdog_sched(void);
      44             : extern void touch_softlockup_watchdog(void);
      45             : extern void touch_softlockup_watchdog_sync(void);
      46             : extern void touch_all_softlockup_watchdogs(void);
      47             : extern unsigned int  softlockup_panic;
      48             : 
      49             : extern int lockup_detector_online_cpu(unsigned int cpu);
      50             : extern int lockup_detector_offline_cpu(unsigned int cpu);
      51             : #else /* CONFIG_SOFTLOCKUP_DETECTOR */
      52        1568 : static inline void touch_softlockup_watchdog_sched(void) { }
      53       53685 : static inline void touch_softlockup_watchdog(void) { }
      54           0 : static inline void touch_softlockup_watchdog_sync(void) { }
      55           0 : static inline void touch_all_softlockup_watchdogs(void) { }
      56             : 
      57             : #define lockup_detector_online_cpu      NULL
      58             : #define lockup_detector_offline_cpu     NULL
      59             : #endif /* CONFIG_SOFTLOCKUP_DETECTOR */
      60             : 
      61             : #ifdef CONFIG_DETECT_HUNG_TASK
      62             : void reset_hung_task_detector(void);
      63             : #else
      64           0 : static inline void reset_hung_task_detector(void) { }
      65             : #endif
      66             : 
      67             : /*
      68             :  * The run state of the lockup detectors is controlled by the content of the
      69             :  * 'watchdog_enabled' variable. Each lockup detector has its dedicated bit -
      70             :  * bit 0 for the hard lockup detector and bit 1 for the soft lockup detector.
      71             :  *
      72             :  * 'watchdog_user_enabled', 'nmi_watchdog_user_enabled' and
      73             :  * 'soft_watchdog_user_enabled' are variables that are only used as an
      74             :  * 'interface' between the parameters in /proc/sys/kernel and the internal
      75             :  * state bits in 'watchdog_enabled'. The 'watchdog_thresh' variable is
      76             :  * handled differently because its value is not boolean, and the lockup
       77             :  * detectors are 'suspended' while 'watchdog_thresh' is equal to zero.
      78             :  */
      79             : #define NMI_WATCHDOG_ENABLED_BIT   0
      80             : #define SOFT_WATCHDOG_ENABLED_BIT  1
      81             : #define NMI_WATCHDOG_ENABLED      (1 << NMI_WATCHDOG_ENABLED_BIT)
      82             : #define SOFT_WATCHDOG_ENABLED     (1 << SOFT_WATCHDOG_ENABLED_BIT)
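
A minimal sketch of how the bits above are meant to be combined and tested; the
local variable below is hypothetical and not part of nmi.h:

        /* Enable both detectors, then test each bit individually. */
        unsigned long enabled = NMI_WATCHDOG_ENABLED | SOFT_WATCHDOG_ENABLED;

        if (enabled & NMI_WATCHDOG_ENABLED)
                ;       /* hard lockup detector is switched on (bit 0) */
        if (enabled & SOFT_WATCHDOG_ENABLED)
                ;       /* soft lockup detector is switched on (bit 1) */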
      83             : 
      84             : #if defined(CONFIG_HARDLOCKUP_DETECTOR)
      85             : extern void hardlockup_detector_disable(void);
      86             : extern unsigned int hardlockup_panic;
      87             : #else
      88           1 : static inline void hardlockup_detector_disable(void) {}
      89             : #endif
      90             : 
      91             : #if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR)
      92             : # define NMI_WATCHDOG_SYSCTL_PERM       0644
      93             : #else
      94             : # define NMI_WATCHDOG_SYSCTL_PERM       0444
      95             : #endif
      96             : 
      97             : #if defined(CONFIG_HARDLOCKUP_DETECTOR_PERF)
      98             : extern void arch_touch_nmi_watchdog(void);
      99             : extern void hardlockup_detector_perf_stop(void);
     100             : extern void hardlockup_detector_perf_restart(void);
     101             : extern void hardlockup_detector_perf_disable(void);
     102             : extern void hardlockup_detector_perf_enable(void);
     103             : extern void hardlockup_detector_perf_cleanup(void);
     104             : extern int hardlockup_detector_perf_init(void);
     105             : #else
     106           1 : static inline void hardlockup_detector_perf_stop(void) { }
     107           1 : static inline void hardlockup_detector_perf_restart(void) { }
     108             : static inline void hardlockup_detector_perf_disable(void) { }
     109             : static inline void hardlockup_detector_perf_enable(void) { }
     110             : static inline void hardlockup_detector_perf_cleanup(void) { }
     111             : # if !defined(CONFIG_HAVE_NMI_WATCHDOG)
     112             : static inline int hardlockup_detector_perf_init(void) { return -ENODEV; }
     113       53685 : static inline void arch_touch_nmi_watchdog(void) {}
     114             : # else
     115             : static inline int hardlockup_detector_perf_init(void) { return 0; }
     116             : # endif
     117             : #endif
     118             : 
     119             : void watchdog_nmi_stop(void);
     120             : void watchdog_nmi_start(void);
     121             : int watchdog_nmi_probe(void);
     122             : int watchdog_nmi_enable(unsigned int cpu);
     123             : void watchdog_nmi_disable(unsigned int cpu);
     124             : 
     125             : /**
     126             :  * touch_nmi_watchdog - restart NMI watchdog timeout.
     127             :  *
     128             :  * If the architecture supports the NMI watchdog, touch_nmi_watchdog()
     129             :  * may be used to reset the timeout - for code which intentionally
     130             :  * disables interrupts for a long time. This call is stateless.
     131             :  */
     132       53685 : static inline void touch_nmi_watchdog(void)
     133             : {
     134       53685 :         arch_touch_nmi_watchdog();
     135       53685 :         touch_softlockup_watchdog();
     136       53395 : }
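
A hedged usage sketch of the pattern the comment above describes; the polling
loop and the hw_ready() helper are hypothetical, only touch_nmi_watchdog()
comes from this header:

        /* Hypothetical driver loop that keeps interrupts off for a long time. */
        static void slow_hw_poll(void)
        {
                while (!hw_ready())             /* hw_ready() is a made-up helper */
                        touch_nmi_watchdog();   /* reset hard and soft lockup timeouts */
        }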
     137             : 
     138             : /*
     139             :  * Create trigger_all_cpu_backtrace() out of the arch-provided
     140             :  * base function. Return whether such support was available,
     141             :  * to allow calling code to fall back to some other mechanism:
     142             :  */
     143             : #ifdef arch_trigger_cpumask_backtrace
     144           0 : static inline bool trigger_all_cpu_backtrace(void)
     145             : {
     146           0 :         arch_trigger_cpumask_backtrace(cpu_online_mask, false);
     147           0 :         return true;
     148             : }
     149             : 
     150             : static inline bool trigger_allbutself_cpu_backtrace(void)
     151             : {
     152             :         arch_trigger_cpumask_backtrace(cpu_online_mask, true);
     153             :         return true;
     154             : }
     155             : 
     156             : static inline bool trigger_cpumask_backtrace(struct cpumask *mask)
     157             : {
     158             :         arch_trigger_cpumask_backtrace(mask, false);
     159             :         return true;
     160             : }
     161             : 
     162           0 : static inline bool trigger_single_cpu_backtrace(int cpu)
     163             : {
     164           0 :         arch_trigger_cpumask_backtrace(cpumask_of(cpu), false);
     165           0 :         return true;
     166             : }
     167             : 
     168             : /* generic implementation */
     169             : void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
     170             :                                    bool exclude_self,
     171             :                                    void (*raise)(cpumask_t *mask));
     172             : bool nmi_cpu_backtrace(struct pt_regs *regs);
     173             : 
     174             : #else
     175             : static inline bool trigger_all_cpu_backtrace(void)
     176             : {
     177             :         return false;
     178             : }
     179             : static inline bool trigger_allbutself_cpu_backtrace(void)
     180             : {
     181             :         return false;
     182             : }
     183             : static inline bool trigger_cpumask_backtrace(struct cpumask *mask)
     184             : {
     185             :         return false;
     186             : }
     187             : static inline bool trigger_single_cpu_backtrace(int cpu)
     188             : {
     189             :         return false;
     190             : }
     191             : #endif
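
A short sketch of the fallback pattern mentioned in the comment above the
trigger_*() helpers; the report_stall() caller is hypothetical:

        /*
         * Hypothetical caller: fall back to a local stack dump when the
         * architecture provides no NMI backtrace support and the helper
         * returns false.
         */
        static void report_stall(void)
        {
                if (!trigger_all_cpu_backtrace())
                        dump_stack();
        }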
     192             : 
     193             : #ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
     194             : u64 hw_nmi_get_sample_period(int watchdog_thresh);
     195             : #endif
     196             : 
     197             : #if defined(CONFIG_HARDLOCKUP_CHECK_TIMESTAMP) && \
     198             :     defined(CONFIG_HARDLOCKUP_DETECTOR)
     199             : void watchdog_update_hrtimer_threshold(u64 period);
     200             : #else
     201             : static inline void watchdog_update_hrtimer_threshold(u64 period) { }
     202             : #endif
     203             : 
     204             : struct ctl_table;
     205             : int proc_watchdog(struct ctl_table *, int, void *, size_t *, loff_t *);
     206             : int proc_nmi_watchdog(struct ctl_table *, int , void *, size_t *, loff_t *);
     207             : int proc_soft_watchdog(struct ctl_table *, int , void *, size_t *, loff_t *);
     208             : int proc_watchdog_thresh(struct ctl_table *, int , void *, size_t *, loff_t *);
     209             : int proc_watchdog_cpumask(struct ctl_table *, int, void *, size_t *, loff_t *);
     210             : 
     211             : #ifdef CONFIG_HAVE_ACPI_APEI_NMI
     212             : #include <asm/nmi.h>
     213             : #endif
     214             : 
     215             : #endif

Generated by: LCOV version 1.14