/*
 *  linux/include/linux/nmi.h
 */
#ifndef LINUX_NMI_H
#define LINUX_NMI_H

#include <linux/sched.h>
#include <asm/irq.h>

/*
 * The run state of the lockup detectors is controlled by the content of the
 * 'watchdog_enabled' variable. Each lockup detector has its dedicated bit -
 * bit 0 for the hard lockup detector and bit 1 for the soft lockup detector.
 *
 * 'watchdog_user_enabled', 'nmi_watchdog_enabled' and 'soft_watchdog_enabled'
 * are variables that are only used as an 'interface' between the parameters
 * in /proc/sys/kernel and the internal state bits in 'watchdog_enabled'. The
 * 'watchdog_thresh' variable is handled differently because its value is not
 * boolean, and the lockup detectors are 'suspended' while 'watchdog_thresh'
 * is equal to zero.
 */
#define NMI_WATCHDOG_ENABLED_BIT	0
#define SOFT_WATCHDOG_ENABLED_BIT	1
#define NMI_WATCHDOG_ENABLED		(1 << NMI_WATCHDOG_ENABLED_BIT)
#define SOFT_WATCHDOG_ENABLED		(1 << SOFT_WATCHDOG_ENABLED_BIT)
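
/*
 * Illustration only, not part of this header: a caller that needs the state
 * of one detector masks the corresponding bit out of 'watchdog_enabled':
 *
 *	bool hard_on = watchdog_enabled & NMI_WATCHDOG_ENABLED;
 *	bool soft_on = watchdog_enabled & SOFT_WATCHDOG_ENABLED;
 */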

/**
 * touch_nmi_watchdog - restart NMI watchdog timeout.
 *
 * If the architecture supports the NMI watchdog, touch_nmi_watchdog()
 * may be used to reset the timeout - for code which intentionally
 * disables interrupts for a long time. This call is stateless.
 */
#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR)
#include <asm/nmi.h>
extern void touch_nmi_watchdog(void);
#else
static inline void touch_nmi_watchdog(void)
{
	touch_softlockup_watchdog();
}
#endif
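
/*
 * Usage sketch (assumed, not from this header): a polling loop that runs
 * with interrupts disabled pets the watchdog on each iteration so the hard
 * lockup detector does not fire; device_ready() is a hypothetical helper.
 *
 *	local_irq_disable();
 *	while (!device_ready())
 *		touch_nmi_watchdog();
 *	local_irq_enable();
 */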

#if defined(CONFIG_HARDLOCKUP_DETECTOR)
extern void hardlockup_detector_disable(void);
#else
static inline void hardlockup_detector_disable(void) {}
#endif

/*
 * Create trigger_all_cpu_backtrace() out of the arch-provided
 * base function. Return whether such support was available,
 * to allow calling code to fall back to some other mechanism:
 */
#ifdef arch_trigger_cpumask_backtrace
static inline bool trigger_all_cpu_backtrace(void)
{
	arch_trigger_cpumask_backtrace(cpu_online_mask, false);
	return true;
}

static inline bool trigger_allbutself_cpu_backtrace(void)
{
	arch_trigger_cpumask_backtrace(cpu_online_mask, true);
	return true;
}

static inline bool trigger_cpumask_backtrace(struct cpumask *mask)
{
	arch_trigger_cpumask_backtrace(mask, false);
	return true;
}

static inline bool trigger_single_cpu_backtrace(int cpu)
{
	arch_trigger_cpumask_backtrace(cpumask_of(cpu), false);
	return true;
}

/* generic implementation */
void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
				   bool exclude_self,
				   void (*raise)(cpumask_t *mask));
bool nmi_cpu_backtrace(struct pt_regs *regs);

#else
static inline bool trigger_all_cpu_backtrace(void)
{
	return false;
}
static inline bool trigger_allbutself_cpu_backtrace(void)
{
	return false;
}
static inline bool trigger_cpumask_backtrace(struct cpumask *mask)
{
	return false;
}
static inline bool trigger_single_cpu_backtrace(int cpu)
{
	return false;
}
#endif
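
/*
 * Illustration only, not in this header: the boolean return value lets
 * callers fall back to another mechanism when the architecture provides
 * no NMI backtrace support, e.g.
 *
 *	if (!trigger_all_cpu_backtrace())
 *		dump_stack();	// fall back to dumping the local stack
 */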

#ifdef CONFIG_LOCKUP_DETECTOR
u64 hw_nmi_get_sample_period(int watchdog_thresh);
extern int nmi_watchdog_enabled;
extern int soft_watchdog_enabled;
extern int watchdog_user_enabled;
extern int watchdog_thresh;
extern unsigned long watchdog_enabled;
extern unsigned long *watchdog_cpumask_bits;
extern atomic_t watchdog_park_in_progress;
#ifdef CONFIG_SMP
extern int sysctl_softlockup_all_cpu_backtrace;
extern int sysctl_hardlockup_all_cpu_backtrace;
#else
#define sysctl_softlockup_all_cpu_backtrace 0
#define sysctl_hardlockup_all_cpu_backtrace 0
#endif
extern bool is_hardlockup(void);
struct ctl_table;
extern int proc_watchdog(struct ctl_table *, int,
			 void __user *, size_t *, loff_t *);
extern int proc_nmi_watchdog(struct ctl_table *, int,
			     void __user *, size_t *, loff_t *);
extern int proc_soft_watchdog(struct ctl_table *, int,
			      void __user *, size_t *, loff_t *);
extern int proc_watchdog_thresh(struct ctl_table *, int,
				void __user *, size_t *, loff_t *);
extern int proc_watchdog_cpumask(struct ctl_table *, int,
				 void __user *, size_t *, loff_t *);
extern int lockup_detector_suspend(void);
extern void lockup_detector_resume(void);
#else
static inline int lockup_detector_suspend(void)
{
	return 0;
}

static inline void lockup_detector_resume(void)
{
}
#endif
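
/*
 * Hedged sketch, not part of this header: lockup_detector_suspend() returns
 * 0 on success, so a caller that must keep the detectors quiet across a
 * slow section brackets it like this; do_slow_work() is a hypothetical
 * placeholder.
 *
 *	if (!lockup_detector_suspend()) {
 *		do_slow_work();
 *		lockup_detector_resume();
 *	}
 */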

#ifdef CONFIG_HAVE_ACPI_APEI_NMI
#include <asm/nmi.h>
#endif

#endif	/* LINUX_NMI_H */