perfctr-watchdog.c
/*
 * local apic based NMI watchdog for various CPUs.
 *
 * This file also handles reservation of performance counters for coordination
 * with other users (like oprofile).
 *
 * Note that these events normally don't tick when the CPU idles. This means
 * the frequency varies with CPU load.
 *
 * Original code for K7/P6 written by Keith Owens
 *
 */
#include <linux/percpu.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/smp.h>
#include <asm/nmi.h>
#include <linux/kprobes.h>

#include <asm/apic.h>
#include <asm/perf_event.h>

/*
 * this number is calculated from Intel's MSR_P4_CRU_ESCR5 register and its
 * offset from MSR_P4_BSU_ESCR0.
 *
 * It will be the max for all platforms (for now)
 */
#define NMI_MAX_COUNTER_BITS 66
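
/*
 * Worked example of that calculation (an assumption based on the usual
 * msr-index.h values, not spelled out in this file): with
 * MSR_P4_BSU_ESCR0 = 0x3a0 and MSR_P4_CRU_ESCR5 = 0x3e1, the largest
 * bit offset handed out on a P4 is 0x3e1 - 0x3a0 = 65, so the owner
 * bitmaps need 66 bits.
 */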

/*
 * perfctr_nmi_owner tracks the ownership of the perfctr registers:
 * evntsel_nmi_owner tracks the ownership of the event selection
 * - different performance counters/event selections may be reserved by
 *   different subsystems; this reservation system just tries to coordinate
 *   things a little
 */
static DECLARE_BITMAP(perfctr_nmi_owner, NMI_MAX_COUNTER_BITS);
static DECLARE_BITMAP(evntsel_nmi_owner, NMI_MAX_COUNTER_BITS);

/* converts an msr to an appropriate reservation bit */
static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
{
	/* returns the bit offset of the performance counter register */
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		if (msr >= MSR_F15H_PERF_CTR)
			return (msr - MSR_F15H_PERF_CTR) >> 1;
		return msr - MSR_K7_PERFCTR0;
	case X86_VENDOR_INTEL:
		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
			return msr - MSR_ARCH_PERFMON_PERFCTR0;

		switch (boot_cpu_data.x86) {
		case 6:
			return msr - MSR_P6_PERFCTR0;
		case 15:
			return msr - MSR_P4_BPU_PERFCTR0;
		}
	}
	return 0;
}
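
/*
 * Illustrative mappings (assuming the usual msr-index.h layout, not stated
 * in this file): on an ARCH_PERFMON-capable Intel CPU,
 * MSR_ARCH_PERFMON_PERFCTR0 maps to bit 0 and PERFCTR1 to bit 1.  On AMD
 * family 15h the counter MSRs are interleaved with their control MSRs
 * (CTL0, CTR0, CTL1, CTR1, ...), which is why the offset is shifted right
 * by one to recover a plain counter index.
 */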

/*
 * converts an msr to an appropriate reservation bit
 * returns the bit offset of the event selection register
 */
static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
{
	/* returns the bit offset of the event selection register */
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		if (msr >= MSR_F15H_PERF_CTL)
			return (msr - MSR_F15H_PERF_CTL) >> 1;
		return msr - MSR_K7_EVNTSEL0;
	case X86_VENDOR_INTEL:
		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
			return msr - MSR_ARCH_PERFMON_EVENTSEL0;

		switch (boot_cpu_data.x86) {
		case 6:
			return msr - MSR_P6_EVNTSEL0;
		case 15:
			return msr - MSR_P4_BSU_ESCR0;
		}
	}
	return 0;
}

/* checks for a bit availability (hack for oprofile) */
int avail_to_resrv_perfctr_nmi_bit(unsigned int counter)
{
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	return !test_bit(counter, perfctr_nmi_owner);
}
EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit);
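
/*
 * Hypothetical caller sketch (not from this file): a profiler wanting
 * counter 0 could probe the bitmap first, then attempt the real
 * reservation, e.g. on an ARCH_PERFMON Intel CPU:
 *
 *	if (avail_to_resrv_perfctr_nmi_bit(0) &&
 *	    reserve_perfctr_nmi(MSR_ARCH_PERFMON_PERFCTR0))
 *		... counter 0 is now owned by the caller ...
 */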

int reserve_perfctr_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_perfctr_msr_to_bit(msr);
	/* register not managed by the allocator? */
	if (counter > NMI_MAX_COUNTER_BITS)
		return 1;

	if (!test_and_set_bit(counter, perfctr_nmi_owner))
		return 1;
	return 0;
}
EXPORT_SYMBOL(reserve_perfctr_nmi);

void release_perfctr_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_perfctr_msr_to_bit(msr);
	/* register not managed by the allocator? */
	if (counter > NMI_MAX_COUNTER_BITS)
		return;

	clear_bit(counter, perfctr_nmi_owner);
}
EXPORT_SYMBOL(release_perfctr_nmi);

int reserve_evntsel_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_evntsel_msr_to_bit(msr);
	/* register not managed by the allocator? */
	if (counter > NMI_MAX_COUNTER_BITS)
		return 1;

	if (!test_and_set_bit(counter, evntsel_nmi_owner))
		return 1;
	return 0;
}
EXPORT_SYMBOL(reserve_evntsel_nmi);

void release_evntsel_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_evntsel_msr_to_bit(msr);
	/* register not managed by the allocator? */
	if (counter > NMI_MAX_COUNTER_BITS)
		return;

	clear_bit(counter, evntsel_nmi_owner);
}
EXPORT_SYMBOL(release_evntsel_nmi);
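
/*
 * Hypothetical usage sketch (not part of the original file): how another
 * subsystem, e.g. an oprofile-style profiler, would coordinate with this
 * allocator before programming a counter.  The MSR names assume an
 * ARCH_PERFMON-capable Intel CPU and the helper names are made up for the
 * example; note the reserve_* functions return 1 on success, 0 on failure.
 *
 *	static int example_grab_counter0(void)
 *	{
 *		if (!reserve_perfctr_nmi(MSR_ARCH_PERFMON_PERFCTR0))
 *			return -EBUSY;
 *
 *		if (!reserve_evntsel_nmi(MSR_ARCH_PERFMON_EVENTSEL0)) {
 *			release_perfctr_nmi(MSR_ARCH_PERFMON_PERFCTR0);
 *			return -EBUSY;
 *		}
 *
 *		// both MSRs are now reserved and may be programmed
 *		return 0;
 *	}
 *
 *	static void example_release_counter0(void)
 *	{
 *		release_evntsel_nmi(MSR_ARCH_PERFMON_EVENTSEL0);
 *		release_perfctr_nmi(MSR_ARCH_PERFMON_PERFCTR0);
 *	}
 */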