/* perfctr-watchdog.c */
/*
 * local apic based NMI watchdog for various CPUs.
 *
 * This file also handles reservation of performance counters for coordination
 * with other users (like oprofile).
 *
 * Note that these events normally don't tick when the CPU idles. This means
 * the frequency varies with CPU load.
 *
 * Original code for K7/P6 written by Keith Owens
 *
 */
#include <linux/percpu.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/smp.h>
#include <asm/nmi.h>
#include <linux/kprobes.h>
#include <asm/apic.h>
#include <asm/perf_event.h>

/*
 * this number is calculated from Intel's MSR_P4_CRU_ESCR5 register and it's
 * offset from MSR_P4_BSU_ESCR0.
 *
 * It will be the max for all platforms (for now)
 */
#define NMI_MAX_COUNTER_BITS 66

/*
 * perfctr_nmi_owner tracks the ownership of the perfctr registers:
 * evntsel_nmi_owner tracks the ownership of the event selection
 * - different performance counters / event selections may be reserved for
 *   different subsystems; this reservation system just tries to coordinate
 *   things a little
 */
static DECLARE_BITMAP(perfctr_nmi_owner, NMI_MAX_COUNTER_BITS);
static DECLARE_BITMAP(evntsel_nmi_owner, NMI_MAX_COUNTER_BITS);
  39. static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
  40. {
  41. /* returns the bit offset of the performance counter register */
  42. switch (boot_cpu_data.x86_vendor) {
  43. case X86_VENDOR_AMD:
  44. if (msr >= MSR_F15H_PERF_CTR)
  45. return (msr - MSR_F15H_PERF_CTR) >> 1;
  46. return msr - MSR_K7_PERFCTR0;
  47. case X86_VENDOR_INTEL:
  48. if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
  49. return msr - MSR_ARCH_PERFMON_PERFCTR0;
  50. switch (boot_cpu_data.x86) {
  51. case 6:
  52. return msr - MSR_P6_PERFCTR0;
  53. case 11:
  54. return msr - MSR_KNC_PERFCTR0;
  55. case 15:
  56. return msr - MSR_P4_BPU_PERFCTR0;
  57. }
  58. }
  59. return 0;
  60. }
  61. /*
  62. * converts an msr to an appropriate reservation bit
  63. * returns the bit offset of the event selection register
  64. */
  65. static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
  66. {
  67. /* returns the bit offset of the event selection register */
  68. switch (boot_cpu_data.x86_vendor) {
  69. case X86_VENDOR_AMD:
  70. if (msr >= MSR_F15H_PERF_CTL)
  71. return (msr - MSR_F15H_PERF_CTL) >> 1;
  72. return msr - MSR_K7_EVNTSEL0;
  73. case X86_VENDOR_INTEL:
  74. if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
  75. return msr - MSR_ARCH_PERFMON_EVENTSEL0;
  76. switch (boot_cpu_data.x86) {
  77. case 6:
  78. return msr - MSR_P6_EVNTSEL0;
  79. case 11:
  80. return msr - MSR_KNC_EVNTSEL0;
  81. case 15:
  82. return msr - MSR_P4_BSU_ESCR0;
  83. }
  84. }
  85. return 0;
  86. }
  87. /* checks for a bit availability (hack for oprofile) */
  88. int avail_to_resrv_perfctr_nmi_bit(unsigned int counter)
  89. {
  90. BUG_ON(counter > NMI_MAX_COUNTER_BITS);
  91. return !test_bit(counter, perfctr_nmi_owner);
  92. }
  93. EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit);
  94. int reserve_perfctr_nmi(unsigned int msr)
  95. {
  96. unsigned int counter;
  97. counter = nmi_perfctr_msr_to_bit(msr);
  98. /* register not managed by the allocator? */
  99. if (counter > NMI_MAX_COUNTER_BITS)
  100. return 1;
  101. if (!test_and_set_bit(counter, perfctr_nmi_owner))
  102. return 1;
  103. return 0;
  104. }
  105. EXPORT_SYMBOL(reserve_perfctr_nmi);
  106. void release_perfctr_nmi(unsigned int msr)
  107. {
  108. unsigned int counter;
  109. counter = nmi_perfctr_msr_to_bit(msr);
  110. /* register not managed by the allocator? */
  111. if (counter > NMI_MAX_COUNTER_BITS)
  112. return;
  113. clear_bit(counter, perfctr_nmi_owner);
  114. }
  115. EXPORT_SYMBOL(release_perfctr_nmi);
  116. int reserve_evntsel_nmi(unsigned int msr)
  117. {
  118. unsigned int counter;
  119. counter = nmi_evntsel_msr_to_bit(msr);
  120. /* register not managed by the allocator? */
  121. if (counter > NMI_MAX_COUNTER_BITS)
  122. return 1;
  123. if (!test_and_set_bit(counter, evntsel_nmi_owner))
  124. return 1;
  125. return 0;
  126. }
  127. EXPORT_SYMBOL(reserve_evntsel_nmi);
  128. void release_evntsel_nmi(unsigned int msr)
  129. {
  130. unsigned int counter;
  131. counter = nmi_evntsel_msr_to_bit(msr);
  132. /* register not managed by the allocator? */
  133. if (counter > NMI_MAX_COUNTER_BITS)
  134. return;
  135. clear_bit(counter, evntsel_nmi_owner);
  136. }
  137. EXPORT_SYMBOL(release_evntsel_nmi);