/*
 * Thermal throttle event support code (such as syslog messaging and rate
 * limiting) that was factored out from x86_64 (mce_intel.c) and i386 (p4.c).
 *
 * This allows consistent reporting of CPU thermal throttle events.
 *
 * Maintains a counter in /sys that keeps track of the number of thermal
 * events, such that the user knows how bad the thermal problem might be
 * (since the logging to syslog and mcelog is rate limited).
 *
 * Author: Dmitriy Zavin (dmitriyz@google.com)
 *
 * Credits: Adapted from Zwane Mwaikambo's original code in mce_intel.c.
 *          Inspired by Ross Biro's and Al Borchers' counter code.
 */
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/cpu.h>

#include <asm/processor.h>
#include <asm/apic.h>
#include <asm/idle.h>
#include <asm/mce.h>
#include <asm/msr.h>
/* How long to wait between reporting thermal events */
#define CHECK_INTERVAL			(300 * HZ)

#define THERMAL_THROTTLING_EVENT	0
#define POWER_LIMIT_EVENT		1
/*
 * Current thermal event state:
 */
struct _thermal_state {
	bool			new_event;
	int			event;
	u64			next_check;
	unsigned long		count;
	unsigned long		last_count;
};

struct thermal_state {
	struct _thermal_state core_throttle;
	struct _thermal_state core_power_limit;
	struct _thermal_state package_throttle;
	struct _thermal_state package_power_limit;
	struct _thermal_state core_thresh0;
	struct _thermal_state core_thresh1;
};
/* Callback to handle core threshold interrupts */
int (*platform_thermal_notify)(__u64 msr_val);
EXPORT_SYMBOL(platform_thermal_notify);
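/*
 * Minimal usage sketch (hypothetical handler name; a platform thermal
 * driver would install its callback roughly like this from an init path):
 *
 *	static int my_thresh_handler(__u64 msr_val)
 *	{
 *		return 0;
 *	}
 *
 *	platform_thermal_notify = my_thresh_handler;
 */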
static DEFINE_PER_CPU(struct thermal_state, thermal_state);

static atomic_t therm_throt_en	= ATOMIC_INIT(0);

static u32 lvtthmr_init __read_mostly;
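/*
 * therm_throt_en stays 0 until intel_init_thermal() has successfully set
 * up the thermal vector; thermal_throttle_init_device() checks it so the
 * sysfs interface is only created on hardware that can report events.
 * lvtthmr_init caches the boot CPU's initial APIC_LVTTHMR value, saved by
 * mcheck_intel_therm_init() below.
 */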
#ifdef CONFIG_SYSFS
#define define_therm_throt_device_one_ro(_name)			\
	static DEVICE_ATTR(_name, 0444,					\
			   therm_throt_device_show_##_name,		\
			   NULL)

#define define_therm_throt_device_show_func(event, name)		\
									\
static ssize_t therm_throt_device_show_##event##_##name(		\
			struct device *dev,				\
			struct device_attribute *attr,			\
			char *buf)					\
{									\
	unsigned int cpu = dev->id;					\
	ssize_t ret;							\
									\
	preempt_disable();	/* CPU hotplug */			\
	if (cpu_online(cpu)) {						\
		ret = sprintf(buf, "%lu\n",				\
			      per_cpu(thermal_state, cpu).event.name);	\
	} else								\
		ret = 0;						\
	preempt_enable();						\
									\
	return ret;							\
}
define_therm_throt_device_show_func(core_throttle, count);
define_therm_throt_device_one_ro(core_throttle_count);

define_therm_throt_device_show_func(core_power_limit, count);
define_therm_throt_device_one_ro(core_power_limit_count);

define_therm_throt_device_show_func(package_throttle, count);
define_therm_throt_device_one_ro(package_throttle_count);

define_therm_throt_device_show_func(package_power_limit, count);
define_therm_throt_device_one_ro(package_power_limit_count);
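/*
 * Only core_throttle_count is registered unconditionally; the power-limit
 * and package counters are added per CPU in thermal_throttle_add_dev()
 * below, when the PLN/PTS CPUID features are present.  The files show up
 * under /sys/devices/system/cpu/cpuN/thermal_throttle/, e.g.:
 *
 *	$ cat /sys/devices/system/cpu/cpu0/thermal_throttle/core_throttle_count
 */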
static struct attribute *thermal_throttle_attrs[] = {
	&dev_attr_core_throttle_count.attr,
	NULL
};

static struct attribute_group thermal_attr_group = {
	.attrs	= thermal_throttle_attrs,
	.name	= "thermal_throttle"
};
#endif /* CONFIG_SYSFS */
#define CORE_LEVEL	0
#define PACKAGE_LEVEL	1

/***
 * therm_throt_process - Process thermal throttling event from interrupt
 * @new_event: Whether the condition is current or not (boolean), since the
 *             thermal interrupt normally gets called both when the thermal
 *             event begins and once the event has ended.
 *
 * This function is called by the thermal interrupt after the
 * IRQ has been acknowledged.
 *
 * It will take care of rate limiting and printing messages to the syslog.
 *
 * Returns: 0 : Event should NOT be further logged, i.e. still in
 *              "timeout" from previous log message.
 *          1 : Event should be logged further, and a message has been
 *              printed to the syslog.
 */
static int therm_throt_process(bool new_event, int event, int level)
{
	struct _thermal_state *state;
	unsigned int this_cpu = smp_processor_id();
	bool old_event;
	u64 now;
	struct thermal_state *pstate = &per_cpu(thermal_state, this_cpu);

	now = get_jiffies_64();
	if (level == CORE_LEVEL) {
		if (event == THERMAL_THROTTLING_EVENT)
			state = &pstate->core_throttle;
		else if (event == POWER_LIMIT_EVENT)
			state = &pstate->core_power_limit;
		else
			return 0;
	} else if (level == PACKAGE_LEVEL) {
		if (event == THERMAL_THROTTLING_EVENT)
			state = &pstate->package_throttle;
		else if (event == POWER_LIMIT_EVENT)
			state = &pstate->package_power_limit;
		else
			return 0;
	} else
		return 0;
	old_event = state->new_event;
	state->new_event = new_event;

	if (new_event)
		state->count++;
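	/*
	 * Rate limit: stay quiet if we are still inside the CHECK_INTERVAL
	 * window and events have already been counted since the last report.
	 */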
	if (time_before64(now, state->next_check) &&
			state->count != state->last_count)
		return 0;

	state->next_check = now + CHECK_INTERVAL;
	state->last_count = state->count;

	/* if we just entered the thermal event */
	if (new_event) {
		if (event == THERMAL_THROTTLING_EVENT)
			printk(KERN_CRIT "CPU%d: %s temperature above threshold, cpu clock throttled (total events = %lu)\n",
				this_cpu,
				level == CORE_LEVEL ? "Core" : "Package",
				state->count);
		else
			printk(KERN_CRIT "CPU%d: %s power limit notification (total events = %lu)\n",
				this_cpu,
				level == CORE_LEVEL ? "Core" : "Package",
				state->count);
		return 1;
	}
	if (old_event) {
		if (event == THERMAL_THROTTLING_EVENT)
			printk(KERN_INFO "CPU%d: %s temperature/speed normal\n",
				this_cpu,
				level == CORE_LEVEL ? "Core" : "Package");
		else
			printk(KERN_INFO "CPU%d: %s power limit normal\n",
				this_cpu,
				level == CORE_LEVEL ? "Core" : "Package");
		return 1;
	}

	return 0;
}
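/*
 * Threshold notifications are rate limited the same way: at most one
 * platform_thermal_notify() call per threshold per CHECK_INTERVAL.
 */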
static int thresh_event_valid(int event)
{
	struct _thermal_state *state;
	unsigned int this_cpu = smp_processor_id();
	struct thermal_state *pstate = &per_cpu(thermal_state, this_cpu);
	u64 now = get_jiffies_64();

	state = (event == 0) ? &pstate->core_thresh0 : &pstate->core_thresh1;

	if (time_before64(now, state->next_check))
		return 0;

	state->next_check = now + CHECK_INTERVAL;
	return 1;
}
#ifdef CONFIG_SYSFS
/* Add/Remove thermal_throttle interface for CPU device: */
static __cpuinit int thermal_throttle_add_dev(struct device *dev,
					      unsigned int cpu)
{
	int err;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	err = sysfs_create_group(&dev->kobj, &thermal_attr_group);
	if (err)
		return err;

	if (cpu_has(c, X86_FEATURE_PLN))
		err = sysfs_add_file_to_group(&dev->kobj,
					      &dev_attr_core_power_limit_count.attr,
					      thermal_attr_group.name);
	if (cpu_has(c, X86_FEATURE_PTS)) {
		err = sysfs_add_file_to_group(&dev->kobj,
					      &dev_attr_package_throttle_count.attr,
					      thermal_attr_group.name);
		if (cpu_has(c, X86_FEATURE_PLN))
			err = sysfs_add_file_to_group(&dev->kobj,
					&dev_attr_package_power_limit_count.attr,
					thermal_attr_group.name);
	}

	return err;
}
static __cpuinit void thermal_throttle_remove_dev(struct device *dev)
{
	sysfs_remove_group(&dev->kobj, &thermal_attr_group);
}
/* Mutex protecting device creation against CPU hotplug: */
static DEFINE_MUTEX(therm_cpu_lock);

/* Get notified when a cpu comes on/off. Be hotplug friendly. */
static __cpuinit int
thermal_throttle_cpu_callback(struct notifier_block *nfb,
			      unsigned long action,
			      void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct device *dev;
	int err = 0;

	dev = get_cpu_device(cpu);

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		mutex_lock(&therm_cpu_lock);
		err = thermal_throttle_add_dev(dev, cpu);
		mutex_unlock(&therm_cpu_lock);
		WARN_ON(err);
		break;
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		mutex_lock(&therm_cpu_lock);
		thermal_throttle_remove_dev(dev);
		mutex_unlock(&therm_cpu_lock);
		break;
	}
	return notifier_from_errno(err);
}

static struct notifier_block thermal_throttle_cpu_notifier __cpuinitdata =
{
	.notifier_call = thermal_throttle_cpu_callback,
};
static __init int thermal_throttle_init_device(void)
{
	unsigned int cpu = 0;
	int err;

	if (!atomic_read(&therm_throt_en))
		return 0;

	register_hotcpu_notifier(&thermal_throttle_cpu_notifier);

#ifdef CONFIG_HOTPLUG_CPU
	mutex_lock(&therm_cpu_lock);
#endif
	/* connect live CPUs to sysfs */
	for_each_online_cpu(cpu) {
		err = thermal_throttle_add_dev(get_cpu_device(cpu), cpu);
		WARN_ON(err);
	}
#ifdef CONFIG_HOTPLUG_CPU
	mutex_unlock(&therm_cpu_lock);
#endif

	return 0;
}
device_initcall(thermal_throttle_init_device);

#endif /* CONFIG_SYSFS */
static void notify_thresholds(__u64 msr_val)
{
	/* check whether the interrupt handler is defined;
	 * otherwise simply return
	 */
	if (!platform_thermal_notify)
		return;

	/* lower threshold reached */
	if ((msr_val & THERM_LOG_THRESHOLD0) && thresh_event_valid(0))
		platform_thermal_notify(msr_val);
	/* higher threshold reached */
	if ((msr_val & THERM_LOG_THRESHOLD1) && thresh_event_valid(1))
		platform_thermal_notify(msr_val);
}
/* Thermal transition interrupt handler */
static void intel_thermal_interrupt(void)
{
	__u64 msr_val;

	rdmsrl(MSR_IA32_THERM_STATUS, msr_val);

	/* Check for violation of core thermal thresholds */
	notify_thresholds(msr_val);

	if (therm_throt_process(msr_val & THERM_STATUS_PROCHOT,
				THERMAL_THROTTLING_EVENT,
				CORE_LEVEL) != 0)
		mce_log_therm_throt_event(msr_val);

	if (this_cpu_has(X86_FEATURE_PLN))
		therm_throt_process(msr_val & THERM_STATUS_POWER_LIMIT,
					POWER_LIMIT_EVENT,
					CORE_LEVEL);

	if (this_cpu_has(X86_FEATURE_PTS)) {
		rdmsrl(MSR_IA32_PACKAGE_THERM_STATUS, msr_val);
		therm_throt_process(msr_val & PACKAGE_THERM_STATUS_PROCHOT,
					THERMAL_THROTTLING_EVENT,
					PACKAGE_LEVEL);
		if (this_cpu_has(X86_FEATURE_PLN))
			therm_throt_process(msr_val &
					PACKAGE_THERM_STATUS_POWER_LIMIT,
					POWER_LIMIT_EVENT,
					PACKAGE_LEVEL);
	}
}
static void unexpected_thermal_interrupt(void)
{
	printk(KERN_ERR "CPU%d: Unexpected LVT thermal interrupt!\n",
	       smp_processor_id());
}

static void (*smp_thermal_vector)(void) = unexpected_thermal_interrupt;
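/*
 * intel_init_thermal() repoints smp_thermal_vector at
 * intel_thermal_interrupt() once the LVT entry is programmed; a stray
 * interrupt before that only logs the message above.
 */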
asmlinkage void smp_thermal_interrupt(struct pt_regs *regs)
{
	irq_enter();
	exit_idle();
	inc_irq_stat(irq_thermal_count);
	smp_thermal_vector();
	irq_exit();
	/* Ack only at the end to avoid potential reentry */
	ack_APIC_irq();
}
/* Thermal monitoring depends on APIC, ACPI and clock modulation */
static int intel_thermal_supported(struct cpuinfo_x86 *c)
{
	if (!cpu_has_apic)
		return 0;
	if (!cpu_has(c, X86_FEATURE_ACPI) || !cpu_has(c, X86_FEATURE_ACC))
		return 0;
	return 1;
}
void __init mcheck_intel_therm_init(void)
{
	/*
	 * This function is only called on the boot CPU. Save the initial
	 * thermal LVT value on the BSP and use it later to restore the
	 * BIOS-programmed value on the APs.
	 */
	if (intel_thermal_supported(&boot_cpu_data))
		lvtthmr_init = apic_read(APIC_LVTTHMR);
}
void intel_init_thermal(struct cpuinfo_x86 *c)
{
	unsigned int cpu = smp_processor_id();
	int tm2 = 0;
	u32 l, h;

	if (!intel_thermal_supported(c))
		return;

	/*
	 * First check if it's enabled already, in which case there might
	 * be some SMM goo which handles it, so we can't even put a handler
	 * since it might be delivered via SMI already:
	 */
	rdmsr(MSR_IA32_MISC_ENABLE, l, h);
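	/*
	 * Note: the low word (l) read here is reused further down for the
	 * MSR_IA32_MISC_ENABLE_TM2 check on non-early-Pentium-M parts.
	 */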
	h = lvtthmr_init;
	/*
	 * The initial value of thermal LVT entries on all APs always reads
	 * 0x10000 because APs are woken up by the BSP issuing an
	 * INIT-SIPI-SIPI sequence to them and LVT registers are reset to 0s
	 * except for the mask bits, which are set to 1s when APs receive the
	 * INIT IPI. If BIOS takes over the thermal interrupt and sets its
	 * delivery mode to SMI (not fixed), we restore the value that BIOS
	 * programmed, using the BSP's saved copy, since BIOS always sets
	 * the same value for all threads/cores.
	 */
	if ((h & APIC_DM_FIXED_MASK) != APIC_DM_FIXED)
		apic_write(APIC_LVTTHMR, lvtthmr_init);

	if ((l & MSR_IA32_MISC_ENABLE_TM1) && (h & APIC_DM_SMI)) {
		printk(KERN_DEBUG
		       "CPU%d: Thermal monitoring handled by SMI\n", cpu);
		return;
	}

	/* Check whether a vector already exists */
	if (h & APIC_VECTOR_MASK) {
		printk(KERN_DEBUG
		       "CPU%d: Thermal LVT vector (%#x) already installed\n",
		       cpu, (h & APIC_VECTOR_MASK));
		return;
	}

	/* early Pentium M models use a different method for enabling TM2 */
	if (cpu_has(c, X86_FEATURE_TM2)) {
		if (c->x86 == 6 && (c->x86_model == 9 || c->x86_model == 13)) {
			rdmsr(MSR_THERM2_CTL, l, h);
			if (l & MSR_THERM2_CTL_TM_SELECT)
				tm2 = 1;
		} else if (l & MSR_IA32_MISC_ENABLE_TM2)
			tm2 = 1;
	}

	/* We'll mask the thermal vector in the lapic till we're ready: */
	h = THERMAL_APIC_VECTOR | APIC_DM_FIXED | APIC_LVT_MASKED;
	apic_write(APIC_LVTTHMR, h);

	rdmsr(MSR_IA32_THERM_INTERRUPT, l, h);
	if (cpu_has(c, X86_FEATURE_PLN))
		wrmsr(MSR_IA32_THERM_INTERRUPT,
		      l | (THERM_INT_LOW_ENABLE
			| THERM_INT_HIGH_ENABLE | THERM_INT_PLN_ENABLE), h);
	else
		wrmsr(MSR_IA32_THERM_INTERRUPT,
		      l | (THERM_INT_LOW_ENABLE | THERM_INT_HIGH_ENABLE), h);

	if (cpu_has(c, X86_FEATURE_PTS)) {
		rdmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);
		if (cpu_has(c, X86_FEATURE_PLN))
			wrmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT,
			      l | (PACKAGE_THERM_INT_LOW_ENABLE
				| PACKAGE_THERM_INT_HIGH_ENABLE
				| PACKAGE_THERM_INT_PLN_ENABLE), h);
		else
			wrmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT,
			      l | (PACKAGE_THERM_INT_LOW_ENABLE
				| PACKAGE_THERM_INT_HIGH_ENABLE), h);
	}

	smp_thermal_vector = intel_thermal_interrupt;

	rdmsr(MSR_IA32_MISC_ENABLE, l, h);
	wrmsr(MSR_IA32_MISC_ENABLE, l | MSR_IA32_MISC_ENABLE_TM1, h);

	/* Unmask the thermal vector: */
	l = apic_read(APIC_LVTTHMR);
	apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED);

	printk_once(KERN_INFO "CPU0: Thermal monitoring enabled (%s)\n",
		    tm2 ? "TM2" : "TM1");

	/* enable thermal throttle processing */
	atomic_set(&therm_throt_en, 1);
}