intel_ips.c 44 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767
  1. /*
  2. * Copyright (c) 2009-2010 Intel Corporation
  3. *
  4. * This program is free software; you can redistribute it and/or modify it
  5. * under the terms and conditions of the GNU General Public License,
  6. * version 2, as published by the Free Software Foundation.
  7. *
  8. * This program is distributed in the hope it will be useful, but WITHOUT
  9. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  11. * more details.
  12. *
  13. * You should have received a copy of the GNU General Public License along with
  14. * this program; if not, write to the Free Software Foundation, Inc.,
  15. * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  16. *
  17. * The full GNU General Public License is included in this distribution in
  18. * the file called "COPYING".
  19. *
  20. * Authors:
  21. * Jesse Barnes <jbarnes@virtuousgeek.org>
  22. */
  23. /*
  24. * Some Intel Ibex Peak based platforms support so-called "intelligent
  25. * power sharing", which allows the CPU and GPU to cooperate to maximize
  26. * performance within a given TDP (thermal design point). This driver
  27. * performs the coordination between the CPU and GPU, monitors thermal and
  28. * power statistics in the platform, and initializes power monitoring
  29. * hardware. It also provides a few tunables to control behavior. Its
  30. * primary purpose is to safely allow CPU and GPU turbo modes to be enabled
  31. * by tracking power and thermal budget; secondarily it can boost turbo
  32. * performance by allocating more power or thermal budget to the CPU or GPU
  33. * based on available headroom and activity.
  34. *
  35. * The basic algorithm is driven by a 5s moving average of temperature. If
  36. * thermal headroom is available, the CPU and/or GPU power clamps may be
  37. * adjusted upwards. If we hit the thermal ceiling or a thermal trigger,
  38. * we scale back the clamp. Aside from trigger events (when we're critically
  39. * close or over our TDP) we don't adjust the clamps more than once every
  40. * five seconds.
  41. *
  42. * The thermal device (device 31, function 6) has a set of registers that
  43. * are updated by the ME firmware. The ME should also take the clamp values
  44. * written to those registers and write them to the CPU, but we currently
  45. * bypass that functionality and write the CPU MSR directly.
  46. *
  47. * UNSUPPORTED:
  48. * - dual MCP configs
  49. *
  50. * TODO:
  51. * - handle CPU hotplug
  52. * - provide turbo enable/disable api
  53. *
  54. * Related documents:
  55. * - CDI 403777, 403778 - Auburndale EDS vol 1 & 2
  56. * - CDI 401376 - Ibex Peak EDS
  57. * - ref 26037, 26641 - IPS BIOS spec
  58. * - ref 26489 - Nehalem BIOS writer's guide
  59. * - ref 26921 - Ibex Peak BIOS Specification
  60. */
  61. #include <linux/debugfs.h>
  62. #include <linux/delay.h>
  63. #include <linux/interrupt.h>
  64. #include <linux/kernel.h>
  65. #include <linux/kthread.h>
  66. #include <linux/module.h>
  67. #include <linux/pci.h>
  68. #include <linux/sched.h>
  69. #include <linux/seq_file.h>
  70. #include <linux/string.h>
  71. #include <linux/tick.h>
  72. #include <linux/timer.h>
  73. #include <linux/dmi.h>
  74. #include <drm/i915_drm.h>
  75. #include <asm/msr.h>
  76. #include <asm/processor.h>
  77. #include "intel_ips.h"
  78. #include <asm-generic/io-64-nonatomic-lo-hi.h>
  79. #define PCI_DEVICE_ID_INTEL_THERMAL_SENSOR 0x3b32
  80. /*
  81. * Package level MSRs for monitor/control
  82. */
  83. #define PLATFORM_INFO 0xce
  84. #define PLATFORM_TDP (1<<29)
  85. #define PLATFORM_RATIO (1<<28)
  86. #define IA32_MISC_ENABLE 0x1a0
  87. #define IA32_MISC_TURBO_EN (1ULL<<38)
  88. #define TURBO_POWER_CURRENT_LIMIT 0x1ac
  89. #define TURBO_TDC_OVR_EN (1UL<<31)
  90. #define TURBO_TDC_MASK (0x000000007fff0000UL)
  91. #define TURBO_TDC_SHIFT (16)
  92. #define TURBO_TDP_OVR_EN (1UL<<15)
  93. #define TURBO_TDP_MASK (0x0000000000003fffUL)
  94. /*
  95. * Core/thread MSRs for monitoring
  96. */
  97. #define IA32_PERF_CTL 0x199
  98. #define IA32_PERF_TURBO_DIS (1ULL<<32)
  99. /*
  100. * Thermal PCI device regs
  101. */
  102. #define THM_CFG_TBAR 0x10
  103. #define THM_CFG_TBAR_HI 0x14
  104. #define THM_TSIU 0x00
  105. #define THM_TSE 0x01
  106. #define TSE_EN 0xb8
  107. #define THM_TSS 0x02
  108. #define THM_TSTR 0x03
  109. #define THM_TSTTP 0x04
  110. #define THM_TSCO 0x08
  111. #define THM_TSES 0x0c
  112. #define THM_TSGPEN 0x0d
  113. #define TSGPEN_HOT_LOHI (1<<1)
  114. #define TSGPEN_CRIT_LOHI (1<<2)
  115. #define THM_TSPC 0x0e
  116. #define THM_PPEC 0x10
  117. #define THM_CTA 0x12
  118. #define THM_PTA 0x14
  119. #define PTA_SLOPE_MASK (0xff00)
  120. #define PTA_SLOPE_SHIFT 8
  121. #define PTA_OFFSET_MASK (0x00ff)
  122. #define THM_MGTA 0x16
  123. #define MGTA_SLOPE_MASK (0xff00)
  124. #define MGTA_SLOPE_SHIFT 8
  125. #define MGTA_OFFSET_MASK (0x00ff)
  126. #define THM_TRC 0x1a
  127. #define TRC_CORE2_EN (1<<15)
  128. #define TRC_THM_EN (1<<12)
  129. #define TRC_C6_WAR (1<<8)
  130. #define TRC_CORE1_EN (1<<7)
  131. #define TRC_CORE_PWR (1<<6)
  132. #define TRC_PCH_EN (1<<5)
  133. #define TRC_MCH_EN (1<<4)
  134. #define TRC_DIMM4 (1<<3)
  135. #define TRC_DIMM3 (1<<2)
  136. #define TRC_DIMM2 (1<<1)
  137. #define TRC_DIMM1 (1<<0)
  138. #define THM_TES 0x20
  139. #define THM_TEN 0x21
  140. #define TEN_UPDATE_EN 1
  141. #define THM_PSC 0x24
  142. #define PSC_NTG (1<<0) /* No GFX turbo support */
  143. #define PSC_NTPC (1<<1) /* No CPU turbo support */
  144. #define PSC_PP_DEF (0<<2) /* Perf policy up to driver */
  145. #define PSP_PP_PC (1<<2) /* BIOS prefers CPU perf */
  146. #define PSP_PP_BAL (2<<2) /* BIOS wants balanced perf */
  147. #define PSP_PP_GFX (3<<2) /* BIOS prefers GFX perf */
  148. #define PSP_PBRT (1<<4) /* BIOS run time support */
  149. #define THM_CTV1 0x30
  150. #define CTV_TEMP_ERROR (1<<15)
  151. #define CTV_TEMP_MASK 0x3f
  152. #define CTV_
  153. #define THM_CTV2 0x32
  154. #define THM_CEC 0x34 /* undocumented power accumulator in joules */
  155. #define THM_AE 0x3f
  156. #define THM_HTS 0x50 /* 32 bits */
  157. #define HTS_PCPL_MASK (0x7fe00000)
  158. #define HTS_PCPL_SHIFT 21
  159. #define HTS_GPL_MASK (0x001ff000)
  160. #define HTS_GPL_SHIFT 12
  161. #define HTS_PP_MASK (0x00000c00)
  162. #define HTS_PP_SHIFT 10
  163. #define HTS_PP_DEF 0
  164. #define HTS_PP_PROC 1
  165. #define HTS_PP_BAL 2
  166. #define HTS_PP_GFX 3
  167. #define HTS_PCTD_DIS (1<<9)
  168. #define HTS_GTD_DIS (1<<8)
  169. #define HTS_PTL_MASK (0x000000fe)
  170. #define HTS_PTL_SHIFT 1
  171. #define HTS_NVV (1<<0)
  172. #define THM_HTSHI 0x54 /* 16 bits */
  173. #define HTS2_PPL_MASK (0x03ff)
  174. #define HTS2_PRST_MASK (0x3c00)
  175. #define HTS2_PRST_SHIFT 10
  176. #define HTS2_PRST_UNLOADED 0
  177. #define HTS2_PRST_RUNNING 1
  178. #define HTS2_PRST_TDISOP 2 /* turbo disabled due to power */
  179. #define HTS2_PRST_TDISHT 3 /* turbo disabled due to high temp */
  180. #define HTS2_PRST_TDISUSR 4 /* user disabled turbo */
  181. #define HTS2_PRST_TDISPLAT 5 /* platform disabled turbo */
  182. #define HTS2_PRST_TDISPM 6 /* power management disabled turbo */
  183. #define HTS2_PRST_TDISERR 7 /* some kind of error disabled turbo */
  184. #define THM_PTL 0x56
  185. #define THM_MGTV 0x58
  186. #define TV_MASK 0x000000000000ff00
  187. #define TV_SHIFT 8
  188. #define THM_PTV 0x60
  189. #define PTV_MASK 0x00ff
  190. #define THM_MMGPC 0x64
  191. #define THM_MPPC 0x66
  192. #define THM_MPCPC 0x68
  193. #define THM_TSPIEN 0x82
  194. #define TSPIEN_AUX_LOHI (1<<0)
  195. #define TSPIEN_HOT_LOHI (1<<1)
  196. #define TSPIEN_CRIT_LOHI (1<<2)
  197. #define TSPIEN_AUX2_LOHI (1<<3)
  198. #define THM_TSLOCK 0x83
  199. #define THM_ATR 0x84
  200. #define THM_TOF 0x87
  201. #define THM_STS 0x98
  202. #define STS_PCPL_MASK (0x7fe00000)
  203. #define STS_PCPL_SHIFT 21
  204. #define STS_GPL_MASK (0x001ff000)
  205. #define STS_GPL_SHIFT 12
  206. #define STS_PP_MASK (0x00000c00)
  207. #define STS_PP_SHIFT 10
  208. #define STS_PP_DEF 0
  209. #define STS_PP_PROC 1
  210. #define STS_PP_BAL 2
  211. #define STS_PP_GFX 3
  212. #define STS_PCTD_DIS (1<<9)
  213. #define STS_GTD_DIS (1<<8)
  214. #define STS_PTL_MASK (0x000000fe)
  215. #define STS_PTL_SHIFT 1
  216. #define STS_NVV (1<<0)
  217. #define THM_SEC 0x9c
  218. #define SEC_ACK (1<<0)
  219. #define THM_TC3 0xa4
  220. #define THM_TC1 0xa8
  221. #define STS_PPL_MASK (0x0003ff00)
  222. #define STS_PPL_SHIFT 16
  223. #define THM_TC2 0xac
  224. #define THM_DTV 0xb0
  225. #define THM_ITV 0xd8
  226. #define ITV_ME_SEQNO_MASK 0x00ff0000 /* ME should update every ~200ms */
  227. #define ITV_ME_SEQNO_SHIFT (16)
  228. #define ITV_MCH_TEMP_MASK 0x0000ff00
  229. #define ITV_MCH_TEMP_SHIFT (8)
  230. #define ITV_PCH_TEMP_MASK 0x000000ff
  231. #define thm_readb(off) readb(ips->regmap + (off))
  232. #define thm_readw(off) readw(ips->regmap + (off))
  233. #define thm_readl(off) readl(ips->regmap + (off))
  234. #define thm_readq(off) readq(ips->regmap + (off))
  235. #define thm_writeb(off, val) writeb((val), ips->regmap + (off))
  236. #define thm_writew(off, val) writew((val), ips->regmap + (off))
  237. #define thm_writel(off, val) writel((val), ips->regmap + (off))
  238. static const int IPS_ADJUST_PERIOD = 5000; /* ms */
  239. static bool late_i915_load = false;
  240. /* For initial average collection */
  241. static const int IPS_SAMPLE_PERIOD = 200; /* ms */
  242. static const int IPS_SAMPLE_WINDOW = 5000; /* 5s moving window of samples */
  243. #define IPS_SAMPLE_COUNT (IPS_SAMPLE_WINDOW / IPS_SAMPLE_PERIOD)
/* Per-SKU limits */
struct ips_mcp_limits {
	int cpu_family;
	int cpu_model; /* includes extended model... */
	int mcp_power_limit; /* mW units */
	int core_power_limit;	/* CPU package power budget, mW */
	int mch_power_limit;	/* GMCH power budget, mW */
	int core_temp_limit; /* degrees C */
	int mch_temp_limit;	/* GMCH temp ceiling, degrees C */
};
/* Max temps are -10 degrees C to avoid PROCHOT# */

/* Standard voltage SKU limits */
struct ips_mcp_limits ips_sv_limits = {
	.mcp_power_limit = 35000,
	.core_power_limit = 29000,
	.mch_power_limit = 20000,
	.core_temp_limit = 95,
	.mch_temp_limit = 90
};

/* Low voltage SKU limits */
struct ips_mcp_limits ips_lv_limits = {
	.mcp_power_limit = 25000,
	.core_power_limit = 21000,
	.mch_power_limit = 13000,
	.core_temp_limit = 95,
	.mch_temp_limit = 90
};

/* Ultra-low voltage SKU limits */
struct ips_mcp_limits ips_ulv_limits = {
	.mcp_power_limit = 18000,
	.core_power_limit = 14000,
	.mch_power_limit = 11000,
	.core_temp_limit = 95,
	.mch_temp_limit = 90
};
/* Per-device driver state, allocated at probe time */
struct ips_driver {
	struct pci_dev *dev;		/* thermal PCI device (31, 6) */
	void *regmap;			/* mapped thermal regs; NOTE(review): likely should be void __iomem * — confirm */
	struct task_struct *monitor;	/* sampling kthread */
	struct task_struct *adjust;	/* clamp-adjust kthread */
	struct dentry *debug_root;	/* debugfs directory */

	/* Average CPU core temps (all averages in .01 degrees C for precision) */
	u16 ctv1_avg_temp;
	u16 ctv2_avg_temp;
	/* GMCH average */
	u16 mch_avg_temp;
	/* Average for the CPU (both cores?) */
	u16 mcp_avg_temp;

	/* Average power consumption (in mW) */
	u32 cpu_avg_power;
	u32 mch_avg_power;

	/* Offset values */
	u16 cta_val;	/* CPU temp adjust (slope/offset) */
	u16 pta_val;	/* package temp adjust */
	u16 mgta_val;	/* GMCH temp adjust */

	/* Maximums & prefs, protected by turbo status lock */
	spinlock_t turbo_status_lock;
	u16 mcp_temp_limit;	/* degrees C */
	u16 mcp_power_limit;	/* package power budget */
	u16 core_power_limit;
	u16 mch_power_limit;
	bool cpu_turbo_enabled;		/* policy: CPU turbo allowed */
	bool __cpu_turbo_on;		/* current MSR state */
	bool gpu_turbo_enabled;		/* policy: GPU turbo allowed */
	bool __gpu_turbo_on;		/* current state as told to i915 */
	bool gpu_preferred;
	bool poll_turbo_status;		/* re-read limits each adjust pass */
	bool second_cpu;
	bool turbo_toggle_allowed;
	struct ips_mcp_limits *limits;	/* SKU limit table in use */

	/* Optional MCH interfaces for if i915 is in use */
	unsigned long (*read_mch_val)(void);
	bool (*gpu_raise)(void);
	bool (*gpu_lower)(void);
	bool (*gpu_busy)(void);
	bool (*gpu_turbo_disable)(void);

	/* For restoration at unload */
	u64 orig_turbo_limit;
	u64 orig_turbo_ratios;
};
/* Forward declaration; defined later in this file */
static bool
ips_gpu_turbo_enabled(struct ips_driver *ips);
  323. /**
  324. * ips_cpu_busy - is CPU busy?
  325. * @ips: IPS driver struct
  326. *
  327. * Check CPU for load to see whether we should increase its thermal budget.
  328. *
  329. * RETURNS:
  330. * True if the CPU could use more power, false otherwise.
  331. */
  332. static bool ips_cpu_busy(struct ips_driver *ips)
  333. {
  334. if ((avenrun[0] >> FSHIFT) > 1)
  335. return true;
  336. return false;
  337. }
/**
 * ips_cpu_raise - raise CPU power clamp
 * @ips: IPS driver struct
 *
 * Raise the CPU power clamp by %IPS_CPU_STEP, in accordance with TDP for
 * this platform.
 *
 * We do this by adjusting the TURBO_POWER_CURRENT_LIMIT MSR upwards (as
 * long as we haven't hit the TDP limit for the SKU).
 */
static void ips_cpu_raise(struct ips_driver *ips)
{
	u64 turbo_override;
	u16 cur_tdp_limit, new_tdp_limit;

	if (!ips->cpu_turbo_enabled)
		return;

	rdmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override);

	/* MSR TDP field is in 1/8 W units */
	cur_tdp_limit = turbo_override & TURBO_TDP_MASK;
	new_tdp_limit = cur_tdp_limit + 8; /* 1W increase */

	/* Clamp to SKU TDP limit; *10/8 converts MSR units to the
	 * THM_MPCPC register's units — presumably 100 mW steps, matching
	 * core_power_limit as read back in update_turbo_limits() — confirm */
	if (((new_tdp_limit * 10) / 8) > ips->core_power_limit)
		new_tdp_limit = cur_tdp_limit;

	thm_writew(THM_MPCPC, (new_tdp_limit * 10) / 8);

	/* Enable the overrides first, then update the TDP field */
	turbo_override |= TURBO_TDC_OVR_EN | TURBO_TDP_OVR_EN;
	wrmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override);

	turbo_override &= ~TURBO_TDP_MASK;
	turbo_override |= new_tdp_limit;

	wrmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override);
}
  367. /**
  368. * ips_cpu_lower - lower CPU power clamp
  369. * @ips: IPS driver struct
  370. *
  371. * Lower CPU power clamp b %IPS_CPU_STEP if possible.
  372. *
  373. * We do this by adjusting the TURBO_POWER_CURRENT_LIMIT MSR down, going
  374. * as low as the platform limits will allow (though we could go lower there
  375. * wouldn't be much point).
  376. */
  377. static void ips_cpu_lower(struct ips_driver *ips)
  378. {
  379. u64 turbo_override;
  380. u16 cur_limit, new_limit;
  381. rdmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override);
  382. cur_limit = turbo_override & TURBO_TDP_MASK;
  383. new_limit = cur_limit - 8; /* 1W decrease */
  384. /* Clamp to SKU TDP limit */
  385. if (new_limit < (ips->orig_turbo_limit & TURBO_TDP_MASK))
  386. new_limit = ips->orig_turbo_limit & TURBO_TDP_MASK;
  387. thm_writew(THM_MPCPC, (new_limit * 10) / 8);
  388. turbo_override |= TURBO_TDC_OVR_EN | TURBO_TDP_OVR_EN;
  389. wrmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override);
  390. turbo_override &= ~TURBO_TDP_MASK;
  391. turbo_override |= new_limit;
  392. wrmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override);
  393. }
  394. /**
  395. * do_enable_cpu_turbo - internal turbo enable function
  396. * @data: unused
  397. *
  398. * Internal function for actually updating MSRs. When we enable/disable
  399. * turbo, we need to do it on each CPU; this function is the one called
  400. * by on_each_cpu() when needed.
  401. */
  402. static void do_enable_cpu_turbo(void *data)
  403. {
  404. u64 perf_ctl;
  405. rdmsrl(IA32_PERF_CTL, perf_ctl);
  406. if (perf_ctl & IA32_PERF_TURBO_DIS) {
  407. perf_ctl &= ~IA32_PERF_TURBO_DIS;
  408. wrmsrl(IA32_PERF_CTL, perf_ctl);
  409. }
  410. }
  411. /**
  412. * ips_enable_cpu_turbo - enable turbo mode on all CPUs
  413. * @ips: IPS driver struct
  414. *
  415. * Enable turbo mode by clearing the disable bit in IA32_PERF_CTL on
  416. * all logical threads.
  417. */
  418. static void ips_enable_cpu_turbo(struct ips_driver *ips)
  419. {
  420. /* Already on, no need to mess with MSRs */
  421. if (ips->__cpu_turbo_on)
  422. return;
  423. if (ips->turbo_toggle_allowed)
  424. on_each_cpu(do_enable_cpu_turbo, ips, 1);
  425. ips->__cpu_turbo_on = true;
  426. }
  427. /**
  428. * do_disable_cpu_turbo - internal turbo disable function
  429. * @data: unused
  430. *
  431. * Internal function for actually updating MSRs. When we enable/disable
  432. * turbo, we need to do it on each CPU; this function is the one called
  433. * by on_each_cpu() when needed.
  434. */
  435. static void do_disable_cpu_turbo(void *data)
  436. {
  437. u64 perf_ctl;
  438. rdmsrl(IA32_PERF_CTL, perf_ctl);
  439. if (!(perf_ctl & IA32_PERF_TURBO_DIS)) {
  440. perf_ctl |= IA32_PERF_TURBO_DIS;
  441. wrmsrl(IA32_PERF_CTL, perf_ctl);
  442. }
  443. }
  444. /**
  445. * ips_disable_cpu_turbo - disable turbo mode on all CPUs
  446. * @ips: IPS driver struct
  447. *
  448. * Disable turbo mode by setting the disable bit in IA32_PERF_CTL on
  449. * all logical threads.
  450. */
  451. static void ips_disable_cpu_turbo(struct ips_driver *ips)
  452. {
  453. /* Already off, leave it */
  454. if (!ips->__cpu_turbo_on)
  455. return;
  456. if (ips->turbo_toggle_allowed)
  457. on_each_cpu(do_disable_cpu_turbo, ips, 1);
  458. ips->__cpu_turbo_on = false;
  459. }
  460. /**
  461. * ips_gpu_busy - is GPU busy?
  462. * @ips: IPS driver struct
  463. *
  464. * Check GPU for load to see whether we should increase its thermal budget.
  465. * We need to call into the i915 driver in this case.
  466. *
  467. * RETURNS:
  468. * True if the GPU could use more power, false otherwise.
  469. */
  470. static bool ips_gpu_busy(struct ips_driver *ips)
  471. {
  472. if (!ips_gpu_turbo_enabled(ips))
  473. return false;
  474. return ips->gpu_busy();
  475. }
  476. /**
  477. * ips_gpu_raise - raise GPU power clamp
  478. * @ips: IPS driver struct
  479. *
  480. * Raise the GPU frequency/power if possible. We need to call into the
  481. * i915 driver in this case.
  482. */
  483. static void ips_gpu_raise(struct ips_driver *ips)
  484. {
  485. if (!ips_gpu_turbo_enabled(ips))
  486. return;
  487. if (!ips->gpu_raise())
  488. ips->gpu_turbo_enabled = false;
  489. return;
  490. }
  491. /**
  492. * ips_gpu_lower - lower GPU power clamp
  493. * @ips: IPS driver struct
  494. *
  495. * Lower GPU frequency/power if possible. Need to call i915.
  496. */
  497. static void ips_gpu_lower(struct ips_driver *ips)
  498. {
  499. if (!ips_gpu_turbo_enabled(ips))
  500. return;
  501. if (!ips->gpu_lower())
  502. ips->gpu_turbo_enabled = false;
  503. return;
  504. }
  505. /**
  506. * ips_enable_gpu_turbo - notify the gfx driver turbo is available
  507. * @ips: IPS driver struct
  508. *
  509. * Call into the graphics driver indicating that it can safely use
  510. * turbo mode.
  511. */
  512. static void ips_enable_gpu_turbo(struct ips_driver *ips)
  513. {
  514. if (ips->__gpu_turbo_on)
  515. return;
  516. ips->__gpu_turbo_on = true;
  517. }
  518. /**
  519. * ips_disable_gpu_turbo - notify the gfx driver to disable turbo mode
  520. * @ips: IPS driver struct
  521. *
  522. * Request that the graphics driver disable turbo mode.
  523. */
  524. static void ips_disable_gpu_turbo(struct ips_driver *ips)
  525. {
  526. /* Avoid calling i915 if turbo is already disabled */
  527. if (!ips->__gpu_turbo_on)
  528. return;
  529. if (!ips->gpu_turbo_disable())
  530. dev_err(&ips->dev->dev, "failed to disable graphis turbo\n");
  531. else
  532. ips->__gpu_turbo_on = false;
  533. }
  534. /**
  535. * mcp_exceeded - check whether we're outside our thermal & power limits
  536. * @ips: IPS driver struct
  537. *
  538. * Check whether the MCP is over its thermal or power budget.
  539. */
  540. static bool mcp_exceeded(struct ips_driver *ips)
  541. {
  542. unsigned long flags;
  543. bool ret = false;
  544. u32 temp_limit;
  545. u32 avg_power;
  546. spin_lock_irqsave(&ips->turbo_status_lock, flags);
  547. temp_limit = ips->mcp_temp_limit * 100;
  548. if (ips->mcp_avg_temp > temp_limit)
  549. ret = true;
  550. avg_power = ips->cpu_avg_power + ips->mch_avg_power;
  551. if (avg_power > ips->mcp_power_limit)
  552. ret = true;
  553. spin_unlock_irqrestore(&ips->turbo_status_lock, flags);
  554. return ret;
  555. }
  556. /**
  557. * cpu_exceeded - check whether a CPU core is outside its limits
  558. * @ips: IPS driver struct
  559. * @cpu: CPU number to check
  560. *
  561. * Check a given CPU's average temp or power is over its limit.
  562. */
  563. static bool cpu_exceeded(struct ips_driver *ips, int cpu)
  564. {
  565. unsigned long flags;
  566. int avg;
  567. bool ret = false;
  568. spin_lock_irqsave(&ips->turbo_status_lock, flags);
  569. avg = cpu ? ips->ctv2_avg_temp : ips->ctv1_avg_temp;
  570. if (avg > (ips->limits->core_temp_limit * 100))
  571. ret = true;
  572. if (ips->cpu_avg_power > ips->core_power_limit * 100)
  573. ret = true;
  574. spin_unlock_irqrestore(&ips->turbo_status_lock, flags);
  575. if (ret)
  576. dev_info(&ips->dev->dev,
  577. "CPU power or thermal limit exceeded\n");
  578. return ret;
  579. }
  580. /**
  581. * mch_exceeded - check whether the GPU is over budget
  582. * @ips: IPS driver struct
  583. *
  584. * Check the MCH temp & power against their maximums.
  585. */
  586. static bool mch_exceeded(struct ips_driver *ips)
  587. {
  588. unsigned long flags;
  589. bool ret = false;
  590. spin_lock_irqsave(&ips->turbo_status_lock, flags);
  591. if (ips->mch_avg_temp > (ips->limits->mch_temp_limit * 100))
  592. ret = true;
  593. if (ips->mch_avg_power > ips->mch_power_limit)
  594. ret = true;
  595. spin_unlock_irqrestore(&ips->turbo_status_lock, flags);
  596. return ret;
  597. }
  598. /**
  599. * verify_limits - verify BIOS provided limits
  600. * @ips: IPS structure
  601. *
  602. * BIOS can optionally provide non-default limits for power and temp. Check
  603. * them here and use the defaults if the BIOS values are not provided or
  604. * are otherwise unusable.
  605. */
  606. static void verify_limits(struct ips_driver *ips)
  607. {
  608. if (ips->mcp_power_limit < ips->limits->mcp_power_limit ||
  609. ips->mcp_power_limit > 35000)
  610. ips->mcp_power_limit = ips->limits->mcp_power_limit;
  611. if (ips->mcp_temp_limit < ips->limits->core_temp_limit ||
  612. ips->mcp_temp_limit < ips->limits->mch_temp_limit ||
  613. ips->mcp_temp_limit > 150)
  614. ips->mcp_temp_limit = min(ips->limits->core_temp_limit,
  615. ips->limits->mch_temp_limit);
  616. }
/**
 * update_turbo_limits - get various limits & settings from regs
 * @ips: IPS driver struct
 *
 * Update the IPS power & temp limits, along with turbo enable flags,
 * based on latest register contents.
 *
 * Used at init time and for runtime BIOS support, which requires polling
 * the regs for updates (as a result of AC->DC transition for example).
 *
 * LOCKING:
 * Caller must hold turbo_status_lock (outside of init)
 */
static void update_turbo_limits(struct ips_driver *ips)
{
	u32 hts = thm_readl(THM_HTS);

	ips->cpu_turbo_enabled = !(hts & HTS_PCTD_DIS);
	/*
	 * Disable turbo for now, until we can figure out why the power figures
	 * are wrong
	 */
	ips->cpu_turbo_enabled = false;	/* deliberate override of the HTS bit above */

	/* gpu_busy is only hooked up when i915 has registered with us */
	if (ips->gpu_busy)
		ips->gpu_turbo_enabled = !(hts & HTS_GTD_DIS);

	/* Refresh limits from the thermal regs, then sanity-check them */
	ips->core_power_limit = thm_readw(THM_MPCPC);
	ips->mch_power_limit = thm_readw(THM_MMGPC);
	ips->mcp_temp_limit = thm_readw(THM_PTL);
	ips->mcp_power_limit = thm_readw(THM_MPPC);
	verify_limits(ips);
	/* Ignore BIOS CPU vs GPU pref */
}
/**
 * ips_adjust - adjust power clamp based on thermal state
 * @data: ips driver structure
 *
 * Wake up every 5s or so and check whether we should adjust the power clamp.
 * Check CPU and GPU load to determine which needs adjustment. There are
 * several things to consider here:
 *   - do we need to adjust up or down?
 *   - is CPU busy?
 *   - is GPU busy?
 *   - is CPU in turbo?
 *   - is GPU in turbo?
 *   - is CPU or GPU preferred? (CPU is default)
 *
 * So, given the above, we do the following:
 *   - up (TDP available)
 *     - CPU not busy, GPU not busy - nothing
 *     - CPU busy, GPU not busy - adjust CPU up
 *     - CPU not busy, GPU busy - adjust GPU up
 *     - CPU busy, GPU busy - adjust preferred unit up, taking headroom from
 *       non-preferred unit if necessary
 *   - down (at TDP limit)
 *     - adjust both CPU and GPU down if possible
 *
 *              cpu+ gpu+       cpu+gpu-        cpu-gpu+        cpu-gpu-
 * cpu < gpu <  cpu+gpu+        cpu+            gpu+            nothing
 * cpu < gpu >= cpu+gpu-(mcp<)  cpu+gpu-(mcp<)  gpu-            gpu-
 * cpu >= gpu < cpu-gpu+(mcp<)  cpu-            cpu-gpu+(mcp<)  cpu-
 * cpu >= gpu >= cpu-gpu-       cpu-gpu-        cpu-gpu-        cpu-gpu-
 *
 * Returns 0 when the kthread is asked to stop.
 */
static int ips_adjust(void *data)
{
	struct ips_driver *ips = data;
	unsigned long flags;

	dev_dbg(&ips->dev->dev, "starting ips-adjust thread\n");

	/*
	 * Adjust CPU and GPU clamps every 5s if needed. Doing it more
	 * often isn't recommended due to ME interaction.
	 */
	do {
		/* Sample busy state before taking the status lock */
		bool cpu_busy = ips_cpu_busy(ips);
		bool gpu_busy = ips_gpu_busy(ips);

		spin_lock_irqsave(&ips->turbo_status_lock, flags);
		if (ips->poll_turbo_status)
			update_turbo_limits(ips);
		spin_unlock_irqrestore(&ips->turbo_status_lock, flags);

		/* Update turbo status if necessary */
		if (ips->cpu_turbo_enabled)
			ips_enable_cpu_turbo(ips);
		else
			ips_disable_cpu_turbo(ips);

		if (ips->gpu_turbo_enabled)
			ips_enable_gpu_turbo(ips);
		else
			ips_disable_gpu_turbo(ips);

		/* We're outside our comfort zone, crank them down */
		if (mcp_exceeded(ips)) {
			ips_cpu_lower(ips);
			ips_gpu_lower(ips);
			goto sleep;
		}

		/* Package budget OK: raise busy units, lower idle ones */
		if (!cpu_exceeded(ips, 0) && cpu_busy)
			ips_cpu_raise(ips);
		else
			ips_cpu_lower(ips);

		if (!mch_exceeded(ips) && gpu_busy)
			ips_gpu_raise(ips);
		else
			ips_gpu_lower(ips);

sleep:
		schedule_timeout_interruptible(msecs_to_jiffies(IPS_ADJUST_PERIOD));
	} while (!kthread_should_stop());

	dev_dbg(&ips->dev->dev, "ips-adjust thread stopped\n");

	return 0;
}
  724. /*
  725. * Helpers for reading out temp/power values and calculating their
  726. * averages for the decision making and monitoring functions.
  727. */
  728. static u16 calc_avg_temp(struct ips_driver *ips, u16 *array)
  729. {
  730. u64 total = 0;
  731. int i;
  732. u16 avg;
  733. for (i = 0; i < IPS_SAMPLE_COUNT; i++)
  734. total += (u64)(array[i] * 100);
  735. do_div(total, IPS_SAMPLE_COUNT);
  736. avg = (u16)total;
  737. return avg;
  738. }
/*
 * read_mgtv - read the MCH graphics temperature
 * @ips: ips driver structure (unused)
 *
 * Reads THM_MGTV and applies the slope/offset calibration from THM_MGTA.
 * The calibrated value is computed but deliberately discarded: the final
 * "return 0" (see inline comment) works around buggy MCH temperature
 * reporting.  The register reads are still performed as in the original.
 */
static u16 read_mgtv(struct ips_driver *ips)
{
	u16 ret;
	u64 slope, offset;
	u64 val;

	val = thm_readq(THM_MGTV);
	val = (val & TV_MASK) >> TV_SHIFT;

	/* Calibration: slope/offset pair packed into THM_MGTA */
	slope = offset = thm_readw(THM_MGTA);
	slope = (slope & MGTA_SLOPE_MASK) >> MGTA_SLOPE_SHIFT;
	offset = offset & MGTA_OFFSET_MASK;

	/* 0x40 rounds before the >>7 fixed-point divide */
	ret = ((val * slope + 0x40) >> 7) + offset;

	return 0; /* MCH temp reporting buggy */
}
  752. static u16 read_ptv(struct ips_driver *ips)
  753. {
  754. u16 val, slope, offset;
  755. slope = (ips->pta_val & PTA_SLOPE_MASK) >> PTA_SLOPE_SHIFT;
  756. offset = ips->pta_val & PTA_OFFSET_MASK;
  757. val = thm_readw(THM_PTV) & PTV_MASK;
  758. return val;
  759. }
  760. static u16 read_ctv(struct ips_driver *ips, int cpu)
  761. {
  762. int reg = cpu ? THM_CTV2 : THM_CTV1;
  763. u16 val;
  764. val = thm_readw(reg);
  765. if (!(val & CTV_TEMP_ERROR))
  766. val = (val) >> 6; /* discard fractional component */
  767. else
  768. val = 0;
  769. return val;
  770. }
/*
 * get_cpu_power - derive CPU power draw from the cumulative energy counter
 * @ips: ips driver structure (unused)
 * @last: previous THM_CEC reading; updated to the current reading
 * @period: elapsed time since *last was taken, in ms
 *
 * CEC counts energy in joules/65535; differencing two readings over the
 * sample period yields milliwatts.
 *
 * NOTE(review): the computed 'ret' is discarded and 0 is returned
 * unconditionally — this mirrors the "power figures are wrong" turbo
 * workaround elsewhere in this driver, but confirm it is still intended
 * before relying on the returned value.  *last IS still updated, so the
 * side effect matters to callers.
 */
static u32 get_cpu_power(struct ips_driver *ips, u32 *last, int period)
{
	u32 val;
	u32 ret;

	/*
	 * CEC is in joules/65535.  Take difference over time to
	 * get watts.
	 */
	val = thm_readl(THM_CEC);

	/* period is in ms and we want mW */
	ret = (((val - *last) * 1000) / period);
	ret = (ret * 1000) / 65535;

	*last = val;

	return 0;
}
  786. static const u16 temp_decay_factor = 2;
  787. static u16 update_average_temp(u16 avg, u16 val)
  788. {
  789. u16 ret;
  790. /* Multiply by 100 for extra precision */
  791. ret = (val * 100 / temp_decay_factor) +
  792. (((temp_decay_factor - 1) * avg) / temp_decay_factor);
  793. return ret;
  794. }
  795. static const u16 power_decay_factor = 2;
  796. static u16 update_average_power(u32 avg, u32 val)
  797. {
  798. u32 ret;
  799. ret = (val / power_decay_factor) +
  800. (((power_decay_factor - 1) * avg) / power_decay_factor);
  801. return ret;
  802. }
  803. static u32 calc_avg_power(struct ips_driver *ips, u32 *array)
  804. {
  805. u64 total = 0;
  806. u32 avg;
  807. int i;
  808. for (i = 0; i < IPS_SAMPLE_COUNT; i++)
  809. total += array[i];
  810. do_div(total, IPS_SAMPLE_COUNT);
  811. avg = (u32)total;
  812. return avg;
  813. }
/*
 * monitor_timeout - deferrable-timer callback for the monitor thread
 * @arg: the monitor thread's task_struct, cast to unsigned long
 *
 * Simply wakes the sleeping ips_monitor thread for its next sample.
 */
static void monitor_timeout(unsigned long arg)
{
	wake_up_process((struct task_struct *)arg);
}
  818. /**
  819. * ips_monitor - temp/power monitoring thread
  820. * @data: ips driver structure
  821. *
  822. * This is the main function for the IPS driver. It monitors power and
  823. * temperature in the MCP and adjusts CPU and GPU power clamps accordingly.
  824. *
  825. * We keep a 5s moving average of power consumption and temperature. Using
  826. * that data, along with CPU vs GPU preference, we adjust the power clamps
  827. * up or down.
  828. */
static int ips_monitor(void *data)
{
	struct ips_driver *ips = data;
	struct timer_list timer;
	unsigned long seqno_timestamp, expire, last_msecs, last_sample_period;
	int i;
	u32 *cpu_samples, *mchp_samples, old_cpu_power;
	u16 *mcp_samples, *ctv1_samples, *ctv2_samples, *mch_samples;
	u8 cur_seqno, last_seqno;

	/* Temporary buffers used only to seed the running averages */
	mcp_samples = kzalloc(sizeof(u16) * IPS_SAMPLE_COUNT, GFP_KERNEL);
	ctv1_samples = kzalloc(sizeof(u16) * IPS_SAMPLE_COUNT, GFP_KERNEL);
	ctv2_samples = kzalloc(sizeof(u16) * IPS_SAMPLE_COUNT, GFP_KERNEL);
	mch_samples = kzalloc(sizeof(u16) * IPS_SAMPLE_COUNT, GFP_KERNEL);
	cpu_samples = kzalloc(sizeof(u32) * IPS_SAMPLE_COUNT, GFP_KERNEL);
	mchp_samples = kzalloc(sizeof(u32) * IPS_SAMPLE_COUNT, GFP_KERNEL);
	if (!mcp_samples || !ctv1_samples || !ctv2_samples || !mch_samples ||
			!cpu_samples || !mchp_samples) {
		dev_err(&ips->dev->dev,
			"failed to allocate sample array, ips disabled\n");
		/* kfree(NULL) is a no-op, so freeing all six is safe */
		kfree(mcp_samples);
		kfree(ctv1_samples);
		kfree(ctv2_samples);
		kfree(mch_samples);
		kfree(cpu_samples);
		kfree(mchp_samples);
		return -ENOMEM;
	}

	/* Baseline the ME sequence number for the hang check below */
	last_seqno = (thm_readl(THM_ITV) & ITV_ME_SEQNO_MASK) >>
		ITV_ME_SEQNO_SHIFT;
	seqno_timestamp = get_jiffies_64();

	old_cpu_power = thm_readl(THM_CEC);
	schedule_timeout_interruptible(msecs_to_jiffies(IPS_SAMPLE_PERIOD));

	/* Collect an initial average */
	for (i = 0; i < IPS_SAMPLE_COUNT; i++) {
		u32 mchp, cpu_power;
		u16 val;

		mcp_samples[i] = read_ptv(ips);

		val = read_ctv(ips, 0);
		ctv1_samples[i] = val;

		val = read_ctv(ips, 1);
		ctv2_samples[i] = val;

		val = read_mgtv(ips);
		mch_samples[i] = val;

		cpu_power = get_cpu_power(ips, &old_cpu_power,
					  IPS_SAMPLE_PERIOD);
		cpu_samples[i] = cpu_power;

		/* MCH power only available once i915 symbols are hooked up */
		if (ips->read_mch_val) {
			mchp = ips->read_mch_val();
			mchp_samples[i] = mchp;
		}

		schedule_timeout_interruptible(msecs_to_jiffies(IPS_SAMPLE_PERIOD));
		if (kthread_should_stop())
			break;
	}

	/* Seed the running averages from the initial sample sets */
	ips->mcp_avg_temp = calc_avg_temp(ips, mcp_samples);
	ips->ctv1_avg_temp = calc_avg_temp(ips, ctv1_samples);
	ips->ctv2_avg_temp = calc_avg_temp(ips, ctv2_samples);
	ips->mch_avg_temp = calc_avg_temp(ips, mch_samples);
	ips->cpu_avg_power = calc_avg_power(ips, cpu_samples);
	ips->mch_avg_power = calc_avg_power(ips, mchp_samples);
	kfree(mcp_samples);
	kfree(ctv1_samples);
	kfree(ctv2_samples);
	kfree(mch_samples);
	kfree(cpu_samples);
	kfree(mchp_samples);

	/* Start the adjustment thread now that we have data */
	wake_up_process(ips->adjust);

	/*
	 * Ok, now we have an initial avg.  From here on out, we track the
	 * running avg using a decaying average calculation.  This allows
	 * us to reduce the sample frequency if the CPU and GPU are idle.
	 */
	old_cpu_power = thm_readl(THM_CEC);
	schedule_timeout_interruptible(msecs_to_jiffies(IPS_SAMPLE_PERIOD));
	last_sample_period = IPS_SAMPLE_PERIOD;

	/* Deferrable timer: lets an idle CPU stay asleep past the deadline */
	setup_deferrable_timer_on_stack(&timer, monitor_timeout,
					(unsigned long)current);
	do {
		u32 cpu_val, mch_val;
		u16 val;

		/* MCP itself */
		val = read_ptv(ips);
		ips->mcp_avg_temp = update_average_temp(ips->mcp_avg_temp, val);

		/* Processor 0 */
		val = read_ctv(ips, 0);
		ips->ctv1_avg_temp =
			update_average_temp(ips->ctv1_avg_temp, val);
		/* Power */
		cpu_val = get_cpu_power(ips, &old_cpu_power,
					last_sample_period);
		ips->cpu_avg_power =
			update_average_power(ips->cpu_avg_power, cpu_val);

		if (ips->second_cpu) {
			/* Processor 1 */
			val = read_ctv(ips, 1);
			ips->ctv2_avg_temp =
				update_average_temp(ips->ctv2_avg_temp, val);
		}

		/* MCH */
		val = read_mgtv(ips);
		ips->mch_avg_temp = update_average_temp(ips->mch_avg_temp, val);
		/* Power */
		if (ips->read_mch_val) {
			mch_val = ips->read_mch_val();
			ips->mch_avg_power =
				update_average_power(ips->mch_avg_power,
						     mch_val);
		}

		/*
		 * Make sure ME is updating thermal regs.
		 * Note:
		 * If it's been more than a second since the last update,
		 * the ME is probably hung.
		 */
		cur_seqno = (thm_readl(THM_ITV) & ITV_ME_SEQNO_MASK) >>
			ITV_ME_SEQNO_SHIFT;
		if (cur_seqno == last_seqno &&
		    time_after(jiffies, seqno_timestamp + HZ)) {
			dev_warn(&ips->dev->dev, "ME failed to update for more than 1s, likely hung\n");
		} else {
			seqno_timestamp = get_jiffies_64();
			last_seqno = cur_seqno;
		}

		/* Remember when we slept so the real period can be measured */
		last_msecs = jiffies_to_msecs(jiffies);
		expire = jiffies + msecs_to_jiffies(IPS_SAMPLE_PERIOD);

		__set_current_state(TASK_INTERRUPTIBLE);
		mod_timer(&timer, expire);
		schedule();

		/* Calculate actual sample period for power averaging */
		last_sample_period = jiffies_to_msecs(jiffies) - last_msecs;
		if (!last_sample_period)
			last_sample_period = 1;
	} while (!kthread_should_stop());

	del_timer_sync(&timer);
	destroy_timer_on_stack(&timer);

	dev_dbg(&ips->dev->dev, "ips-monitor thread stopped\n");

	return 0;
}
#if 0
/*
 * Debug-only register dump helpers; compiled out by default.
 * NOTE(review): THM_DUMPQ prints a u64 with "%016x" — if this block is
 * ever enabled, that should be "%016llx" (or a cast) to match the
 * argument width; confirm before flipping the #if.
 */
#define THM_DUMPW(reg) \
{ \
u16 val = thm_readw(reg); \
dev_dbg(&ips->dev->dev, #reg ": 0x%04x\n", val); \
}
#define THM_DUMPL(reg) \
{ \
u32 val = thm_readl(reg); \
dev_dbg(&ips->dev->dev, #reg ": 0x%08x\n", val); \
}
#define THM_DUMPQ(reg) \
{ \
u64 val = thm_readq(reg); \
dev_dbg(&ips->dev->dev, #reg ": 0x%016x\n", val); \
}

/* Dump the interesting thermal registers for debugging. */
static void dump_thermal_info(struct ips_driver *ips)
{
	u16 ptl;

	ptl = thm_readw(THM_PTL);
	dev_dbg(&ips->dev->dev, "Processor temp limit: %d\n", ptl);

	THM_DUMPW(THM_CTA);
	THM_DUMPW(THM_TRC);
	THM_DUMPW(THM_CTV1);
	THM_DUMPL(THM_STS);
	THM_DUMPW(THM_PTV);
	THM_DUMPQ(THM_MGTV);
}
#endif
  997. /**
  998. * ips_irq_handler - handle temperature triggers and other IPS events
  999. * @irq: irq number
  1000. * @arg: unused
  1001. *
  1002. * Handle temperature limit trigger events, generally by lowering the clamps.
  1003. * If we're at a critical limit, we clamp back to the lowest possible value
  1004. * to prevent emergency shutdown.
  1005. */
static irqreturn_t ips_irq_handler(int irq, void *arg)
{
	struct ips_driver *ips = arg;
	u8 tses = thm_readb(THM_TSES);
	u8 tes = thm_readb(THM_TES);

	/* Shared IRQ line: nothing pending means it wasn't ours */
	if (!tses && !tes)
		return IRQ_NONE;

	dev_info(&ips->dev->dev, "TSES: 0x%02x\n", tses);
	dev_info(&ips->dev->dev, "TES: 0x%02x\n", tes);

	/* STS update from EC? */
	if (tes & 1) {
		u32 sts, tc1;

		sts = thm_readl(THM_STS);
		tc1 = thm_readl(THM_TC1);

		if (sts & STS_NVV) {
			/* New limit values are valid; cache them under lock */
			spin_lock(&ips->turbo_status_lock);
			ips->core_power_limit = (sts & STS_PCPL_MASK) >>
				STS_PCPL_SHIFT;
			ips->mch_power_limit = (sts & STS_GPL_MASK) >>
				STS_GPL_SHIFT;
			/* ignore EC CPU vs GPU pref */
			ips->cpu_turbo_enabled = !(sts & STS_PCTD_DIS);
			/*
			 * Disable turbo for now, until we can figure
			 * out why the power figures are wrong
			 */
			ips->cpu_turbo_enabled = false;
			if (ips->gpu_busy)
				ips->gpu_turbo_enabled = !(sts & STS_GTD_DIS);
			ips->mcp_temp_limit = (sts & STS_PTL_MASK) >>
				STS_PTL_SHIFT;
			ips->mcp_power_limit = (tc1 & STS_PPL_MASK) >>
				STS_PPL_SHIFT;
			verify_limits(ips);
			spin_unlock(&ips->turbo_status_lock);

			/* Acknowledge the EC's update */
			thm_writeb(THM_SEC, SEC_ACK);
		}
		/* Writing the event bits back clears them */
		thm_writeb(THM_TES, tes);
	}

	/* Thermal trip */
	if (tses) {
		dev_warn(&ips->dev->dev,
			 "thermal trip occurred, tses: 0x%04x\n", tses);
		thm_writeb(THM_TSES, tses);
	}

	return IRQ_HANDLED;
}
  1053. #ifndef CONFIG_DEBUG_FS
/* No-op stubs used when debugfs support is compiled out. */
static void ips_debugfs_init(struct ips_driver *ips) { return; }
static void ips_debugfs_cleanup(struct ips_driver *ips) { return; }
  1056. #else
/* Expose current state and limits in debugfs if possible */

/* One debugfs file: its name plus the seq_file show routine rendering it. */
struct ips_debugfs_node {
	struct ips_driver *ips;	/* back-pointer, filled in by ips_debugfs_init() */
	char *name;		/* debugfs file name */
	int (*show)(struct seq_file *m, void *data);	/* content renderer */
};
  1063. static int show_cpu_temp(struct seq_file *m, void *data)
  1064. {
  1065. struct ips_driver *ips = m->private;
  1066. seq_printf(m, "%d.%02d\n", ips->ctv1_avg_temp / 100,
  1067. ips->ctv1_avg_temp % 100);
  1068. return 0;
  1069. }
  1070. static int show_cpu_power(struct seq_file *m, void *data)
  1071. {
  1072. struct ips_driver *ips = m->private;
  1073. seq_printf(m, "%dmW\n", ips->cpu_avg_power);
  1074. return 0;
  1075. }
  1076. static int show_cpu_clamp(struct seq_file *m, void *data)
  1077. {
  1078. u64 turbo_override;
  1079. int tdp, tdc;
  1080. rdmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override);
  1081. tdp = (int)(turbo_override & TURBO_TDP_MASK);
  1082. tdc = (int)((turbo_override & TURBO_TDC_MASK) >> TURBO_TDC_SHIFT);
  1083. /* Convert to .1W/A units */
  1084. tdp = tdp * 10 / 8;
  1085. tdc = tdc * 10 / 8;
  1086. /* Watts Amperes */
  1087. seq_printf(m, "%d.%dW %d.%dA\n", tdp / 10, tdp % 10,
  1088. tdc / 10, tdc % 10);
  1089. return 0;
  1090. }
  1091. static int show_mch_temp(struct seq_file *m, void *data)
  1092. {
  1093. struct ips_driver *ips = m->private;
  1094. seq_printf(m, "%d.%02d\n", ips->mch_avg_temp / 100,
  1095. ips->mch_avg_temp % 100);
  1096. return 0;
  1097. }
  1098. static int show_mch_power(struct seq_file *m, void *data)
  1099. {
  1100. struct ips_driver *ips = m->private;
  1101. seq_printf(m, "%dmW\n", ips->mch_avg_power);
  1102. return 0;
  1103. }
  1104. static struct ips_debugfs_node ips_debug_files[] = {
  1105. { NULL, "cpu_temp", show_cpu_temp },
  1106. { NULL, "cpu_power", show_cpu_power },
  1107. { NULL, "cpu_clamp", show_cpu_clamp },
  1108. { NULL, "mch_temp", show_mch_temp },
  1109. { NULL, "mch_power", show_mch_power },
  1110. };
  1111. static int ips_debugfs_open(struct inode *inode, struct file *file)
  1112. {
  1113. struct ips_debugfs_node *node = inode->i_private;
  1114. return single_open(file, node->show, node->ips);
  1115. }
/* All IPS debugfs files share the same seq_file single_* plumbing. */
static const struct file_operations ips_debugfs_ops = {
	.owner = THIS_MODULE,
	.open = ips_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
  1123. static void ips_debugfs_cleanup(struct ips_driver *ips)
  1124. {
  1125. if (ips->debug_root)
  1126. debugfs_remove_recursive(ips->debug_root);
  1127. return;
  1128. }
  1129. static void ips_debugfs_init(struct ips_driver *ips)
  1130. {
  1131. int i;
  1132. ips->debug_root = debugfs_create_dir("ips", NULL);
  1133. if (!ips->debug_root) {
  1134. dev_err(&ips->dev->dev,
  1135. "failed to create debugfs entries: %ld\n",
  1136. PTR_ERR(ips->debug_root));
  1137. return;
  1138. }
  1139. for (i = 0; i < ARRAY_SIZE(ips_debug_files); i++) {
  1140. struct dentry *ent;
  1141. struct ips_debugfs_node *node = &ips_debug_files[i];
  1142. node->ips = ips;
  1143. ent = debugfs_create_file(node->name, S_IFREG | S_IRUGO,
  1144. ips->debug_root, node,
  1145. &ips_debugfs_ops);
  1146. if (!ent) {
  1147. dev_err(&ips->dev->dev,
  1148. "failed to create debug file: %ld\n",
  1149. PTR_ERR(ent));
  1150. goto err_cleanup;
  1151. }
  1152. }
  1153. return;
  1154. err_cleanup:
  1155. ips_debugfs_cleanup(ips);
  1156. return;
  1157. }
  1158. #endif /* CONFIG_DEBUG_FS */
  1159. /**
  1160. * ips_detect_cpu - detect whether CPU supports IPS
  1161. *
  1162. * Walk our list and see if we're on a supported CPU. If we find one,
  1163. * return the limits for it.
  1164. */
static struct ips_mcp_limits *ips_detect_cpu(struct ips_driver *ips)
{
	u64 turbo_power, misc_en;
	struct ips_mcp_limits *limits = NULL;
	u16 tdp;

	/* Only family 6 model 37 (Arrandale-era) parts support IPS */
	if (!(boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 37)) {
		dev_info(&ips->dev->dev, "Non-IPS CPU detected.\n");
		goto out;
	}

	rdmsrl(IA32_MISC_ENABLE, misc_en);
	/*
	 * If the turbo enable bit isn't set, we shouldn't try to enable/disable
	 * turbo manually or we'll get an illegal MSR access, even though
	 * turbo will still be available.
	 */
	if (misc_en & IA32_MISC_TURBO_EN)
		ips->turbo_toggle_allowed = true;
	else
		ips->turbo_toggle_allowed = false;

	/* Pick limits by SKU class from the brand string */
	if (strstr(boot_cpu_data.x86_model_id, "CPU M"))
		limits = &ips_sv_limits;
	else if (strstr(boot_cpu_data.x86_model_id, "CPU L"))
		limits = &ips_lv_limits;
	else if (strstr(boot_cpu_data.x86_model_id, "CPU U"))
		limits = &ips_ulv_limits;
	else {
		dev_info(&ips->dev->dev, "No CPUID match found.\n");
		goto out;
	}

	rdmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_power);
	tdp = turbo_power & TURBO_TDP_MASK;

	/* Sanity check TDP against CPU */
	if (limits->core_power_limit != (tdp / 8) * 1000) {
		dev_info(&ips->dev->dev, "CPU TDP doesn't match expected value (found %d, expected %d)\n",
			 tdp / 8, limits->core_power_limit / 1000);
		/* Trust the MSR over the static table */
		limits->core_power_limit = (tdp / 8) * 1000;
	}

out:
	return limits;
}
  1205. /**
  1206. * ips_get_i915_syms - try to get GPU control methods from i915 driver
  1207. * @ips: IPS driver
  1208. *
  1209. * The i915 driver exports several interfaces to allow the IPS driver to
  1210. * monitor and control graphics turbo mode. If we can find them, we can
  1211. * enable graphics turbo, otherwise we must disable it to avoid exceeding
  1212. * thermal and power limits in the MCP.
  1213. */
static bool ips_get_i915_syms(struct ips_driver *ips)
{
	/*
	 * Grab each exported i915 hook; on any failure, drop the
	 * references already taken (in reverse order) and report false
	 * so the caller disables graphics turbo.
	 */
	ips->read_mch_val = symbol_get(i915_read_mch_val);
	if (!ips->read_mch_val)
		goto out_err;
	ips->gpu_raise = symbol_get(i915_gpu_raise);
	if (!ips->gpu_raise)
		goto out_put_mch;
	ips->gpu_lower = symbol_get(i915_gpu_lower);
	if (!ips->gpu_lower)
		goto out_put_raise;
	ips->gpu_busy = symbol_get(i915_gpu_busy);
	if (!ips->gpu_busy)
		goto out_put_lower;
	ips->gpu_turbo_disable = symbol_get(i915_gpu_turbo_disable);
	if (!ips->gpu_turbo_disable)
		goto out_put_busy;

	return true;

out_put_busy:
	symbol_put(i915_gpu_busy);
out_put_lower:
	symbol_put(i915_gpu_lower);
out_put_raise:
	symbol_put(i915_gpu_raise);
out_put_mch:
	symbol_put(i915_read_mch_val);
out_err:
	return false;
}
  1243. static bool
  1244. ips_gpu_turbo_enabled(struct ips_driver *ips)
  1245. {
  1246. if (!ips->gpu_busy && late_i915_load) {
  1247. if (ips_get_i915_syms(ips)) {
  1248. dev_info(&ips->dev->dev,
  1249. "i915 driver attached, reenabling gpu turbo\n");
  1250. ips->gpu_turbo_enabled = !(thm_readl(THM_HTS) & HTS_GTD_DIS);
  1251. }
  1252. }
  1253. return ips->gpu_turbo_enabled;
  1254. }
/**
 * ips_link_to_i915_driver - notify IPS that the i915 driver has loaded
 *
 * We can't cleanly get at the various ips_driver structs from
 * this caller (the i915 driver), so just set a flag saying
 * that it's time to try getting the symbols again.
 */
void
ips_link_to_i915_driver(void)
{
	late_i915_load = true;
}
EXPORT_SYMBOL_GPL(ips_link_to_i915_driver);
  1265. static DEFINE_PCI_DEVICE_TABLE(ips_id_table) = {
  1266. { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
  1267. PCI_DEVICE_ID_INTEL_THERMAL_SENSOR), },
  1268. { 0, }
  1269. };
  1270. MODULE_DEVICE_TABLE(pci, ips_id_table);
/* DMI blacklist hit: log which platform matched; returning 1 stops probe. */
static int ips_blacklist_callback(const struct dmi_system_id *id)
{
	pr_info("Blacklisted intel_ips for %s\n", id->ident);
	return 1;
}
/* Platforms where intel_ips must not load (checked via DMI at probe). */
static const struct dmi_system_id ips_blacklist[] = {
	{
		.callback = ips_blacklist_callback,
		.ident = "HP ProBook",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
			DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook"),
		},
	},
	{ }	/* terminating entry */
};
/**
 * ips_probe - set up the IPS driver for the PCH thermal sensor device
 * @dev: matched PCI device
 * @id: matching table entry
 *
 * Verifies CPU and platform support, maps the thermal registers, hooks
 * the (non-MSI) IRQ, and spawns the adjust and monitor threads.  Error
 * paths unwind in reverse order via the error_* labels.  Returns 0 on
 * success or a negative errno.
 */
static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	u64 platform_info;
	struct ips_driver *ips;
	u32 hts;
	int ret = 0;
	u16 htshi, trc, trc_required_mask;
	u8 tse;

	if (dmi_check_system(ips_blacklist))
		return -ENODEV;

	ips = kzalloc(sizeof(struct ips_driver), GFP_KERNEL);
	if (!ips)
		return -ENOMEM;

	pci_set_drvdata(dev, ips);
	ips->dev = dev;

	ips->limits = ips_detect_cpu(ips);
	if (!ips->limits) {
		dev_info(&dev->dev, "IPS not supported on this CPU\n");
		ret = -ENXIO;
		goto error_free;
	}

	spin_lock_init(&ips->turbo_status_lock);

	ret = pci_enable_device(dev);
	if (ret) {
		dev_err(&dev->dev, "can't enable PCI device, aborting\n");
		goto error_free;
	}

	/* BAR 0 holds the thermal register block (TBAR) */
	if (!pci_resource_start(dev, 0)) {
		dev_err(&dev->dev, "TBAR not assigned, aborting\n");
		ret = -ENXIO;
		goto error_free;
	}

	ret = pci_request_regions(dev, "ips thermal sensor");
	if (ret) {
		dev_err(&dev->dev, "thermal resource busy, aborting\n");
		goto error_free;
	}

	ips->regmap = ioremap(pci_resource_start(dev, 0),
			      pci_resource_len(dev, 0));
	if (!ips->regmap) {
		dev_err(&dev->dev, "failed to map thermal regs, aborting\n");
		ret = -EBUSY;
		goto error_release;
	}

	/* The thermal device itself must be enabled by firmware */
	tse = thm_readb(THM_TSE);
	if (tse != TSE_EN) {
		dev_err(&dev->dev, "thermal device not enabled (0x%02x), aborting\n", tse);
		ret = -ENXIO;
		goto error_unmap;
	}

	/* Core 1 temp, core power and MCH reporting are all required */
	trc = thm_readw(THM_TRC);
	trc_required_mask = TRC_CORE1_EN | TRC_CORE_PWR | TRC_MCH_EN;
	if ((trc & trc_required_mask) != trc_required_mask) {
		dev_err(&dev->dev, "thermal reporting for required devices not enabled, aborting\n");
		ret = -ENXIO;
		goto error_unmap;
	}

	if (trc & TRC_CORE2_EN)
		ips->second_cpu = true;

	update_turbo_limits(ips);
	dev_dbg(&dev->dev, "max cpu power clamp: %dW\n",
		ips->mcp_power_limit / 10);
	dev_dbg(&dev->dev, "max core power clamp: %dW\n",
		ips->core_power_limit / 10);
	/* BIOS may update limits at runtime */
	if (thm_readl(THM_PSC) & PSP_PBRT)
		ips->poll_turbo_status = true;

	if (!ips_get_i915_syms(ips)) {
		dev_info(&dev->dev, "failed to get i915 symbols, graphics turbo disabled until i915 loads\n");
		ips->gpu_turbo_enabled = false;
	} else {
		dev_dbg(&dev->dev, "graphics turbo enabled\n");
		ips->gpu_turbo_enabled = true;
	}

	/*
	 * Check PLATFORM_INFO MSR to make sure this chip is
	 * turbo capable.
	 */
	rdmsrl(PLATFORM_INFO, platform_info);
	if (!(platform_info & PLATFORM_TDP)) {
		dev_err(&dev->dev, "platform indicates TDP override unavailable, aborting\n");
		ret = -ENODEV;
		goto error_unmap;
	}

	/*
	 * IRQ handler for ME interaction
	 * Note: don't use MSI here as the PCH has bugs.
	 */
	pci_disable_msi(dev);
	ret = request_irq(dev->irq, ips_irq_handler, IRQF_SHARED, "ips",
			  ips);
	if (ret) {
		dev_err(&dev->dev, "request irq failed, aborting\n");
		goto error_unmap;
	}

	/* Enable aux, hot & critical interrupts */
	thm_writeb(THM_TSPIEN, TSPIEN_AUX2_LOHI | TSPIEN_CRIT_LOHI |
		   TSPIEN_HOT_LOHI | TSPIEN_AUX_LOHI);
	thm_writeb(THM_TEN, TEN_UPDATE_EN);

	/* Collect adjustment values */
	ips->cta_val = thm_readw(THM_CTA);
	ips->pta_val = thm_readw(THM_PTA);
	ips->mgta_val = thm_readw(THM_MGTA);

	/* Save turbo limits & ratios */
	rdmsrl(TURBO_POWER_CURRENT_LIMIT, ips->orig_turbo_limit);

	/* CPU turbo stays off (see the "power figures" note in the IRQ path) */
	ips_disable_cpu_turbo(ips);
	ips->cpu_turbo_enabled = false;

	/* Create thermal adjust thread */
	ips->adjust = kthread_create(ips_adjust, ips, "ips-adjust");
	if (IS_ERR(ips->adjust)) {
		dev_err(&dev->dev,
			"failed to create thermal adjust thread, aborting\n");
		ret = -ENOMEM;
		goto error_free_irq;
	}

	/*
	 * Set up the work queue and monitor thread. The monitor thread
	 * will wake up ips_adjust thread.
	 */
	ips->monitor = kthread_run(ips_monitor, ips, "ips-monitor");
	if (IS_ERR(ips->monitor)) {
		dev_err(&dev->dev,
			"failed to create thermal monitor thread, aborting\n");
		ret = -ENOMEM;
		goto error_thread_cleanup;
	}

	/* Advertise our limits and running state back to the hardware/ME */
	hts = (ips->core_power_limit << HTS_PCPL_SHIFT) |
		(ips->mcp_temp_limit << HTS_PTL_SHIFT) | HTS_NVV;
	htshi = HTS2_PRST_RUNNING << HTS2_PRST_SHIFT;

	thm_writew(THM_HTSHI, htshi);
	thm_writel(THM_HTS, hts);

	ips_debugfs_init(ips);

	dev_info(&dev->dev, "IPS driver initialized, MCP temp limit %d\n",
		 ips->mcp_temp_limit);
	return ret;

error_thread_cleanup:
	kthread_stop(ips->adjust);
error_free_irq:
	free_irq(ips->dev->irq, ips);
error_unmap:
	iounmap(ips->regmap);
error_release:
	pci_release_regions(dev);
error_free:
	kfree(ips);
	return ret;
}
/**
 * ips_remove - tear down the IPS driver
 * @dev: PCI device being removed
 *
 * Drops the i915 symbol references, restores the original turbo limit
 * MSR, and stops the worker threads before releasing the PCI resources.
 */
static void ips_remove(struct pci_dev *dev)
{
	struct ips_driver *ips = pci_get_drvdata(dev);
	u64 turbo_override;

	if (!ips)
		return;

	ips_debugfs_cleanup(ips);

	/* Release i915 driver */
	if (ips->read_mch_val)
		symbol_put(i915_read_mch_val);
	if (ips->gpu_raise)
		symbol_put(i915_gpu_raise);
	if (ips->gpu_lower)
		symbol_put(i915_gpu_lower);
	if (ips->gpu_busy)
		symbol_put(i915_gpu_busy);
	if (ips->gpu_turbo_disable)
		symbol_put(i915_gpu_turbo_disable);

	/* Clear our overrides, then restore the saved limit value */
	rdmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override);
	turbo_override &= ~(TURBO_TDC_OVR_EN | TURBO_TDP_OVR_EN);
	wrmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override);
	wrmsrl(TURBO_POWER_CURRENT_LIMIT, ips->orig_turbo_limit);

	free_irq(ips->dev->irq, ips);
	if (ips->adjust)
		kthread_stop(ips->adjust);
	if (ips->monitor)
		kthread_stop(ips->monitor);
	iounmap(ips->regmap);
	pci_release_regions(dev);
	kfree(ips);
	dev_dbg(&dev->dev, "IPS driver removed\n");
}
#ifdef CONFIG_PM
/* PM hooks: nothing is saved or restored here; both simply report success. */
static int ips_suspend(struct pci_dev *dev, pm_message_t state)
{
	return 0;
}

static int ips_resume(struct pci_dev *dev)
{
	return 0;
}
#else
#define ips_suspend NULL
#define ips_resume NULL
#endif /* CONFIG_PM */
/* PCI shutdown hook: intentionally a no-op. */
static void ips_shutdown(struct pci_dev *dev)
{
}
/* PCI driver glue tying the ops above to the device table. */
static struct pci_driver ips_pci_driver = {
	.name = "intel ips",
	.id_table = ips_id_table,
	.probe = ips_probe,
	.remove = ips_remove,
	.suspend = ips_suspend,
	.resume = ips_resume,
	.shutdown = ips_shutdown,
};
/* Module entry point: just register the PCI driver. */
static int __init ips_init(void)
{
	return pci_register_driver(&ips_pci_driver);
}
module_init(ips_init);
  1496. static void ips_exit(void)
  1497. {
  1498. pci_unregister_driver(&ips_pci_driver);
  1499. return;
  1500. }
  1501. module_exit(ips_exit);
  1502. MODULE_LICENSE("GPL");
  1503. MODULE_AUTHOR("Jesse Barnes <jbarnes@virtuousgeek.org>");
  1504. MODULE_DESCRIPTION("Intelligent Power Sharing Driver");