processor_idle.c

/*
 * processor_idle - idle state submodule to the ACPI processor driver
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *			- Added processor hotplug support
 * Copyright (C) 2005 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *			- Added support for C3 on SMP
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
#define pr_fmt(fmt) "ACPI: " fmt

#include <linux/module.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/sched.h>	/* need_resched() */
#include <linux/tick.h>
#include <linux/cpuidle.h>
#include <linux/cpu.h>
#include <acpi/processor.h>

/*
 * Include the apic definitions for x86 to have the APIC timer related defines
 * available also for UP (on SMP it gets magically included via linux/smp.h).
 * asm/acpi.h is not an option, as it would require more include magic. Also
 * creating an empty asm-ia64/apic.h would just trade pest vs. cholera.
 */
#ifdef CONFIG_X86
#include <asm/apic.h>
#endif

#define ACPI_PROCESSOR_CLASS	"processor"
#define _COMPONENT		ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_idle");

static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
module_param(max_cstate, uint, 0000);
static unsigned int nocst __read_mostly;
module_param(nocst, uint, 0000);
static int bm_check_disable __read_mostly;
module_param(bm_check_disable, uint, 0000);

static unsigned int latency_factor __read_mostly = 2;
module_param(latency_factor, uint, 0644);
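
/*
 * These parameters are set on the kernel command line, e.g.
 * "processor.max_cstate=1" to cap the deepest C-state or
 * "processor.nocst=1" to ignore _CST and fall back to FADT data.
 * latency_factor scales a state's exit latency to derive its target
 * residency (see acpi_processor_setup_cstates()).
 */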
static DEFINE_PER_CPU(struct cpuidle_device *, acpi_cpuidle_device);

struct cpuidle_driver acpi_idle_driver = {
	.name =		"acpi_idle",
	.owner =	THIS_MODULE,
};

#ifdef CONFIG_ACPI_PROCESSOR_CSTATE
static
DEFINE_PER_CPU(struct acpi_processor_cx * [CPUIDLE_STATE_MAX], acpi_cstate);

static int disabled_by_idle_boot_param(void)
{
	return boot_option_idle_override == IDLE_POLL ||
		boot_option_idle_override == IDLE_HALT;
}

/*
 * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
 * For now disable this. Probably a bug somewhere else.
 *
 * To skip this limit, boot/load with a large max_cstate limit.
 */
static int set_max_cstate(const struct dmi_system_id *id)
{
	if (max_cstate > ACPI_PROCESSOR_MAX_POWER)
		return 0;

	pr_notice("%s detected - limiting to C%ld max_cstate."
		  " Override with \"processor.max_cstate=%d\"\n", id->ident,
		  (long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1);

	max_cstate = (long)id->driver_data;

	return 0;
}

static const struct dmi_system_id processor_power_dmi_table[] = {
	{ set_max_cstate, "Clevo 5600D", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
	  DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")},
	 (void *)2},
	{ set_max_cstate, "Pavilion zv5000", {
	  DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
	  DMI_MATCH(DMI_PRODUCT_NAME,"Pavilion zv5000 (DS502A#ABA)")},
	 (void *)1},
	{ set_max_cstate, "Asus L8400B", {
	  DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
	  DMI_MATCH(DMI_PRODUCT_NAME,"L8400B series Notebook PC")},
	 (void *)1},
	{},
};

/*
 * Callers should disable interrupts before the call and enable
 * interrupts after return.
 */
static void __cpuidle acpi_safe_halt(void)
{
	if (!tif_need_resched()) {
		safe_halt();
		local_irq_disable();
	}
}

#ifdef ARCH_APICTIMER_STOPS_ON_C3

/*
 * Some BIOS implementations switch to C3 in the published C2 state.
 * This seems to be a common problem on AMD boxen, but other vendors
 * are affected too. We pick the most conservative approach: we assume
 * that the local APIC stops in both C2 and C3.
 */
static void lapic_timer_check_state(int state, struct acpi_processor *pr,
				    struct acpi_processor_cx *cx)
{
	struct acpi_processor_power *pwr = &pr->power;
	u8 type = local_apic_timer_c2_ok ? ACPI_STATE_C3 : ACPI_STATE_C2;

	if (cpu_has(&cpu_data(pr->id), X86_FEATURE_ARAT))
		return;

	if (amd_e400_c1e_detected)
		type = ACPI_STATE_C1;

	/*
	 * Check, if one of the previous states already marked the lapic
	 * unstable
	 */
	if (pwr->timer_broadcast_on_state < state)
		return;

	if (cx->type >= type)
		pr->power.timer_broadcast_on_state = state;
}

static void __lapic_timer_propagate_broadcast(void *arg)
{
	struct acpi_processor *pr = (struct acpi_processor *) arg;

	if (pr->power.timer_broadcast_on_state < INT_MAX)
		tick_broadcast_enable();
	else
		tick_broadcast_disable();
}

static void lapic_timer_propagate_broadcast(struct acpi_processor *pr)
{
	smp_call_function_single(pr->id, __lapic_timer_propagate_broadcast,
				 (void *)pr, 1);
}

/* Power(C) State timer broadcast control */
static void lapic_timer_state_broadcast(struct acpi_processor *pr,
					struct acpi_processor_cx *cx,
					int broadcast)
{
	int state = cx - pr->power.states;

	if (state >= pr->power.timer_broadcast_on_state) {
		if (broadcast)
			tick_broadcast_enter();
		else
			tick_broadcast_exit();
	}
}

#else

static void lapic_timer_check_state(int state, struct acpi_processor *pr,
				    struct acpi_processor_cx *cstate) { }
static void lapic_timer_propagate_broadcast(struct acpi_processor *pr) { }
static void lapic_timer_state_broadcast(struct acpi_processor *pr,
					struct acpi_processor_cx *cx,
					int broadcast)
{
}

#endif

#if defined(CONFIG_X86)
static void tsc_check_state(int state)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
	case X86_VENDOR_INTEL:
		/*
		 * AMD Fam10h TSC will tick in all
		 * C/P/S0/S1 states when this bit is set.
		 */
		if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
			return;

		/*FALL THROUGH*/
	default:
		/* TSC could halt in idle, so notify users */
		if (state > ACPI_STATE_C1)
			mark_tsc_unstable("TSC halts in idle");
	}
}
#else
static void tsc_check_state(int state) { return; }
#endif
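
/*
 * Derive C2/C3 from the FADT and the processor's P_BLK when no _CST is
 * available: per the ACPI spec, the P_LVL2 register lives at P_BLK + 4
 * and P_LVL3 at P_BLK + 5, and the FADT supplies the corresponding
 * worst-case latencies.
 */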
static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
{
	if (!pr->pblk)
		return -ENODEV;

	/* if info is obtained from pblk/fadt, type equals state */
	pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
	pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3;

#ifndef CONFIG_HOTPLUG_CPU
	/*
	 * Check for P_LVL2_UP flag before entering C2 and above on
	 * an SMP system.
	 */
	if ((num_online_cpus() > 1) &&
	    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
		return -ENODEV;
#endif

	/* determine C2 and C3 address from pblk */
	pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4;
	pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;

	/* determine latencies from FADT */
	pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.c2_latency;
	pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.c3_latency;

	/*
	 * FADT specified C2 latency must be less than or equal to
	 * 100 microseconds.
	 */
	if (acpi_gbl_FADT.c2_latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			"C2 latency too large [%d]\n", acpi_gbl_FADT.c2_latency));
		/* invalidate C2 */
		pr->power.states[ACPI_STATE_C2].address = 0;
	}

	/*
	 * FADT supplied C3 latency must be less than or equal to
	 * 1000 microseconds.
	 */
	if (acpi_gbl_FADT.c3_latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			"C3 latency too large [%d]\n", acpi_gbl_FADT.c3_latency));
		/* invalidate C3 */
		pr->power.states[ACPI_STATE_C3].address = 0;
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "lvl2[0x%08x] lvl3[0x%08x]\n",
			  pr->power.states[ACPI_STATE_C2].address,
			  pr->power.states[ACPI_STATE_C3].address));

	return 0;
}

static int acpi_processor_get_power_info_default(struct acpi_processor *pr)
{
	if (!pr->power.states[ACPI_STATE_C1].valid) {
		/* set the first C-State to C1 */
		/* all processors need to support C1 */
		pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
		pr->power.states[ACPI_STATE_C1].valid = 1;
		pr->power.states[ACPI_STATE_C1].entry_method = ACPI_CSTATE_HALT;
	}
	/* the C0 state only exists as a filler in our array */
	pr->power.states[ACPI_STATE_C0].valid = 1;
	return 0;
}
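
/*
 * _CST returns a package of the form { count, Package() { Register,
 * Type, Latency, Power } * count }; the loop below walks those
 * sub-packages, validating each element before accepting the C-state.
 */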
static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
{
	acpi_status status;
	u64 count;
	int current_count;
	int i, ret = 0;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *cst;

	if (nocst)
		return -ENODEV;

	current_count = 0;

	status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n"));
		return -ENODEV;
	}

	cst = buffer.pointer;

	/* There must be at least 2 elements */
	if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) {
		pr_err("not enough elements in _CST\n");
		ret = -EFAULT;
		goto end;
	}

	count = cst->package.elements[0].integer.value;

	/* Validate number of power states. */
	if (count < 1 || count != cst->package.count - 1) {
		pr_err("count given by _CST is not valid\n");
		ret = -EFAULT;
		goto end;
	}

	/* Tell driver that at least _CST is supported. */
	pr->flags.has_cst = 1;

	for (i = 1; i <= count; i++) {
		union acpi_object *element;
		union acpi_object *obj;
		struct acpi_power_register *reg;
		struct acpi_processor_cx cx;

		memset(&cx, 0, sizeof(cx));

		element = &(cst->package.elements[i]);
		if (element->type != ACPI_TYPE_PACKAGE)
			continue;

		if (element->package.count != 4)
			continue;

		obj = &(element->package.elements[0]);

		if (obj->type != ACPI_TYPE_BUFFER)
			continue;

		reg = (struct acpi_power_register *)obj->buffer.pointer;

		if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
		    (reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE))
			continue;

		/* There should be an easy way to extract an integer... */
		obj = &(element->package.elements[1]);

		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.type = obj->integer.value;
		/*
		 * Some buggy BIOSes won't list C1 in _CST -
		 * Let acpi_processor_get_power_info_default() handle them later
		 */
		if (i == 1 && cx.type != ACPI_STATE_C1)
			current_count++;

		cx.address = reg->address;
		cx.index = current_count + 1;

		cx.entry_method = ACPI_CSTATE_SYSTEMIO;
		if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
			if (acpi_processor_ffh_cstate_probe
					(pr->id, &cx, reg) == 0) {
				cx.entry_method = ACPI_CSTATE_FFH;
			} else if (cx.type == ACPI_STATE_C1) {
				/*
				 * C1 is a special case where FIXED_HARDWARE
				 * can be handled in non-MWAIT way as well.
				 * In that case, save this _CST entry info.
				 * Otherwise, ignore this info and continue.
				 */
				cx.entry_method = ACPI_CSTATE_HALT;
				snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
			} else {
				continue;
			}
			if (cx.type == ACPI_STATE_C1 &&
			    (boot_option_idle_override == IDLE_NOMWAIT)) {
				/*
				 * In most cases the C1 space_id obtained from
				 * _CST object is FIXED_HARDWARE access mode.
				 * But when the option of idle=halt is added,
				 * the entry_method type should be changed from
				 * CSTATE_FFH to CSTATE_HALT.
				 * When the option of idle=nomwait is added,
				 * the C1 entry_method type should be
				 * CSTATE_HALT.
				 */
				cx.entry_method = ACPI_CSTATE_HALT;
				snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
			}
		} else {
			snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x",
				 cx.address);
		}

		if (cx.type == ACPI_STATE_C1) {
			cx.valid = 1;
		}

		obj = &(element->package.elements[2]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.latency = obj->integer.value;

		obj = &(element->package.elements[3]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		current_count++;
		memcpy(&(pr->power.states[current_count]), &cx, sizeof(cx));

		/*
		 * We support total ACPI_PROCESSOR_MAX_POWER - 1
		 * (From 1 through ACPI_PROCESSOR_MAX_POWER - 1)
		 */
		if (current_count >= (ACPI_PROCESSOR_MAX_POWER - 1)) {
			pr_warn("Limiting number of power states to max (%d)\n",
				ACPI_PROCESSOR_MAX_POWER);
			pr_warn("Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
			break;
		}
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d power states\n",
			  current_count));

	/* Validate number of power states discovered */
	if (current_count < 2)
		ret = -EFAULT;

end:
	kfree(buffer.pointer);

	return ret;
}
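
/*
 * Apply the platform-wide C3 prerequisites: a valid address, no PIIX4
 * Type-F DMA erratum, and either bus-master control (or _CST) when
 * bm_check is needed, or WBINVD support when it is not.
 */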
static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
					   struct acpi_processor_cx *cx)
{
	static int bm_check_flag = -1;
	static int bm_control_flag = -1;

	if (!cx->address)
		return;

	/*
	 * PIIX4 Erratum #18: We don't support C3 when Type-F (fast)
	 * DMA transfers are used by any ISA device to avoid livelock.
	 * Note that we could disable Type-F DMA (as recommended by
	 * the erratum), but this is known to disrupt certain ISA
	 * devices thus we take the conservative approach.
	 */
	else if (errata.piix4.fdma) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "C3 not supported on PIIX4 with Type-F DMA\n"));
		return;
	}

	/* All the logic here assumes flags.bm_check is same across all CPUs */
	if (bm_check_flag == -1) {
		/* Determine whether bm_check is needed based on CPU */
		acpi_processor_power_init_bm_check(&(pr->flags), pr->id);
		bm_check_flag = pr->flags.bm_check;
		bm_control_flag = pr->flags.bm_control;
	} else {
		pr->flags.bm_check = bm_check_flag;
		pr->flags.bm_control = bm_control_flag;
	}

	if (pr->flags.bm_check) {
		if (!pr->flags.bm_control) {
			if (pr->flags.has_cst != 1) {
				/* bus mastering control is necessary */
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					"C3 support requires BM control\n"));
				return;
			} else {
				/* Here we enter C3 without bus mastering */
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					"C3 support without BM control\n"));
			}
		}
	} else {
		/*
		 * WBINVD should be set in fadt, for the C3 state to be
		 * supported when bm_check is not required.
		 */
		if (!(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD)) {
			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					  "Cache invalidation should work properly"
					  " for C3 to be enabled on SMP systems\n"));
			return;
		}
	}

	/*
	 * Otherwise we've met all of our C3 requirements.
	 * Normalize the C3 latency to expedite policy. Enable
	 * checking of bus mastering status (bm_check) so we can
	 * use this in our C3 policy
	 */
	cx->valid = 1;

	/*
	 * On older chipsets, BM_RLD needs to be set
	 * in order for Bus Master activity to wake the
	 * system from C3. Newer chipsets handle DMA
	 * during C3 automatically and BM_RLD is a NOP.
	 * In either case, the proper way to
	 * handle BM_RLD is to set it and leave it set.
	 */
	acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, 1);

	return;
}
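
/*
 * Walk the discovered states up to max_cstate, validate each one,
 * record where LAPIC timer broadcasting becomes necessary and whether
 * the TSC keeps ticking, and return the number of usable states.
 */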
static int acpi_processor_power_verify(struct acpi_processor *pr)
{
	unsigned int i;
	unsigned int working = 0;

	pr->power.timer_broadcast_on_state = INT_MAX;

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		struct acpi_processor_cx *cx = &pr->power.states[i];

		switch (cx->type) {
		case ACPI_STATE_C1:
			cx->valid = 1;
			break;

		case ACPI_STATE_C2:
			if (!cx->address)
				break;
			cx->valid = 1;
			break;

		case ACPI_STATE_C3:
			acpi_processor_power_verify_c3(pr, cx);
			break;
		}
		if (!cx->valid)
			continue;

		lapic_timer_check_state(i, pr, cx);
		tsc_check_state(cx->type);
		working++;
	}

	lapic_timer_propagate_broadcast(pr);

	return (working);
}

static int acpi_processor_get_cstate_info(struct acpi_processor *pr)
{
	unsigned int i;
	int result;

	/* NOTE: the idle thread may not be running while calling
	 * this function */

	/* Zero initialize all the C-states info. */
	memset(pr->power.states, 0, sizeof(pr->power.states));

	result = acpi_processor_get_power_info_cst(pr);
	if (result == -ENODEV)
		result = acpi_processor_get_power_info_fadt(pr);

	if (result)
		return result;

	acpi_processor_get_power_info_default(pr);

	pr->power.count = acpi_processor_power_verify(pr);

	/*
	 * if one state of type C2 or C3 is available, mark this
	 * CPU as being "idle manageable"
	 */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		if (pr->power.states[i].valid) {
			pr->power.count = i;
			if (pr->power.states[i].type >= ACPI_STATE_C2)
				pr->flags.power = 1;
		}
	}

	return 0;
}

/**
 * acpi_idle_bm_check - checks if bus master activity was detected
 */
static int acpi_idle_bm_check(void)
{
	u32 bm_status = 0;

	if (bm_check_disable)
		return 0;

	acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
	if (bm_status)
		acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
	/*
	 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
	 * the true state of bus mastering activity; forcing us to
	 * manually check the BMIDEA bit of each IDE channel.
	 */
	else if (errata.piix4.bmisx) {
		if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
		    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
			bm_status = 1;
	}
	return bm_status;
}

/**
 * acpi_idle_do_entry - enter idle state using the appropriate method
 * @cx: cstate data
 *
 * Caller disables interrupt before call and enables interrupt after return.
 */
static void __cpuidle acpi_idle_do_entry(struct acpi_processor_cx *cx)
{
	if (cx->entry_method == ACPI_CSTATE_FFH) {
		/* Call into architectural FFH based C-state */
		acpi_processor_ffh_cstate_enter(cx);
	} else if (cx->entry_method == ACPI_CSTATE_HALT) {
		acpi_safe_halt();
	} else {
		/* IO port based C-state */
		inb(cx->address);
		/* Dummy wait op - must do something useless after P_LVL2 read
		   because chipsets cannot guarantee that STPCLK# signal
		   gets asserted in time to freeze execution properly. */
		inl(acpi_gbl_FADT.xpm_timer_block.address);
	}
}

/**
 * acpi_idle_play_dead - enters an ACPI state for long-term idle (i.e. off-lining)
 * @dev: the target CPU
 * @index: the index of suggested state
 */
static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
{
	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);

	ACPI_FLUSH_CPU_CACHE();

	while (1) {

		if (cx->entry_method == ACPI_CSTATE_HALT)
			safe_halt();
		else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) {
			inb(cx->address);
			/* See comment in acpi_idle_do_entry() */
			inl(acpi_gbl_FADT.xpm_timer_block.address);
		} else
			return -ENODEV;
	}

	/* Never reached */
	return 0;
}
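
/*
 * With CONFIG_HOTPLUG_CPU the P_LVL2_UP check is compiled out of
 * acpi_processor_get_power_info_fadt(), so when the states came from
 * the FADT (no _CST) the MP-safety check has to be redone at idle
 * entry: on SMP, fall back to C1 unless the firmware says C2+ is
 * MP-safe.
 */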
static bool acpi_idle_fallback_to_c1(struct acpi_processor *pr)
{
	return IS_ENABLED(CONFIG_HOTPLUG_CPU) && !pr->flags.has_cst &&
		!(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED);
}

static int c3_cpu_count;
static DEFINE_RAW_SPINLOCK(c3_lock);

/**
 * acpi_idle_enter_bm - enters C3 with proper BM handling
 * @pr: Target processor
 * @cx: Target state context
 * @timer_bc: Whether or not to change timer mode to broadcast
 */
static void acpi_idle_enter_bm(struct acpi_processor *pr,
			       struct acpi_processor_cx *cx, bool timer_bc)
{
	acpi_unlazy_tlb(smp_processor_id());

	/*
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
	 */
	if (timer_bc)
		lapic_timer_state_broadcast(pr, cx, 1);

	/*
	 * disable bus master
	 * bm_check implies we need ARB_DIS
	 * bm_control implies whether we can do ARB_DIS
	 *
	 * That leaves a case where bm_check is set and bm_control is
	 * not set. In that case we cannot do much, we enter C3
	 * without doing anything.
	 */
	if (pr->flags.bm_control) {
		raw_spin_lock(&c3_lock);
		c3_cpu_count++;
		/* Disable bus master arbitration when all CPUs are in C3 */
		if (c3_cpu_count == num_online_cpus())
			acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
		raw_spin_unlock(&c3_lock);
	}

	acpi_idle_do_entry(cx);

	/* Re-enable bus master arbitration */
	if (pr->flags.bm_control) {
		raw_spin_lock(&c3_lock);
		acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
		c3_cpu_count--;
		raw_spin_unlock(&c3_lock);
	}

	if (timer_bc)
		lapic_timer_state_broadcast(pr, cx, 0);
}
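
/*
 * acpi_idle_enter() is the cpuidle ->enter callback for C-states. It
 * may demote the requested state: to C1 when C2+ is not MP-safe, or to
 * the driver's safe state (or a plain halt) when bus-master activity
 * makes C3 inadvisable. It returns the index of the state actually
 * entered, or a negative error code.
 */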
static int acpi_idle_enter(struct cpuidle_device *dev,
			   struct cpuidle_driver *drv, int index)
{
	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
	struct acpi_processor *pr;

	pr = __this_cpu_read(processors);
	if (unlikely(!pr))
		return -EINVAL;

	if (cx->type != ACPI_STATE_C1) {
		if (acpi_idle_fallback_to_c1(pr) && num_online_cpus() > 1) {
			index = CPUIDLE_DRIVER_STATE_START;
			cx = per_cpu(acpi_cstate[index], dev->cpu);
		} else if (cx->type == ACPI_STATE_C3 && pr->flags.bm_check) {
			if (cx->bm_sts_skip || !acpi_idle_bm_check()) {
				acpi_idle_enter_bm(pr, cx, true);
				return index;
			} else if (drv->safe_state_index >= 0) {
				index = drv->safe_state_index;
				cx = per_cpu(acpi_cstate[index], dev->cpu);
			} else {
				acpi_safe_halt();
				return -EBUSY;
			}
		}
	}

	lapic_timer_state_broadcast(pr, cx, 1);

	if (cx->type == ACPI_STATE_C3)
		ACPI_FLUSH_CPU_CACHE();

	acpi_idle_do_entry(cx);

	lapic_timer_state_broadcast(pr, cx, 0);

	return index;
}
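
/*
 * acpi_idle_enter_freeze() is the ->enter_freeze variant used for
 * suspend-to-idle: it must not re-enable interrupts or switch the
 * timer to broadcast mode, which is why C3 bus-master handling is
 * invoked with timer_bc == false.
 */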
static void acpi_idle_enter_freeze(struct cpuidle_device *dev,
				   struct cpuidle_driver *drv, int index)
{
	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);

	if (cx->type == ACPI_STATE_C3) {
		struct acpi_processor *pr = __this_cpu_read(processors);

		if (unlikely(!pr))
			return;

		if (pr->flags.bm_check) {
			acpi_idle_enter_bm(pr, cx, false);
			return;
		} else {
			ACPI_FLUSH_CPU_CACHE();
		}
	}
	acpi_idle_do_entry(cx);
}
static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
					   struct cpuidle_device *dev)
{
	int i, count = CPUIDLE_DRIVER_STATE_START;
	struct acpi_processor_cx *cx;

	if (max_cstate == 0)
		max_cstate = 1;

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		cx = &pr->power.states[i];

		if (!cx->valid)
			continue;

		per_cpu(acpi_cstate[count], dev->cpu) = cx;

		count++;
		if (count == CPUIDLE_STATE_MAX)
			break;
	}

	if (!count)
		return -EINVAL;

	return 0;
}

static int acpi_processor_setup_cstates(struct acpi_processor *pr)
{
	int i, count = CPUIDLE_DRIVER_STATE_START;
	struct acpi_processor_cx *cx;
	struct cpuidle_state *state;
	struct cpuidle_driver *drv = &acpi_idle_driver;

	if (max_cstate == 0)
		max_cstate = 1;

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		cx = &pr->power.states[i];

		if (!cx->valid)
			continue;

		state = &drv->states[count];
		snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
		strlcpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
		state->exit_latency = cx->latency;
		state->target_residency = cx->latency * latency_factor;
		state->enter = acpi_idle_enter;

		state->flags = 0;
		if (cx->type == ACPI_STATE_C1 || cx->type == ACPI_STATE_C2) {
			state->enter_dead = acpi_idle_play_dead;
			drv->safe_state_index = count;
		}
		/*
		 * Halt-induced C1 is not good for ->enter_freeze, because it
		 * re-enables interrupts on exit. Moreover, C1 is generally not
		 * particularly interesting from the suspend-to-idle angle, so
		 * avoid C1 and the situations in which we may need to fall back
		 * to it altogether.
		 */
		if (cx->type != ACPI_STATE_C1 && !acpi_idle_fallback_to_c1(pr))
			state->enter_freeze = acpi_idle_enter_freeze;

		count++;
		if (count == CPUIDLE_STATE_MAX)
			break;
	}

	drv->state_count = count;

	if (!count)
		return -EINVAL;

	return 0;
}
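
/*
 * One-time checks: apply DMI quirks, clamp max_cstate, and notify the
 * BIOS via the FADT's SMI command port that the OS supports _CST, so
 * the firmware exposes its full set of C-states.
 */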
static inline void acpi_processor_cstate_first_run_checks(void)
{
	acpi_status status;
	static int first_run;

	if (first_run)
		return;
	dmi_check_system(processor_power_dmi_table);
	max_cstate = acpi_processor_cstate_check(max_cstate);
	if (max_cstate < ACPI_C_STATES_MAX)
		pr_notice("processor limited to max C-state %d\n", max_cstate);
	first_run++;

	if (acpi_gbl_FADT.cst_control && !nocst) {
		status = acpi_os_write_port(acpi_gbl_FADT.smi_command,
					    acpi_gbl_FADT.cst_control, 8);
		if (ACPI_FAILURE(status))
			ACPI_EXCEPTION((AE_INFO, status,
					"Notifying BIOS of _CST ability failed"));
	}
}

#else

static inline int disabled_by_idle_boot_param(void) { return 0; }
static inline void acpi_processor_cstate_first_run_checks(void) { }

static int acpi_processor_get_cstate_info(struct acpi_processor *pr)
{
	return -ENODEV;
}

static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
					   struct cpuidle_device *dev)
{
	return -EINVAL;
}

static int acpi_processor_setup_cstates(struct acpi_processor *pr)
{
	return -EINVAL;
}

#endif /* CONFIG_ACPI_PROCESSOR_CSTATE */

struct acpi_lpi_states_array {
	unsigned int size;
	unsigned int composite_states_size;
	struct acpi_lpi_state *entries;
	struct acpi_lpi_state *composite_states[ACPI_PROCESSOR_MAX_POWER];
};

static int obj_get_integer(union acpi_object *obj, u32 *value)
{
	if (obj->type != ACPI_TYPE_INTEGER)
		return -EINVAL;

	*value = obj->integer.value;
	return 0;
}
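
/*
 * Per ACPI 6.x, _LPI returns { revision, level-id, count, LPI-state
 * packages... }. Each state package carries at least 7 elements: min
 * residency, wake latency, flags, arch flags, residency counter
 * frequency, enabled parent state, and the entry method (register
 * buffer or integer), optionally followed by residency/usage counters
 * and a name string.
 */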
static int acpi_processor_evaluate_lpi(acpi_handle handle,
				       struct acpi_lpi_states_array *info)
{
	acpi_status status;
	int ret = 0;
	int pkg_count, state_idx = 1, loop;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *lpi_data;
	struct acpi_lpi_state *lpi_state;

	status = acpi_evaluate_object(handle, "_LPI", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _LPI, giving up\n"));
		return -ENODEV;
	}

	lpi_data = buffer.pointer;

	/* There must be at least 4 elements = 3 elements + 1 package */
	if (!lpi_data || lpi_data->type != ACPI_TYPE_PACKAGE ||
	    lpi_data->package.count < 4) {
		pr_debug("not enough elements in _LPI\n");
		ret = -ENODATA;
		goto end;
	}

	pkg_count = lpi_data->package.elements[2].integer.value;

	/* Validate number of power states. */
	if (pkg_count < 1 || pkg_count != lpi_data->package.count - 3) {
		pr_debug("count given by _LPI is not valid\n");
		ret = -ENODATA;
		goto end;
	}

	lpi_state = kcalloc(pkg_count, sizeof(*lpi_state), GFP_KERNEL);
	if (!lpi_state) {
		ret = -ENOMEM;
		goto end;
	}

	info->size = pkg_count;
	info->entries = lpi_state;

	/* LPI States start at index 3 */
	for (loop = 3; state_idx <= pkg_count; loop++, state_idx++, lpi_state++) {
		union acpi_object *element, *pkg_elem, *obj;

		element = &lpi_data->package.elements[loop];
		if (element->type != ACPI_TYPE_PACKAGE || element->package.count < 7)
			continue;

		pkg_elem = element->package.elements;

		obj = pkg_elem + 6;
		if (obj->type == ACPI_TYPE_BUFFER) {
			struct acpi_power_register *reg;

			reg = (struct acpi_power_register *)obj->buffer.pointer;
			if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
			    reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)
				continue;

			lpi_state->address = reg->address;
			lpi_state->entry_method =
				reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE ?
				ACPI_CSTATE_FFH : ACPI_CSTATE_SYSTEMIO;
		} else if (obj->type == ACPI_TYPE_INTEGER) {
			lpi_state->entry_method = ACPI_CSTATE_INTEGER;
			lpi_state->address = obj->integer.value;
		} else {
			continue;
		}

		/* elements[7,8] skipped for now i.e. Residency/Usage counter*/

		obj = pkg_elem + 9;
		if (obj->type == ACPI_TYPE_STRING)
			strlcpy(lpi_state->desc, obj->string.pointer,
				ACPI_CX_DESC_LEN);

		lpi_state->index = state_idx;
		if (obj_get_integer(pkg_elem + 0, &lpi_state->min_residency)) {
			pr_debug("No min. residency found, assuming 10 us\n");
			lpi_state->min_residency = 10;
		}

		if (obj_get_integer(pkg_elem + 1, &lpi_state->wake_latency)) {
			pr_debug("No wakeup residency found, assuming 10 us\n");
			lpi_state->wake_latency = 10;
		}

		if (obj_get_integer(pkg_elem + 2, &lpi_state->flags))
			lpi_state->flags = 0;

		if (obj_get_integer(pkg_elem + 3, &lpi_state->arch_flags))
			lpi_state->arch_flags = 0;

		if (obj_get_integer(pkg_elem + 4, &lpi_state->res_cnt_freq))
			lpi_state->res_cnt_freq = 1;

		if (obj_get_integer(pkg_elem + 5, &lpi_state->enable_parent_state))
			lpi_state->enable_parent_state = 0;
	}

	acpi_handle_debug(handle, "Found %d power states\n", state_idx);
end:
	kfree(buffer.pointer);
	return ret;
}

/*
 * flat_state_cnt - the number of composite LPI states after the process of flattening
 */
static int flat_state_cnt;

/**
 * combine_lpi_states - combine local and parent LPI states to form a composite LPI state
 *
 * @local: local LPI state
 * @parent: parent LPI state
 * @result: composite LPI state
 */
static bool combine_lpi_states(struct acpi_lpi_state *local,
			       struct acpi_lpi_state *parent,
			       struct acpi_lpi_state *result)
{
	if (parent->entry_method == ACPI_CSTATE_INTEGER) {
		if (!parent->address) /* 0 means autopromotable */
			return false;
		result->address = local->address + parent->address;
	} else {
		result->address = parent->address;
	}

	result->min_residency = max(local->min_residency, parent->min_residency);
	result->wake_latency = local->wake_latency + parent->wake_latency;
	result->enable_parent_state = parent->enable_parent_state;
	result->entry_method = local->entry_method;

	result->flags = parent->flags;
	result->arch_flags = parent->arch_flags;
	result->index = parent->index;

	strlcpy(result->desc, local->desc, ACPI_CX_DESC_LEN);
	strlcat(result->desc, "+", ACPI_CX_DESC_LEN);
	strlcat(result->desc, parent->desc, ACPI_CX_DESC_LEN);
	return true;
}

#define ACPI_LPI_STATE_FLAGS_ENABLED			BIT(0)

static void stash_composite_state(struct acpi_lpi_states_array *curr_level,
				  struct acpi_lpi_state *t)
{
	curr_level->composite_states[curr_level->composite_states_size++] = t;
}
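
/*
 * Flatten one level of the _LPI hierarchy: combine every enabled state
 * of the current (parent) level with each composite state of the
 * previous (child) level whose enable_parent_state permits it,
 * appending the results to pr->power.lpi_states[].
 */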
static int flatten_lpi_states(struct acpi_processor *pr,
			      struct acpi_lpi_states_array *curr_level,
			      struct acpi_lpi_states_array *prev_level)
{
	int i, j, state_count = curr_level->size;
	struct acpi_lpi_state *p, *t = curr_level->entries;

	curr_level->composite_states_size = 0;

	for (j = 0; j < state_count; j++, t++) {
		struct acpi_lpi_state *flpi;

		if (!(t->flags & ACPI_LPI_STATE_FLAGS_ENABLED))
			continue;

		if (flat_state_cnt >= ACPI_PROCESSOR_MAX_POWER) {
			pr_warn("Limiting number of LPI states to max (%d)\n",
				ACPI_PROCESSOR_MAX_POWER);
			pr_warn("Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
			break;
		}

		flpi = &pr->power.lpi_states[flat_state_cnt];

		if (!prev_level) { /* leaf/processor node */
			memcpy(flpi, t, sizeof(*t));
			stash_composite_state(curr_level, flpi);
			flat_state_cnt++;
			continue;
		}

		for (i = 0; i < prev_level->composite_states_size; i++) {
			p = prev_level->composite_states[i];
			if (t->index <= p->enable_parent_state &&
			    combine_lpi_states(p, t, flpi)) {
				stash_composite_state(curr_level, flpi);
				flat_state_cnt++;
				flpi++;
			}
		}
	}

	kfree(curr_level->entries);
	return 0;
}

static int acpi_processor_get_lpi_info(struct acpi_processor *pr)
{
	int ret, i;
	acpi_status status;
	acpi_handle handle = pr->handle, pr_ahandle;
	struct acpi_device *d = NULL;
	struct acpi_lpi_states_array info[2], *tmp, *prev, *curr;

	if (!osc_pc_lpi_support_confirmed)
		return -EOPNOTSUPP;

	if (!acpi_has_method(handle, "_LPI"))
		return -EINVAL;

	flat_state_cnt = 0;
	prev = &info[0];
	curr = &info[1];
	handle = pr->handle;
	ret = acpi_processor_evaluate_lpi(handle, prev);
	if (ret)
		return ret;
	flatten_lpi_states(pr, prev, NULL);

	status = acpi_get_parent(handle, &pr_ahandle);
	while (ACPI_SUCCESS(status)) {
		acpi_bus_get_device(pr_ahandle, &d);
		handle = pr_ahandle;

		if (strcmp(acpi_device_hid(d), ACPI_PROCESSOR_CONTAINER_HID))
			break;

		/* can be optional ? */
		if (!acpi_has_method(handle, "_LPI"))
			break;

		ret = acpi_processor_evaluate_lpi(handle, curr);
		if (ret)
			break;

		/* flatten all the LPI states in this level of hierarchy */
		flatten_lpi_states(pr, curr, prev);

		tmp = prev, prev = curr, curr = tmp;

		status = acpi_get_parent(handle, &pr_ahandle);
	}

	pr->power.count = flat_state_cnt;
	/* reset the index after flattening */
	for (i = 0; i < pr->power.count; i++)
		pr->power.lpi_states[i].index = i;

	/* Tell driver that _LPI is supported. */
	pr->flags.has_lpi = 1;
	pr->flags.power = 1;

	return 0;
}

int __weak acpi_processor_ffh_lpi_probe(unsigned int cpu)
{
	return -ENODEV;
}

int __weak acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi)
{
	return -ENODEV;
}

/**
 * acpi_idle_lpi_enter - enters an ACPI LPI state
 * @dev: the target CPU
 * @drv: cpuidle driver containing cpuidle state info
 * @index: index of target state
 *
 * Return: 0 for success or negative value for error
 */
static int acpi_idle_lpi_enter(struct cpuidle_device *dev,
			       struct cpuidle_driver *drv, int index)
{
	struct acpi_processor *pr;
	struct acpi_lpi_state *lpi;

	pr = __this_cpu_read(processors);

	if (unlikely(!pr))
		return -EINVAL;

	lpi = &pr->power.lpi_states[index];
	if (lpi->entry_method == ACPI_CSTATE_FFH)
		return acpi_processor_ffh_lpi_enter(lpi);

	return -EINVAL;
}
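
/*
 * Populate the cpuidle driver states from the flattened LPI list:
 * wake latency maps to exit_latency, min residency to
 * target_residency, and states with arch flags request timer-stop
 * handling from the cpuidle core.
 */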
static int acpi_processor_setup_lpi_states(struct acpi_processor *pr)
{
	int i;
	struct acpi_lpi_state *lpi;
	struct cpuidle_state *state;
	struct cpuidle_driver *drv = &acpi_idle_driver;

	if (!pr->flags.has_lpi)
		return -EOPNOTSUPP;

	for (i = 0; i < pr->power.count && i < CPUIDLE_STATE_MAX; i++) {
		lpi = &pr->power.lpi_states[i];

		state = &drv->states[i];
		snprintf(state->name, CPUIDLE_NAME_LEN, "LPI-%d", i);
		strlcpy(state->desc, lpi->desc, CPUIDLE_DESC_LEN);
		state->exit_latency = lpi->wake_latency;
		state->target_residency = lpi->min_residency;
		if (lpi->arch_flags)
			state->flags |= CPUIDLE_FLAG_TIMER_STOP;
		state->enter = acpi_idle_lpi_enter;
		drv->safe_state_index = i;
	}

	drv->state_count = i;

	return 0;
}
/**
 * acpi_processor_setup_cpuidle_states - prepares and configures cpuidle
 * global state data i.e. idle routines
 *
 * @pr: the ACPI processor
 */
static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
{
	int i;
	struct cpuidle_driver *drv = &acpi_idle_driver;

	if (!pr->flags.power_setup_done || !pr->flags.power)
		return -EINVAL;

	drv->safe_state_index = -1;
	for (i = CPUIDLE_DRIVER_STATE_START; i < CPUIDLE_STATE_MAX; i++) {
		drv->states[i].name[0] = '\0';
		drv->states[i].desc[0] = '\0';
	}

	if (pr->flags.has_lpi)
		return acpi_processor_setup_lpi_states(pr);

	return acpi_processor_setup_cstates(pr);
}

/**
 * acpi_processor_setup_cpuidle_dev - prepares and configures CPUIDLE
 * device i.e. per-cpu data
 *
 * @pr: the ACPI processor
 * @dev: the cpuidle device
 */
static int acpi_processor_setup_cpuidle_dev(struct acpi_processor *pr,
					    struct cpuidle_device *dev)
{
	if (!pr->flags.power_setup_done || !pr->flags.power || !dev)
		return -EINVAL;

	dev->cpu = pr->id;

	if (pr->flags.has_lpi)
		return acpi_processor_ffh_lpi_probe(pr->id);

	return acpi_processor_setup_cpuidle_cx(pr, dev);
}
static int acpi_processor_get_power_info(struct acpi_processor *pr)
{
	int ret;

	ret = acpi_processor_get_lpi_info(pr);
	if (ret)
		ret = acpi_processor_get_cstate_info(pr);

	return ret;
}

int acpi_processor_hotplug(struct acpi_processor *pr)
{
	int ret = 0;
	struct cpuidle_device *dev;

	if (disabled_by_idle_boot_param())
		return 0;

	if (!pr->flags.power_setup_done)
		return -ENODEV;

	dev = per_cpu(acpi_cpuidle_device, pr->id);
	cpuidle_pause_and_lock();
	cpuidle_disable_device(dev);
	ret = acpi_processor_get_power_info(pr);
	if (!ret && pr->flags.power) {
		acpi_processor_setup_cpuidle_dev(pr, dev);
		ret = cpuidle_enable_device(dev);
	}
	cpuidle_resume_and_unlock();

	return ret;
}

int acpi_processor_power_state_has_changed(struct acpi_processor *pr)
{
	int cpu;
	struct acpi_processor *_pr;
	struct cpuidle_device *dev;

	if (disabled_by_idle_boot_param())
		return 0;

	if (!pr->flags.power_setup_done)
		return -ENODEV;

	/*
	 * FIXME: Design the ACPI notification to make it once per
	 * system instead of once per-cpu. This condition is a hack
	 * to make the code that updates C-States be called once.
	 */
	if (pr->id == 0 && cpuidle_get_driver() == &acpi_idle_driver) {
		/* Protect against cpu-hotplug */
		get_online_cpus();
		cpuidle_pause_and_lock();

		/* Disable all cpuidle devices */
		for_each_online_cpu(cpu) {
			_pr = per_cpu(processors, cpu);
			if (!_pr || !_pr->flags.power_setup_done)
				continue;
			dev = per_cpu(acpi_cpuidle_device, cpu);
			cpuidle_disable_device(dev);
		}

		/* Populate Updated C-state information */
		acpi_processor_get_power_info(pr);
		acpi_processor_setup_cpuidle_states(pr);

		/* Enable all cpuidle devices */
		for_each_online_cpu(cpu) {
			_pr = per_cpu(processors, cpu);
			if (!_pr || !_pr->flags.power_setup_done)
				continue;
			acpi_processor_get_power_info(_pr);
			if (_pr->flags.power) {
				dev = per_cpu(acpi_cpuidle_device, cpu);
				acpi_processor_setup_cpuidle_dev(_pr, dev);
				cpuidle_enable_device(dev);
			}
		}
		cpuidle_resume_and_unlock();
		put_online_cpus();
	}

	return 0;
}
static int acpi_processor_registered;

int acpi_processor_power_init(struct acpi_processor *pr)
{
	int retval;
	struct cpuidle_device *dev;

	if (disabled_by_idle_boot_param())
		return 0;

	acpi_processor_cstate_first_run_checks();

	if (!acpi_processor_get_power_info(pr))
		pr->flags.power_setup_done = 1;

	/*
	 * Install the idle handler if processor power management is supported.
	 * Note that the previously set idle handler will be used on
	 * platforms that only support C1.
	 */
	if (pr->flags.power) {
		/* Register acpi_idle_driver if not already registered */
		if (!acpi_processor_registered) {
			acpi_processor_setup_cpuidle_states(pr);
			retval = cpuidle_register_driver(&acpi_idle_driver);
			if (retval)
				return retval;
			pr_debug("%s registered with cpuidle\n",
				 acpi_idle_driver.name);
		}

		dev = kzalloc(sizeof(*dev), GFP_KERNEL);
		if (!dev)
			return -ENOMEM;
		per_cpu(acpi_cpuidle_device, pr->id) = dev;

		acpi_processor_setup_cpuidle_dev(pr, dev);

		/* Register per-cpu cpuidle_device. Cpuidle driver
		 * must already be registered before registering device
		 */
		retval = cpuidle_register_device(dev);
		if (retval) {
			if (acpi_processor_registered == 0)
				cpuidle_unregister_driver(&acpi_idle_driver);
			return retval;
		}
		acpi_processor_registered++;
	}
	return 0;
}

int acpi_processor_power_exit(struct acpi_processor *pr)
{
	struct cpuidle_device *dev = per_cpu(acpi_cpuidle_device, pr->id);

	if (disabled_by_idle_boot_param())
		return 0;

	if (pr->flags.power) {
		cpuidle_unregister_device(dev);
		acpi_processor_registered--;
		if (acpi_processor_registered == 0)
			cpuidle_unregister_driver(&acpi_idle_driver);
	}

	pr->flags.power_setup_done = 0;
	return 0;
}