cpuidle-powernv.c

/*
 * cpuidle-powernv - idle state cpuidle driver.
 * Adapted from drivers/cpuidle/cpuidle-pseries
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/cpuidle.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/clockchips.h>
#include <linux/of.h>
#include <linux/slab.h>

#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/opal.h>
#include <asm/runlatch.h>

#define POWERNV_THRESHOLD_LATENCY_NS 200000

struct cpuidle_driver powernv_idle_driver = {
        .name  = "powernv_idle",
        .owner = THIS_MODULE,
};

static int max_idle_state;
static struct cpuidle_state *cpuidle_state_table;
static u64 stop_psscr_table[CPUIDLE_STATE_MAX];

static u64 default_snooze_timeout;
static bool snooze_timeout_en;

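/*
 * Return the number of timebase ticks the snooze polling loop may spend
 * before giving up: the target residency of the next enabled (deeper)
 * state in the table.  Falls back to default_snooze_timeout when the
 * timeout is disabled or no deeper state is usable.
 */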
static u64 get_snooze_timeout(struct cpuidle_device *dev,
                              struct cpuidle_driver *drv,
                              int index)
{
        int i;

        if (unlikely(!snooze_timeout_en))
                return default_snooze_timeout;

        for (i = index + 1; i < drv->state_count; i++) {
                struct cpuidle_state *s = &drv->states[i];
                struct cpuidle_state_usage *su = &dev->states_usage[i];

                if (s->disabled || su->disable)
                        continue;

                return s->target_residency * tb_ticks_per_usec;
        }

        return default_snooze_timeout;
}

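/*
 * Snooze: a polling idle loop.  The hardware thread priority is lowered
 * while polling need_resched(); the loop also bails out once the timeout
 * from get_snooze_timeout() expires, so cpuidle can re-evaluate and pick
 * a deeper state.
 */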
static int snooze_loop(struct cpuidle_device *dev,
                       struct cpuidle_driver *drv,
                       int index)
{
        u64 snooze_exit_time;

        local_irq_enable();
        set_thread_flag(TIF_POLLING_NRFLAG);

        snooze_exit_time = get_tb() + get_snooze_timeout(dev, drv, index);
        ppc64_runlatch_off();
        while (!need_resched()) {
                HMT_low();
                HMT_very_low();
                if (snooze_timeout_en && get_tb() > snooze_exit_time)
                        break;
        }

        HMT_medium();
        ppc64_runlatch_on();
        clear_thread_flag(TIF_POLLING_NRFLAG);
        smp_mb();
        return index;
}

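/*
 * Nap: hardware idle state entered via power7_idle().  The runlatch is
 * cleared around the nap to mark the thread as idle.
 */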
static int nap_loop(struct cpuidle_device *dev,
                    struct cpuidle_driver *drv,
                    int index)
{
        ppc64_runlatch_off();
        power7_idle();
        ppc64_runlatch_on();
        return index;
}

/* Register for fastsleep only in oneshot mode of broadcast */
#ifdef CONFIG_TICK_ONESHOT
static int fastsleep_loop(struct cpuidle_device *dev,
                          struct cpuidle_driver *drv,
                          int index)
{
        unsigned long old_lpcr = mfspr(SPRN_LPCR);
        unsigned long new_lpcr;

        if (unlikely(system_state < SYSTEM_RUNNING))
                return index;

        new_lpcr = old_lpcr;
        /* Do not exit powersave upon decrementer as we've setup the timer
         * offload.
         */
        new_lpcr &= ~LPCR_PECE1;

        mtspr(SPRN_LPCR, new_lpcr);
        power7_sleep();

        mtspr(SPRN_LPCR, old_lpcr);

        return index;
}
#endif

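/*
 * Stop: POWER9 idle state entered via the stop instruction.  The PSSCR
 * value for the requested state was read from the device tree and cached
 * in stop_psscr_table[] by powernv_add_idle_states().
 */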
static int stop_loop(struct cpuidle_device *dev,
                     struct cpuidle_driver *drv,
                     int index)
{
        ppc64_runlatch_off();
        power9_idle_stop(stop_psscr_table[index]);
        ppc64_runlatch_on();
        return index;
}

/*
 * States for dedicated partition case.
 */
static struct cpuidle_state powernv_states[CPUIDLE_STATE_MAX] = {
        { /* Snooze */
          .name = "snooze",
          .desc = "snooze",
          .exit_latency = 0,
          .target_residency = 0,
          .enter = snooze_loop },
};

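/*
 * CPU hotplug callbacks: enable the per-CPU cpuidle device when a CPU
 * comes online and disable it again when the CPU dies.
 */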
static int powernv_cpuidle_cpu_online(unsigned int cpu)
{
        struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);

        if (dev && cpuidle_get_driver()) {
                cpuidle_pause_and_lock();
                cpuidle_enable_device(dev);
                cpuidle_resume_and_unlock();
        }
        return 0;
}

static int powernv_cpuidle_cpu_dead(unsigned int cpu)
{
        struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);

        if (dev && cpuidle_get_driver()) {
                cpuidle_pause_and_lock();
                cpuidle_disable_device(dev);
                cpuidle_resume_and_unlock();
        }
        return 0;
}

/*
 * powernv_cpuidle_driver_init()
 */
static int powernv_cpuidle_driver_init(void)
{
        int idle_state;
        struct cpuidle_driver *drv = &powernv_idle_driver;

        drv->state_count = 0;

        for (idle_state = 0; idle_state < max_idle_state; ++idle_state) {
                /* Is the state not enabled? */
                if (cpuidle_state_table[idle_state].enter == NULL)
                        continue;

                drv->states[drv->state_count] = /* structure copy */
                        cpuidle_state_table[idle_state];

                drv->state_count += 1;
        }

        /*
         * On the PowerNV platform cpu_present may be less than cpu_possible in
         * cases when firmware detects the CPU, but it is not available to the
         * OS.  If CONFIG_HOTPLUG_CPU=n, then such CPUs are not hotplugable at
         * run time and hence cpu_devices are not created for those CPUs by the
         * generic topology_init().
         *
         * drv->cpumask defaults to cpu_possible_mask in
         * __cpuidle_driver_init().  This breaks cpuidle on PowerNV where
         * cpu_devices are not created for CPUs in cpu_possible_mask that
         * cannot be hot-added later at run time.
         *
         * Trying cpuidle_register_device() on a CPU without a cpu_device is
         * incorrect, so pass a correct CPU mask to the generic cpuidle driver.
         */
        drv->cpumask = (struct cpumask *)cpu_present_mask;

        return 0;
}

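/*
 * Parse the idle state properties exposed by OPAL firmware under
 * /ibm,opal/power-mgt and append the discovered states (Nap, FastSleep,
 * stop levels) to powernv_states[] after the statically defined snooze
 * state.  Returns the total number of usable states.
 */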
static int powernv_add_idle_states(void)
{
        struct device_node *power_mgt;
        int nr_idle_states = 1; /* Snooze */
        int dt_idle_states;
        u32 latency_ns[CPUIDLE_STATE_MAX];
        u32 residency_ns[CPUIDLE_STATE_MAX];
        u32 flags[CPUIDLE_STATE_MAX];
        u64 psscr_val[CPUIDLE_STATE_MAX];
        const char *names[CPUIDLE_STATE_MAX];
        int i, rc;

        /* Currently we have snooze statically defined */

        power_mgt = of_find_node_by_path("/ibm,opal/power-mgt");
        if (!power_mgt) {
                pr_warn("opal: PowerMgmt Node not found\n");
                goto out;
        }

        /* Read values of any property to determine the num of idle states */
        dt_idle_states = of_property_count_u32_elems(power_mgt,
                                                     "ibm,cpu-idle-state-flags");
        if (dt_idle_states < 0) {
                pr_warn("cpuidle-powernv: no idle states found in the DT\n");
                goto out;
        }

        /*
         * Since snooze is used as first idle state, max idle states allowed is
         * CPUIDLE_STATE_MAX - 1
         */
        if (dt_idle_states > CPUIDLE_STATE_MAX - 1) {
                pr_warn("cpuidle-powernv: discovered idle states more than allowed");
                dt_idle_states = CPUIDLE_STATE_MAX - 1;
        }

        if (of_property_read_u32_array(power_mgt,
                        "ibm,cpu-idle-state-flags", flags, dt_idle_states)) {
                pr_warn("cpuidle-powernv : missing ibm,cpu-idle-state-flags in DT\n");
                goto out;
        }

        if (of_property_read_u32_array(power_mgt,
                        "ibm,cpu-idle-state-latencies-ns", latency_ns,
                        dt_idle_states)) {
                pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-latencies-ns in DT\n");
                goto out;
        }

        if (of_property_read_string_array(power_mgt,
                        "ibm,cpu-idle-state-names", names, dt_idle_states) < 0) {
                pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-names in DT\n");
                goto out;
        }

        /*
         * If the idle states use stop instruction, probe for psscr values
         * which are necessary to specify required stop level.
         */
        if (flags[0] & (OPAL_PM_STOP_INST_FAST | OPAL_PM_STOP_INST_DEEP))
                if (of_property_read_u64_array(power_mgt,
                        "ibm,cpu-idle-state-psscr", psscr_val, dt_idle_states)) {
                        pr_warn("cpuidle-powernv: missing ibm,cpu-idle-states-psscr in DT\n");
                        goto out;
                }

        rc = of_property_read_u32_array(power_mgt,
                "ibm,cpu-idle-state-residency-ns", residency_ns, dt_idle_states);

        for (i = 0; i < dt_idle_states; i++) {
                /*
                 * If an idle state has exit latency beyond
                 * POWERNV_THRESHOLD_LATENCY_NS then don't use it
                 * in cpu-idle.
                 */
                if (latency_ns[i] > POWERNV_THRESHOLD_LATENCY_NS)
                        continue;

                /*
                 * Cpuidle accepts exit_latency and target_residency in us.
                 * Use default target_residency values if f/w does not expose it.
                 */
                if (flags[i] & OPAL_PM_NAP_ENABLED) {
                        /* Add NAP state */
                        strcpy(powernv_states[nr_idle_states].name, "Nap");
                        strcpy(powernv_states[nr_idle_states].desc, "Nap");
                        powernv_states[nr_idle_states].flags = 0;
                        powernv_states[nr_idle_states].target_residency = 100;
                        powernv_states[nr_idle_states].enter = nap_loop;
                } else if ((flags[i] & OPAL_PM_STOP_INST_FAST) &&
                           !(flags[i] & OPAL_PM_TIMEBASE_STOP)) {
                        strncpy(powernv_states[nr_idle_states].name,
                                names[i], CPUIDLE_NAME_LEN);
                        strncpy(powernv_states[nr_idle_states].desc,
                                names[i], CPUIDLE_NAME_LEN);
                        powernv_states[nr_idle_states].flags = 0;

                        powernv_states[nr_idle_states].enter = stop_loop;
                        stop_psscr_table[nr_idle_states] = psscr_val[i];
                }

                /*
                 * All cpuidle states with CPUIDLE_FLAG_TIMER_STOP set must come
                 * within this config dependency check.
                 */
#ifdef CONFIG_TICK_ONESHOT
                if (flags[i] & OPAL_PM_SLEEP_ENABLED ||
                    flags[i] & OPAL_PM_SLEEP_ENABLED_ER1) {
                        /* Add FASTSLEEP state */
                        strcpy(powernv_states[nr_idle_states].name, "FastSleep");
                        strcpy(powernv_states[nr_idle_states].desc, "FastSleep");
                        powernv_states[nr_idle_states].flags = CPUIDLE_FLAG_TIMER_STOP;
                        powernv_states[nr_idle_states].target_residency = 300000;
                        powernv_states[nr_idle_states].enter = fastsleep_loop;
                } else if ((flags[i] & OPAL_PM_STOP_INST_DEEP) &&
                           (flags[i] & OPAL_PM_TIMEBASE_STOP)) {
                        strncpy(powernv_states[nr_idle_states].name,
                                names[i], CPUIDLE_NAME_LEN);
                        strncpy(powernv_states[nr_idle_states].desc,
                                names[i], CPUIDLE_NAME_LEN);

                        powernv_states[nr_idle_states].flags = CPUIDLE_FLAG_TIMER_STOP;
                        powernv_states[nr_idle_states].enter = stop_loop;
                        stop_psscr_table[nr_idle_states] = psscr_val[i];
                }
#endif
                powernv_states[nr_idle_states].exit_latency =
                        ((unsigned int)latency_ns[i]) / 1000;

                if (!rc) {
                        powernv_states[nr_idle_states].target_residency =
                                ((unsigned int)residency_ns[i]) / 1000;
                }

                nr_idle_states++;
        }
out:
        return nr_idle_states;
}

/*
 * powernv_idle_probe()
 * Choose state table for shared versus dedicated partition
 */
static int powernv_idle_probe(void)
{
        if (cpuidle_disable != IDLE_NO_OVERRIDE)
                return -ENODEV;

        if (firmware_has_feature(FW_FEATURE_OPAL)) {
                cpuidle_state_table = powernv_states;
                /* Device tree can indicate more idle states */
                max_idle_state = powernv_add_idle_states();
                default_snooze_timeout = TICK_USEC * tb_ticks_per_usec;
                if (max_idle_state > 1)
                        snooze_timeout_en = true;
        } else
                return -ENODEV;

        return 0;
}

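/*
 * Module init: probe the platform, build the driver state table, then
 * register the cpuidle driver and the CPU hotplug callbacks.
 */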
static int __init powernv_processor_idle_init(void)
{
        int retval;

        retval = powernv_idle_probe();
        if (retval)
                return retval;

        powernv_cpuidle_driver_init();
        retval = cpuidle_register(&powernv_idle_driver, NULL);
        if (retval) {
                printk(KERN_DEBUG "Registration of powernv driver failed.\n");
                return retval;
        }

        retval = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
                                           "cpuidle/powernv:online",
                                           powernv_cpuidle_cpu_online, NULL);
        WARN_ON(retval < 0);
        retval = cpuhp_setup_state_nocalls(CPUHP_CPUIDLE_DEAD,
                                           "cpuidle/powernv:dead", NULL,
                                           powernv_cpuidle_cpu_dead);
        WARN_ON(retval < 0);
        printk(KERN_DEBUG "powernv_idle_driver registered\n");
        return 0;
}

device_initcall(powernv_processor_idle_init);