psci_checker.c

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Copyright (C) 2016 ARM Limited
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/atomic.h>
#include <linux/completion.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/cpu_pm.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/preempt.h>
#include <linux/psci.h>
#include <linux/slab.h>
#include <linux/tick.h>
#include <linux/topology.h>

#include <asm/cpuidle.h>

#include <uapi/linux/psci.h>

#define NUM_SUSPEND_CYCLE (10)

static unsigned int nb_available_cpus;
static int tos_resident_cpu = -1;

static atomic_t nb_active_threads;
static struct completion suspend_threads_started =
        COMPLETION_INITIALIZER(suspend_threads_started);
static struct completion suspend_threads_done =
        COMPLETION_INITIALIZER(suspend_threads_done);

/*
 * We assume that PSCI operations are used if they are available. This is not
 * necessarily true on arm64, since the decision is based on the
 * "enable-method" property of each CPU in the DT, but given that there is no
 * arch-specific way to check this, we assume that the DT is sensible.
 */
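/*
 * Check that the PSCI operations needed by the tests (cpu_off, cpu_on and
 * cpu_suspend) are implemented, and record the CPU on which a uniprocessor
 * Trusted OS resides, if any, so the hotplug tests can expect cpu_down() to
 * be refused on that CPU.
 */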
static int psci_ops_check(void)
{
        int migrate_type = -1;
        int cpu;

        if (!(psci_ops.cpu_off && psci_ops.cpu_on && psci_ops.cpu_suspend)) {
                pr_warn("Missing PSCI operations, aborting tests\n");
                return -EOPNOTSUPP;
        }

        if (psci_ops.migrate_info_type)
                migrate_type = psci_ops.migrate_info_type();

        if (migrate_type == PSCI_0_2_TOS_UP_MIGRATE ||
            migrate_type == PSCI_0_2_TOS_UP_NO_MIGRATE) {
                /* There is a UP Trusted OS, find on which core it resides. */
                for_each_online_cpu(cpu)
                        if (psci_tos_resident_on(cpu)) {
                                tos_resident_cpu = cpu;
                                break;
                        }
                if (tos_resident_cpu == -1)
                        pr_warn("UP Trusted OS resides on no online CPU\n");
        }

        return 0;
}
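
/*
 * Partition @cpus into "clusters": groups of CPUs sharing a core sibling
 * mask, as reported by topology_core_cpumask(). Pointers to the cluster
 * masks are stored in @clusters. Returns the number of clusters found, or
 * -ENOMEM if a temporary cpumask could not be allocated.
 */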
static int find_clusters(const struct cpumask *cpus,
                         const struct cpumask **clusters)
{
        unsigned int nb = 0;
        cpumask_var_t tmp;

        if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
                return -ENOMEM;
        cpumask_copy(tmp, cpus);

        while (!cpumask_empty(tmp)) {
                const struct cpumask *cluster =
                        topology_core_cpumask(cpumask_any(tmp));

                clusters[nb++] = cluster;
                cpumask_andnot(tmp, tmp, cluster);
        }

        free_cpumask_var(tmp);
        return nb;
}

/*
 * offlined_cpus is a temporary array but passing it as an argument avoids
 * multiple allocations.
 */
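/*
 * Power down every CPU in @cpus, then bring the successfully offlined CPUs
 * back up. Taking down the last online CPU and the Trusted OS resident CPU
 * is expected to fail with -EBUSY and -EPERM respectively; anything else
 * counts as an error. Returns the number of errors encountered.
 */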
static unsigned int down_and_up_cpus(const struct cpumask *cpus,
                                     struct cpumask *offlined_cpus)
{
        int cpu;
        int err = 0;

        cpumask_clear(offlined_cpus);

        /* Try to power down all CPUs in the mask. */
        for_each_cpu(cpu, cpus) {
                int ret = cpu_down(cpu);

                /*
                 * cpu_down() checks the number of online CPUs before the TOS
                 * resident CPU.
                 */
                if (cpumask_weight(offlined_cpus) + 1 == nb_available_cpus) {
                        if (ret != -EBUSY) {
                                pr_err("Unexpected return code %d while trying "
                                       "to power down last online CPU %d\n",
                                       ret, cpu);
                                ++err;
                        }
                } else if (cpu == tos_resident_cpu) {
                        if (ret != -EPERM) {
                                pr_err("Unexpected return code %d while trying "
                                       "to power down TOS resident CPU %d\n",
                                       ret, cpu);
                                ++err;
                        }
                } else if (ret != 0) {
                        pr_err("Error occurred (%d) while trying "
                               "to power down CPU %d\n", ret, cpu);
                        ++err;
                }

                if (ret == 0)
                        cpumask_set_cpu(cpu, offlined_cpus);
        }

        /* Try to power up all the CPUs that have been offlined. */
        for_each_cpu(cpu, offlined_cpus) {
                int ret = cpu_up(cpu);

                if (ret != 0) {
                        pr_err("Error occurred (%d) while trying "
                               "to power up CPU %d\n", ret, cpu);
                        ++err;
                } else {
                        cpumask_clear_cpu(cpu, offlined_cpus);
                }
        }

        /*
         * Something went bad at some point and some CPUs could not be turned
         * back on.
         */
        WARN_ON(!cpumask_empty(offlined_cpus) ||
                num_online_cpus() != nb_available_cpus);

        return err;
}
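
/*
 * Hotplug test driver: turn all online CPUs off and back on, first across the
 * whole system, then cluster by cluster (so that powering down the last CPU
 * of a cluster also exercises cluster shutdown). Returns the accumulated
 * error count, or -ENOMEM if an allocation failed.
 */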
static int hotplug_tests(void)
{
        int err;
        cpumask_var_t offlined_cpus;
        int i, nb_cluster;
        const struct cpumask **clusters;
        char *page_buf;

        err = -ENOMEM;
        if (!alloc_cpumask_var(&offlined_cpus, GFP_KERNEL))
                return err;
        /* We may have up to nb_available_cpus clusters. */
        clusters = kmalloc_array(nb_available_cpus, sizeof(*clusters),
                                 GFP_KERNEL);
        if (!clusters)
                goto out_free_cpus;
        page_buf = (char *)__get_free_page(GFP_KERNEL);
        if (!page_buf)
                goto out_free_clusters;

        err = 0;
        nb_cluster = find_clusters(cpu_online_mask, clusters);

        /*
         * Of course the last CPU cannot be powered down and cpu_down() should
         * refuse doing that.
         */
        pr_info("Trying to turn off and on again all CPUs\n");
        err += down_and_up_cpus(cpu_online_mask, offlined_cpus);

        /*
         * Take down CPUs by cluster this time. When the last CPU is turned
         * off, the cluster itself should shut down.
         */
        for (i = 0; i < nb_cluster; ++i) {
                int cluster_id =
                        topology_physical_package_id(cpumask_any(clusters[i]));
                ssize_t len = cpumap_print_to_pagebuf(true, page_buf,
                                                      clusters[i]);
                /* Remove trailing newline. */
                page_buf[len - 1] = '\0';
                pr_info("Trying to turn off and on again cluster %d "
                        "(CPUs %s)\n", cluster_id, page_buf);
                err += down_and_up_cpus(clusters[i], offlined_cpus);
        }

        free_page((unsigned long)page_buf);
out_free_clusters:
        kfree(clusters);
out_free_cpus:
        free_cpumask_var(offlined_cpus);
        return err;
}
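
/* Nothing to do here: firing the timer is enough to wake up the CPU. */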
static void dummy_callback(unsigned long ignored) {}
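
/*
 * Enter cpuidle state @index on the calling CPU via arm_cpuidle_suspend(),
 * switching to the broadcast tick first when @broadcast is set (and falling
 * back to plain WFI if the broadcast tick cannot be entered). Called with
 * IRQs disabled. Returns @index on success, 0 when only WFI was entered, or
 * a negative error code.
 */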
static int suspend_cpu(int index, bool broadcast)
{
        int ret;

        arch_cpu_idle_enter();

        if (broadcast) {
                /*
                 * The local timer will be shut down, we need to enter tick
                 * broadcast.
                 */
                ret = tick_broadcast_enter();
                if (ret) {
                        /*
                         * In the absence of hardware broadcast mechanism,
                         * this CPU might be used to broadcast wakeups, which
                         * may be why entering tick broadcast has failed.
                         * There is little the kernel can do to work around
                         * that, so enter WFI instead (idle state 0).
                         */
                        cpu_do_idle();
                        ret = 0;
                        goto out_arch_exit;
                }
        }

        /*
         * Replicate the common ARM cpuidle enter function
         * (arm_enter_idle_state).
         */
        ret = CPU_PM_CPU_IDLE_ENTER(arm_cpuidle_suspend, index);

        if (broadcast)
                tick_broadcast_exit();

out_arch_exit:
        arch_cpu_idle_exit();

        return ret;
}
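
/*
 * Per-CPU test thread. It runs NUM_SUSPEND_CYCLE cycles, each of which tries
 * every cpuidle state except state 0, and counts how many attempts reached
 * the requested state, how many ended up in a shallower state, and how many
 * failed. Once done, the thread waits to be stopped by kthread_stop() and
 * returns its error count.
 */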
static int suspend_test_thread(void *arg)
{
        int cpu = (long)arg;
        int i, nb_suspend = 0, nb_shallow_sleep = 0, nb_err = 0;
        struct sched_param sched_priority = { .sched_priority = MAX_RT_PRIO-1 };
        struct cpuidle_device *dev;
        struct cpuidle_driver *drv;
        /* No need for an actual callback, we just want to wake up the CPU. */
        struct timer_list wakeup_timer;

        /* Wait for the main thread to give the start signal. */
        wait_for_completion(&suspend_threads_started);

        /* Set maximum priority to preempt all other threads on this CPU. */
        if (sched_setscheduler_nocheck(current, SCHED_FIFO, &sched_priority))
                pr_warn("Failed to set suspend thread scheduler on CPU %d\n",
                        cpu);

        dev = this_cpu_read(cpuidle_devices);
        drv = cpuidle_get_cpu_driver(dev);

        pr_info("CPU %d entering suspend cycles, states 1 through %d\n",
                cpu, drv->state_count - 1);

        setup_timer_on_stack(&wakeup_timer, dummy_callback, 0);
        for (i = 0; i < NUM_SUSPEND_CYCLE; ++i) {
                int index;
                /*
                 * Test all possible states, except 0 (which is usually WFI
                 * and doesn't use PSCI).
                 */
                for (index = 1; index < drv->state_count; ++index) {
                        struct cpuidle_state *state = &drv->states[index];
                        bool broadcast = state->flags & CPUIDLE_FLAG_TIMER_STOP;
                        int ret;

                        /*
                         * Set the timer to wake this CPU up in some time
                         * (which should be largely sufficient for entering
                         * suspend). If the local tick is disabled when
                         * entering suspend, suspend_cpu() takes care of
                         * switching to a broadcast tick, so the timer will
                         * still wake us up.
                         */
                        mod_timer(&wakeup_timer, jiffies +
                                  usecs_to_jiffies(state->target_residency));

                        /* IRQs must be disabled during suspend operations. */
                        local_irq_disable();

                        ret = suspend_cpu(index, broadcast);

                        /*
                         * We have woken up. Re-enable IRQs to handle any
                         * pending interrupt, do not wait until the end of the
                         * loop.
                         */
                        local_irq_enable();

                        if (ret == index) {
                                ++nb_suspend;
                        } else if (ret >= 0) {
                                /* We did not enter the expected state. */
                                ++nb_shallow_sleep;
                        } else {
                                pr_err("Failed to suspend CPU %d: error %d "
                                       "(requested state %d, cycle %d)\n",
                                       cpu, ret, index, i);
                                ++nb_err;
                        }
                }
        }

        /*
         * Disable the timer to make sure that the timer will not trigger
         * later.
         */
        del_timer(&wakeup_timer);

        if (atomic_dec_return_relaxed(&nb_active_threads) == 0)
                complete(&suspend_threads_done);

        /* Give up on RT scheduling and wait for termination. */
        sched_priority.sched_priority = 0;
        if (sched_setscheduler_nocheck(current, SCHED_NORMAL, &sched_priority))
                pr_warn("Failed to set suspend thread scheduler on CPU %d\n",
                        cpu);
        for (;;) {
                /* Needs to be set first to avoid missing a wakeup. */
                set_current_state(TASK_INTERRUPTIBLE);
                if (kthread_should_stop()) {
                        __set_current_state(TASK_RUNNING);
                        break;
                }
                schedule();
        }

        pr_info("CPU %d suspend test results: success %d, shallow states %d, errors %d\n",
                cpu, nb_suspend, nb_shallow_sleep, nb_err);

        return nb_err;
}
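
/*
 * Create one suspend test thread for each online CPU that has a cpuidle
 * driver, release them all at once and wait for them to finish, accumulating
 * their error counts. cpuidle is paused for the duration of the test so the
 * idle tasks do not interfere. Returns the total error count, -ENOMEM on
 * allocation failure, or -ENODEV if no thread could be created.
 */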
static int suspend_tests(void)
{
        int i, cpu, err = 0;
        struct task_struct **threads;
        int nb_threads = 0;

        threads = kmalloc_array(nb_available_cpus, sizeof(*threads),
                                GFP_KERNEL);
        if (!threads)
                return -ENOMEM;

        /*
         * Stop cpuidle to prevent the idle tasks from entering a deep sleep
         * mode, as it might interfere with the suspend threads on other CPUs.
         * This does not prevent the suspend threads from using cpuidle (only
         * the idle tasks check this status). Take the idle lock so that
         * the cpuidle driver and device look-up can be carried out safely.
         */
        cpuidle_pause_and_lock();

        for_each_online_cpu(cpu) {
                struct task_struct *thread;
                /* Check that cpuidle is available on that CPU. */
                struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);
                struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

                if (!dev || !drv) {
                        pr_warn("cpuidle not available on CPU %d, ignoring\n",
                                cpu);
                        continue;
                }

                thread = kthread_create_on_cpu(suspend_test_thread,
                                               (void *)(long)cpu, cpu,
                                               "psci_suspend_test");
                if (IS_ERR(thread))
                        pr_err("Failed to create kthread on CPU %d\n", cpu);
                else
                        threads[nb_threads++] = thread;
        }

        if (nb_threads < 1) {
                err = -ENODEV;
                goto out;
        }

        atomic_set(&nb_active_threads, nb_threads);

        /*
         * Wake up the suspend threads. To avoid the main thread being
         * preempted before all the threads have been unparked, the suspend
         * threads will wait for the completion of suspend_threads_started.
         */
        for (i = 0; i < nb_threads; ++i)
                wake_up_process(threads[i]);
        complete_all(&suspend_threads_started);

        wait_for_completion(&suspend_threads_done);

        /* Stop and destroy all threads, get return status. */
        for (i = 0; i < nb_threads; ++i)
                err += kthread_stop(threads[i]);

out:
        cpuidle_resume_and_unlock();
        kfree(threads);
        return err;
}
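
/*
 * Module entry point: check that the PSCI operations are available, then run
 * the hotplug tests followed by the suspend tests, logging a summary of each.
 * Returns 0 on success or after recoverable test errors, and a negative error
 * code on fatal failures.
 */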
static int __init psci_checker(void)
{
        int ret;

        /*
         * Since we're in an initcall, we assume that all the CPUs that can
         * be onlined have been onlined.
         *
         * The tests assume that hotplug is enabled but nobody else is using
         * it, otherwise the results will be unpredictable. However, since
         * there is no userspace yet in initcalls, that should be fine, as
         * long as no torture test is running at the same time (see Kconfig).
         */
        nb_available_cpus = num_online_cpus();

        /* Check PSCI operations are set up and working. */
        ret = psci_ops_check();
        if (ret)
                return ret;

        pr_info("PSCI checker started using %u CPUs\n", nb_available_cpus);

        pr_info("Starting hotplug tests\n");
        ret = hotplug_tests();
        if (ret == 0)
                pr_info("Hotplug tests passed OK\n");
        else if (ret > 0)
                pr_err("%d error(s) encountered in hotplug tests\n", ret);
        else {
                pr_err("Out of memory\n");
                return ret;
        }

        pr_info("Starting suspend tests (%d cycles per state)\n",
                NUM_SUSPEND_CYCLE);
        ret = suspend_tests();
        if (ret == 0)
                pr_info("Suspend tests passed OK\n");
        else if (ret > 0)
                pr_err("%d error(s) encountered in suspend tests\n", ret);
        else {
                switch (ret) {
                case -ENOMEM:
                        pr_err("Out of memory\n");
                        break;
                case -ENODEV:
                        pr_warn("Could not start suspend tests on any CPU\n");
                        break;
                }
        }

        pr_info("PSCI checker completed\n");
        return ret < 0 ? ret : 0;
}
late_initcall(psci_checker);