/*
 * OMAP MPUSS low power code
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 * Santosh Shilimkar <santosh.shilimkar@ti.com>
 *
 * OMAP4430 MPUSS mainly consists of dual Cortex-A9 with per-CPU
 * Local timer and Watchdog, GIC, SCU, PL310 L2 cache controller,
 * CPU0 and CPU1 LPRM modules.
 * CPU0, CPU1 and MPUSS each have their own power domain and
 * hence multiple low power combinations of MPUSS are possible.
 *
 * CPU0 and CPU1 can't support Closed Switch Retention (CSWR)
 * because the mode is not supported due to the hardware constraints
 * of dormant mode: while waking up from dormant mode, a reset signal
 * to the Cortex-A9 processor must be asserted by the external
 * power controller.
 *
 * With architectural inputs and hardware recommendations, only the
 * modes below are supported from a power gain vs latency point of view.
 *
 * CPU0            CPU1            MPUSS
 * ----------------------------------------------
 * ON              ON              ON
 * ON(Inactive)    OFF             ON(Inactive)
 * OFF             OFF             CSWR
 * OFF             OFF             OSWR
 * OFF             OFF             OFF (Device OFF *TBD)
 * ----------------------------------------------
 *
 * Note: CPU0 is the master core; it is the last CPU to go down
 * and the first to wake up when MPUSS low power states are exercised.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/errno.h>
#include <linux/linkage.h>
#include <linux/smp.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/smp_scu.h>
#include <asm/pgalloc.h>
#include <asm/suspend.h>
#include <asm/virt.h>
#include <asm/hardware/cache-l2x0.h>

#include "soc.h"
#include "common.h"
#include "omap44xx.h"
#include "omap4-sar-layout.h"
#include "pm.h"
#include "prcm_mpu44xx.h"
#include "prcm_mpu54xx.h"
#include "prminst44xx.h"
#include "prcm44xx.h"
#include "prm44xx.h"
#include "prm-regbits-44xx.h"

static void __iomem *sar_base;

#if defined(CONFIG_PM) && defined(CONFIG_SMP)
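
/*
 * struct omap4_cpu_pm_info - per-CPU bookkeeping for the MPUSS low power code
 * @pwrdm:         CPU power domain
 * @scu_sar_addr:  SAR RAM slot where the SCU power status is saved
 * @wkup_sar_addr: SAR RAM slot where the CPU wakeup address is saved
 * @l2x0_sar_addr: SAR RAM slot where the L2X0 low power save state is saved
 *
 * The field descriptions above follow from how the members are used below.
 */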
struct omap4_cpu_pm_info {
        struct powerdomain *pwrdm;
        void __iomem *scu_sar_addr;
        void __iomem *wkup_sar_addr;
        void __iomem *l2x0_sar_addr;
};

/**
 * struct cpu_pm_ops - CPU pm operations
 * @finish_suspend:   CPU suspend finisher function pointer
 * @resume:           CPU resume function pointer
 * @scu_prepare:      CPU Snoop Control Unit programming function pointer
 * @hotplug_restart:  CPU restart function pointer
 *
 * Structure holds function pointers for CPU low power operations like
 * suspend, resume and SCU programming.
 */
struct cpu_pm_ops {
        int (*finish_suspend)(unsigned long cpu_state);
        void (*resume)(void);
        void (*scu_prepare)(unsigned int cpu_id, unsigned int cpu_state);
        void (*hotplug_restart)(void);
};

static DEFINE_PER_CPU(struct omap4_cpu_pm_info, omap4_pm_info);
static struct powerdomain *mpuss_pd;
static u32 cpu_context_offset;

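/*
 * Safe defaults: a plain WFI suspend finisher and no-op resume/SCU hooks.
 * omap4_mpuss_init() replaces these with the SoC-specific implementations
 * (e.g. omap4_finish_suspend(), omap4_cpu_resume() and scu_pwrst_prepare()
 * on OMAP44xx).
 */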
static int default_finish_suspend(unsigned long cpu_state)
{
        omap_do_wfi();
        return 0;
}

static void dummy_cpu_resume(void)
{}

static void dummy_scu_prepare(unsigned int cpu_id, unsigned int cpu_state)
{}

static struct cpu_pm_ops omap_pm_ops = {
        .finish_suspend         = default_finish_suspend,
        .resume                 = dummy_cpu_resume,
        .scu_prepare            = dummy_scu_prepare,
        .hotplug_restart        = dummy_cpu_resume,
};

/*
 * Program the wakeup routine address for the CPU0 and CPU1
 * used for OFF or DORMANT wakeup.
 */
static inline void set_cpu_wakeup_addr(unsigned int cpu_id, u32 addr)
{
        struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id);

        if (pm_info->wkup_sar_addr)
                writel_relaxed(addr, pm_info->wkup_sar_addr);
}

/*
 * Store the SCU power status value to scratchpad memory
 */
static void scu_pwrst_prepare(unsigned int cpu_id, unsigned int cpu_state)
{
        struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id);
        u32 scu_pwr_st;

        switch (cpu_state) {
        case PWRDM_POWER_RET:
                scu_pwr_st = SCU_PM_DORMANT;
                break;
        case PWRDM_POWER_OFF:
                scu_pwr_st = SCU_PM_POWEROFF;
                break;
        case PWRDM_POWER_ON:
        case PWRDM_POWER_INACTIVE:
        default:
                scu_pwr_st = SCU_PM_NORMAL;
                break;
        }

        if (pm_info->scu_sar_addr)
                writel_relaxed(scu_pwr_st, pm_info->scu_sar_addr);
}

/* Helper functions for MPUSS OSWR */
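/*
 * The previous-state (*_CONTEXT) registers latch which context was lost
 * in the last transition. Their status bits are write-1-to-clear, so
 * reading a register and writing the value straight back clears the
 * recorded state; that is all these helpers do.
 */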
static inline void mpuss_clear_prev_logic_pwrst(void)
{
        u32 reg;

        reg = omap4_prminst_read_inst_reg(OMAP4430_PRM_PARTITION,
                OMAP4430_PRM_MPU_INST, OMAP4_RM_MPU_MPU_CONTEXT_OFFSET);
        omap4_prminst_write_inst_reg(reg, OMAP4430_PRM_PARTITION,
                OMAP4430_PRM_MPU_INST, OMAP4_RM_MPU_MPU_CONTEXT_OFFSET);
}

static inline void cpu_clear_prev_logic_pwrst(unsigned int cpu_id)
{
        u32 reg;

        if (cpu_id) {
                reg = omap4_prcm_mpu_read_inst_reg(OMAP4430_PRCM_MPU_CPU1_INST,
                                        cpu_context_offset);
                omap4_prcm_mpu_write_inst_reg(reg, OMAP4430_PRCM_MPU_CPU1_INST,
                                        cpu_context_offset);
        } else {
                reg = omap4_prcm_mpu_read_inst_reg(OMAP4430_PRCM_MPU_CPU0_INST,
                                        cpu_context_offset);
                omap4_prcm_mpu_write_inst_reg(reg, OMAP4430_PRCM_MPU_CPU0_INST,
                                        cpu_context_offset);
        }
}

/*
 * Store the CPU cluster state for L2X0 low power operations.
 */
static void l2x0_pwrst_prepare(unsigned int cpu_id, unsigned int save_state)
{
        struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id);

        if (pm_info->l2x0_sar_addr)
                writel_relaxed(save_state, pm_info->l2x0_sar_addr);
}

/*
 * Save the L2X0 AUXCTRL and POR values to SAR memory. They are used
 * in every MPUSS OFF restore path.
 */
#ifdef CONFIG_CACHE_L2X0
static void __init save_l2x0_context(void)
{
        void __iomem *l2x0_base = omap4_get_l2cache_base();

        if (l2x0_base && sar_base) {
                writel_relaxed(l2x0_saved_regs.aux_ctrl,
                               sar_base + L2X0_AUXCTRL_OFFSET);
                writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
                               sar_base + L2X0_PREFETCH_CTRL_OFFSET);
        }
}
#else
static void __init save_l2x0_context(void)
{}
#endif

/**
 * omap4_enter_lowpower: OMAP4 MPUSS Low Power Entry Function
 * The purpose of this function is to manage the low power programming
 * of the OMAP4 MPUSS subsystem.
 * @cpu : CPU ID
 * @power_state: Low power state.
 *
 * MPUSS states for the context save:
 * save_state =
 *  0 - Nothing lost and no need to save: MPUSS INACTIVE
 *  1 - CPUx L1 and logic lost: MPUSS CSWR
 *  2 - CPUx L1 and logic lost + GIC lost: MPUSS OSWR
 *  3 - CPUx L1 and logic lost + GIC + L2 lost: DEVICE OFF
 */
int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state)
{
        struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu);
        unsigned int save_state = 0, cpu_logic_state = PWRDM_POWER_RET;
        unsigned int wakeup_cpu;

        if (omap_rev() == OMAP4430_REV_ES1_0)
                return -ENXIO;

        switch (power_state) {
        case PWRDM_POWER_ON:
        case PWRDM_POWER_INACTIVE:
                save_state = 0;
                break;
        case PWRDM_POWER_OFF:
                cpu_logic_state = PWRDM_POWER_OFF;
                save_state = 1;
                break;
        case PWRDM_POWER_RET:
                if (IS_PM44XX_ERRATUM(PM_OMAP4_CPU_OSWR_DISABLE))
                        save_state = 0;
                break;
        default:
                /*
                 * CPUx CSWR is an invalid hardware state. CPUx OSWR also
                 * doesn't make much sense, since logic is lost and the L1
                 * cache needs to be cleaned for coherency. This makes
                 * CPUx OSWR equivalent to CPUx OFF and hence not supported.
                 */
                WARN_ON(1);
                return -ENXIO;
        }

        pwrdm_pre_transition(NULL);

        /*
         * Check MPUSS next state and save interrupt controller if needed.
         * In MPUSS OSWR or device OFF, interrupt controller context is lost.
         */
        mpuss_clear_prev_logic_pwrst();
        if ((pwrdm_read_next_pwrst(mpuss_pd) == PWRDM_POWER_RET) &&
            (pwrdm_read_logic_retst(mpuss_pd) == PWRDM_POWER_OFF))
                save_state = 2;

        cpu_clear_prev_logic_pwrst(cpu);
        pwrdm_set_next_pwrst(pm_info->pwrdm, power_state);
        pwrdm_set_logic_retst(pm_info->pwrdm, cpu_logic_state);
        set_cpu_wakeup_addr(cpu, virt_to_phys(omap_pm_ops.resume));
        omap_pm_ops.scu_prepare(cpu, power_state);
        l2x0_pwrst_prepare(cpu, save_state);

        /*
         * Call low level function with targeted low power state.
         */
        if (save_state)
                cpu_suspend(save_state, omap_pm_ops.finish_suspend);
        else
                omap_pm_ops.finish_suspend(save_state);

        if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD) && cpu)
                gic_dist_enable();

        /*
         * Restore the CPUx power state to ON, otherwise the CPUx power
         * domain can transition to the programmed low power state while
         * doing WFI outside the low power code. On secure devices, CPUx
         * does WFI which can result in a domain transition.
         */
        wakeup_cpu = smp_processor_id();
        pwrdm_set_next_pwrst(pm_info->pwrdm, PWRDM_POWER_ON);

        pwrdm_post_transition(NULL);

        return 0;
}
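
/*
 * Note: omap4_hotplug_cpu() below is expected to run on the CPU that is
 * being taken offline: it programs that CPU's wakeup address and then
 * calls the suspend finisher, which does not return when the target
 * power state is OFF.
 */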

/**
 * omap4_hotplug_cpu: OMAP4 CPU hotplug entry
 * @cpu : CPU ID
 * @power_state: CPU low power state.
 */
int omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state)
{
        struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu);
        unsigned int cpu_state = 0;

        if (omap_rev() == OMAP4430_REV_ES1_0)
                return -ENXIO;

        /* Use the achievable power state for the domain */
        power_state = pwrdm_get_valid_lp_state(pm_info->pwrdm,
                                               false, power_state);

        if (power_state == PWRDM_POWER_OFF)
                cpu_state = 1;

        pwrdm_clear_all_prev_pwrst(pm_info->pwrdm);
        pwrdm_set_next_pwrst(pm_info->pwrdm, power_state);
        set_cpu_wakeup_addr(cpu, virt_to_phys(omap_pm_ops.hotplug_restart));
        omap_pm_ops.scu_prepare(cpu, power_state);

        /*
         * The CPU never returns if the targeted power state is OFF mode.
         * CPU ONLINE follows the normal CPU ONLINE path via
         * omap4_secondary_startup().
         */
        omap_pm_ops.finish_suspend(cpu_state);

        pwrdm_set_next_pwrst(pm_info->pwrdm, PWRDM_POWER_ON);
        return 0;
}

/*
 * Enable Mercury Fast HG retention mode by default.
 */
static void enable_mercury_retention_mode(void)
{
        u32 reg;

        reg = omap4_prcm_mpu_read_inst_reg(OMAP54XX_PRCM_MPU_DEVICE_INST,
                        OMAP54XX_PRCM_MPU_PRM_PSCON_COUNT_OFFSET);
        /* Enable HG_EN, HG_RAMPUP = fast mode */
        reg |= BIT(24) | BIT(25);
        omap4_prcm_mpu_write_inst_reg(reg, OMAP54XX_PRCM_MPU_DEVICE_INST,
                        OMAP54XX_PRCM_MPU_PRM_PSCON_COUNT_OFFSET);
}

/*
 * Initialise OMAP4 MPUSS
 */
int __init omap4_mpuss_init(void)
{
        struct omap4_cpu_pm_info *pm_info;

        if (omap_rev() == OMAP4430_REV_ES1_0) {
                WARN(1, "Power Management not supported on OMAP4430 ES1.0\n");
                return -ENODEV;
        }

        /* Initialise per-CPU PM information */
        pm_info = &per_cpu(omap4_pm_info, 0x0);
        if (sar_base) {
                pm_info->scu_sar_addr = sar_base + SCU_OFFSET0;
                if (cpu_is_omap44xx())
                        pm_info->wkup_sar_addr = sar_base +
                                CPU0_WAKEUP_NS_PA_ADDR_OFFSET;
                else
                        pm_info->wkup_sar_addr = sar_base +
                                OMAP5_CPU0_WAKEUP_NS_PA_ADDR_OFFSET;
                pm_info->l2x0_sar_addr = sar_base + L2X0_SAVE_OFFSET0;
        }
        pm_info->pwrdm = pwrdm_lookup("cpu0_pwrdm");
        if (!pm_info->pwrdm) {
                pr_err("Lookup failed for CPU0 pwrdm\n");
                return -ENODEV;
        }

        /* Clear CPU previous power domain state */
        pwrdm_clear_all_prev_pwrst(pm_info->pwrdm);
        cpu_clear_prev_logic_pwrst(0);

        /* Initialise CPU0 power domain state to ON */
        pwrdm_set_next_pwrst(pm_info->pwrdm, PWRDM_POWER_ON);

        pm_info = &per_cpu(omap4_pm_info, 0x1);
        if (sar_base) {
                pm_info->scu_sar_addr = sar_base + SCU_OFFSET1;
                if (cpu_is_omap44xx())
                        pm_info->wkup_sar_addr = sar_base +
                                CPU1_WAKEUP_NS_PA_ADDR_OFFSET;
                else
                        pm_info->wkup_sar_addr = sar_base +
                                OMAP5_CPU1_WAKEUP_NS_PA_ADDR_OFFSET;
                pm_info->l2x0_sar_addr = sar_base + L2X0_SAVE_OFFSET1;
        }

        pm_info->pwrdm = pwrdm_lookup("cpu1_pwrdm");
        if (!pm_info->pwrdm) {
                pr_err("Lookup failed for CPU1 pwrdm\n");
                return -ENODEV;
        }

        /* Clear CPU previous power domain state */
        pwrdm_clear_all_prev_pwrst(pm_info->pwrdm);
        cpu_clear_prev_logic_pwrst(1);

        /* Initialise CPU1 power domain state to ON */
        pwrdm_set_next_pwrst(pm_info->pwrdm, PWRDM_POWER_ON);

        mpuss_pd = pwrdm_lookup("mpu_pwrdm");
        if (!mpuss_pd) {
                pr_err("Failed to lookup MPUSS power domain\n");
                return -ENODEV;
        }
        pwrdm_clear_all_prev_pwrst(mpuss_pd);
        mpuss_clear_prev_logic_pwrst();

        if (sar_base) {
                /* Save device type on scratchpad for low level code to use */
                writel_relaxed((omap_type() != OMAP2_DEVICE_TYPE_GP) ? 1 : 0,
                               sar_base + OMAP_TYPE_OFFSET);
                save_l2x0_context();
        }

        if (cpu_is_omap44xx()) {
                omap_pm_ops.finish_suspend = omap4_finish_suspend;
                omap_pm_ops.resume = omap4_cpu_resume;
                omap_pm_ops.scu_prepare = scu_pwrst_prepare;
                omap_pm_ops.hotplug_restart = omap4_secondary_startup;
                cpu_context_offset = OMAP4_RM_CPU0_CPU0_CONTEXT_OFFSET;
        } else if (soc_is_omap54xx() || soc_is_dra7xx()) {
                cpu_context_offset = OMAP54XX_RM_CPU0_CPU0_CONTEXT_OFFSET;
                enable_mercury_retention_mode();
        }

        if (cpu_is_omap446x())
                omap_pm_ops.hotplug_restart = omap4460_secondary_startup;

        return 0;
}

#endif

/*
 * For kexec, we must set CPU1_WAKEUP_NS_PA_ADDR to point to the
 * current kernel's secondary_startup() early before
 * clockdomains_init(). Otherwise clockdomain_init() can
 * wake CPU1 and cause a hang.
 */
void __init omap4_mpuss_early_init(void)
{
        unsigned long startup_pa;

        if (!(cpu_is_omap44xx() || soc_is_omap54xx()))
                return;

        sar_base = omap4_get_sar_ram_base();

        if (cpu_is_omap443x())
                startup_pa = virt_to_phys(omap4_secondary_startup);
        else if (cpu_is_omap446x())
                startup_pa = virt_to_phys(omap4460_secondary_startup);
        else if ((__boot_cpu_mode & MODE_MASK) == HYP_MODE)
                startup_pa = virt_to_phys(omap5_secondary_hyp_startup);
        else
                startup_pa = virt_to_phys(omap5_secondary_startup);

        if (cpu_is_omap44xx())
                writel_relaxed(startup_pa, sar_base +
                               CPU1_WAKEUP_NS_PA_ADDR_OFFSET);
        else
                writel_relaxed(startup_pa, sar_base +
                               OMAP5_CPU1_WAKEUP_NS_PA_ADDR_OFFSET);
}