acpuclock-cortex.c

/*
 * Copyright (c) 2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/cpufreq.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/iopoll.h>

#include <mach/board.h>
#include <mach/msm_iomap.h>
#include <mach/msm_bus.h>
#include <mach/msm_bus_board.h>
#include <mach/rpm-regulator.h>
#include <mach/clk-provider.h>
#include <mach/rpm-regulator-smd.h>

#include "acpuclock.h"
#include "acpuclock-cortex.h"

#define POLL_INTERVAL_US            1
#define APCS_RCG_UPDATE_TIMEOUT_US  20

static struct acpuclk_drv_data *priv;
static uint32_t bus_perf_client;

#ifdef CONFIG_SEC_DEBUG_VERBOSE_SUMMARY_HTML
extern int cpu_frequency[CONFIG_NR_CPUS];
extern int cpu_volt[CONFIG_NR_CPUS];
extern char cpu_state[CONFIG_NR_CPUS][30];
#endif

/* Update the bus bandwidth request. */
static void set_bus_bw(unsigned int bw)
{
        int ret;

        if (bw >= priv->bus_scale->num_usecases) {
                pr_err("invalid bandwidth request (%d)\n", bw);
                return;
        }

        /* Update bandwidth if request has changed. This may sleep. */
        ret = msm_bus_scale_client_update_request(bus_perf_client, bw);
        if (ret)
                pr_err("bandwidth request failed (%d)\n", ret);
}

/* Apply any voltage increases. */
static int increase_vdd(unsigned int vdd_cpu, unsigned int vdd_mem)
{
        int rc = 0;

        if (priv->vdd_mem) {
                /*
                 * Increase vdd_mem before vdd_cpu. vdd_mem should
                 * be >= vdd_cpu.
                 */
                rc = regulator_set_voltage(priv->vdd_mem, vdd_mem,
                                           priv->vdd_max_mem);
                if (rc) {
                        pr_err("vdd_mem increase failed (%d)\n", rc);
                        return rc;
                }
        }

        rc = regulator_set_voltage(priv->vdd_cpu, vdd_cpu, priv->vdd_max_cpu);
        if (rc)
                pr_err("vdd_cpu increase failed (%d)\n", rc);

        return rc;
}

/* Apply any per-cpu voltage decreases. */
static void decrease_vdd(unsigned int vdd_cpu, unsigned int vdd_mem)
{
        int ret;

        /* Update CPU voltage. */
        ret = regulator_set_voltage(priv->vdd_cpu, vdd_cpu, priv->vdd_max_cpu);
        if (ret) {
                pr_err("vdd_cpu decrease failed (%d)\n", ret);
                return;
        }

        if (!priv->vdd_mem)
                return;

        /* Decrease vdd_mem after vdd_cpu. vdd_mem should be >= vdd_cpu. */
        ret = regulator_set_voltage(priv->vdd_mem, vdd_mem, priv->vdd_max_mem);
        if (ret)
                pr_err("vdd_mem decrease failed (%d)\n", ret);
}

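/*
 * Program the APCS RCG for the requested speed: write the source-select and
 * divider fields into the CFG register, then set the update bit in the CMD
 * register and poll until the hardware clears it (bounded by
 * APCS_RCG_UPDATE_TIMEOUT_US). The divider is kept in the frequency table as
 * an integer n but encoded in hardware as (2n - 1), e.g. src_div = 2 is
 * written as 3; src_div = 0 is written unchanged and is treated as
 * divide-by-one elsewhere in this file.
 */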
static void select_clk_source_div(struct acpuclk_drv_data *drv_data,
        struct clkctl_acpu_speed *s)
{
        u32 regval, rc, src_div;
        void __iomem *apcs_rcg_config = drv_data->apcs_rcg_config;
        void __iomem *apcs_rcg_cmd = drv_data->apcs_rcg_cmd;
        struct acpuclk_reg_data *r = &drv_data->reg_data;

        src_div = s->src_div ? ((2 * s->src_div) - 1) : s->src_div;

        regval = readl_relaxed(apcs_rcg_config);
        regval &= ~r->cfg_src_mask;
        regval |= s->src_sel << r->cfg_src_shift;
        regval &= ~r->cfg_div_mask;
        regval |= src_div << r->cfg_div_shift;
        writel_relaxed(regval, apcs_rcg_config);

        /* Update the configuration */
        regval = readl_relaxed(apcs_rcg_cmd);
        regval |= r->update_mask;
        writel_relaxed(regval, apcs_rcg_cmd);

        /* Wait for the update to take effect */
        rc = readl_poll_timeout_noirq(apcs_rcg_cmd, regval,
                                      !(regval & r->poll_mask),
                                      POLL_INTERVAL_US,
                                      APCS_RCG_UPDATE_TIMEOUT_US);
        if (rc)
                pr_warn("acpu rcg didn't update its configuration\n");
}

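/*
 * Boot-time handoff: read back the source and divider currently programmed
 * into the RCG and match them, together with the parent clock's rate,
 * against an entry in the frequency table. If nothing matches, switch to the
 * first (always-on) table entry as a safe frequency and report the last
 * use_for_scaling entry, normally the highest frequency.
 */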
static struct clkctl_acpu_speed *__init find_cur_cpu_level(void)
{
        struct clkctl_acpu_speed *f, *max = priv->freq_tbl;
        void __iomem *apcs_rcg_config = priv->apcs_rcg_config;
        struct acpuclk_reg_data *r = &priv->reg_data;
        u32 regval, div, src;
        unsigned long rate;
        struct clk *parent;

        regval = readl_relaxed(apcs_rcg_config);

        src = regval & r->cfg_src_mask;
        src >>= r->cfg_src_shift;

        div = regval & r->cfg_div_mask;
        div >>= r->cfg_div_shift;
        /* No support for half-integer dividers */
        div = div > 1 ? (div + 1) / 2 : 0;

        for (f = priv->freq_tbl; f->khz; f++) {
                if (f->use_for_scaling)
                        max = f;

                if (f->src_sel != src || f->src_div != div)
                        continue;

                parent = priv->src_clocks[f->src].clk;
                rate = parent->rate / (div ? div : 1);

                if (f->khz * 1000 == rate)
                        break;
        }

        if (f->khz)
                return f;

        pr_err("CPUs are running at an unknown rate. Defaulting to %u KHz.\n",
               max->khz);

        /* Change to a safe frequency */
        select_clk_source_div(priv, priv->freq_tbl);
        /* Default to largest frequency */
        return max;
}

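/*
 * Atomic-context speed switch (SWFI / power collapse): only the RCG mux and
 * divider change, using the non-sleeping clk_enable()/clk_disable() calls.
 * Reprogramming the ACPU PLL itself is not allowed here, hence the WARN()
 * when both the current and target source are ACPUPLL.
 */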
static int set_speed_atomic(struct clkctl_acpu_speed *tgt_s)
{
        struct clkctl_acpu_speed *strt_s = priv->current_speed;
        struct clk *strt = priv->src_clocks[strt_s->src].clk;
        struct clk *tgt = priv->src_clocks[tgt_s->src].clk;
        int rc = 0;

        WARN(strt_s->src == ACPUPLL && tgt_s->src == ACPUPLL,
             "can't reprogram ACPUPLL during atomic context\n");

        rc = clk_enable(tgt);
        if (rc)
                return rc;

        select_clk_source_div(priv, tgt_s);
        clk_disable(strt);

        return rc;
}

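/*
 * Non-atomic speed switch. Three cases:
 *  - ACPUPLL -> ACPUPLL: park the CPU on the always-on first table entry,
 *    reprogram and restart the PLL, then switch back to it.
 *  - other source -> ACPUPLL: set and enable the PLL first, switch to it,
 *    then release the old source.
 *  - anything else: enable the target source, switch, release the old one.
 * tgt_freq_hz is the rate requested from the parent clock, i.e. the CPU
 * frequency scaled back up by the divider applied in the RCG.
 */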
static int set_speed(struct clkctl_acpu_speed *tgt_s)
{
        int rc = 0;
        unsigned int div = tgt_s->src_div ? tgt_s->src_div : 1;
        unsigned int tgt_freq_hz = tgt_s->khz * 1000 * div;
        struct clkctl_acpu_speed *strt_s = priv->current_speed;
        struct clkctl_acpu_speed *cxo_s = &priv->freq_tbl[0];
        struct clk *strt = priv->src_clocks[strt_s->src].clk;
        struct clk *tgt = priv->src_clocks[tgt_s->src].clk;

        if (strt_s->src == ACPUPLL && tgt_s->src == ACPUPLL) {
                /* Switch to another always on src */
                select_clk_source_div(priv, cxo_s);

                /* Re-program acpu pll */
                clk_disable_unprepare(tgt);
                rc = clk_set_rate(tgt, tgt_freq_hz);
                if (rc)
                        pr_err("Failed to set ACPU PLL to %u\n", tgt_freq_hz);
                BUG_ON(clk_prepare_enable(tgt));

                /* Switch back to acpu pll */
                select_clk_source_div(priv, tgt_s);
        } else if (strt_s->src != ACPUPLL && tgt_s->src == ACPUPLL) {
                rc = clk_set_rate(tgt, tgt_freq_hz);
                if (rc) {
                        pr_err("Failed to set ACPU PLL to %u\n", tgt_freq_hz);
                        return rc;
                }

                rc = clk_prepare_enable(tgt);
                if (rc) {
                        pr_err("ACPU PLL enable failed\n");
                        return rc;
                }

                select_clk_source_div(priv, tgt_s);

                clk_disable_unprepare(strt);
        } else {
                rc = clk_prepare_enable(tgt);
                if (rc) {
                        pr_err("%s enable failed\n",
                               priv->src_clocks[tgt_s->src].name);
                        return rc;
                }

                select_clk_source_div(priv, tgt_s);

                clk_disable_unprepare(strt);
        }

        return rc;
}

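/*
 * Main rate-change entry point. For cpufreq requests the driver mutex is
 * held and the CPU/memory voltages are raised before a frequency increase;
 * after the switch the bus bandwidth vote is updated and voltages are
 * lowered when possible. SWFI and power-collapse requests take the atomic
 * path and skip the voltage and bandwidth updates.
 */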
static int acpuclk_cortex_set_rate(int cpu, unsigned long rate,
        enum setrate_reason reason)
{
        struct clkctl_acpu_speed *tgt_s, *strt_s;
        int rc = 0;

        if (reason == SETRATE_CPUFREQ)
                mutex_lock(&priv->lock);

        strt_s = priv->current_speed;

        /* Return early if rate didn't change */
        if (rate == strt_s->khz && reason != SETRATE_INIT)
                goto out;

        /* Find target frequency */
        for (tgt_s = priv->freq_tbl; tgt_s->khz != 0; tgt_s++)
                if (tgt_s->khz == rate)
                        break;
        if (tgt_s->khz == 0) {
                rc = -EINVAL;
                goto out;
        }

        /* Increase VDD levels if needed */
        if ((reason == SETRATE_CPUFREQ) && (tgt_s->khz > strt_s->khz)) {
                rc = increase_vdd(tgt_s->vdd_cpu, tgt_s->vdd_mem);
                if (rc)
                        goto out;
        }

        pr_debug("Switching from CPU rate %u KHz -> %u KHz\n",
                 strt_s->khz, tgt_s->khz);

        /* Switch CPU speed. Flag indicates atomic context */
        if (reason == SETRATE_CPUFREQ || reason == SETRATE_INIT)
                rc = set_speed(tgt_s);
        else
                rc = set_speed_atomic(tgt_s);

        if (rc)
                goto out;

        priv->current_speed = tgt_s;
        pr_debug("CPU speed change complete\n");

        /* Nothing else to do for SWFI or power-collapse. */
        if (reason == SETRATE_SWFI || reason == SETRATE_PC)
                goto out;

        /* Update bus bandwidth request */
        set_bus_bw(tgt_s->bw_level);

        /* Drop VDD levels if we can. */
        if (tgt_s->khz < strt_s->khz || reason == SETRATE_INIT)
                decrease_vdd(tgt_s->vdd_cpu, tgt_s->vdd_mem);

#ifdef CONFIG_SEC_DEBUG_VERBOSE_SUMMARY_HTML
        /* For sec debug: record this core's state, frequency and voltage */
        if (cpu_online(cpu) && cpu_active(cpu))
                strncpy(cpu_state[cpu], "Online", ARRAY_SIZE(cpu_state[cpu]));
        else if (!cpu_online(cpu) && cpu_active(cpu))
                strncpy(cpu_state[cpu], "Migrating", ARRAY_SIZE(cpu_state[cpu]));
        else if (!cpu_online(cpu) && !cpu_active(cpu))
                strncpy(cpu_state[cpu], "Down", ARRAY_SIZE(cpu_state[cpu]));
        else
                strncpy(cpu_state[cpu], "On/NotActive", ARRAY_SIZE(cpu_state[cpu]));

        cpu_frequency[cpu] = priv->current_speed->khz;
        cpu_volt[cpu] = priv->current_speed->vdd_cpu;
#endif

out:
        if (reason == SETRATE_CPUFREQ)
                mutex_unlock(&priv->lock);

        return rc;
}

static unsigned long acpuclk_cortex_get_rate(int cpu)
{
        return priv->current_speed->khz;
}

#ifdef CONFIG_SEC_DEBUG_VERBOSE_SUMMARY_HTML
static unsigned int acpuclk_cortex_get_voltage(int cpu)
{
        return priv->current_speed->vdd_cpu;
}
#endif

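/*
 * Build the cpufreq frequency table from the priv->freq_tbl entries marked
 * use_for_scaling and register it for every possible CPU. The static table
 * below holds at most 29 frequencies plus the CPUFREQ_TABLE_END terminator;
 * the BUG_ON() catches frequency plans that would overflow it.
 */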
#ifdef CONFIG_CPU_FREQ_MSM
static struct cpufreq_frequency_table freq_table[30];

static void __init cpufreq_table_init(void)
{
        int i, freq_cnt = 0;

        /* Construct the freq_table tables from priv->freq_tbl. */
        for (i = 0; priv->freq_tbl[i].khz != 0
                        && freq_cnt < ARRAY_SIZE(freq_table) - 1; i++) {
                if (!priv->freq_tbl[i].use_for_scaling)
                        continue;
                freq_table[freq_cnt].index = freq_cnt;
                freq_table[freq_cnt].frequency = priv->freq_tbl[i].khz;
                freq_cnt++;
        }

        /* freq_table not big enough to store all usable freqs. */
        BUG_ON(priv->freq_tbl[i].khz != 0);

        freq_table[freq_cnt].index = freq_cnt;
        freq_table[freq_cnt].frequency = CPUFREQ_TABLE_END;

        pr_info("CPU: %d scaling frequencies supported.\n", freq_cnt);

        /* Register table with CPUFreq. */
        for_each_possible_cpu(i)
                cpufreq_frequency_table_get_attr(freq_table, i);
}
#else
static void __init cpufreq_table_init(void) {}
#endif

static struct acpuclk_data acpuclk_cortex_data = {
        .set_rate = acpuclk_cortex_set_rate,
        .get_rate = acpuclk_cortex_get_rate,
#ifdef CONFIG_SEC_DEBUG_VERBOSE_SUMMARY_HTML
        .get_voltage = acpuclk_cortex_get_voltage,
#endif
};

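/*
 * Decode the CPU speed bin from the PTE eFuse row: bits [2:0] hold the
 * primary bin; bits [26:24] select a redundant location and, when set to 1,
 * bits [29:27] hold the bin instead. Bit 3 marks the bin as valid.
 */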
void __init get_speed_bin(void __iomem *base, struct bin_info *bin)
{
        u32 pte_efuse, redundant_sel;

        pte_efuse = readl_relaxed(base);
        redundant_sel = (pte_efuse >> 24) & 0x7;
        bin->speed = pte_efuse & 0x7;

        if (redundant_sel == 1)
                bin->speed = (pte_efuse >> 27) & 0x7;

        bin->speed_valid = !!(pte_efuse & BIT(3));
}

static struct clkctl_acpu_speed *__init select_freq_plan(void)
{
        struct bin_info bin;

        if (!priv->pte_efuse_base)
                return priv->freq_tbl;

        get_speed_bin(priv->pte_efuse_base, &bin);

        if (bin.speed_valid) {
                pr_info("SPEED BIN: %d\n", bin.speed);
        } else {
                bin.speed = 0;
                pr_warn("SPEED BIN: Defaulting to %d\n", bin.speed);
        }

        return priv->pvs_tables[bin.speed];
}

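/*
 * Probe-time initialization: pick the frequency table for this chip's speed
 * bin, register the bus-scaling client, bring the regulators up at their
 * maximum levels, take over whatever clock configuration the bootloader left
 * behind (find_cur_cpu_level()), run set_rate once with SETRATE_INIT to
 * settle the voltage and bandwidth votes, and finally register with the
 * acpuclk and cpufreq frameworks.
 */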
int __init acpuclk_cortex_init(struct platform_device *pdev,
        struct acpuclk_drv_data *data)
{
        int rc;
        int parent;

        priv = data;
        mutex_init(&priv->lock);

        acpuclk_cortex_data.power_collapse_khz = priv->wait_for_irq_khz;
        acpuclk_cortex_data.wait_for_irq_khz = priv->wait_for_irq_khz;

        priv->freq_tbl = select_freq_plan();
        if (!priv->freq_tbl) {
                pr_err("Invalid freq table selected\n");
                BUG();
        }

        bus_perf_client = msm_bus_scale_register_client(priv->bus_scale);
        if (!bus_perf_client) {
                pr_err("Unable to register bus client\n");
                BUG();
        }

        /* Initialize regulators */
        rc = increase_vdd(priv->vdd_max_cpu, priv->vdd_max_mem);
        if (rc)
                return rc;

        if (priv->vdd_mem) {
                rc = regulator_enable(priv->vdd_mem);
                if (rc) {
                        dev_err(&pdev->dev, "regulator_enable for mem failed\n");
                        return rc;
                }
        }

        rc = regulator_enable(priv->vdd_cpu);
        if (rc) {
                dev_err(&pdev->dev, "regulator_enable for cpu failed\n");
                return rc;
        }

        priv->current_speed = find_cur_cpu_level();
        parent = priv->current_speed->src;
        rc = clk_prepare_enable(priv->src_clocks[parent].clk);
        if (rc) {
                dev_err(&pdev->dev, "handoff: prepare_enable failed\n");
                return rc;
        }

        rc = acpuclk_cortex_set_rate(0, priv->current_speed->khz, SETRATE_INIT);
        if (rc) {
                dev_err(&pdev->dev, "handoff: set rate failed\n");
                return rc;
        }

        acpuclk_register(&acpuclk_cortex_data);
        cpufreq_table_init();

        return 0;
}