mt8173-cpufreq.c

/*
 * Copyright (c) 2015 Linaro Ltd.
 * Author: Pi-Cheng Chen <pi-cheng.chen@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/cpu_cooling.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/thermal.h>

#define MIN_VOLT_SHIFT  (100000)
#define MAX_VOLT_SHIFT  (200000)
#define MAX_VOLT_LIMIT  (1150000)
#define VOLT_TOL        (10000)

/*
 * The struct mtk_cpu_dvfs_info holds necessary information for doing CPU DVFS
 * on each CPU power/clock domain of Mediatek SoCs. Each CPU cluster in
 * Mediatek SoCs has two voltage inputs, Vproc and Vsram. In some cases the two
 * voltage inputs need to be controlled under a hardware limitation:
 * 100mV < Vsram - Vproc < 200mV
 *
 * When scaling the clock frequency of a CPU clock domain, the clock source
 * needs to be switched to another stable PLL clock temporarily until
 * the original PLL becomes stable at target frequency.
 */
struct mtk_cpu_dvfs_info {
        struct cpumask cpus;
        struct device *cpu_dev;
        struct regulator *proc_reg;
        struct regulator *sram_reg;
        struct clk *cpu_clk;
        struct clk *inter_clk;
        struct thermal_cooling_device *cdev;
        struct list_head list_head;
        int intermediate_voltage;
        bool need_voltage_tracking;
};

static LIST_HEAD(dvfs_info_list);

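/* Find the DVFS info whose CPU mask covers the given CPU, if any. */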
static struct mtk_cpu_dvfs_info *mtk_cpu_dvfs_info_lookup(int cpu)
{
        struct mtk_cpu_dvfs_info *info;

        list_for_each_entry(info, &dvfs_info_list, list_head) {
                if (cpumask_test_cpu(cpu, &info->cpus))
                        return info;
        }

        return NULL;
}

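/*
 * Step Vproc and Vsram towards the requested Vproc while keeping
 * 100mV < Vsram - Vproc < 200mV at every intermediate point, as required
 * by the hardware limitation described above.
 */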
static int mtk_cpufreq_voltage_tracking(struct mtk_cpu_dvfs_info *info,
                                        int new_vproc)
{
        struct regulator *proc_reg = info->proc_reg;
        struct regulator *sram_reg = info->sram_reg;
        int old_vproc, old_vsram, new_vsram, vsram, vproc, ret;

        old_vproc = regulator_get_voltage(proc_reg);
        if (old_vproc < 0) {
                pr_err("%s: invalid Vproc value: %d\n", __func__, old_vproc);
                return old_vproc;
        }
        /* Vsram should not exceed the maximum allowed voltage of SoC. */
        new_vsram = min(new_vproc + MIN_VOLT_SHIFT, MAX_VOLT_LIMIT);

        if (old_vproc < new_vproc) {
                /*
                 * When scaling up voltages, Vsram and Vproc scale up step
                 * by step. At each step, set Vsram to (Vproc + 200mV) first,
                 * then set Vproc to (Vsram - 100mV).
                 * Keep doing it until Vsram and Vproc hit target voltages.
                 */
                do {
                        old_vsram = regulator_get_voltage(sram_reg);
                        if (old_vsram < 0) {
                                pr_err("%s: invalid Vsram value: %d\n",
                                       __func__, old_vsram);
                                return old_vsram;
                        }
                        old_vproc = regulator_get_voltage(proc_reg);
                        if (old_vproc < 0) {
                                pr_err("%s: invalid Vproc value: %d\n",
                                       __func__, old_vproc);
                                return old_vproc;
                        }

                        vsram = min(new_vsram, old_vproc + MAX_VOLT_SHIFT);

                        if (vsram + VOLT_TOL >= MAX_VOLT_LIMIT) {
                                vsram = MAX_VOLT_LIMIT;

                                /*
                                 * If the target Vsram hits the maximum voltage,
                                 * try to set the exact voltage value first.
                                 */
                                ret = regulator_set_voltage(sram_reg, vsram,
                                                            vsram);
                                if (ret)
                                        ret = regulator_set_voltage(sram_reg,
                                                        vsram - VOLT_TOL,
                                                        vsram);

                                vproc = new_vproc;
                        } else {
                                ret = regulator_set_voltage(sram_reg, vsram,
                                                            vsram + VOLT_TOL);

                                vproc = vsram - MIN_VOLT_SHIFT;
                        }

                        if (ret)
                                return ret;

                        ret = regulator_set_voltage(proc_reg, vproc,
                                                    vproc + VOLT_TOL);
                        if (ret) {
                                regulator_set_voltage(sram_reg, old_vsram,
                                                      old_vsram);
                                return ret;
                        }
                } while (vproc < new_vproc || vsram < new_vsram);
        } else if (old_vproc > new_vproc) {
                /*
                 * When scaling down voltages, Vsram and Vproc scale down step
                 * by step. At each step, set Vproc to (Vsram - 200mV) first,
                 * then set Vsram to (Vproc + 100mV).
                 * Keep doing it until Vsram and Vproc hit target voltages.
                 */
                do {
                        old_vproc = regulator_get_voltage(proc_reg);
                        if (old_vproc < 0) {
                                pr_err("%s: invalid Vproc value: %d\n",
                                       __func__, old_vproc);
                                return old_vproc;
                        }
                        old_vsram = regulator_get_voltage(sram_reg);
                        if (old_vsram < 0) {
                                pr_err("%s: invalid Vsram value: %d\n",
                                       __func__, old_vsram);
                                return old_vsram;
                        }

                        vproc = max(new_vproc, old_vsram - MAX_VOLT_SHIFT);
                        ret = regulator_set_voltage(proc_reg, vproc,
                                                    vproc + VOLT_TOL);
                        if (ret)
                                return ret;

                        if (vproc == new_vproc)
                                vsram = new_vsram;
                        else
                                vsram = max(new_vsram, vproc + MIN_VOLT_SHIFT);

                        if (vsram + VOLT_TOL >= MAX_VOLT_LIMIT) {
                                vsram = MAX_VOLT_LIMIT;

                                /*
                                 * If the target Vsram hits the maximum voltage,
                                 * try to set the exact voltage value first.
                                 */
                                ret = regulator_set_voltage(sram_reg, vsram,
                                                            vsram);
                                if (ret)
                                        ret = regulator_set_voltage(sram_reg,
                                                        vsram - VOLT_TOL,
                                                        vsram);
                        } else {
                                ret = regulator_set_voltage(sram_reg, vsram,
                                                            vsram + VOLT_TOL);
                        }

                        if (ret) {
                                regulator_set_voltage(proc_reg, old_vproc,
                                                      old_vproc);
                                return ret;
                        }
                } while (vproc > new_vproc + VOLT_TOL ||
                         vsram > new_vsram + VOLT_TOL);
        }

        return 0;
}

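/*
 * Set the CPU (Vproc) voltage; domains that also have an SRAM supply go
 * through the stepped voltage-tracking path above.
 */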
static int mtk_cpufreq_set_voltage(struct mtk_cpu_dvfs_info *info, int vproc)
{
        if (info->need_voltage_tracking)
                return mtk_cpufreq_voltage_tracking(info, vproc);
        else
                return regulator_set_voltage(info->proc_reg, vproc,
                                             vproc + VOLT_TOL);
}

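/*
 * cpufreq ->target_index callback: raise the voltage if needed, reparent the
 * CPU to the intermediate clock, retune the PLL, switch back, then lower the
 * voltage if the new OPP allows it.
 */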
static int mtk_cpufreq_set_target(struct cpufreq_policy *policy,
                                  unsigned int index)
{
        struct cpufreq_frequency_table *freq_table = policy->freq_table;
        struct clk *cpu_clk = policy->clk;
        struct clk *armpll = clk_get_parent(cpu_clk);
        struct mtk_cpu_dvfs_info *info = policy->driver_data;
        struct device *cpu_dev = info->cpu_dev;
        struct dev_pm_opp *opp;
        long freq_hz, old_freq_hz;
        int vproc, old_vproc, inter_vproc, target_vproc, ret;

        inter_vproc = info->intermediate_voltage;

        old_freq_hz = clk_get_rate(cpu_clk);
        old_vproc = regulator_get_voltage(info->proc_reg);
        if (old_vproc < 0) {
                pr_err("%s: invalid Vproc value: %d\n", __func__, old_vproc);
                return old_vproc;
        }

        freq_hz = freq_table[index].frequency * 1000;

        rcu_read_lock();
        opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_hz);
        if (IS_ERR(opp)) {
                rcu_read_unlock();
                pr_err("cpu%d: failed to find OPP for %ld\n",
                       policy->cpu, freq_hz);
                return PTR_ERR(opp);
        }
        vproc = dev_pm_opp_get_voltage(opp);
        rcu_read_unlock();

        /*
         * If the new voltage or the intermediate voltage is higher than the
         * current voltage, scale up voltage first.
         */
        target_vproc = (inter_vproc > vproc) ? inter_vproc : vproc;
        if (old_vproc < target_vproc) {
                ret = mtk_cpufreq_set_voltage(info, target_vproc);
                if (ret) {
                        pr_err("cpu%d: failed to scale up voltage!\n",
                               policy->cpu);
                        mtk_cpufreq_set_voltage(info, old_vproc);
                        return ret;
                }
        }

        /* Reparent the CPU clock to intermediate clock. */
        ret = clk_set_parent(cpu_clk, info->inter_clk);
        if (ret) {
                pr_err("cpu%d: failed to re-parent cpu clock!\n",
                       policy->cpu);
                mtk_cpufreq_set_voltage(info, old_vproc);
                WARN_ON(1);
                return ret;
        }

        /* Set the original PLL to target rate. */
        ret = clk_set_rate(armpll, freq_hz);
        if (ret) {
                pr_err("cpu%d: failed to scale cpu clock rate!\n",
                       policy->cpu);
                clk_set_parent(cpu_clk, armpll);
                mtk_cpufreq_set_voltage(info, old_vproc);
                return ret;
        }

        /* Set parent of CPU clock back to the original PLL. */
        ret = clk_set_parent(cpu_clk, armpll);
        if (ret) {
                pr_err("cpu%d: failed to re-parent cpu clock!\n",
                       policy->cpu);
                mtk_cpufreq_set_voltage(info, inter_vproc);
                WARN_ON(1);
                return ret;
        }

        /*
         * If the new voltage is lower than the intermediate voltage or the
         * original voltage, scale down to the new voltage.
         */
        if (vproc < inter_vproc || vproc < old_vproc) {
                ret = mtk_cpufreq_set_voltage(info, vproc);
                if (ret) {
                        pr_err("cpu%d: failed to scale down voltage!\n",
                               policy->cpu);
                        clk_set_parent(cpu_clk, info->inter_clk);
                        clk_set_rate(armpll, old_freq_hz);
                        clk_set_parent(cpu_clk, armpll);
                        return ret;
                }
        }

        return 0;
}

#define DYNAMIC_POWER "dynamic-power-coefficient"

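/*
 * ->ready callback: once the policy is up, register the CPUs as a
 * power-aware cooling device if the device tree declares #cooling-cells.
 */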
static void mtk_cpufreq_ready(struct cpufreq_policy *policy)
{
        struct mtk_cpu_dvfs_info *info = policy->driver_data;
        struct device_node *np = of_node_get(info->cpu_dev->of_node);
        u32 capacitance = 0;

        if (WARN_ON(!np))
                return;

        if (of_find_property(np, "#cooling-cells", NULL)) {
                of_property_read_u32(np, DYNAMIC_POWER, &capacitance);

                info->cdev = of_cpufreq_power_cooling_register(np,
                                                policy->related_cpus,
                                                capacitance,
                                                NULL);

                if (IS_ERR(info->cdev)) {
                        dev_err(info->cpu_dev,
                                "running cpufreq without cooling device: %ld\n",
                                PTR_ERR(info->cdev));

                        info->cdev = NULL;
                }
        }

        of_node_put(np);
}

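/*
 * Gather the clocks, regulators and OPP table for one CPU power/clock
 * domain. All resources are acquired before info is populated, so a failure
 * (including -EPROBE_DEFER) leaves nothing half-initialized.
 */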
static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu)
{
        struct device *cpu_dev;
        struct regulator *proc_reg = ERR_PTR(-ENODEV);
        struct regulator *sram_reg = ERR_PTR(-ENODEV);
        struct clk *cpu_clk = ERR_PTR(-ENODEV);
        struct clk *inter_clk = ERR_PTR(-ENODEV);
        struct dev_pm_opp *opp;
        unsigned long rate;
        int ret;

        cpu_dev = get_cpu_device(cpu);
        if (!cpu_dev) {
                pr_err("failed to get cpu%d device\n", cpu);
                return -ENODEV;
        }

        cpu_clk = clk_get(cpu_dev, "cpu");
        if (IS_ERR(cpu_clk)) {
                if (PTR_ERR(cpu_clk) == -EPROBE_DEFER)
                        pr_warn("cpu clk for cpu%d not ready, retry.\n", cpu);
                else
                        pr_err("failed to get cpu clk for cpu%d\n", cpu);

                ret = PTR_ERR(cpu_clk);
                return ret;
        }

        inter_clk = clk_get(cpu_dev, "intermediate");
        if (IS_ERR(inter_clk)) {
                if (PTR_ERR(inter_clk) == -EPROBE_DEFER)
                        pr_warn("intermediate clk for cpu%d not ready, retry.\n",
                                cpu);
                else
                        pr_err("failed to get intermediate clk for cpu%d\n",
                               cpu);

                ret = PTR_ERR(inter_clk);
                goto out_free_resources;
        }

        proc_reg = regulator_get_exclusive(cpu_dev, "proc");
        if (IS_ERR(proc_reg)) {
                if (PTR_ERR(proc_reg) == -EPROBE_DEFER)
                        pr_warn("proc regulator for cpu%d not ready, retry.\n",
                                cpu);
                else
                        pr_err("failed to get proc regulator for cpu%d\n",
                               cpu);

                ret = PTR_ERR(proc_reg);
                goto out_free_resources;
        }

        /* Both presence and absence of sram regulator are valid cases. */
        sram_reg = regulator_get_exclusive(cpu_dev, "sram");

        /* Get OPP-sharing information from "operating-points-v2" bindings */
        ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, &info->cpus);
        if (ret) {
                pr_err("failed to get OPP-sharing information for cpu%d\n",
                       cpu);
                goto out_free_resources;
        }

        ret = dev_pm_opp_of_cpumask_add_table(&info->cpus);
        if (ret) {
                pr_warn("no OPP table for cpu%d\n", cpu);
                goto out_free_resources;
        }

        /* Search a safe voltage for intermediate frequency. */
        rate = clk_get_rate(inter_clk);
        rcu_read_lock();
        opp = dev_pm_opp_find_freq_ceil(cpu_dev, &rate);
        if (IS_ERR(opp)) {
                rcu_read_unlock();
                pr_err("failed to get intermediate opp for cpu%d\n", cpu);
                ret = PTR_ERR(opp);
                goto out_free_opp_table;
        }
        info->intermediate_voltage = dev_pm_opp_get_voltage(opp);
        rcu_read_unlock();

        info->cpu_dev = cpu_dev;
        info->proc_reg = proc_reg;
        info->sram_reg = IS_ERR(sram_reg) ? NULL : sram_reg;
        info->cpu_clk = cpu_clk;
        info->inter_clk = inter_clk;

        /*
         * If SRAM regulator is present, software "voltage tracking" is needed
         * for this CPU power domain.
         */
        info->need_voltage_tracking = !IS_ERR(sram_reg);

        return 0;

out_free_opp_table:
        dev_pm_opp_of_cpumask_remove_table(&info->cpus);

out_free_resources:
        if (!IS_ERR(proc_reg))
                regulator_put(proc_reg);
        if (!IS_ERR(sram_reg))
                regulator_put(sram_reg);
        if (!IS_ERR(cpu_clk))
                clk_put(cpu_clk);
        if (!IS_ERR(inter_clk))
                clk_put(inter_clk);

        return ret;
}

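/* Undo mtk_cpu_dvfs_info_init(): drop regulators, clocks and the OPP table. */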
static void mtk_cpu_dvfs_info_release(struct mtk_cpu_dvfs_info *info)
{
        if (!IS_ERR(info->proc_reg))
                regulator_put(info->proc_reg);
        if (!IS_ERR(info->sram_reg))
                regulator_put(info->sram_reg);
        if (!IS_ERR(info->cpu_clk))
                clk_put(info->cpu_clk);
        if (!IS_ERR(info->inter_clk))
                clk_put(info->inter_clk);

        dev_pm_opp_of_cpumask_remove_table(&info->cpus);
}

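/*
 * ->init callback: look up the pre-built DVFS info for this CPU, generate
 * the cpufreq table from its OPPs and attach the shared clock and data to
 * the policy.
 */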
static int mtk_cpufreq_init(struct cpufreq_policy *policy)
{
        struct mtk_cpu_dvfs_info *info;
        struct cpufreq_frequency_table *freq_table;
        int ret;

        info = mtk_cpu_dvfs_info_lookup(policy->cpu);
        if (!info) {
                pr_err("dvfs info for cpu%d is not initialized.\n",
                       policy->cpu);
                return -EINVAL;
        }

        ret = dev_pm_opp_init_cpufreq_table(info->cpu_dev, &freq_table);
        if (ret) {
                pr_err("failed to init cpufreq table for cpu%d: %d\n",
                       policy->cpu, ret);
                return ret;
        }

        ret = cpufreq_table_validate_and_show(policy, freq_table);
        if (ret) {
                pr_err("%s: invalid frequency table: %d\n", __func__, ret);
                goto out_free_cpufreq_table;
        }

        cpumask_copy(policy->cpus, &info->cpus);
        policy->driver_data = info;
        policy->clk = info->cpu_clk;

        return 0;

out_free_cpufreq_table:
        dev_pm_opp_free_cpufreq_table(info->cpu_dev, &freq_table);
        return ret;
}

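/* ->exit callback: drop the cooling device and the per-policy frequency table. */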
static int mtk_cpufreq_exit(struct cpufreq_policy *policy)
{
        struct mtk_cpu_dvfs_info *info = policy->driver_data;

        cpufreq_cooling_unregister(info->cdev);
        dev_pm_opp_free_cpufreq_table(info->cpu_dev, &policy->freq_table);

        return 0;
}

static struct cpufreq_driver mt8173_cpufreq_driver = {
        .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK |
                 CPUFREQ_HAVE_GOVERNOR_PER_POLICY,
        .verify = cpufreq_generic_frequency_table_verify,
        .target_index = mtk_cpufreq_set_target,
        .get = cpufreq_generic_get,
        .init = mtk_cpufreq_init,
        .exit = mtk_cpufreq_exit,
        .ready = mtk_cpufreq_ready,
        .name = "mtk-cpufreq",
        .attr = cpufreq_generic_attr,
};

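/*
 * Probe: build a DVFS info for every CPU power/clock domain up front, so
 * that missing clocks or regulators (-EPROBE_DEFER) are handled before the
 * cpufreq driver is registered.
 */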
static int mt8173_cpufreq_probe(struct platform_device *pdev)
{
        struct mtk_cpu_dvfs_info *info, *tmp;
        int cpu, ret;

        for_each_possible_cpu(cpu) {
                info = mtk_cpu_dvfs_info_lookup(cpu);
                if (info)
                        continue;

                info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
                if (!info) {
                        ret = -ENOMEM;
                        goto release_dvfs_info_list;
                }

                ret = mtk_cpu_dvfs_info_init(info, cpu);
                if (ret) {
                        dev_err(&pdev->dev,
                                "failed to initialize dvfs info for cpu%d\n",
                                cpu);
                        goto release_dvfs_info_list;
                }

                list_add(&info->list_head, &dvfs_info_list);
        }

        ret = cpufreq_register_driver(&mt8173_cpufreq_driver);
        if (ret) {
                dev_err(&pdev->dev, "failed to register mtk cpufreq driver\n");
                goto release_dvfs_info_list;
        }

        return 0;

release_dvfs_info_list:
        list_for_each_entry_safe(info, tmp, &dvfs_info_list, list_head) {
                mtk_cpu_dvfs_info_release(info);
                list_del(&info->list_head);
        }

        return ret;
}

static struct platform_driver mt8173_cpufreq_platdrv = {
        .driver = {
                .name = "mt8173-cpufreq",
        },
        .probe = mt8173_cpufreq_probe,
};

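/*
 * Register the platform driver, and create a matching platform device on
 * MT8173 so that probing (and deferred probing) goes through the driver
 * core; see the comment below about why both live here.
 */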
static int mt8173_cpufreq_driver_init(void)
{
        struct platform_device *pdev;
        int err;

        if (!of_machine_is_compatible("mediatek,mt8173"))
                return -ENODEV;

        err = platform_driver_register(&mt8173_cpufreq_platdrv);
        if (err)
                return err;

        /*
         * Since there's no place to hold device registration code and no
         * device tree based way to match cpufreq driver yet, both the driver
         * and the device registration codes are put here to handle defer
         * probing.
         */
        pdev = platform_device_register_simple("mt8173-cpufreq", -1, NULL, 0);
        if (IS_ERR(pdev)) {
                pr_err("failed to register mtk-cpufreq platform device\n");
                return PTR_ERR(pdev);
        }

        return 0;
}

device_initcall(mt8173_cpufreq_driver_init);