cppc_cpufreq.c

/*
 * CPPC (Collaborative Processor Performance Control) driver for
 * interfacing with the CPUfreq layer and governors. See
 * cppc_acpi.c for CPPC specific methods.
 *
 * (C) Copyright 2014, 2015 Linaro Ltd.
 * Author: Ashwin Chaugule <ashwin.chaugule@linaro.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

#define pr_fmt(fmt)	"CPPC Cpufreq:" fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/dmi.h>
#include <linux/time.h>
#include <linux/vmalloc.h>

#include <asm/unaligned.h>

#include <acpi/cppc_acpi.h>

/* Minimum struct length needed for the DMI processor entry we want */
#define DMI_ENTRY_PROCESSOR_MIN_LENGTH	48

/* Offset in the DMI processor structure for the max frequency */
#define DMI_PROCESSOR_MAX_SPEED		0x14

/*
 * These structs contain information parsed from per CPU
 * ACPI _CPC structures.
 * e.g. For each CPU the highest, lowest supported
 * performance capabilities, desired performance level
 * requested etc.
 */
static struct cppc_cpudata **all_cpu_data;

/* Capture the max KHz from DMI */
static u64 cppc_dmi_max_khz;

/* Callback function used to retrieve the max frequency from DMI */
static void cppc_find_dmi_mhz(const struct dmi_header *dm, void *private)
{
        const u8 *dmi_data = (const u8 *)dm;
        u16 *mhz = (u16 *)private;

        if (dm->type == DMI_ENTRY_PROCESSOR &&
            dm->length >= DMI_ENTRY_PROCESSOR_MIN_LENGTH) {
                u16 val = (u16)get_unaligned((const u16 *)
                                (dmi_data + DMI_PROCESSOR_MAX_SPEED));
                *mhz = val > *mhz ? val : *mhz;
        }
}

/* Look up the max frequency in DMI */
static u64 cppc_get_dmi_max_khz(void)
{
        u16 mhz = 0;

        dmi_walk(cppc_find_dmi_mhz, &mhz);

        /*
         * Real stupid fallback value, just in case there is no
         * actual value set.
         */
        mhz = mhz ? mhz : 1;

        return (1000 * mhz);
}
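
/*
 * Request a new target frequency for policy->cpu. The target frequency
 * (in kHz) is scaled into an abstract CPPC performance level:
 *	desired_perf = target_freq * highest_perf / cppc_dmi_max_khz
 * and the result is handed to the platform via cppc_set_perf().
 * E.g. (hypothetical values): with cppc_dmi_max_khz = 2000000 (2 GHz from
 * DMI) and highest_perf = 300, a 1000000 kHz request maps to
 * desired_perf = 1000000 * 300 / 2000000 = 150.
 */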
static int cppc_cpufreq_set_target(struct cpufreq_policy *policy,
                                   unsigned int target_freq,
                                   unsigned int relation)
{
        struct cppc_cpudata *cpu;
        struct cpufreq_freqs freqs;
        u32 desired_perf;
        int ret = 0;

        cpu = all_cpu_data[policy->cpu];

        desired_perf = (u64)target_freq * cpu->perf_caps.highest_perf / cppc_dmi_max_khz;

        /* Return if it is exactly the same perf */
        if (desired_perf == cpu->perf_ctrls.desired_perf)
                return ret;

        cpu->perf_ctrls.desired_perf = desired_perf;
        freqs.old = policy->cur;
        freqs.new = target_freq;

        cpufreq_freq_transition_begin(policy, &freqs);
        ret = cppc_set_perf(cpu->cpu, &cpu->perf_ctrls);
        cpufreq_freq_transition_end(policy, &freqs, ret != 0);

        if (ret)
                pr_debug("Failed to set target on CPU:%d. ret:%d\n",
                         cpu->cpu, ret);

        return ret;
}
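
/* Clamp the requested policy limits to this CPU's cpuinfo min/max */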
static int cppc_verify_policy(struct cpufreq_policy *policy)
{
        cpufreq_verify_within_cpu_limits(policy);
        return 0;
}
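
/*
 * Called when a CPU is taken offline: drop it to its lowest supported
 * performance level instead of leaving the last requested perf in place.
 */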
static void cppc_cpufreq_stop_cpu(struct cpufreq_policy *policy)
{
        int cpu_num = policy->cpu;
        struct cppc_cpudata *cpu = all_cpu_data[cpu_num];
        int ret;

        cpu->perf_ctrls.desired_perf = cpu->perf_caps.lowest_perf;

        ret = cppc_set_perf(cpu_num, &cpu->perf_ctrls);
        if (ret)
                pr_debug("Err setting perf value:%d on CPU:%d. ret:%d\n",
                         cpu->perf_caps.lowest_perf, cpu_num, ret);
}

/*
 * The PCC subspace describes the rate at which the platform can accept
 * commands on the shared PCC channel (including READs which do not count
 * towards freq transition requests), so ideally we need to use the PCC
 * values as a fallback if we don't have a platform specific
 * transition_delay_us.
 */
#ifdef CONFIG_ARM64
#include <asm/cputype.h>

static unsigned int cppc_cpufreq_get_transition_delay_us(int cpu)
{
        unsigned long implementor = read_cpuid_implementor();
        unsigned long part_num = read_cpuid_part_number();
        unsigned int delay_us = 0;

        switch (implementor) {
        case ARM_CPU_IMP_QCOM:
                switch (part_num) {
                case QCOM_CPU_PART_FALKOR_V1:
                case QCOM_CPU_PART_FALKOR:
                        delay_us = 10000;
                        break;
                default:
                        delay_us = cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
                        break;
                }
                break;
        default:
                delay_us = cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
                break;
        }

        return delay_us;
}

#else

static unsigned int cppc_cpufreq_get_transition_delay_us(int cpu)
{
        return cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
}

#endif
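
/*
 * Initialize the cpufreq policy for a CPU: read its CPPC performance
 * capabilities, derive the frequency limits from the DMI max frequency,
 * copy the capabilities to all CPUs sharing the policy, and request the
 * highest performance level as the starting point.
 */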
static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
        struct cppc_cpudata *cpu;
        unsigned int cpu_num = policy->cpu;
        int ret = 0;

        cpu = all_cpu_data[policy->cpu];

        cpu->cpu = cpu_num;
        ret = cppc_get_perf_caps(policy->cpu, &cpu->perf_caps);
        if (ret) {
                pr_debug("Err reading CPU%d perf capabilities. ret:%d\n",
                         cpu_num, ret);
                return ret;
        }

        cppc_dmi_max_khz = cppc_get_dmi_max_khz();

        /*
         * Set min to lowest nonlinear perf to avoid any efficiency penalty (see
         * Section 8.4.7.1.1.5 of ACPI 6.1 spec)
         */
        policy->min = cpu->perf_caps.lowest_nonlinear_perf * cppc_dmi_max_khz /
                cpu->perf_caps.highest_perf;
        policy->max = cppc_dmi_max_khz;

        /*
         * Set cpuinfo.min_freq to Lowest to make the full range of performance
         * available if userspace wants to use any perf between lowest & lowest
         * nonlinear perf
         */
        policy->cpuinfo.min_freq = cpu->perf_caps.lowest_perf * cppc_dmi_max_khz /
                cpu->perf_caps.highest_perf;
        policy->cpuinfo.max_freq = cppc_dmi_max_khz;

        policy->cpuinfo.transition_latency = cppc_get_transition_latency(cpu_num);
        policy->transition_delay_us = cppc_cpufreq_get_transition_delay_us(cpu_num);
        policy->shared_type = cpu->shared_type;

        if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
                int i;

                cpumask_copy(policy->cpus, cpu->shared_cpu_map);

                for_each_cpu(i, policy->cpus) {
                        if (unlikely(i == policy->cpu))
                                continue;

                        memcpy(&all_cpu_data[i]->perf_caps, &cpu->perf_caps,
                               sizeof(cpu->perf_caps));
                }
        } else if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL) {
                /* Support only SW_ANY for now. */
                pr_debug("Unsupported CPU co-ord type\n");
                return -EFAULT;
        }

        cpu->cur_policy = policy;

        /* Set policy->cur to max now. The governors will adjust later. */
        policy->cur = cppc_dmi_max_khz;

        cpu->perf_ctrls.desired_perf = cpu->perf_caps.highest_perf;
        ret = cppc_set_perf(cpu_num, &cpu->perf_ctrls);
        if (ret)
                pr_debug("Err setting perf value:%d on CPU:%d. ret:%d\n",
                         cpu->perf_caps.highest_perf, cpu_num, ret);

        return ret;
}
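
/* Callbacks registered with the cpufreq core */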
static struct cpufreq_driver cppc_cpufreq_driver = {
        .flags = CPUFREQ_CONST_LOOPS,
        .verify = cppc_verify_policy,
        .target = cppc_cpufreq_set_target,
        .init = cppc_cpufreq_cpu_init,
        .stop_cpu = cppc_cpufreq_stop_cpu,
        .name = "cppc_cpufreq",
};
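
/*
 * Module init: allocate per-CPU data for every possible CPU, build the
 * PSD (performance state dependency) map, and register the cpufreq driver.
 */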
static int __init cppc_cpufreq_init(void)
{
        int i, ret = 0;
        struct cppc_cpudata *cpu;

        if (acpi_disabled)
                return -ENODEV;

        all_cpu_data = kzalloc(sizeof(void *) * num_possible_cpus(), GFP_KERNEL);
        if (!all_cpu_data)
                return -ENOMEM;

        for_each_possible_cpu(i) {
                all_cpu_data[i] = kzalloc(sizeof(struct cppc_cpudata), GFP_KERNEL);
                if (!all_cpu_data[i])
                        goto out;

                cpu = all_cpu_data[i];
                if (!zalloc_cpumask_var(&cpu->shared_cpu_map, GFP_KERNEL))
                        goto out;
        }

        ret = acpi_get_psd_map(all_cpu_data);
        if (ret) {
                pr_debug("Error parsing PSD data. Aborting cpufreq registration.\n");
                goto out;
        }

        ret = cpufreq_register_driver(&cppc_cpufreq_driver);
        if (ret)
                goto out;

        return ret;

out:
        for_each_possible_cpu(i) {
                cpu = all_cpu_data[i];
                if (!cpu)
                        break;
                free_cpumask_var(cpu->shared_cpu_map);
                kfree(cpu);
        }

        kfree(all_cpu_data);
        return -ENODEV;
}
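
/* Module exit: unregister the driver and free all per-CPU data */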
static void __exit cppc_cpufreq_exit(void)
{
        struct cppc_cpudata *cpu;
        int i;

        cpufreq_unregister_driver(&cppc_cpufreq_driver);

        for_each_possible_cpu(i) {
                cpu = all_cpu_data[i];
                free_cpumask_var(cpu->shared_cpu_map);
                kfree(cpu);
        }

        kfree(all_cpu_data);
}
module_exit(cppc_cpufreq_exit);

MODULE_AUTHOR("Ashwin Chaugule");
MODULE_DESCRIPTION("CPUFreq driver based on the ACPI CPPC v5.0+ spec");
MODULE_LICENSE("GPL");

late_initcall(cppc_cpufreq_init);
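
/* ACPI processor device IDs, exported for module autoloading */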
static const struct acpi_device_id cppc_acpi_ids[] = {
        {ACPI_PROCESSOR_DEVICE_HID, },
        {}
};

MODULE_DEVICE_TABLE(acpi, cppc_acpi_ids);