arm_big_little.c

/*
 * ARM big.LITTLE Platforms CPUFreq support
 *
 * Copyright (C) 2013 ARM Ltd.
 * Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
 *
 * Copyright (C) 2013 Linaro.
 * Viresh Kumar <viresh.kumar@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/cpu_cooling.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of_platform.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>
#include <linux/topology.h>
#include <linux/types.h>

#include "arm_big_little.h"
/* Currently we support only two clusters */
#define A15_CLUSTER	0
#define A7_CLUSTER	1
#define MAX_CLUSTERS	2

#ifdef CONFIG_BL_SWITCHER
#include <asm/bL_switcher.h>
static bool bL_switching_enabled;
#define is_bL_switching_enabled()	bL_switching_enabled
#define set_switching_enabled(x)	(bL_switching_enabled = (x))
#else
#define is_bL_switching_enabled()	false
#define set_switching_enabled(x)	do { } while (0)
#define bL_switch_request(...)		do { } while (0)
#define bL_switcher_put_enabled()	do { } while (0)
#define bL_switcher_get_enabled()	do { } while (0)
#endif

#define ACTUAL_FREQ(cluster, freq)  ((cluster == A7_CLUSTER) ? freq << 1 : freq)
#define VIRT_FREQ(cluster, freq)    ((cluster == A7_CLUSTER) ? freq >> 1 : freq)
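/*
 * In IKS (In-Kernel Switcher) mode both clusters are presented to cpufreq
 * as a single virtual cluster whose table mixes A15 and A7 operating
 * points.  An A7 frequency is advertised at half its actual rate to
 * roughly reflect its lower per-MHz performance, e.g. an A7 OPP of
 * 1000 MHz shows up as a 500 MHz virtual frequency (value illustrative).
 * ACTUAL_FREQ() and VIRT_FREQ() convert between the two views.
 */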
static struct thermal_cooling_device *cdev[MAX_CLUSTERS];
static struct cpufreq_arm_bL_ops *arm_bL_ops;
static struct clk *clk[MAX_CLUSTERS];
static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS + 1];
static atomic_t cluster_usage[MAX_CLUSTERS + 1];

static unsigned int clk_big_min;	/* (Big) Minimum frequency */
static unsigned int clk_little_max;	/* Maximum frequency (Little) */

static DEFINE_PER_CPU(unsigned int, physical_cluster);
static DEFINE_PER_CPU(unsigned int, cpu_last_req_freq);

static struct mutex cluster_lock[MAX_CLUSTERS];
static inline int raw_cpu_to_cluster(int cpu)
{
	return topology_physical_package_id(cpu);
}

static inline int cpu_to_cluster(int cpu)
{
	return is_bL_switching_enabled() ?
		MAX_CLUSTERS : raw_cpu_to_cluster(cpu);
}
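/*
 * With the switcher enabled every CPU maps to the one virtual cluster
 * (index MAX_CLUSTERS), which owns the merged frequency table, while
 * raw_cpu_to_cluster() still reports the physical cluster a CPU sits on.
 */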
static unsigned int find_cluster_maxfreq(int cluster)
{
	int j;
	u32 max_freq = 0, cpu_freq;

	for_each_online_cpu(j) {
		cpu_freq = per_cpu(cpu_last_req_freq, j);

		if ((cluster == per_cpu(physical_cluster, j)) &&
				(max_freq < cpu_freq))
			max_freq = cpu_freq;
	}

	pr_debug("%s: cluster: %d, max freq: %d\n", __func__, cluster,
			max_freq);

	return max_freq;
}

static unsigned int clk_get_cpu_rate(unsigned int cpu)
{
	u32 cur_cluster = per_cpu(physical_cluster, cpu);
	u32 rate = clk_get_rate(clk[cur_cluster]) / 1000;

	/* For switcher we use virtual A7 clock rates */
	if (is_bL_switching_enabled())
		rate = VIRT_FREQ(cur_cluster, rate);

	pr_debug("%s: cpu: %d, cluster: %d, freq: %u\n", __func__, cpu,
			cur_cluster, rate);

	return rate;
}

static unsigned int bL_cpufreq_get_rate(unsigned int cpu)
{
	if (is_bL_switching_enabled()) {
		pr_debug("%s: freq: %d\n", __func__, per_cpu(cpu_last_req_freq,
					cpu));

		return per_cpu(cpu_last_req_freq, cpu);
	} else {
		return clk_get_cpu_rate(cpu);
	}
}
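/*
 * Each CPU records the frequency it last requested; in IKS mode the
 * shared physical cluster is then run at the highest rate requested by
 * any CPU currently on it, so one CPU lowering its request never slows
 * down a busier sibling.
 */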
static int
bL_cpufreq_set_rate(u32 cpu, u32 old_cluster, u32 new_cluster, u32 rate)
{
	u32 new_rate, prev_rate;
	int ret;
	bool bLs = is_bL_switching_enabled();

	mutex_lock(&cluster_lock[new_cluster]);

	if (bLs) {
		prev_rate = per_cpu(cpu_last_req_freq, cpu);
		per_cpu(cpu_last_req_freq, cpu) = rate;
		per_cpu(physical_cluster, cpu) = new_cluster;

		new_rate = find_cluster_maxfreq(new_cluster);
		new_rate = ACTUAL_FREQ(new_cluster, new_rate);
	} else {
		new_rate = rate;
	}

	pr_debug("%s: cpu: %d, old cluster: %d, new cluster: %d, freq: %d\n",
			__func__, cpu, old_cluster, new_cluster, new_rate);

	ret = clk_set_rate(clk[new_cluster], new_rate * 1000);
	if (!ret) {
		/*
		 * FIXME: clk_set_rate hasn't returned an error here however it
		 * may be that clk_change_rate failed due to hardware or
		 * firmware issues and wasn't able to report that due to the
		 * current design of the clk core layer. To work around this
		 * problem we will read back the clock rate and check it is
		 * correct. This needs to be removed once clk core is fixed.
		 */
		if (clk_get_rate(clk[new_cluster]) != new_rate * 1000)
			ret = -EIO;
	}

	if (WARN_ON(ret)) {
		pr_err("clk_set_rate failed: %d, new cluster: %d\n", ret,
				new_cluster);
		if (bLs) {
			per_cpu(cpu_last_req_freq, cpu) = prev_rate;
			per_cpu(physical_cluster, cpu) = old_cluster;
		}

		mutex_unlock(&cluster_lock[new_cluster]);

		return ret;
	}

	mutex_unlock(&cluster_lock[new_cluster]);

	/* Recalc freq for old cluster when switching clusters */
	if (old_cluster != new_cluster) {
		pr_debug("%s: cpu: %d, old cluster: %d, new cluster: %d\n",
				__func__, cpu, old_cluster, new_cluster);

		/* Switch cluster */
		bL_switch_request(cpu, new_cluster);

		mutex_lock(&cluster_lock[old_cluster]);

		/* Set freq of old cluster if there are cpus left on it */
		new_rate = find_cluster_maxfreq(old_cluster);
		new_rate = ACTUAL_FREQ(old_cluster, new_rate);

		if (new_rate) {
			pr_debug("%s: Updating rate of old cluster: %d, to freq: %d\n",
					__func__, old_cluster, new_rate);

			/* Log the error code of this call, not the stale ret */
			ret = clk_set_rate(clk[old_cluster], new_rate * 1000);
			if (ret)
				pr_err("%s: clk_set_rate failed: %d, old cluster: %d\n",
						__func__, ret, old_cluster);
		}
		mutex_unlock(&cluster_lock[old_cluster]);
	}

	return 0;
}
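/*
 * In IKS mode a request below the big cluster's lowest rate migrates the
 * CPU to the LITTLE cluster, and one above the LITTLE cluster's highest
 * rate migrates it to the big cluster; anything in between stays on the
 * current physical cluster.
 */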
/* Set clock frequency */
static int bL_cpufreq_set_target(struct cpufreq_policy *policy,
		unsigned int index)
{
	u32 cpu = policy->cpu, cur_cluster, new_cluster, actual_cluster;
	unsigned int freqs_new;
	int ret;

	cur_cluster = cpu_to_cluster(cpu);
	new_cluster = actual_cluster = per_cpu(physical_cluster, cpu);

	freqs_new = freq_table[cur_cluster][index].frequency;

	if (is_bL_switching_enabled()) {
		if ((actual_cluster == A15_CLUSTER) &&
				(freqs_new < clk_big_min)) {
			new_cluster = A7_CLUSTER;
		} else if ((actual_cluster == A7_CLUSTER) &&
				(freqs_new > clk_little_max)) {
			new_cluster = A15_CLUSTER;
		}
	}

	ret = bL_cpufreq_set_rate(cpu, actual_cluster, new_cluster, freqs_new);

	if (!ret) {
		arch_set_freq_scale(policy->related_cpus, freqs_new,
				    policy->cpuinfo.max_freq);
	}

	return ret;
}
static inline u32 get_table_count(struct cpufreq_frequency_table *table)
{
	int count;

	for (count = 0; table[count].frequency != CPUFREQ_TABLE_END; count++)
		;

	return count;
}

/* get the minimum frequency in the cpufreq_frequency_table */
static inline u32 get_table_min(struct cpufreq_frequency_table *table)
{
	struct cpufreq_frequency_table *pos;
	uint32_t min_freq = ~0;

	cpufreq_for_each_entry(pos, table)
		if (pos->frequency < min_freq)
			min_freq = pos->frequency;
	return min_freq;
}

/* get the maximum frequency in the cpufreq_frequency_table */
static inline u32 get_table_max(struct cpufreq_frequency_table *table)
{
	struct cpufreq_frequency_table *pos;
	uint32_t max_freq = 0;

	cpufreq_for_each_entry(pos, table)
		if (pos->frequency > max_freq)
			max_freq = pos->frequency;
	return max_freq;
}
static int merge_cluster_tables(void)
{
	int i, j, k = 0, count = 1;
	struct cpufreq_frequency_table *table;

	for (i = 0; i < MAX_CLUSTERS; i++)
		count += get_table_count(freq_table[i]);

	table = kcalloc(count, sizeof(*table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	freq_table[MAX_CLUSTERS] = table;

	/* Add in reverse order to get freqs in increasing order */
	for (i = MAX_CLUSTERS - 1; i >= 0; i--) {
		for (j = 0; freq_table[i][j].frequency != CPUFREQ_TABLE_END;
				j++) {
			table[k].frequency = VIRT_FREQ(i,
					freq_table[i][j].frequency);
			pr_debug("%s: index: %d, freq: %d\n", __func__, k,
					table[k].frequency);
			k++;
		}
	}

	table[k].driver_data = k;
	table[k].frequency = CPUFREQ_TABLE_END;

	pr_debug("%s: End, table: %p, count: %d\n", __func__, table, k);

	return 0;
}
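/*
 * Illustration (values hypothetical): with an A7 table of {500, 800,
 * 1000} MHz and an A15 table of {500, 900, 1200} MHz, the merged virtual
 * table becomes {250, 400, 500, 500, 900, 1200}: the A7 entries come
 * first, halved by VIRT_FREQ(), followed by the A15 entries unchanged.
 */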
static void _put_cluster_clk_and_freq_table(struct device *cpu_dev,
					    const struct cpumask *cpumask)
{
	u32 cluster = raw_cpu_to_cluster(cpu_dev->id);

	if (!freq_table[cluster])
		return;

	clk_put(clk[cluster]);
	dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
	if (arm_bL_ops->free_opp_table)
		arm_bL_ops->free_opp_table(cpumask);
	dev_dbg(cpu_dev, "%s: cluster: %d\n", __func__, cluster);
}

static void put_cluster_clk_and_freq_table(struct device *cpu_dev,
					   const struct cpumask *cpumask)
{
	u32 cluster = cpu_to_cluster(cpu_dev->id);
	int i;

	if (atomic_dec_return(&cluster_usage[cluster]))
		return;

	if (cluster < MAX_CLUSTERS)
		return _put_cluster_clk_and_freq_table(cpu_dev, cpumask);

	for_each_present_cpu(i) {
		struct device *cdev = get_cpu_device(i);
		if (!cdev) {
			pr_err("%s: failed to get cpu%d device\n", __func__, i);
			return;
		}

		_put_cluster_clk_and_freq_table(cdev, cpumask);
	}

	/* free virtual table */
	kfree(freq_table[cluster]);
}
static int _get_cluster_clk_and_freq_table(struct device *cpu_dev,
					   const struct cpumask *cpumask)
{
	u32 cluster = raw_cpu_to_cluster(cpu_dev->id);
	int ret;

	if (freq_table[cluster])
		return 0;

	ret = arm_bL_ops->init_opp_table(cpumask);
	if (ret) {
		dev_err(cpu_dev, "%s: init_opp_table failed, cpu: %d, err: %d\n",
				__func__, cpu_dev->id, ret);
		goto out;
	}

	ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table[cluster]);
	if (ret) {
		dev_err(cpu_dev, "%s: failed to init cpufreq table, cpu: %d, err: %d\n",
				__func__, cpu_dev->id, ret);
		goto free_opp_table;
	}

	clk[cluster] = clk_get(cpu_dev, NULL);
	if (!IS_ERR(clk[cluster])) {
		dev_dbg(cpu_dev, "%s: clk: %p & freq table: %p, cluster: %d\n",
				__func__, clk[cluster], freq_table[cluster],
				cluster);
		return 0;
	}

	dev_err(cpu_dev, "%s: Failed to get clk for cpu: %d, cluster: %d\n",
			__func__, cpu_dev->id, cluster);
	ret = PTR_ERR(clk[cluster]);
	dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);

free_opp_table:
	if (arm_bL_ops->free_opp_table)
		arm_bL_ops->free_opp_table(cpumask);
out:
	dev_err(cpu_dev, "%s: Failed to get data for cluster: %d\n", __func__,
			cluster);
	return ret;
}
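/*
 * cluster_usage[] refcounts per-cluster setup: only the first CPU of a
 * cluster builds the clock and frequency table, and only the last one
 * to leave tears them down.  The virtual cluster additionally needs the
 * data of both physical clusters before the tables can be merged.
 */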
static int get_cluster_clk_and_freq_table(struct device *cpu_dev,
					  const struct cpumask *cpumask)
{
	u32 cluster = cpu_to_cluster(cpu_dev->id);
	int i, ret;

	if (atomic_inc_return(&cluster_usage[cluster]) != 1)
		return 0;

	if (cluster < MAX_CLUSTERS) {
		ret = _get_cluster_clk_and_freq_table(cpu_dev, cpumask);
		if (ret)
			atomic_dec(&cluster_usage[cluster]);
		return ret;
	}

	/*
	 * Get data for all clusters and fill virtual cluster with a merge of
	 * both
	 */
	for_each_present_cpu(i) {
		struct device *cdev = get_cpu_device(i);
		if (!cdev) {
			pr_err("%s: failed to get cpu%d device\n", __func__, i);
			ret = -ENODEV;
			goto put_clusters;
		}

		ret = _get_cluster_clk_and_freq_table(cdev, cpumask);
		if (ret)
			goto put_clusters;
	}

	ret = merge_cluster_tables();
	if (ret)
		goto put_clusters;

	/* Assuming 2 cluster, set clk_big_min and clk_little_max */
	clk_big_min = get_table_min(freq_table[0]);
	clk_little_max = VIRT_FREQ(1, get_table_max(freq_table[1]));

	pr_debug("%s: cluster: %d, clk_big_min: %d, clk_little_max: %d\n",
			__func__, cluster, clk_big_min, clk_little_max);

	return 0;

put_clusters:
	/* Unwind whatever we set up, then drop our usage reference */
	for_each_present_cpu(i) {
		struct device *cdev = get_cpu_device(i);
		if (!cdev) {
			pr_err("%s: failed to get cpu%d device\n", __func__, i);
			break;
		}

		_put_cluster_clk_and_freq_table(cdev, cpumask);
	}

	atomic_dec(&cluster_usage[cluster]);

	return ret;
}
/* Per-CPU initialization */
static int bL_cpufreq_init(struct cpufreq_policy *policy)
{
	u32 cur_cluster = cpu_to_cluster(policy->cpu);
	struct device *cpu_dev;
	int ret;

	cpu_dev = get_cpu_device(policy->cpu);
	if (!cpu_dev) {
		pr_err("%s: failed to get cpu%d device\n", __func__,
				policy->cpu);
		return -ENODEV;
	}

	if (cur_cluster < MAX_CLUSTERS) {
		int cpu;

		cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu));

		for_each_cpu(cpu, policy->cpus)
			per_cpu(physical_cluster, cpu) = cur_cluster;
	} else {
		/* Assumption: during init, we are always running on A15 */
		per_cpu(physical_cluster, policy->cpu) = A15_CLUSTER;
	}

	ret = get_cluster_clk_and_freq_table(cpu_dev, policy->cpus);
	if (ret)
		return ret;

	ret = cpufreq_table_validate_and_show(policy, freq_table[cur_cluster]);
	if (ret) {
		dev_err(cpu_dev, "CPU %d, cluster: %d invalid freq table\n",
				policy->cpu, cur_cluster);
		put_cluster_clk_and_freq_table(cpu_dev, policy->cpus);
		return ret;
	}

	policy->cpuinfo.transition_latency =
				arm_bL_ops->get_transition_latency(cpu_dev);

	if (is_bL_switching_enabled())
		per_cpu(cpu_last_req_freq, policy->cpu) =
						clk_get_cpu_rate(policy->cpu);

	dev_info(cpu_dev, "%s: CPU %d initialized\n", __func__, policy->cpu);
	return 0;
}
static int bL_cpufreq_exit(struct cpufreq_policy *policy)
{
	struct device *cpu_dev;
	int cur_cluster = cpu_to_cluster(policy->cpu);

	if (cur_cluster < MAX_CLUSTERS) {
		cpufreq_cooling_unregister(cdev[cur_cluster]);
		cdev[cur_cluster] = NULL;
	}

	cpu_dev = get_cpu_device(policy->cpu);
	if (!cpu_dev) {
		pr_err("%s: failed to get cpu%d device\n", __func__,
				policy->cpu);
		return -ENODEV;
	}

	put_cluster_clk_and_freq_table(cpu_dev, policy->related_cpus);
	dev_dbg(cpu_dev, "%s: Exited, cpu: %d\n", __func__, policy->cpu);

	return 0;
}
static void bL_cpufreq_ready(struct cpufreq_policy *policy)
{
	struct device *cpu_dev = get_cpu_device(policy->cpu);
	int cur_cluster = cpu_to_cluster(policy->cpu);
	struct device_node *np;

	/* Do not register a cpu_cooling device if we are in IKS mode */
	if (cur_cluster >= MAX_CLUSTERS)
		return;

	np = of_node_get(cpu_dev->of_node);
	if (WARN_ON(!np))
		return;

	if (of_find_property(np, "#cooling-cells", NULL)) {
		u32 power_coefficient = 0;

		of_property_read_u32(np, "dynamic-power-coefficient",
				     &power_coefficient);

		cdev[cur_cluster] = of_cpufreq_power_cooling_register(np,
				policy, power_coefficient, NULL);
		if (IS_ERR(cdev[cur_cluster])) {
			dev_err(cpu_dev,
				"running cpufreq without cooling device: %ld\n",
				PTR_ERR(cdev[cur_cluster]));
			cdev[cur_cluster] = NULL;
		}
	}
	of_node_put(np);
}
static struct cpufreq_driver bL_cpufreq_driver = {
	.name			= "arm-big-little",
	.flags			= CPUFREQ_STICKY |
					CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
					CPUFREQ_NEED_INITIAL_FREQ_CHECK,
	.verify			= cpufreq_generic_frequency_table_verify,
	.target_index		= bL_cpufreq_set_target,
	.get			= bL_cpufreq_get_rate,
	.init			= bL_cpufreq_init,
	.exit			= bL_cpufreq_exit,
	.ready			= bL_cpufreq_ready,
	.attr			= cpufreq_generic_attr,
};
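/*
 * Enabling or disabling the big.LITTLE switcher changes the topology
 * this driver exposes (per-cluster tables vs. one merged virtual table),
 * so the cpufreq driver is unregistered before the transition and
 * re-registered afterwards with the new mode latched.
 */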
#ifdef CONFIG_BL_SWITCHER
static int bL_cpufreq_switcher_notifier(struct notifier_block *nfb,
					unsigned long action, void *_arg)
{
	pr_debug("%s: action: %ld\n", __func__, action);

	switch (action) {
	case BL_NOTIFY_PRE_ENABLE:
	case BL_NOTIFY_PRE_DISABLE:
		cpufreq_unregister_driver(&bL_cpufreq_driver);
		break;

	case BL_NOTIFY_POST_ENABLE:
		set_switching_enabled(true);
		cpufreq_register_driver(&bL_cpufreq_driver);
		break;

	case BL_NOTIFY_POST_DISABLE:
		set_switching_enabled(false);
		cpufreq_register_driver(&bL_cpufreq_driver);
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

static struct notifier_block bL_switcher_notifier = {
	.notifier_call = bL_cpufreq_switcher_notifier,
};

static int __bLs_register_notifier(void)
{
	return bL_switcher_register_notifier(&bL_switcher_notifier);
}

static int __bLs_unregister_notifier(void)
{
	return bL_switcher_unregister_notifier(&bL_switcher_notifier);
}
#else
static int __bLs_register_notifier(void) { return 0; }
static int __bLs_unregister_notifier(void) { return 0; }
#endif
int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops)
{
	int ret, i;

	if (arm_bL_ops) {
		pr_debug("%s: Already registered: %s, exiting\n", __func__,
				arm_bL_ops->name);
		return -EBUSY;
	}

	if (!ops || !strlen(ops->name) || !ops->init_opp_table ||
	    !ops->get_transition_latency) {
		pr_err("%s: Invalid arm_bL_ops, exiting\n", __func__);
		return -ENODEV;
	}

	arm_bL_ops = ops;

	set_switching_enabled(bL_switcher_get_enabled());

	for (i = 0; i < MAX_CLUSTERS; i++)
		mutex_init(&cluster_lock[i]);

	ret = cpufreq_register_driver(&bL_cpufreq_driver);
	if (ret) {
		pr_info("%s: Failed registering platform driver: %s, err: %d\n",
				__func__, ops->name, ret);
		arm_bL_ops = NULL;
	} else {
		ret = __bLs_register_notifier();
		if (ret) {
			cpufreq_unregister_driver(&bL_cpufreq_driver);
			arm_bL_ops = NULL;
		} else {
			pr_info("%s: Registered platform driver: %s\n",
					__func__, ops->name);
		}
	}

	bL_switcher_put_enabled();
	return ret;
}
EXPORT_SYMBOL_GPL(bL_cpufreq_register);
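/*
 * Platform glue is expected to fill a struct cpufreq_arm_bL_ops and call
 * bL_cpufreq_register() from its probe path.  A minimal sketch (names
 * hypothetical, assuming the OPPs come from device tree):
 *
 *	static int my_init_opp_table(const struct cpumask *cpumask)
 *	{
 *		// populate OPPs for the CPUs in @cpumask from DT
 *		return dev_pm_opp_of_cpumask_add_table(cpumask);
 *	}
 *
 *	static int my_get_transition_latency(struct device *cpu_dev)
 *	{
 *		return 1000000;	// ns, placeholder value
 *	}
 *
 *	static struct cpufreq_arm_bL_ops my_bL_ops = {
 *		.name			= "my-bL",
 *		.init_opp_table		= my_init_opp_table,
 *		.get_transition_latency	= my_get_transition_latency,
 *		.free_opp_table		= dev_pm_opp_of_cpumask_remove_table,
 *	};
 *
 *	// then, from the platform driver's probe:
 *	// bL_cpufreq_register(&my_bL_ops);
 */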
void bL_cpufreq_unregister(struct cpufreq_arm_bL_ops *ops)
{
	if (arm_bL_ops != ops) {
		pr_err("%s: Registered with: %s, can't unregister, exiting\n",
				__func__, arm_bL_ops->name);
		return;
	}

	bL_switcher_get_enabled();
	__bLs_unregister_notifier();
	cpufreq_unregister_driver(&bL_cpufreq_driver);
	bL_switcher_put_enabled();
	pr_info("%s: Un-registered platform driver: %s\n", __func__,
			arm_bL_ops->name);
	arm_bL_ops = NULL;
}
EXPORT_SYMBOL_GPL(bL_cpufreq_unregister);

MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>");
MODULE_DESCRIPTION("Generic ARM big LITTLE cpufreq driver");
MODULE_LICENSE("GPL v2");