tune_plus.c

/*
 * Copyright (C) 2018 MediaTek Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See http://www.gnu.org/licenses/gpl-2.0.html for more details.
 */
#define MET_STUNE_DEBUG 1

#if MET_STUNE_DEBUG
#include <mt-plat/met_drv.h>
#endif
int stune_task_threshold;
static int default_stune_threshold;

/* Protects updates to stune_task_threshold */
static raw_spinlock_t stune_lock;
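/*
 * set_stune_task_threshold() - update the global stune task threshold.
 * @threshold: new threshold in capacity units (0..1024), or -1 to restore
 *             the precomputed default_stune_threshold.
 *
 * Returns 0 on success or -EINVAL if @threshold is out of range.
 */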
int set_stune_task_threshold(int threshold)
{
	if (threshold > 1024 || threshold < -1)
		return -EINVAL;

	raw_spin_lock(&stune_lock);
	if (threshold < 0)
		stune_task_threshold = default_stune_threshold;
	else
		stune_task_threshold = threshold;
	raw_spin_unlock(&stune_lock);

#if MET_STUNE_DEBUG
	met_tag_oneshot(0, "sched_stune_threshold", stune_task_threshold);
#endif

	return 0;
}
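/*
 * sysctl handler: let proc_dointvec() parse the written value, then
 * validate it through set_stune_task_threshold(); if validation fails,
 * the previous threshold is restored.
 */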
int sched_stune_task_threshold_handler(struct ctl_table *table,
				       int write, void __user *buffer,
				       size_t *lenp, loff_t *ppos)
{
	int ret;
	int old_threshold;

	old_threshold = stune_task_threshold;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (!ret && write) {
		ret = set_stune_task_threshold(stune_task_threshold);
		if (ret)
			stune_task_threshold = old_threshold;
	}

	return ret;
}
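/*
 * Derive default_stune_threshold from the energy model: walk the HMP
 * domains starting from the lowest-capacity cluster and take the first
 * capacity state of that cluster's first CPU. Clusters without
 * energy-model data are logged and skipped.
 */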
void calculate_default_stune_threshold(void)
{
	const struct sched_group_energy *sge_core;
	struct hmp_domain *domain;
	int cluster_first_cpu = 0;

	raw_spin_lock_init(&stune_lock);

	rcu_read_lock();
	for_each_hmp_domain_L_first(domain) {
		cluster_first_cpu = cpumask_first(&domain->possible_cpus);
		/* lowest capacity CPU in system */
		sge_core = cpu_core_energy(cluster_first_cpu);
		if (!sge_core) {
			pr_info("schedtune: no energy model data\n");
		} else {
			default_stune_threshold = sge_core->cap_states[0].cap;
			if (default_stune_threshold) {
				set_stune_task_threshold(-1);
				break;
			}
		}
	}
	rcu_read_unlock();
}
#if defined(CONFIG_UCLAMP_TASK_GROUP) && defined(CONFIG_SCHED_TUNE)
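/*
 * uclamp_min_for_perf_idx() - set the util.min clamp of schedtune group @idx.
 *
 * @min_value is snapped to a supported capacity via find_fit_capacity()
 * and must not exceed the group's current util.max clamp, otherwise
 * -EINVAL is returned.
 */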
int uclamp_min_for_perf_idx(int idx, int min_value)
{
	struct schedtune *st;
	struct cgroup_subsys_state *css;
	int ret = 0;

	if (min_value > SCHED_CAPACITY_SCALE)
		return -ERANGE;

	if (!is_group_idx_valid(idx))
		return -ERANGE;

	st = allocated_group[idx];
	if (!st)
		return -EINVAL;

	min_value = find_fit_capacity(min_value);

	css = &st->css;

	mutex_lock(&uclamp_mutex);
	rcu_read_lock();

	if (st->uclamp[UCLAMP_MIN].value == min_value)
		goto out;

	if (st->uclamp[UCLAMP_MAX].value < min_value) {
		ret = -EINVAL;
		goto out;
	}

	/* Update ST's reference count */
	uclamp_group_get(NULL, css, &st->uclamp[UCLAMP_MIN],
			 UCLAMP_MIN, min_value);

	/* Update effective clamps to track the most restrictive value */
	cpu_util_update(css, UCLAMP_MIN, st->uclamp[UCLAMP_MIN].group_id,
			min_value);

out:
	rcu_read_unlock();
	mutex_unlock(&uclamp_mutex);

	return ret;
}
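/* Percentage front end: map 0..100% onto the capacity scale first. */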
int uclamp_min_pct_for_perf_idx(int idx, int pct)
{
	unsigned int min_value;

	if (pct < 0 || pct > 100)
		return -ERANGE;

	min_value = scale_from_percent(pct);

	return uclamp_min_for_perf_idx(idx, min_value);
}
#endif
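/*
 * boost_write_for_perf_idx() - set the boost value of schedtune group @idx.
 *
 * Out-of-range boost values are clamped to 0..100 (with a deferred warning)
 * before being propagated to the per-CPU boost groups.
 */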
int boost_write_for_perf_idx(int idx, int boost_value)
{
	struct schedtune *ct;

	if (boost_value < 0 || boost_value > 100)
		printk_deferred("warn: boost value should be 0~100\n");

	if (boost_value >= 100)
		boost_value = 100;
	else if (boost_value <= 0)
		boost_value = 0;

	if (!is_group_idx_valid(idx))
		return -ERANGE;

	ct = allocated_group[idx];
	if (ct) {
		rcu_read_lock();
		ct->boost = boost_value;

		/* Update CPU boost */
		schedtune_boostgroup_update(ct->idx, ct->boost);
		rcu_read_unlock();

#if MET_STUNE_DEBUG
		/* foreground */
		if (ct->idx == 1)
			met_tag_oneshot(0, "sched_boost_fg", ct->boost);
		/* top-app */
		if (ct->idx == 3)
			met_tag_oneshot(0, "sched_boost_ta", ct->boost);
#endif
	} else {
		printk_deferred("error: stune group idx=%d is nonexistent\n",
				idx);
		return -EINVAL;
	}

	return 0;
}
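/*
 * prefer_idle_for_perf_idx() - set the prefer_idle flag of schedtune
 * group @idx, biasing task placement towards idle CPUs.
 */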
int prefer_idle_for_perf_idx(int idx, int prefer_idle)
{
	struct schedtune *ct = NULL;

	if (!is_group_idx_valid(idx))
		return -ERANGE;

	ct = allocated_group[idx];
	if (!ct)
		return -EINVAL;

	rcu_read_lock();
	ct->prefer_idle = prefer_idle;
	rcu_read_unlock();

#if MET_STUNE_DEBUG
	/* foreground */
	if (ct->idx == 1)
		met_tag_oneshot(0, "sched_prefer_idle_fg",
				ct->prefer_idle);
	/* top-app */
	if (ct->idx == 3)
		met_tag_oneshot(0, "sched_prefer_idle_ta",
				ct->prefer_idle);
#endif

	return 0;
}
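/* Read back the boost value currently set for schedtune group @group_idx. */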
int group_boost_read(int group_idx)
{
	struct schedtune *ct;
	int boost = 0;

	if (!is_group_idx_valid(group_idx))
		return -ERANGE;

	ct = allocated_group[group_idx];
	if (ct) {
		rcu_read_lock();
		boost = ct->boost;
		rcu_read_unlock();
	}

	return boost;
}
EXPORT_SYMBOL(group_boost_read);
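/* Read back the prefer_idle flag currently set for schedtune group @group_idx. */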
int group_prefer_idle_read(int group_idx)
{
	struct schedtune *ct;
	int prefer_idle = 0;

	if (!is_group_idx_valid(group_idx))
		return -ERANGE;

	ct = allocated_group[group_idx];
	if (ct) {
		rcu_read_lock();
		prefer_idle = ct->prefer_idle;
		rcu_read_unlock();
	}

	return prefer_idle;
}
EXPORT_SYMBOL(group_prefer_idle_read);
#ifdef CONFIG_MTK_SCHED_RQAVG_KS
/* mtk: a linear boost value for tuning */
int linear_real_boost(int linear_boost)
{
	int target_cpu, usage;
	int boost;
	int ta_org_cap;

	sched_max_util_task(&target_cpu, NULL, &usage, NULL);

	ta_org_cap = capacity_orig_of(target_cpu);

	if (usage >= SCHED_CAPACITY_SCALE)
		usage = SCHED_CAPACITY_SCALE;

	/*
	 * Conversion formula of linear boost:
	 *
	 *   margin = (usage * linear_boost) / 100;
	 *   margin = (original_cap - usage) * boost / 100;
	 * so
	 *   boost = (usage * linear_boost) / (original_cap - usage)
	 */
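	/*
	 * Example (for illustration): usage = 300, original_cap = 600,
	 * linear_boost = 50  =>  boost = (300 * 50) / (600 - 300) = 50.
	 */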
	if (ta_org_cap <= usage) {
		/* If target cpu is saturated, consider bigger one */
		boost = (SCHED_CAPACITY_SCALE - usage) ?
			(usage * linear_boost) / (SCHED_CAPACITY_SCALE - usage) : 0;
	} else {
		boost = (usage * linear_boost) / (ta_org_cap - usage);
	}

	return boost;
}
EXPORT_SYMBOL(linear_real_boost);
#endif