#include <linux/cgroup.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/printk.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

#include <trace/events/sched.h>

#include "sched.h"
#include "tune.h"

bool schedtune_initialized = false;
extern struct reciprocal_value schedtune_spc_rdiv;

/* We hold schedtune boost in effect for at least this long */
#define SCHEDTUNE_BOOST_HOLD_NS 50000000ULL

/*
 * EAS scheduler tunables for task groups.
 */
/* SchedTune tunables for a group of tasks */
struct schedtune {
	/* SchedTune CGroup subsystem */
	struct cgroup_subsys_state css;

	/* Boost group allocated ID */
	int idx;

	/* Boost value for tasks on that SchedTune CGroup */
	int boost;

	/*
	 * Hint to bias scheduling of tasks on that SchedTune CGroup
	 * towards idle CPUs
	 */
	int prefer_idle;

#ifdef CONFIG_UCLAMP_TASK_GROUP
	/* Task utilization clamping */
	struct uclamp_se uclamp[UCLAMP_CNT];
#endif
};
static inline struct schedtune *css_st(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct schedtune, css) : NULL;
}

static inline struct schedtune *task_schedtune(struct task_struct *tsk)
{
	return css_st(task_css(tsk, schedtune_cgrp_id));
}

static inline struct schedtune *parent_st(struct schedtune *st)
{
	return css_st(st->css.parent);
}

#ifdef CONFIG_UCLAMP_TASK_GROUP
struct uclamp_se *task_schedtune_uclamp(struct task_struct *tsk, int clamp_id)
{
	struct schedtune *st;

	rcu_read_lock();
	st = task_schedtune(tsk);
	rcu_read_unlock();

	return &st->uclamp[clamp_id];
}
#endif
/*
 * SchedTune root control group
 * The root control group is used to define a system-wide boosting tuning,
 * which is applied to all tasks in the system.
 * Task specific boost tuning could be specified by creating and
 * configuring a child control group under the root one.
 * By default, system-wide boosting is disabled, i.e. no boosting is applied
 * to tasks which are not in a child control group.
 */
static struct schedtune
root_schedtune = {
	.boost = 0,
	.prefer_idle = 0,
};

/*
 * Maximum number of boost groups to support
 * When per-task boosting is used we still allow only a limited number of
 * boost groups, for two main reasons:
 * 1. on a real system we usually have only a few classes of workloads which
 *    it makes sense to boost with different values (e.g. background vs
 *    foreground tasks, interactive vs low-priority tasks)
 * 2. a limited number allows for a simpler and more memory/time efficient
 *    implementation, especially for the computation of the per-CPU boost
 *    value
 */
#define BOOSTGROUPS_COUNT 10

/* Array of configured boostgroups */
static struct schedtune *allocated_group[BOOSTGROUPS_COUNT] = {
	&root_schedtune,
	NULL,
};

static inline bool is_group_idx_valid(int idx)
{
	return idx >= 0 && idx < BOOSTGROUPS_COUNT;
}
/* SchedTune boost groups
 * Keep track of all the boost groups which impact each CPU, for example when
 * a CPU has two RUNNABLE tasks belonging to two different boost groups and
 * thus likely with different boost values.
 * Since on each system we expect only a limited number of boost groups, here
 * we use a simple array to keep track of the metrics required to compute the
 * maximum per-CPU boosting value.
 */
struct boost_groups {
	/* Maximum boost value for all RUNNABLE tasks on a CPU */
	bool idle;
	int boost_max;
	u64 boost_ts;
	struct {
		/* The boost for tasks on that boost group */
		int boost;
		/* Count of RUNNABLE tasks on that boost group */
		unsigned tasks;
		/* Timestamp of boost activation */
		u64 ts;
	} group[BOOSTGROUPS_COUNT];
	/* CPU's boost group locking */
	raw_spinlock_t lock;
};

/* Boost groups affecting each CPU in the system */
DEFINE_PER_CPU(struct boost_groups, cpu_boost_groups);

static inline bool schedtune_boost_timeout(u64 now, u64 ts)
{
	return ((now - ts) > SCHEDTUNE_BOOST_HOLD_NS);
}
static inline bool
schedtune_boost_group_active(int idx, struct boost_groups *bg, u64 now)
{
	if (bg->group[idx].tasks)
		return true;

	return !schedtune_boost_timeout(now, bg->group[idx].ts);
}

static void
schedtune_cpu_update(int cpu, u64 now)
{
	struct boost_groups *bg = &per_cpu(cpu_boost_groups, cpu);
	int boost_max;
	u64 boost_ts;
	int idx;

	/* The root boost group is always active */
	boost_max = bg->group[0].boost;
	boost_ts = now;
	for (idx = 1; idx < BOOSTGROUPS_COUNT; ++idx) {
		/*
		 * A boost group affects a CPU only if it has RUNNABLE tasks
		 * on that CPU or a boost hold is still in effect from a
		 * previously RUNNABLE task.
		 */
		if (!schedtune_boost_group_active(idx, bg, now))
			continue;

		/* This boost group is active */
		if (boost_max > bg->group[idx].boost)
			continue;

		boost_max = bg->group[idx].boost;
		boost_ts = bg->group[idx].ts;
	}

	/*
	 * Ensure boost_max is non-negative when all cgroup boost values are
	 * negative. This avoids under-accounting of CPU capacity, which may
	 * cause task stacking and frequency spikes.
	 */
	boost_max = max(boost_max, 0);
	bg->boost_max = boost_max;
	bg->boost_ts = boost_ts;
}
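/*
 * Worked example (illustrative only, derived from the code above): assume
 * the root group has boost 0 and boost group 2 has boost 30 with one
 * RUNNABLE task on a CPU. schedtune_cpu_update() then sets boost_max = 30
 * for that CPU. After the last task of group 2 dequeues, the group still
 * counts as active until SCHEDTUNE_BOOST_HOLD_NS (50ms) have elapsed past
 * its recorded activation timestamp (bg->group[2].ts), so boost_max can
 * remain at 30 for up to 50ms before the next update drops it back to 0.
 */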
static int
schedtune_boostgroup_update(int idx, int boost)
{
	struct boost_groups *bg;
	int cur_boost_max;
	int old_boost;
	int cpu;
	u64 now;

	/* Update per CPU boost groups */
	for_each_possible_cpu(cpu) {
		bg = &per_cpu(cpu_boost_groups, cpu);

		/*
		 * Keep track of current boost values to compute the per CPU
		 * maximum only when it has been affected by the new value of
		 * the updated boost group
		 */
		cur_boost_max = bg->boost_max;
		old_boost = bg->group[idx].boost;

		/* Update the boost value of this boost group */
		bg->group[idx].boost = boost;

		/* Check if this update increases the current max */
		now = sched_clock_cpu(cpu);
		if (boost > cur_boost_max &&
		    schedtune_boost_group_active(idx, bg, now)) {
			bg->boost_max = boost;
			bg->boost_ts = bg->group[idx].ts;

			trace_sched_tune_boostgroup_update(cpu, 1, bg->boost_max);
			continue;
		}

		/* Check if this update has decreased the current max */
		if (cur_boost_max == old_boost && old_boost > boost) {
			schedtune_cpu_update(cpu, now);
			trace_sched_tune_boostgroup_update(cpu, -1, bg->boost_max);
			continue;
		}

		trace_sched_tune_boostgroup_update(cpu, 0, bg->boost_max);
	}

	return 0;
}
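/*
 * Illustrative note (a reading of the code above): the second argument of
 * trace_sched_tune_boostgroup_update() encodes what the update did on each
 * CPU: 1 when the new boost value raised that CPU's current maximum, -1 when
 * it lowered it and a full recomputation was required, and 0 when the
 * maximum was left unaffected.
 */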
#ifdef CONFIG_UCLAMP_TASK_GROUP
/**
 * cpu_util_update: update effective clamp
 * @css: the task group to update
 * @clamp_id: the clamp index to update
 * @group_id: the group index mapping the new task clamp value
 * @value: the new task group clamp value
 *
 * The effective clamp for a TG is expected to track the most restrictive
 * value between the ST's clamp value and its parent effective clamp value.
 * This method achieves that by:
 * 1. updating the current TG effective value
 * 2. walking all the descendant task groups that need an update
 *
 * A ST's effective clamp needs to be updated when its current value is not
 * matching the ST's clamp value. In this case indeed either:
 * a) the parent has got a more relaxed clamp value
 *    thus potentially we can relax the effective value for this group
 * b) the parent has got a more strict clamp value
 *    thus potentially we have to restrict the effective value of this group
 *
 * Restriction and relaxation of the current ST's effective clamp values need
 * to be propagated down to all the descendants. When a subgroup is found
 * which already has its effective clamp value matching its clamp value, then
 * we can safely skip all its descendants, which are guaranteed to be already
 * in sync.
 *
 * The ST's group_id is also updated to ensure it tracks the effective clamp
 * value.
 */
static void cpu_util_update(struct cgroup_subsys_state *css,
			    unsigned int clamp_id, unsigned int group_id,
			    unsigned int value)
{
	struct uclamp_se *uc_se;

	uc_se = &css_st(css)->uclamp[clamp_id];
	uc_se->effective.value = value;
	uc_se->effective.group_id = group_id;
}
/*
 * free_uclamp_sched_group: release utilization clamp references of a TG
 * @st: the schedtune being removed
 *
 * An empty task group can be removed only when it has no more tasks or child
 * groups. This means that we can also safely release all the reference
 * counting to clamp groups.
 */
static inline void free_uclamp_sched_group(struct schedtune *st)
{
	int clamp_id;

	for (clamp_id = 0; clamp_id < UCLAMP_CNT; ++clamp_id)
		uclamp_group_put(clamp_id, st->uclamp[clamp_id].group_id);
}

/**
 * alloc_uclamp_sched_group: initialize a new ST for utilization clamping
 * @st: the newly created schedtune
 *
 * A newly created schedtune inherits its utilization clamp values, for all
 * clamp indexes, from its parent task group.
 * This ensures that its values are properly initialized and that the task
 * group is accounted in the same parent's group index.
 *
 * Return: 1 on success, 0 on error
 */
static inline int alloc_uclamp_sched_group(struct schedtune *st)
{
	struct uclamp_se *uc_se;
	int clamp_id;

	for (clamp_id = 0; clamp_id < UCLAMP_CNT; ++clamp_id) {
		uc_se = &st->uclamp[clamp_id];

		uclamp_group_get(NULL, NULL, &st->uclamp[clamp_id],
				 clamp_id, uclamp_none(clamp_id));
		uc_se->effective.value = uc_se->value;
		uc_se->effective.group_id = uc_se->group_id;
	}

	return 1;
}
static int cpu_util_min_write_u64(struct cgroup_subsys_state *css,
				  struct cftype *cftype, u64 min_value)
{
	struct schedtune *st;
	int ret = 0;

	if (min_value > SCHED_CAPACITY_SCALE)
		return -ERANGE;

	if (!opp_capacity_tbl_ready)
		init_opp_capacity_tbl();

	min_value = find_fit_capacity(min_value);

	mutex_lock(&uclamp_mutex);
	rcu_read_lock();

	st = css_st(css);
	if (st->uclamp[UCLAMP_MIN].value == min_value)
		goto out;
	if (st->uclamp[UCLAMP_MAX].value < min_value) {
		ret = -EINVAL;
		goto out;
	}

	/* Update ST's reference count */
	uclamp_group_get(NULL, css, &st->uclamp[UCLAMP_MIN],
			 UCLAMP_MIN, min_value);

	/* Update effective clamps to track the most restrictive value */
	cpu_util_update(css, UCLAMP_MIN, st->uclamp[UCLAMP_MIN].group_id,
			min_value);

out:
	rcu_read_unlock();
	mutex_unlock(&uclamp_mutex);

	return ret;
}
static int cpu_util_min_pct_write_u64(struct cgroup_subsys_state *css,
				      struct cftype *cftype, u64 pct)
{
	u64 min_value;

	/* pct is unsigned; only the upper bound needs checking */
	if (pct > 100)
		return -ERANGE;

	min_value = scale_from_percent(pct);

	return cpu_util_min_write_u64(css, cftype, min_value);
}
static int cpu_util_max_write_u64(struct cgroup_subsys_state *css,
				  struct cftype *cftype, u64 max_value)
{
	struct schedtune *st;
	int ret = 0;

	if (max_value > SCHED_CAPACITY_SCALE)
		return -ERANGE;

	if (!opp_capacity_tbl_ready)
		init_opp_capacity_tbl();

	max_value = find_fit_capacity(max_value);

	mutex_lock(&uclamp_mutex);
	rcu_read_lock();

	st = css_st(css);
	if (st->uclamp[UCLAMP_MAX].value == max_value)
		goto out;
	if (st->uclamp[UCLAMP_MIN].value > max_value) {
		ret = -EINVAL;
		goto out;
	}

	/* Update ST's reference count */
	uclamp_group_get(NULL, css, &st->uclamp[UCLAMP_MAX],
			 UCLAMP_MAX, max_value);

	/* Update effective clamps to track the most restrictive value */
	cpu_util_update(css, UCLAMP_MAX, st->uclamp[UCLAMP_MAX].group_id,
			max_value);

out:
	rcu_read_unlock();
	mutex_unlock(&uclamp_mutex);

	return ret;
}
static int cpu_util_max_pct_write_u64(struct cgroup_subsys_state *css,
				      struct cftype *cftype, u64 pct)
{
	u64 max_value;

	/* pct is unsigned; only the upper bound needs checking */
	if (pct > 100)
		return -ERANGE;

	max_value = scale_from_percent(pct);

	return cpu_util_max_write_u64(css, cftype, max_value);
}
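/*
 * Usage sketch (assumptions: scale_from_percent() and find_fit_capacity()
 * are defined outside this file; scale_from_percent() presumably maps a
 * percentage into the SCHED_CAPACITY_SCALE range, roughly pct * 1024 / 100,
 * and find_fit_capacity() then snaps the result to a supported OPP
 * capacity). For example, writing 50 to the pct file would request a clamp
 * of about 512 before OPP fitting, via something like:
 *
 *   echo 50 > <stune-cgroup>/schedtune.util.min.pct
 */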
static inline u64 cpu_uclamp_read(struct cgroup_subsys_state *css,
				  enum uclamp_id clamp_id,
				  bool effective)
{
	struct schedtune *st;
	u64 util_clamp;

	rcu_read_lock();
	st = css_st(css);
	util_clamp = effective
		? st->uclamp[clamp_id].effective.value
		: st->uclamp[clamp_id].value;
	rcu_read_unlock();

	return util_clamp;
}

static u64 cpu_util_min_read_u64(struct cgroup_subsys_state *css,
				 struct cftype *cft)
{
	return cpu_uclamp_read(css, UCLAMP_MIN, false);
}

static u64 cpu_util_max_read_u64(struct cgroup_subsys_state *css,
				 struct cftype *cft)
{
	return cpu_uclamp_read(css, UCLAMP_MAX, false);
}

static u64 cpu_util_min_effective_read_u64(struct cgroup_subsys_state *css,
					   struct cftype *cft)
{
	return cpu_uclamp_read(css, UCLAMP_MIN, true);
}

static u64 cpu_util_max_effective_read_u64(struct cgroup_subsys_state *css,
					   struct cftype *cft)
{
	return cpu_uclamp_read(css, UCLAMP_MAX, true);
}

#else

static inline void free_uclamp_sched_group(struct schedtune *st) {}

static inline int alloc_uclamp_sched_group(struct schedtune *st)
{
	return 1;
}

#endif /* CONFIG_UCLAMP_TASK_GROUP */
#include "tune_plus.c"

#define ENQUEUE_TASK  1
#define DEQUEUE_TASK -1

static inline bool
schedtune_update_timestamp(struct task_struct *p)
{
	if (sched_feat(SCHEDTUNE_BOOST_HOLD_ALL))
		return true;

	return task_has_rt_policy(p);
}

static inline void
schedtune_tasks_update(struct task_struct *p, int cpu, int idx, int task_count)
{
	struct boost_groups *bg = &per_cpu(cpu_boost_groups, cpu);
	int tasks = bg->group[idx].tasks + task_count;

	/* Update the boosted tasks count, making sure it does not go negative */
	bg->group[idx].tasks = max(0, tasks);

	/* Update timeout on enqueue */
	if (task_count > 0) {
		u64 now = sched_clock_cpu(cpu);

		if (schedtune_update_timestamp(p))
			bg->group[idx].ts = now;

		/* Boost group activation or deactivation on that RQ */
		if (bg->group[idx].tasks == 1)
			schedtune_cpu_update(cpu, now);
	}

	trace_sched_tune_tasks_update(p, cpu, tasks, idx,
				      bg->group[idx].boost, bg->boost_max,
				      bg->group[idx].ts);
}
/*
 * NOTE: This function must be called while holding the lock on the CPU RQ
 */
void schedtune_enqueue_task(struct task_struct *p, int cpu)
{
	struct boost_groups *bg = &per_cpu(cpu_boost_groups, cpu);
	unsigned long irq_flags;
	struct schedtune *st;
	int idx;

	if (unlikely(!schedtune_initialized))
		return;

	/*
	 * Boost group accounting is protected by a per-cpu lock and requires
	 * interrupts to be disabled to avoid race conditions, for example on
	 * do_exit()::cgroup_exit() and during task migration.
	 */
	raw_spin_lock_irqsave(&bg->lock, irq_flags);
	rcu_read_lock();

	st = task_schedtune(p);
	idx = st->idx;

	schedtune_tasks_update(p, cpu, idx, ENQUEUE_TASK);

	rcu_read_unlock();
	raw_spin_unlock_irqrestore(&bg->lock, irq_flags);
}
int schedtune_can_attach(struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct cgroup_subsys_state *css;
	struct boost_groups *bg;
	struct rq_flags rq_flags;
	unsigned int cpu;
	struct rq *rq;
	int src_bg; /* Source boost group index */
	int dst_bg; /* Destination boost group index */
	int tasks;
	u64 now;

	if (unlikely(!schedtune_initialized))
		return 0;

	cgroup_taskset_for_each(task, css, tset) {
		/*
		 * Lock the RQ of the CPU the task is enqueued on to avoid
		 * race conditions with migration code while the task is
		 * being accounted
		 */
		rq = task_rq_lock(task, &rq_flags);

		if (!task->on_rq) {
			task_rq_unlock(rq, task, &rq_flags);
			continue;
		}

		/*
		 * Boost group accounting is protected by a per-cpu lock and
		 * requires interrupts to be disabled to avoid race
		 * conditions on...
		 */
		cpu = cpu_of(rq);
		bg = &per_cpu(cpu_boost_groups, cpu);
		raw_spin_lock(&bg->lock);

		dst_bg = css_st(css)->idx;
		src_bg = task_schedtune(task)->idx;

		/*
		 * The current task is not changing boost group, which can
		 * happen when the new hierarchy is in use.
		 */
		if (unlikely(dst_bg == src_bg)) {
			raw_spin_unlock(&bg->lock);
			task_rq_unlock(rq, task, &rq_flags);
			continue;
		}

		/*
		 * This is the case of a RUNNABLE task which is switching its
		 * current boost group.
		 */

		/* Move task from src to dst boost group */
		tasks = bg->group[src_bg].tasks - 1;
		bg->group[src_bg].tasks = max(0, tasks);
		bg->group[dst_bg].tasks += 1;

		/* Update boost hold start for this group */
		now = sched_clock_cpu(cpu);
		bg->group[dst_bg].ts = now;

		/* Force boost group re-evaluation at next boost check */
		bg->boost_ts = now - SCHEDTUNE_BOOST_HOLD_NS;

		raw_spin_unlock(&bg->lock);
		task_rq_unlock(rq, task, &rq_flags);
	}

	return 0;
}
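/*
 * Design note (a reading of the code above): rewinding bg->boost_ts by
 * SCHEDTUNE_BOOST_HOLD_NS makes any hold appear expired, so the next
 * schedtune_cpu_boost() call on this CPU falls through to
 * schedtune_cpu_update() and immediately accounts for the task's new boost
 * group rather than waiting for the previous hold to lapse.
 */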
void schedtune_cancel_attach(struct cgroup_taskset *tset)
{
	/*
	 * This can happen only if the SchedTune controller is mounted with
	 * other hierarchies and one of them fails. Since SchedTune is usually
	 * mounted on its own hierarchy, for the time being we do not
	 * implement a proper rollback mechanism.
	 */
	WARN(1, "SchedTune cancel attach not implemented");
}
/*
 * NOTE: This function must be called while holding the lock on the CPU RQ
 */
void schedtune_dequeue_task(struct task_struct *p, int cpu)
{
	struct boost_groups *bg = &per_cpu(cpu_boost_groups, cpu);
	unsigned long irq_flags;
	struct schedtune *st;
	int idx;

	if (unlikely(!schedtune_initialized))
		return;

	/*
	 * Boost group accounting is protected by a per-cpu lock and requires
	 * interrupts to be disabled to avoid race conditions on...
	 */
	raw_spin_lock_irqsave(&bg->lock, irq_flags);
	rcu_read_lock();

	st = task_schedtune(p);
	idx = st->idx;

	schedtune_tasks_update(p, cpu, idx, DEQUEUE_TASK);

	rcu_read_unlock();
	raw_spin_unlock_irqrestore(&bg->lock, irq_flags);
}
int schedtune_cpu_boost(int cpu)
{
	struct boost_groups *bg;
	u64 now;

	bg = &per_cpu(cpu_boost_groups, cpu);
	now = sched_clock_cpu(cpu);

	/* Check to see if we have a hold in effect */
	if (schedtune_boost_timeout(now, bg->boost_ts))
		schedtune_cpu_update(cpu, now);

	return bg->boost_max;
}
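/*
 * Consumption sketch (not part of this file): callers in the fair-class
 * boost path typically turn the returned boost percentage into a capacity
 * margin. A minimal sketch, assuming the usual SchedTune formulation and the
 * schedtune_spc_rdiv reciprocal initialized in schedtune_init() below:
 *
 *   margin = (SCHED_CAPACITY_SCALE - util) * boost;
 *   margin = reciprocal_divide(margin, schedtune_spc_rdiv); // i.e. / 100
 *   boosted_util = util + margin;
 */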
int schedtune_task_boost(struct task_struct *p)
{
	struct schedtune *st;
	int task_boost;

	if (unlikely(!schedtune_initialized))
		return 0;

	/* Get task boost value */
	rcu_read_lock();
	st = task_schedtune(p);
	task_boost = st->boost;
	rcu_read_unlock();

	return task_boost;
}

int schedtune_prefer_idle(struct task_struct *p)
{
	struct schedtune *st;
	int prefer_idle;

	if (unlikely(!schedtune_initialized))
		return 0;

	/* Get prefer_idle value */
	rcu_read_lock();
	st = task_schedtune(p);
	prefer_idle = st->prefer_idle;
	rcu_read_unlock();

	return prefer_idle;
}

static u64
prefer_idle_read(struct cgroup_subsys_state *css, struct cftype *cft)
{
	struct schedtune *st = css_st(css);

	return st->prefer_idle;
}

static int
prefer_idle_write(struct cgroup_subsys_state *css, struct cftype *cft,
		  u64 prefer_idle)
{
	struct schedtune *st = css_st(css);

	st->prefer_idle = !!prefer_idle;

#if MET_STUNE_DEBUG
	/* user: foreground */
	if (st->idx == 1)
		met_tag_oneshot(0, "sched_user_prefer_idle_fg",
				st->prefer_idle);
	/* user: top-app */
	if (st->idx == 3)
		met_tag_oneshot(0, "sched_user_prefer_idle_top",
				st->prefer_idle);
#endif

	return 0;
}
static s64
boost_read(struct cgroup_subsys_state *css, struct cftype *cft)
{
	struct schedtune *st = css_st(css);

	return st->boost;
}

static int
boost_write(struct cgroup_subsys_state *css, struct cftype *cft,
	    s64 boost)
{
	struct schedtune *st = css_st(css);

	if (boost < 0 || boost > 100)
		return -EINVAL;

	st->boost = boost;

	/* Update CPU boost */
	schedtune_boostgroup_update(st->idx, st->boost);

#if MET_STUNE_DEBUG
	/* user: foreground */
	if (st->idx == 1)
		met_tag_oneshot(0, "sched_user_boost_fg", st->boost);
	/* user: top-app */
	if (st->idx == 3)
		met_tag_oneshot(0, "sched_user_boost_top", st->boost);
#endif

	return 0;
}
static struct cftype files[] = {
	{
		.name = "boost",
		.read_s64 = boost_read,
		.write_s64 = boost_write,
	},
	{
		.name = "prefer_idle",
		.read_u64 = prefer_idle_read,
		.write_u64 = prefer_idle_write,
	},
#if defined(CONFIG_UCLAMP_TASK_GROUP)
	{
		.name = "util.min",
		.read_u64 = cpu_util_min_read_u64,
		.write_u64 = cpu_util_min_write_u64,
	},
	{
		.name = "util.min.pct",
		.write_u64 = cpu_util_min_pct_write_u64,
	},
	{
		.name = "util.min.effective",
		.read_u64 = cpu_util_min_effective_read_u64,
	},
	{
		.name = "util.max",
		.read_u64 = cpu_util_max_read_u64,
		.write_u64 = cpu_util_max_write_u64,
	},
	{
		.name = "util.max.pct",
		.write_u64 = cpu_util_max_pct_write_u64,
	},
	{
		.name = "util.max.effective",
		.read_u64 = cpu_util_max_effective_read_u64,
	},
#endif
	{ }	/* terminate */
};
static int
schedtune_boostgroup_init(struct schedtune *st)
{
	struct boost_groups *bg;
	int cpu;

	/* Keep track of allocated boost groups */
	allocated_group[st->idx] = st;

	/* Initialize the per CPU boost groups */
	for_each_possible_cpu(cpu) {
		bg = &per_cpu(cpu_boost_groups, cpu);
		bg->group[st->idx].boost = 0;
		bg->group[st->idx].tasks = 0;
		bg->group[st->idx].ts = 0;
	}

	return 0;
}

static struct cgroup_subsys_state *
schedtune_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct schedtune *st;
	int idx;

	if (!parent_css)
		return &root_schedtune.css;

	/* Allow only single level hierarchies */
	if (parent_css != &root_schedtune.css) {
		pr_err("Nested SchedTune boosting groups not allowed\n");
		return ERR_PTR(-ENOMEM);
	}

	/* Allow only a limited number of boosting groups */
	for (idx = 1; idx < BOOSTGROUPS_COUNT; ++idx)
		if (!allocated_group[idx])
			break;
	if (idx == BOOSTGROUPS_COUNT) {
		pr_err("Trying to create more than %d SchedTune boosting groups\n",
		       BOOSTGROUPS_COUNT);
		return ERR_PTR(-ENOSPC);
	}

	st = kzalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		goto out;

	/* Initialize per CPUs boost group support */
	st->idx = idx;
	if (schedtune_boostgroup_init(st))
		goto release;

	if (!alloc_uclamp_sched_group(st))
		goto release;

	return &st->css;

release:
	kfree(st);
out:
	return ERR_PTR(-ENOMEM);
}
static void
schedtune_boostgroup_release(struct schedtune *st)
{
	/* Reset this boost group */
	schedtune_boostgroup_update(st->idx, 0);

	/* Keep track of allocated boost groups */
	allocated_group[st->idx] = NULL;
}

static void
schedtune_css_free(struct cgroup_subsys_state *css)
{
	struct schedtune *st = css_st(css);

	schedtune_boostgroup_release(st);
	free_uclamp_sched_group(st);
	kfree(st);
}

struct cgroup_subsys schedtune_cgrp_subsys = {
	.css_alloc	= schedtune_css_alloc,
	.css_free	= schedtune_css_free,
	.can_attach	= schedtune_can_attach,
	.cancel_attach	= schedtune_cancel_attach,
	.legacy_cftypes	= files,
	.early_init	= 1,
};
#ifdef CONFIG_UCLAMP_TASK_GROUP
void schedtune_init_uclamp(void)
{
	struct uclamp_se *uc_se;
	unsigned int clamp_id;

	for (clamp_id = 0; clamp_id < UCLAMP_CNT; ++clamp_id) {
		/* Init root ST's clamp group */
		uc_se = &root_schedtune.uclamp[clamp_id];
		uclamp_group_get(NULL, NULL, uc_se, clamp_id,
				 uclamp_none(clamp_id));
		uc_se->effective.group_id = uc_se->group_id;
		uc_se->effective.value = uc_se->value;
	}
}
#endif

static inline void
schedtune_init_cgroups(void)
{
	struct boost_groups *bg;
	int cpu;

	/* Initialize the per CPU boost groups */
	for_each_possible_cpu(cpu) {
		bg = &per_cpu(cpu_boost_groups, cpu);
		memset(bg, 0, sizeof(struct boost_groups));
		raw_spin_lock_init(&bg->lock);
	}

	pr_info("schedtune: configured to support %d boost groups\n",
		BOOSTGROUPS_COUNT);

	schedtune_initialized = true;
}

/*
 * Initialize the cgroup structures
 */
static int
schedtune_init(void)
{
	/* set default threshold */
	calculate_default_stune_threshold();

	schedtune_spc_rdiv = reciprocal_value(100);

	schedtune_init_cgroups();

	return 0;
}
postcore_initcall(schedtune_init);
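/*
 * Userspace usage sketch (assumes the controller is mounted on its own
 * hierarchy, e.g. at /dev/stune as commonly done on Android; the legacy
 * cftype names above are exposed with the subsystem-name prefix):
 *
 *   mkdir /dev/stune/foreground
 *   echo 10 > /dev/stune/foreground/schedtune.boost
 *   echo 1  > /dev/stune/foreground/schedtune.prefer_idle
 *   echo <pid> > /dev/stune/foreground/tasks
 */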