// SPDX-License-Identifier: GPL-2.0
#include <linux/cgroup.h>
#include <linux/slab.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <linux/cpumask.h>
#include <linux/seq_file.h>
#include <linux/rcupdate.h>
#include <linux/kernel_stat.h>
#include <linux/err.h>
#include "sched.h"

/*
 * CPU accounting code for task groups.
 *
 * Based on the work by Paul Menage (menage@google.com) and Balbir Singh
 * (balbir@in.ibm.com).
 */

/* Time spent by the tasks of the cpu accounting group executing in ... */
enum cpuacct_stat_index {
        CPUACCT_STAT_USER,      /* ... user mode */
        CPUACCT_STAT_SYSTEM,    /* ... kernel mode */

        CPUACCT_STAT_NSTATS,
};

static const char * const cpuacct_stat_desc[] = {
        [CPUACCT_STAT_USER] = "user",
        [CPUACCT_STAT_SYSTEM] = "system",
};

struct cpuacct_usage {
        u64     usages[CPUACCT_STAT_NSTATS];
};

/* track cpu usage of a group of tasks and its child groups */
struct cpuacct {
        struct cgroup_subsys_state      css;
        /* cpuusage holds pointer to a u64-type object on every cpu */
        struct cpuacct_usage __percpu   *cpuusage;
        struct kernel_cpustat __percpu  *cpustat;
};

static inline struct cpuacct *css_ca(struct cgroup_subsys_state *css)
{
        return css ? container_of(css, struct cpuacct, css) : NULL;
}

/* return cpu accounting group to which this task belongs */
static inline struct cpuacct *task_ca(struct task_struct *tsk)
{
        return css_ca(task_css(tsk, cpuacct_cgrp_id));
}

static inline struct cpuacct *parent_ca(struct cpuacct *ca)
{
        return css_ca(ca->css.parent);
}
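
/*
 * These helpers are what cpuacct_charge() below uses to walk the
 * hierarchy: task_ca() finds the task's accounting group, and repeated
 * parent_ca() calls follow css.parent upward until they return NULL
 * above the root group.
 */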

static DEFINE_PER_CPU(struct cpuacct_usage, root_cpuacct_cpuusage);
static struct cpuacct root_cpuacct = {
        .cpustat        = &kernel_cpustat,
        .cpuusage       = &root_cpuacct_cpuusage,
};
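
/*
 * The root group's per-cpu counters are statically allocated rather
 * than coming from alloc_percpu(). This matches .early_init = true in
 * cpuacct_cgrp_subsys at the bottom of this file: the root group is set
 * up during early boot, without relying on the dynamic allocators. Its
 * cpustat also aliases the global kernel_cpustat, so root-level stats
 * are simply the system-wide accounting.
 */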

/* create a new cpu accounting group */
static struct cgroup_subsys_state *
cpuacct_css_alloc(struct cgroup_subsys_state *parent_css)
{
        struct cpuacct *ca;

        if (!parent_css)
                return &root_cpuacct.css;

        ca = kzalloc(sizeof(*ca), GFP_KERNEL);
        if (!ca)
                goto out;

        ca->cpuusage = alloc_percpu(struct cpuacct_usage);
        if (!ca->cpuusage)
                goto out_free_ca;

        ca->cpustat = alloc_percpu(struct kernel_cpustat);
        if (!ca->cpustat)
                goto out_free_cpuusage;

        return &ca->css;

out_free_cpuusage:
        free_percpu(ca->cpuusage);
out_free_ca:
        kfree(ca);
out:
        return ERR_PTR(-ENOMEM);
}

/* destroy an existing cpu accounting group */
static void cpuacct_css_free(struct cgroup_subsys_state *css)
{
        struct cpuacct *ca = css_ca(css);

        free_percpu(ca->cpustat);
        free_percpu(ca->cpuusage);
        kfree(ca);
}

static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu,
                                 enum cpuacct_stat_index index)
{
        struct cpuacct_usage *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
        u64 data;

        /*
         * We allow index == CPUACCT_STAT_NSTATS here to read
         * the sum of usages.
         */
        BUG_ON(index > CPUACCT_STAT_NSTATS);

#ifndef CONFIG_64BIT
        /*
         * Take rq->lock to make 64-bit read safe on 32-bit platforms.
         */
        raw_spin_lock_irq(&cpu_rq(cpu)->lock);
#endif

        if (index == CPUACCT_STAT_NSTATS) {
                int i = 0;

                data = 0;
                for (i = 0; i < CPUACCT_STAT_NSTATS; i++)
                        data += cpuusage->usages[i];
        } else {
                data = cpuusage->usages[index];
        }

#ifndef CONFIG_64BIT
        raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
#endif

        return data;
}
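
/*
 * The rq->lock dance above works because the writer, cpuacct_charge(),
 * runs with rq->lock already held: on 32-bit platforms a 64-bit counter
 * update is not atomic, so taking the same lock here keeps a reader
 * from seeing a torn (half-updated) value.
 */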

static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
{
        struct cpuacct_usage *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
        int i;

#ifndef CONFIG_64BIT
        /*
         * Take rq->lock to make 64-bit write safe on 32-bit platforms.
         */
        raw_spin_lock_irq(&cpu_rq(cpu)->lock);
#endif

        for (i = 0; i < CPUACCT_STAT_NSTATS; i++)
                cpuusage->usages[i] = val;

#ifndef CONFIG_64BIT
        raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
#endif
}
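
/*
 * Note that the only caller, cpuusage_write() below, rejects any value
 * other than 0, so in practice this resets every stat counter of the
 * group on the given cpu rather than setting an arbitrary value.
 */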

/* return total cpu usage (in nanoseconds) of a group */
static u64 __cpuusage_read(struct cgroup_subsys_state *css,
                           enum cpuacct_stat_index index)
{
        struct cpuacct *ca = css_ca(css);
        u64 totalcpuusage = 0;
        int i;

        for_each_possible_cpu(i)
                totalcpuusage += cpuacct_cpuusage_read(ca, i, index);

        return totalcpuusage;
}

static u64 cpuusage_user_read(struct cgroup_subsys_state *css,
                              struct cftype *cft)
{
        return __cpuusage_read(css, CPUACCT_STAT_USER);
}

static u64 cpuusage_sys_read(struct cgroup_subsys_state *css,
                             struct cftype *cft)
{
        return __cpuusage_read(css, CPUACCT_STAT_SYSTEM);
}

static u64 cpuusage_read(struct cgroup_subsys_state *css, struct cftype *cft)
{
        return __cpuusage_read(css, CPUACCT_STAT_NSTATS);
}

static int cpuusage_write(struct cgroup_subsys_state *css, struct cftype *cft,
                          u64 val)
{
        struct cpuacct *ca = css_ca(css);
        int cpu;

        /*
         * Only allow '0' here to do a reset.
         */
        if (val)
                return -EINVAL;

        for_each_possible_cpu(cpu)
                cpuacct_cpuusage_write(ca, cpu, 0);

        return 0;
}
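
/*
 * From userspace this is the reset knob. A sketch, assuming the v1
 * cpuacct hierarchy is mounted at /sys/fs/cgroup/cpuacct and a child
 * group named "mygrp" exists:
 *
 *      echo 0 > /sys/fs/cgroup/cpuacct/mygrp/cpuacct.usage    # reset
 *      echo 1 > /sys/fs/cgroup/cpuacct/mygrp/cpuacct.usage    # -EINVAL
 */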

static int __cpuacct_percpu_seq_show(struct seq_file *m,
                                     enum cpuacct_stat_index index)
{
        struct cpuacct *ca = css_ca(seq_css(m));
        u64 percpu;
        int i;

        for_each_possible_cpu(i) {
                percpu = cpuacct_cpuusage_read(ca, i, index);
                seq_printf(m, "%llu ", (unsigned long long) percpu);
        }
        seq_printf(m, "\n");
        return 0;
}

static int cpuacct_percpu_user_seq_show(struct seq_file *m, void *V)
{
        return __cpuacct_percpu_seq_show(m, CPUACCT_STAT_USER);
}

static int cpuacct_percpu_sys_seq_show(struct seq_file *m, void *V)
{
        return __cpuacct_percpu_seq_show(m, CPUACCT_STAT_SYSTEM);
}

static int cpuacct_percpu_seq_show(struct seq_file *m, void *V)
{
        return __cpuacct_percpu_seq_show(m, CPUACCT_STAT_NSTATS);
}

static int cpuacct_all_seq_show(struct seq_file *m, void *V)
{
        struct cpuacct *ca = css_ca(seq_css(m));
        int index;
        int cpu;

        seq_puts(m, "cpu");
        for (index = 0; index < CPUACCT_STAT_NSTATS; index++)
                seq_printf(m, " %s", cpuacct_stat_desc[index]);
        seq_puts(m, "\n");

        for_each_possible_cpu(cpu) {
                struct cpuacct_usage *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);

                seq_printf(m, "%d", cpu);

                for (index = 0; index < CPUACCT_STAT_NSTATS; index++) {
#ifndef CONFIG_64BIT
                        /*
                         * Take rq->lock to make 64-bit read safe on 32-bit
                         * platforms.
                         */
                        raw_spin_lock_irq(&cpu_rq(cpu)->lock);
#endif
                        seq_printf(m, " %llu", cpuusage->usages[index]);
#ifndef CONFIG_64BIT
                        raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
#endif
                }
                seq_puts(m, "\n");
        }
        return 0;
}

static int cpuacct_stats_show(struct seq_file *sf, void *v)
{
        struct cpuacct *ca = css_ca(seq_css(sf));
        s64 val[CPUACCT_STAT_NSTATS];
        int cpu;
        int stat;

        memset(val, 0, sizeof(val));
        for_each_possible_cpu(cpu) {
                u64 *cpustat = per_cpu_ptr(ca->cpustat, cpu)->cpustat;

                val[CPUACCT_STAT_USER]   += cpustat[CPUTIME_USER];
                val[CPUACCT_STAT_USER]   += cpustat[CPUTIME_NICE];
                val[CPUACCT_STAT_SYSTEM] += cpustat[CPUTIME_SYSTEM];
                val[CPUACCT_STAT_SYSTEM] += cpustat[CPUTIME_IRQ];
                val[CPUACCT_STAT_SYSTEM] += cpustat[CPUTIME_SOFTIRQ];
        }

        for (stat = 0; stat < CPUACCT_STAT_NSTATS; stat++) {
                seq_printf(sf, "%s %lld\n",
                           cpuacct_stat_desc[stat],
                           (long long)nsec_to_clock_t(val[stat]));
        }

        return 0;
}
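
/*
 * Units differ between files: the cpuacct.usage* files report raw
 * nanoseconds, while cpuacct.stat above converts to clock ticks via
 * nsec_to_clock_t(), i.e. USER_HZ units, matching what tools built
 * around /proc/stat expect.
 */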

static struct cftype files[] = {
        {
                .name = "usage",
                .read_u64 = cpuusage_read,
                .write_u64 = cpuusage_write,
        },
        {
                .name = "usage_user",
                .read_u64 = cpuusage_user_read,
        },
        {
                .name = "usage_sys",
                .read_u64 = cpuusage_sys_read,
        },
        {
                .name = "usage_percpu",
                .seq_show = cpuacct_percpu_seq_show,
        },
        {
                .name = "usage_percpu_user",
                .seq_show = cpuacct_percpu_user_seq_show,
        },
        {
                .name = "usage_percpu_sys",
                .seq_show = cpuacct_percpu_sys_seq_show,
        },
        {
                .name = "usage_all",
                .seq_show = cpuacct_all_seq_show,
        },
        {
                .name = "stat",
                .seq_show = cpuacct_stats_show,
        },
        { }     /* terminate */
};
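
/*
 * Each entry above appears in cgroupfs prefixed with the subsystem
 * name. A sketch of reading them, again assuming a v1 mount at
 * /sys/fs/cgroup/cpuacct with a child group "mygrp":
 *
 *      cat .../mygrp/cpuacct.usage             # total ns, all cpus
 *      cat .../mygrp/cpuacct.usage_percpu      # one ns value per cpu
 *      cat .../mygrp/cpuacct.stat              # "user N" / "system N" ticks
 */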

/*
 * charge this task's execution time to its accounting group.
 *
 * called with rq->lock held.
 */
void cpuacct_charge(struct task_struct *tsk, u64 cputime)
{
        struct cpuacct *ca;
        int index = CPUACCT_STAT_SYSTEM;
        struct pt_regs *regs = task_pt_regs(tsk);

        if (regs && user_mode(regs))
                index = CPUACCT_STAT_USER;

        rcu_read_lock();

        for (ca = task_ca(tsk); ca; ca = parent_ca(ca))
                this_cpu_ptr(ca->cpuusage)->usages[index] += cputime;

        rcu_read_unlock();
}
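
/*
 * Unlike cpuacct_account_field() below, this walk includes the root
 * group: the loop only stops once parent_ca() returns NULL above the
 * root, so every ancestor's per-cpu usage counter is charged.
 */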

/*
 * Add user/system time to cpuacct.
 *
 * Note: it's the caller that updates the account of the root cgroup.
 */
void cpuacct_account_field(struct task_struct *tsk, int index, u64 val)
{
        struct cpuacct *ca;

        rcu_read_lock();
        for (ca = task_ca(tsk); ca != &root_cpuacct; ca = parent_ca(ca))
                this_cpu_ptr(ca->cpustat)->cpustat[index] += val;
        rcu_read_unlock();
}
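
/*
 * Stopping at &root_cpuacct in cpuacct_account_field() is deliberate:
 * the root group's cpustat aliases the global kernel_cpustat, and as
 * the comment above notes, the caller accounts to that directly.
 */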

struct cgroup_subsys cpuacct_cgrp_subsys = {
        .css_alloc      = cpuacct_css_alloc,
        .css_free       = cpuacct_css_free,
        .legacy_cftypes = files,
        .early_init     = true,
};