/* kernel/sched/cpuacct.c */
#include <linux/cgroup.h>
#include <linux/slab.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <linux/cpumask.h>
#include <linux/seq_file.h>
#include <linux/rcupdate.h>
#include <linux/kernel_stat.h>
#include <linux/err.h>

#include "sched.h"
/*
 * CPU accounting code for task groups.
 *
 * Based on the work by Paul Menage (menage@google.com) and Balbir Singh
 * (balbir@in.ibm.com).
 */
/* Time spent by the tasks of the cpu accounting group executing in ... */
enum cpuacct_stat_index {
	CPUACCT_STAT_USER,	/* ... user mode */
	CPUACCT_STAT_SYSTEM,	/* ... kernel mode */

	/*
	 * Count of real stats above; also doubles as the "sum of all
	 * stats" selector accepted by cpuacct_cpuusage_read().
	 */
	CPUACCT_STAT_NSTATS,
};
/* Stat names as shown in the "cpuacct.stat" and "cpuacct.usage_all" files. */
static const char * const cpuacct_stat_desc[] = {
	[CPUACCT_STAT_USER] = "user",
	[CPUACCT_STAT_SYSTEM] = "system",
};
/* Per-cpu usage counters (nanoseconds), one slot per cpuacct_stat_index. */
struct cpuacct_usage {
	u64	usages[CPUACCT_STAT_NSTATS];
};
/* track cpu usage of a group of tasks and its child groups */
struct cpuacct {
	struct cgroup_subsys_state	css;
	/* cpuusage holds pointer to a u64-type object on every cpu */
	struct cpuacct_usage __percpu	*cpuusage;
	/* per-cpu user/system/etc. time buckets, see CPUTIME_* indices */
	struct kernel_cpustat __percpu	*cpustat;
};
  37. static inline struct cpuacct *css_ca(struct cgroup_subsys_state *css)
  38. {
  39. return css ? container_of(css, struct cpuacct, css) : NULL;
  40. }
  41. /* return cpu accounting group to which this task belongs */
  42. static inline struct cpuacct *task_ca(struct task_struct *tsk)
  43. {
  44. return css_ca(task_css(tsk, cpuacct_cgrp_id));
  45. }
  46. static inline struct cpuacct *parent_ca(struct cpuacct *ca)
  47. {
  48. return css_ca(ca->css.parent);
  49. }
/*
 * The root group is statically allocated (see cpuacct_css_alloc) and
 * shares its cpustat with the global kernel_cpustat, since the root
 * accounts all CPU time on the system.
 */
static DEFINE_PER_CPU(struct cpuacct_usage, root_cpuacct_cpuusage);
static struct cpuacct root_cpuacct = {
	.cpustat	= &kernel_cpustat,
	.cpuusage	= &root_cpuacct_cpuusage,
};
  55. /* create a new cpu accounting group */
  56. static struct cgroup_subsys_state *
  57. cpuacct_css_alloc(struct cgroup_subsys_state *parent_css)
  58. {
  59. struct cpuacct *ca;
  60. if (!parent_css)
  61. return &root_cpuacct.css;
  62. ca = kzalloc(sizeof(*ca), GFP_KERNEL);
  63. if (!ca)
  64. goto out;
  65. ca->cpuusage = alloc_percpu(struct cpuacct_usage);
  66. if (!ca->cpuusage)
  67. goto out_free_ca;
  68. ca->cpustat = alloc_percpu(struct kernel_cpustat);
  69. if (!ca->cpustat)
  70. goto out_free_cpuusage;
  71. return &ca->css;
  72. out_free_cpuusage:
  73. free_percpu(ca->cpuusage);
  74. out_free_ca:
  75. kfree(ca);
  76. out:
  77. return ERR_PTR(-ENOMEM);
  78. }
  79. /* destroy an existing cpu accounting group */
  80. static void cpuacct_css_free(struct cgroup_subsys_state *css)
  81. {
  82. struct cpuacct *ca = css_ca(css);
  83. free_percpu(ca->cpustat);
  84. free_percpu(ca->cpuusage);
  85. kfree(ca);
  86. }
/*
 * Read one usage counter (or, for index == CPUACCT_STAT_NSTATS, the sum
 * of all counters) of @ca on @cpu, in nanoseconds.
 */
static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu,
				 enum cpuacct_stat_index index)
{
	struct cpuacct_usage *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
	u64 data;

	/*
	 * We allow index == CPUACCT_STAT_NSTATS here to read
	 * the sum of usages.
	 */
	BUG_ON(index > CPUACCT_STAT_NSTATS);

#ifndef CONFIG_64BIT
	/*
	 * Take rq->lock to make 64-bit read safe on 32-bit platforms.
	 */
	raw_spin_lock_irq(&cpu_rq(cpu)->lock);
#endif

	if (index == CPUACCT_STAT_NSTATS) {
		int i = 0;

		data = 0;
		for (i = 0; i < CPUACCT_STAT_NSTATS; i++)
			data += cpuusage->usages[i];
	} else {
		data = cpuusage->usages[index];
	}

#ifndef CONFIG_64BIT
	raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
#endif

	return data;
}
/*
 * Set every usage counter of @ca on @cpu to @val.
 *
 * Note: all CPUACCT_STAT_* slots are set to the same value; the only
 * caller (cpuusage_write) passes 0, using this as a reset.
 */
static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
{
	struct cpuacct_usage *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
	int i;

#ifndef CONFIG_64BIT
	/*
	 * Take rq->lock to make 64-bit write safe on 32-bit platforms.
	 */
	raw_spin_lock_irq(&cpu_rq(cpu)->lock);
#endif

	for (i = 0; i < CPUACCT_STAT_NSTATS; i++)
		cpuusage->usages[i] = val;

#ifndef CONFIG_64BIT
	raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
#endif
}
  132. /* return total cpu usage (in nanoseconds) of a group */
  133. static u64 __cpuusage_read(struct cgroup_subsys_state *css,
  134. enum cpuacct_stat_index index)
  135. {
  136. struct cpuacct *ca = css_ca(css);
  137. u64 totalcpuusage = 0;
  138. int i;
  139. for_each_possible_cpu(i)
  140. totalcpuusage += cpuacct_cpuusage_read(ca, i, index);
  141. return totalcpuusage;
  142. }
  143. static u64 cpuusage_user_read(struct cgroup_subsys_state *css,
  144. struct cftype *cft)
  145. {
  146. return __cpuusage_read(css, CPUACCT_STAT_USER);
  147. }
  148. static u64 cpuusage_sys_read(struct cgroup_subsys_state *css,
  149. struct cftype *cft)
  150. {
  151. return __cpuusage_read(css, CPUACCT_STAT_SYSTEM);
  152. }
  153. static u64 cpuusage_read(struct cgroup_subsys_state *css, struct cftype *cft)
  154. {
  155. return __cpuusage_read(css, CPUACCT_STAT_NSTATS);
  156. }
  157. static int cpuusage_write(struct cgroup_subsys_state *css, struct cftype *cft,
  158. u64 val)
  159. {
  160. struct cpuacct *ca = css_ca(css);
  161. int cpu;
  162. /*
  163. * Only allow '0' here to do a reset.
  164. */
  165. if (val)
  166. return -EINVAL;
  167. for_each_possible_cpu(cpu)
  168. cpuacct_cpuusage_write(ca, cpu, 0);
  169. return 0;
  170. }
  171. static int __cpuacct_percpu_seq_show(struct seq_file *m,
  172. enum cpuacct_stat_index index)
  173. {
  174. struct cpuacct *ca = css_ca(seq_css(m));
  175. u64 percpu;
  176. int i;
  177. for_each_possible_cpu(i) {
  178. percpu = cpuacct_cpuusage_read(ca, i, index);
  179. seq_printf(m, "%llu ", (unsigned long long) percpu);
  180. }
  181. seq_printf(m, "\n");
  182. return 0;
  183. }
  184. static int cpuacct_percpu_user_seq_show(struct seq_file *m, void *V)
  185. {
  186. return __cpuacct_percpu_seq_show(m, CPUACCT_STAT_USER);
  187. }
  188. static int cpuacct_percpu_sys_seq_show(struct seq_file *m, void *V)
  189. {
  190. return __cpuacct_percpu_seq_show(m, CPUACCT_STAT_SYSTEM);
  191. }
  192. static int cpuacct_percpu_seq_show(struct seq_file *m, void *V)
  193. {
  194. return __cpuacct_percpu_seq_show(m, CPUACCT_STAT_NSTATS);
  195. }
  196. static int cpuacct_all_seq_show(struct seq_file *m, void *V)
  197. {
  198. struct cpuacct *ca = css_ca(seq_css(m));
  199. int index;
  200. int cpu;
  201. seq_puts(m, "cpu");
  202. for (index = 0; index < CPUACCT_STAT_NSTATS; index++)
  203. seq_printf(m, " %s", cpuacct_stat_desc[index]);
  204. seq_puts(m, "\n");
  205. for_each_possible_cpu(cpu) {
  206. struct cpuacct_usage *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
  207. seq_printf(m, "%d", cpu);
  208. for (index = 0; index < CPUACCT_STAT_NSTATS; index++) {
  209. #ifndef CONFIG_64BIT
  210. /*
  211. * Take rq->lock to make 64-bit read safe on 32-bit
  212. * platforms.
  213. */
  214. raw_spin_lock_irq(&cpu_rq(cpu)->lock);
  215. #endif
  216. seq_printf(m, " %llu", cpuusage->usages[index]);
  217. #ifndef CONFIG_64BIT
  218. raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
  219. #endif
  220. }
  221. seq_puts(m, "\n");
  222. }
  223. return 0;
  224. }
  225. static int cpuacct_stats_show(struct seq_file *sf, void *v)
  226. {
  227. struct cpuacct *ca = css_ca(seq_css(sf));
  228. s64 val[CPUACCT_STAT_NSTATS];
  229. int cpu;
  230. int stat;
  231. memset(val, 0, sizeof(val));
  232. for_each_possible_cpu(cpu) {
  233. u64 *cpustat = per_cpu_ptr(ca->cpustat, cpu)->cpustat;
  234. val[CPUACCT_STAT_USER] += cpustat[CPUTIME_USER];
  235. val[CPUACCT_STAT_USER] += cpustat[CPUTIME_NICE];
  236. val[CPUACCT_STAT_SYSTEM] += cpustat[CPUTIME_SYSTEM];
  237. val[CPUACCT_STAT_SYSTEM] += cpustat[CPUTIME_IRQ];
  238. val[CPUACCT_STAT_SYSTEM] += cpustat[CPUTIME_SOFTIRQ];
  239. }
  240. for (stat = 0; stat < CPUACCT_STAT_NSTATS; stat++) {
  241. seq_printf(sf, "%s %lld\n",
  242. cpuacct_stat_desc[stat],
  243. cputime64_to_clock_t(val[stat]));
  244. }
  245. return 0;
  246. }
/* Control files exposed in every cpuacct cgroup directory. */
static struct cftype files[] = {
	{
		/* total usage; writing 0 resets all per-cpu counters */
		.name = "usage",
		.read_u64 = cpuusage_read,
		.write_u64 = cpuusage_write,
	},
	{
		.name = "usage_user",
		.read_u64 = cpuusage_user_read,
	},
	{
		.name = "usage_sys",
		.read_u64 = cpuusage_sys_read,
	},
	{
		.name = "usage_percpu",
		.seq_show = cpuacct_percpu_seq_show,
	},
	{
		.name = "usage_percpu_user",
		.seq_show = cpuacct_percpu_user_seq_show,
	},
	{
		.name = "usage_percpu_sys",
		.seq_show = cpuacct_percpu_sys_seq_show,
	},
	{
		/* header row plus one row per cpu, all stats */
		.name = "usage_all",
		.seq_show = cpuacct_all_seq_show,
	},
	{
		/* user/system time in clock ticks, from cpustat */
		.name = "stat",
		.seq_show = cpuacct_stats_show,
	},
	{ }	/* terminate */
};
  283. /*
  284. * charge this task's execution time to its accounting group.
  285. *
  286. * called with rq->lock held.
  287. */
  288. void cpuacct_charge(struct task_struct *tsk, u64 cputime)
  289. {
  290. struct cpuacct *ca;
  291. int index = CPUACCT_STAT_SYSTEM;
  292. struct pt_regs *regs = task_pt_regs(tsk);
  293. if (regs && user_mode(regs))
  294. index = CPUACCT_STAT_USER;
  295. rcu_read_lock();
  296. for (ca = task_ca(tsk); ca; ca = parent_ca(ca))
  297. this_cpu_ptr(ca->cpuusage)->usages[index] += cputime;
  298. rcu_read_unlock();
  299. }
  300. /*
  301. * Add user/system time to cpuacct.
  302. *
  303. * Note: it's the caller that updates the account of the root cgroup.
  304. */
  305. void cpuacct_account_field(struct task_struct *tsk, int index, u64 val)
  306. {
  307. struct cpuacct *ca;
  308. rcu_read_lock();
  309. for (ca = task_ca(tsk); ca != &root_cpuacct; ca = parent_ca(ca))
  310. this_cpu_ptr(ca->cpustat)->cpustat[index] += val;
  311. rcu_read_unlock();
  312. }
/* Registration of the cpuacct cgroup subsystem (legacy/v1 hierarchy files). */
struct cgroup_subsys cpuacct_cgrp_subsys = {
	.css_alloc	= cpuacct_css_alloc,
	.css_free	= cpuacct_css_free,
	.legacy_cftypes	= files,
	/* needed before the scheduler starts charging time to groups */
	.early_init	= true,
};