stats.h

#ifdef CONFIG_SCHEDSTATS

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{
        if (rq) {
                rq->rq_sched_info.run_delay += delta;
                rq->rq_sched_info.pcount++;
        }
}

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{
        if (rq)
                rq->rq_cpu_time += delta;
}

static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{
        if (rq)
                rq->rq_sched_info.run_delay += delta;
}

#define schedstat_enabled() static_branch_unlikely(&sched_schedstats)
#define schedstat_inc(var) do { if (schedstat_enabled()) { var++; } } while (0)
#define schedstat_add(var, amt) do { if (schedstat_enabled()) { var += (amt); } } while (0)
#define schedstat_set(var, val) do { if (schedstat_enabled()) { var = (val); } } while (0)
#define schedstat_val(var) (var)
#define schedstat_val_or_zero(var) ((schedstat_enabled()) ? (var) : 0)

#else /* !CONFIG_SCHEDSTATS */

static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{}
#define schedstat_enabled() 0
#define schedstat_inc(var) do { } while (0)
#define schedstat_add(var, amt) do { } while (0)
#define schedstat_set(var, val) do { } while (0)
#define schedstat_val(var) 0
#define schedstat_val_or_zero(var) 0

#endif /* CONFIG_SCHEDSTATS */
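
/*
 * Usage sketch (illustrative, not part of the original header): the schedstat
 * macros above let call sites update statistics unconditionally in the source
 * while the static branch compiles the work away when schedstats are disabled.
 * For example, with a hypothetical per-entity wait counter:
 *
 *        schedstat_inc(se->statistics.wait_count);
 *        schedstat_add(se->statistics.wait_sum, delta);
 *        wait = schedstat_val_or_zero(se->statistics.wait_sum);
 *
 * Under !CONFIG_SCHEDSTATS the first two lines become empty statements and
 * schedstat_val_or_zero() evaluates to 0, so no #ifdef is needed at the call
 * site.
 */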
#ifdef CONFIG_SCHED_INFO

static inline void sched_info_reset_dequeued(struct task_struct *t)
{
        t->sched_info.last_queued = 0;
}
/*
 * We are interested in knowing how long it was from the *first* time a
 * task was queued to the time that it finally hit a cpu. We call this routine
 * from dequeue_task() to account for possible rq->clock skew across cpus;
 * the delta taken on each cpu would annul the skew.
 */
static inline void sched_info_dequeued(struct rq *rq, struct task_struct *t)
{
        unsigned long long now = rq_clock(rq), delta = 0;

        if (unlikely(sched_info_on()))
                if (t->sched_info.last_queued)
                        delta = now - t->sched_info.last_queued;
        sched_info_reset_dequeued(t);
        t->sched_info.run_delay += delta;

        rq_sched_info_dequeued(rq, delta);
}
/*
 * Called when a task finally hits the cpu. We can now calculate how
 * long it was waiting to run. We also note when it began so that we
 * can keep stats on how long its timeslice is.
 */
static void sched_info_arrive(struct rq *rq, struct task_struct *t)
{
        unsigned long long now = rq_clock(rq), delta = 0;

        if (t->sched_info.last_queued)
                delta = now - t->sched_info.last_queued;
        sched_info_reset_dequeued(t);
        t->sched_info.run_delay += delta;
        t->sched_info.last_arrival = now;
        t->sched_info.pcount++;

        rq_sched_info_arrive(rq, delta);
}
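
/*
 * Worked example (illustrative): suppose sched_info_queued() stamped
 * last_queued = 1000 (ns of rq_clock()) and the task first gets the CPU when
 * rq_clock() reads 1300. sched_info_arrive() then computes delta = 300 ns,
 * adds it to t->sched_info.run_delay and to the runqueue-wide counters via
 * rq_sched_info_arrive(), clears last_queued, and records last_arrival = 1300
 * so that sched_info_depart() can later measure how long the task ran.
 */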
/*
 * This function is only called from enqueue_task(), and it only updates
 * the timestamp if it is not already set. It's assumed that
 * sched_info_dequeued() will clear that stamp when appropriate.
 */
static inline void sched_info_queued(struct rq *rq, struct task_struct *t)
{
        if (unlikely(sched_info_on()))
                if (!t->sched_info.last_queued)
                        t->sched_info.last_queued = rq_clock(rq);
}
/*
 * Called when a process ceases to be the currently running process,
 * typically involuntarily due to expiring its time slice (this may also
 * be called when switching to the idle task). Now we can calculate how
 * long we ran.
 * Also, if the process is still in the TASK_RUNNING state, call
 * sched_info_queued() to mark that it has now again started waiting on
 * the runqueue.
 */
static inline void sched_info_depart(struct rq *rq, struct task_struct *t)
{
        unsigned long long delta = rq_clock(rq) -
                t->sched_info.last_arrival;

        rq_sched_info_depart(rq, delta);

        if (t->state == TASK_RUNNING)
                sched_info_queued(rq, t);
}
/*
 * Called when tasks are switched, typically involuntarily due to expiring
 * their time slice. (This may also be called when switching to or from
 * the idle task.) We are only called when prev != next.
 */
static inline void
__sched_info_switch(struct rq *rq,
                    struct task_struct *prev, struct task_struct *next)
{
        /*
         * prev now departs the cpu. It's not interesting to record
         * stats about how efficient we were at scheduling the idle
         * process, however.
         */
        if (prev != rq->idle)
                sched_info_depart(rq, prev);

        if (next != rq->idle)
                sched_info_arrive(rq, next);
}

static inline void
sched_info_switch(struct rq *rq,
                  struct task_struct *prev, struct task_struct *next)
{
        if (unlikely(sched_info_on()))
                __sched_info_switch(rq, prev, next);
}
#else
#define sched_info_queued(rq, t) do { } while (0)
#define sched_info_reset_dequeued(t) do { } while (0)
#define sched_info_dequeued(rq, t) do { } while (0)
#define sched_info_depart(rq, t) do { } while (0)
#define sched_info_arrive(rq, next) do { } while (0)
#define sched_info_switch(rq, t, next) do { } while (0)
#endif /* CONFIG_SCHED_INFO */
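
/*
 * Call-site sketch (simplified and assumed, not the actual core-scheduler
 * code): the core enqueue/dequeue and context-switch paths are expected to
 * drive these hooks roughly as follows, with p, prev and next being the
 * tasks involved:
 *
 *        static void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
 *        {
 *                sched_info_queued(rq, p);
 *                p->sched_class->enqueue_task(rq, p, flags);
 *        }
 *
 *        static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
 *        {
 *                sched_info_dequeued(rq, p);
 *                p->sched_class->dequeue_task(rq, p, flags);
 *        }
 *
 *        // on every context switch, before prev is switched out:
 *        sched_info_switch(rq, prev, next);
 *
 * With CONFIG_SCHED_INFO disabled, all of these expand to empty statements.
 */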
/*
 * The following are functions that support scheduler-internal time accounting.
 * These functions are generally called at the timer tick. None of this depends
 * on CONFIG_SCHEDSTATS.
 */

/**
 * cputimer_running - return true if cputimer is running
 *
 * @tsk: Pointer to target task.
 */
static inline bool cputimer_running(struct task_struct *tsk)
{
        struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

        /* Check if cputimer isn't running. This is accessed without locking. */
        if (!READ_ONCE(cputimer->running))
                return false;

        /*
         * After we flush the task's sum_exec_runtime to sig->sum_sched_runtime
         * in __exit_signal(), we no longer account further cputime consumed
         * by that task to the signal struct, even though the task can still
         * be ticking after __exit_signal().
         *
         * In order to keep a consistent behaviour between thread group cputime
         * and thread group cputimer accounting, let's also ignore the cputime
         * elapsing after __exit_signal() in any running thread group timer.
         *
         * This makes sure that POSIX CPU clocks and timers are synchronized, so
         * that a POSIX CPU timer won't expire while the corresponding POSIX CPU
         * clock delta is behind the expiring timer value.
         */
        if (unlikely(!tsk->sighand))
                return false;

        return true;
}
/**
 * account_group_user_time - Maintain utime for a thread group.
 *
 * @tsk: Pointer to task structure.
 * @cputime: Time value by which to increment the utime field of the
 *           thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the utime field there.
 */
static inline void account_group_user_time(struct task_struct *tsk,
                                           cputime_t cputime)
{
        struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

        if (!cputimer_running(tsk))
                return;

        atomic64_add(cputime, &cputimer->cputime_atomic.utime);
}

/**
 * account_group_system_time - Maintain stime for a thread group.
 *
 * @tsk: Pointer to task structure.
 * @cputime: Time value by which to increment the stime field of the
 *           thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the stime field there.
 */
static inline void account_group_system_time(struct task_struct *tsk,
                                             cputime_t cputime)
{
        struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

        if (!cputimer_running(tsk))
                return;

        atomic64_add(cputime, &cputimer->cputime_atomic.stime);
}

/**
 * account_group_exec_runtime - Maintain exec runtime for a thread group.
 *
 * @tsk: Pointer to task structure.
 * @ns: Time value by which to increment the sum_exec_runtime field
 *      of the thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the sum_exec_runtime field there.
 */
static inline void account_group_exec_runtime(struct task_struct *tsk,
                                              unsigned long long ns)
{
        struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

        if (!cputimer_running(tsk))
                return;

        atomic64_add(ns, &cputimer->cputime_atomic.sum_exec_runtime);
}
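
/*
 * Call-site sketch (assumed, simplified): these helpers are fed from the
 * regular time-accounting paths, e.g. the scheduling classes pass the newly
 * measured runtime delta and the tick code passes the cputime just charged
 * to the task:
 *
 *        account_group_exec_runtime(curtask, delta_exec);
 *        account_group_user_time(p, cputime_delta);
 *        account_group_system_time(p, cputime_delta);
 *
 * No lock is taken here: the thread-group sums live in cputime_atomic and are
 * updated with atomic64_add(), while cputimer_running() merely skips the work
 * when no POSIX CPU timer is sampling the group clock.
 */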