sched_clock.c

/*
 * sched_clock for unstable cpu clocks
 *
 *  Copyright (C) 2008 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 *  Updates and enhancements:
 *   Copyright (C) 2008 Red Hat, Inc. Steven Rostedt <srostedt@redhat.com>
 *
 * Based on code by:
 *   Ingo Molnar <mingo@redhat.com>
 *   Guillaume Chazarain <guichaz@gmail.com>
 *
 *
 * What:
 *
 * cpu_clock(i) provides a fast (execution time) high resolution
 * clock with bounded drift between CPUs. The value of cpu_clock(i)
 * is monotonic for constant i. The timestamp returned is in nanoseconds.
 *
 * ######################### BIG FAT WARNING ##########################
 * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can #
 * # go backwards !!                                                  #
 * ####################################################################
 *
 * There is no strict promise about the base, although it tends to start
 * at 0 on boot (but people really shouldn't rely on that).
 *
 * cpu_clock(i)       -- can be used from any context, including NMI.
 * sched_clock_cpu(i) -- must be used with local IRQs disabled (implied by NMI)
 * local_clock()      -- is cpu_clock() on the current cpu.
 *
 * How:
 *
 * The implementation either uses sched_clock() when
 * !CONFIG_HAVE_UNSTABLE_SCHED_CLOCK, which means in that case the
 * sched_clock() is assumed to provide these properties (mostly it means
 * the architecture provides a globally synchronized highres time source).
 *
 * Otherwise it tries to create a semi-stable clock from a mixture of other
 * clocks, including:
 *
 *  - GTOD (clock monotonic)
 *  - sched_clock()
 *  - explicit idle events
 *
 * We use GTOD as base and use sched_clock() deltas to improve resolution. The
 * deltas are filtered to provide monotonicity and to keep them within an
 * expected window.
 *
 * Furthermore, explicit sleep and wakeup hooks allow us to account for time
 * that is otherwise invisible (TSC gets stopped).
 *
 *
 * Notes:
 *
 * The !IRQ-safety of sched_clock() and sched_clock_cpu() comes from things
 * like cpufreq interrupts that can change the base clock (TSC) multiplier
 * and cause funny jumps in time -- although the filtering provided by
 * sched_clock_cpu() should mitigate serious artifacts, we cannot rely on it
 * in general, since for !CONFIG_HAVE_UNSTABLE_SCHED_CLOCK we fully rely on
 * sched_clock().
 */
#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/ktime.h>
#include <linux/sched.h>
/*
 * Scheduler clock - returns current time in nanosec units.
 * This is the default implementation.
 * Architectures and sub-architectures can override this.
 */
unsigned long long __attribute__((weak)) sched_clock(void)
{
	return (unsigned long long)(jiffies - INITIAL_JIFFIES)
					* (NSEC_PER_SEC / HZ);
}
EXPORT_SYMBOL_GPL(sched_clock);
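
/*
 * Note on the fallback above: it only advances once per timer tick, in
 * steps of NSEC_PER_SEC / HZ (e.g. 1,000,000 ns at HZ=1000), so
 * architectures normally override it with a high resolution counter
 * such as the TSC.
 */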

__read_mostly int sched_clock_running;

#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
__read_mostly int sched_clock_stable;

struct sched_clock_data {
	u64		tick_raw;
	u64		tick_gtod;
	u64		clock;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data);

static inline struct sched_clock_data *this_scd(void)
{
	return &__get_cpu_var(sched_clock_data);
}

static inline struct sched_clock_data *cpu_sdc(int cpu)
{
	return &per_cpu(sched_clock_data, cpu);
}

void sched_clock_init(void)
{
	u64 ktime_now = ktime_to_ns(ktime_get());
	int cpu;

	for_each_possible_cpu(cpu) {
		struct sched_clock_data *scd = cpu_sdc(cpu);

		scd->tick_raw = 0;
		scd->tick_gtod = ktime_now;
		scd->clock = ktime_now;
	}

	sched_clock_running = 1;
}
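
/*
 * After the loop above, every CPU starts from the same GTOD reading.
 * tick_raw starts at 0, so the first sched_clock() delta covers
 * boot-to-now; if that is implausibly large, the per-tick window in
 * sched_clock_local() below clamps it.
 */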

/*
 * min, max except they take wrapping into account
 */
static inline u64 wrap_min(u64 x, u64 y)
{
	return (s64)(x - y) < 0 ? x : y;
}

static inline u64 wrap_max(u64 x, u64 y)
{
	return (s64)(x - y) > 0 ? x : y;
}
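
/*
 * The signed subtraction makes these robust against u64 wrap-around:
 * for x = 1 and y = ULLONG_MAX - 1, (s64)(x - y) == 3 > 0, so
 * wrap_max() picks x as the "later" value even though x < y when
 * compared as plain unsigned integers.
 */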

/*
 * update the percpu scd from the raw @now value
 *
 *  - filter out backward motion
 *  - use the GTOD tick value to create a window to filter crazy TSC values
 */
static u64 sched_clock_local(struct sched_clock_data *scd)
{
	u64 now, clock, old_clock, min_clock, max_clock;
	s64 delta;

again:
	now = sched_clock();
	delta = now - scd->tick_raw;
	if (unlikely(delta < 0))
		delta = 0;

	old_clock = scd->clock;

	/*
	 * scd->clock = clamp(scd->tick_gtod + delta,
	 *		      max(scd->tick_gtod, scd->clock),
	 *		      scd->tick_gtod + TICK_NSEC);
	 */

	clock = scd->tick_gtod + delta;
	min_clock = wrap_max(scd->tick_gtod, old_clock);
	max_clock = wrap_max(old_clock, scd->tick_gtod + TICK_NSEC);

	clock = wrap_max(clock, min_clock);
	clock = wrap_min(clock, max_clock);

	if (cmpxchg64(&scd->clock, old_clock, clock) != old_clock)
		goto again;

	return clock;
}
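
/*
 * Worked example of the clamp above: with tick_gtod = 1000,
 * old_clock = 1020 and TICK_NSEC = 1000000, a raw delta of 5000 gives
 * clock = 6000, which lies inside [1020, 1001000] and is used as-is;
 * a crazy delta of 10000000 would be clamped down to 1001000 instead.
 */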

static u64 sched_clock_remote(struct sched_clock_data *scd)
{
	struct sched_clock_data *my_scd = this_scd();
	u64 this_clock, remote_clock;
	u64 *ptr, old_val, val;

	sched_clock_local(my_scd);
again:
	this_clock = my_scd->clock;
	remote_clock = scd->clock;

	/*
	 * Use the opportunity that we have both clock values
	 * in hand to couple the two clocks: we take the
	 * larger time as the latest time for both
	 * runqueues. (this creates monotonic movement)
	 */
	if (likely((s64)(remote_clock - this_clock) < 0)) {
		ptr = &scd->clock;
		old_val = remote_clock;
		val = this_clock;
	} else {
		/*
		 * Should be rare, but possible:
		 */
		ptr = &my_scd->clock;
		old_val = this_clock;
		val = remote_clock;
	}

	if (cmpxchg64(ptr, old_val, val) != old_val)
		goto again;

	return val;
}
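
/*
 * The coupling above is lockless and can race: if either clock was
 * updated between the reads and the cmpxchg64(), the compare fails
 * and we loop back to re-read both values.
 */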

/*
 * Similar to cpu_clock(), but requires local IRQs to be disabled.
 *
 * See cpu_clock().
 */
u64 sched_clock_cpu(int cpu)
{
	struct sched_clock_data *scd;
	u64 clock;

	WARN_ON_ONCE(!irqs_disabled());

	if (sched_clock_stable)
		return sched_clock();

	if (unlikely(!sched_clock_running))
		return 0ull;

	scd = cpu_sdc(cpu);

	if (cpu != smp_processor_id())
		clock = sched_clock_remote(scd);
	else
		clock = sched_clock_local(scd);

	return clock;
}

void sched_clock_tick(void)
{
	struct sched_clock_data *scd;
	u64 now, now_gtod;

	if (sched_clock_stable)
		return;

	if (unlikely(!sched_clock_running))
		return;

	WARN_ON_ONCE(!irqs_disabled());

	scd = this_scd();
	now_gtod = ktime_to_ns(ktime_get());
	now = sched_clock();

	scd->tick_raw = now;
	scd->tick_gtod = now_gtod;
	sched_clock_local(scd);
}
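
/*
 * Because the tick re-anchors tick_raw and tick_gtod, sched_clock_local()
 * can never advance more than roughly TICK_NSEC past the last GTOD
 * reading before the clamp window pulls it back.
 */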

/*
 * We are going deep-idle (irqs are disabled):
 */
void sched_clock_idle_sleep_event(void)
{
	sched_clock_cpu(smp_processor_id());
}
EXPORT_SYMBOL_GPL(sched_clock_idle_sleep_event);

/*
 * We just idled delta nanoseconds (called with irqs disabled):
 */
void sched_clock_idle_wakeup_event(u64 delta_ns)
{
	if (timekeeping_suspended)
		return;

	sched_clock_tick();
	touch_softlockup_watchdog();
}
EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);

/*
 * As outlined at the top, provides a fast, high resolution, nanosecond
 * time source that is monotonic per cpu argument and has bounded drift
 * between cpus.
 *
 * ######################### BIG FAT WARNING ##########################
 * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can #
 * # go backwards !!                                                  #
 * ####################################################################
 */
u64 cpu_clock(int cpu)
{
	u64 clock;
	unsigned long flags;

	local_irq_save(flags);
	clock = sched_clock_cpu(cpu);
	local_irq_restore(flags);

	return clock;
}
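
/*
 * Typical (hypothetical) use: sample the same cpu twice and subtract;
 * only same-cpu differences are meaningful:
 *
 *	u64 t0 = cpu_clock(cpu);
 *	do_something();
 *	u64 t1 = cpu_clock(cpu);
 *	pr_info("took %llu ns\n", t1 - t0);
 *
 * where do_something() stands in for whatever is being timed.
 */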

/*
 * Similar to cpu_clock() for the current cpu. Time will only be observed
 * to be monotonic if care is taken to only compare timestamps taken on the
 * same CPU.
 *
 * See cpu_clock().
 */
u64 local_clock(void)
{
	u64 clock;
	unsigned long flags;

	local_irq_save(flags);
	clock = sched_clock_cpu(smp_processor_id());
	local_irq_restore(flags);

	return clock;
}

#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */

void sched_clock_init(void)
{
	sched_clock_running = 1;
}

u64 sched_clock_cpu(int cpu)
{
	if (unlikely(!sched_clock_running))
		return 0;

	return sched_clock();
}

u64 cpu_clock(int cpu)
{
	return sched_clock_cpu(cpu);
}

u64 local_clock(void)
{
	return sched_clock_cpu(0);
}

#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */

EXPORT_SYMBOL_GPL(cpu_clock);
EXPORT_SYMBOL_GPL(local_clock);