  1. /******************************************************************************
  2. * arch/ia64/xen/time.c
  3. *
  4. * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
  5. * VA Linux Systems Japan K.K.
  6. *
  7. * This program is free software; you can redistribute it and/or modify
  8. * it under the terms of the GNU General Public License as published by
  9. * the Free Software Foundation; either version 2 of the License, or
  10. * (at your option) any later version.
  11. *
  12. * This program is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  15. * GNU General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU General Public License
  18. * along with this program; if not, write to the Free Software
  19. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  20. *
  21. */
  22. #include <linux/delay.h>
  23. #include <linux/kernel_stat.h>
  24. #include <linux/posix-timers.h>
  25. #include <linux/irq.h>
  26. #include <linux/clocksource.h>
  27. #include <asm/timex.h>
  28. #include <asm/xen/hypervisor.h>
  29. #include <xen/interface/vcpu.h>
  30. #include "../kernel/fsyscall_gtod_data.h"
/* Per-CPU runstate area shared with Xen; registered with the hypervisor
 * in xen_init_missing_ticks_accounting(). */
static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate);
/* Baseline (ns) of stolen time (runnable + offline) already accounted. */
static DEFINE_PER_CPU(unsigned long, xen_stolen_time);
/* Baseline (ns) of blocked time already accounted. */
static DEFINE_PER_CPU(unsigned long, xen_blocked_time);
  34. /* taken from i386/kernel/time-xen.c */
  35. static void xen_init_missing_ticks_accounting(int cpu)
  36. {
  37. struct vcpu_register_runstate_memory_area area;
  38. struct vcpu_runstate_info *runstate = &per_cpu(xen_runstate, cpu);
  39. int rc;
  40. memset(runstate, 0, sizeof(*runstate));
  41. area.addr.v = runstate;
  42. rc = HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area, cpu,
  43. &area);
  44. WARN_ON(rc && rc != -ENOSYS);
  45. per_cpu(xen_blocked_time, cpu) = runstate->time[RUNSTATE_blocked];
  46. per_cpu(xen_stolen_time, cpu) = runstate->time[RUNSTATE_runnable]
  47. + runstate->time[RUNSTATE_offline];
  48. }
/*
 * Runstate accounting
 */
/* stolen from arch/x86/xen/time.c */
/*
 * Copy a consistent snapshot of this CPU's runstate info into *res.
 * The hypervisor updates the shared area around state_entry_time,
 * so we retry until that field reads the same before and after the
 * copy.  Must be called with preemption disabled so "this CPU"
 * cannot change underneath us (enforced by the BUG_ON).
 */
static void get_runstate_snapshot(struct vcpu_runstate_info *res)
{
	u64 state_time;
	struct vcpu_runstate_info *state;

	BUG_ON(preemptible());

	state = &__get_cpu_var(xen_runstate);

	/*
	 * The runstate info is always updated by the hypervisor on
	 * the current CPU, so there's no need to use anything
	 * stronger than a compiler barrier when fetching it.
	 */
	do {
		state_time = state->state_entry_time;
		rmb();
		*res = *state;
		rmb();
	} while (state->state_entry_time != state_time);
}
  71. #define NS_PER_TICK (1000000000LL/HZ)
  72. static unsigned long
  73. consider_steal_time(unsigned long new_itm)
  74. {
  75. unsigned long stolen, blocked;
  76. unsigned long delta_itm = 0, stolentick = 0;
  77. int cpu = smp_processor_id();
  78. struct vcpu_runstate_info runstate;
  79. struct task_struct *p = current;
  80. get_runstate_snapshot(&runstate);
  81. /*
  82. * Check for vcpu migration effect
  83. * In this case, itc value is reversed.
  84. * This causes huge stolen value.
  85. * This function just checks and reject this effect.
  86. */
  87. if (!time_after_eq(runstate.time[RUNSTATE_blocked],
  88. per_cpu(xen_blocked_time, cpu)))
  89. blocked = 0;
  90. if (!time_after_eq(runstate.time[RUNSTATE_runnable] +
  91. runstate.time[RUNSTATE_offline],
  92. per_cpu(xen_stolen_time, cpu)))
  93. stolen = 0;
  94. if (!time_after(delta_itm + new_itm, ia64_get_itc()))
  95. stolentick = ia64_get_itc() - new_itm;
  96. do_div(stolentick, NS_PER_TICK);
  97. stolentick++;
  98. do_div(stolen, NS_PER_TICK);
  99. if (stolen > stolentick)
  100. stolen = stolentick;
  101. stolentick -= stolen;
  102. do_div(blocked, NS_PER_TICK);
  103. if (blocked > stolentick)
  104. blocked = stolentick;
  105. if (stolen > 0 || blocked > 0) {
  106. account_steal_ticks(stolen);
  107. account_idle_ticks(blocked);
  108. run_local_timers();
  109. rcu_check_callbacks(cpu, user_mode(get_irq_regs()));
  110. scheduler_tick();
  111. run_posix_cpu_timers(p);
  112. delta_itm += local_cpu_data->itm_delta * (stolen + blocked);
  113. if (cpu == time_keeper_id)
  114. xtime_update(stolen + blocked);
  115. local_cpu_data->itm_next = delta_itm + new_itm;
  116. per_cpu(xen_stolen_time, cpu) += NS_PER_TICK * stolen;
  117. per_cpu(xen_blocked_time, cpu) += NS_PER_TICK * blocked;
  118. }
  119. return delta_itm;
  120. }
  121. static int xen_do_steal_accounting(unsigned long *new_itm)
  122. {
  123. unsigned long delta_itm;
  124. delta_itm = consider_steal_time(*new_itm);
  125. *new_itm += delta_itm;
  126. if (time_after(*new_itm, ia64_get_itc()) && delta_itm)
  127. return 1;
  128. return 0;
  129. }
  130. static void xen_itc_jitter_data_reset(void)
  131. {
  132. u64 lcycle, ret;
  133. do {
  134. lcycle = itc_jitter_data.itc_lastcycle;
  135. ret = cmpxchg(&itc_jitter_data.itc_lastcycle, lcycle, 0);
  136. } while (unlikely(ret != lcycle));
  137. }
  138. /* based on xen_sched_clock() in arch/x86/xen/time.c. */
  139. /*
  140. * This relies on HAVE_UNSTABLE_SCHED_CLOCK. If it can't be defined,
  141. * something similar logic should be implemented here.
  142. */
  143. /*
  144. * Xen sched_clock implementation. Returns the number of unstolen
  145. * nanoseconds, which is nanoseconds the VCPU spent in RUNNING+BLOCKED
  146. * states.
  147. */
  148. static unsigned long long xen_sched_clock(void)
  149. {
  150. struct vcpu_runstate_info runstate;
  151. unsigned long long now;
  152. unsigned long long offset;
  153. unsigned long long ret;
  154. /*
  155. * Ideally sched_clock should be called on a per-cpu basis
  156. * anyway, so preempt should already be disabled, but that's
  157. * not current practice at the moment.
  158. */
  159. preempt_disable();
  160. /*
  161. * both ia64_native_sched_clock() and xen's runstate are
  162. * based on mAR.ITC. So difference of them makes sense.
  163. */
  164. now = ia64_native_sched_clock();
  165. get_runstate_snapshot(&runstate);
  166. WARN_ON(runstate.state != RUNSTATE_running);
  167. offset = 0;
  168. if (now > runstate.state_entry_time)
  169. offset = now - runstate.state_entry_time;
  170. ret = runstate.time[RUNSTATE_blocked] +
  171. runstate.time[RUNSTATE_running] +
  172. offset;
  173. preempt_enable();
  174. return ret;
  175. }
/*
 * Hooks handed to the ia64 paravirt time layer when running on Xen;
 * __initdata because the table is consumed during boot.
 */
struct pv_time_ops xen_time_ops __initdata = {
	.init_missing_ticks_accounting = xen_init_missing_ticks_accounting,
	.do_steal_accounting = xen_do_steal_accounting,
	.clocksource_resume = xen_itc_jitter_data_reset,
	.sched_clock = xen_sched_clock,
};
/* Called after suspend, to resume time. */
static void xen_local_tick_resume(void)
{
	/* Just trigger a tick. */
	ia64_cpu_local_tick();
	/* Suppress a spurious soft-lockup report after the long pause. */
	touch_softlockup_watchdog();
}
  189. void
  190. xen_timer_resume(void)
  191. {
  192. unsigned int cpu;
  193. xen_local_tick_resume();
  194. for_each_online_cpu(cpu)
  195. xen_init_missing_ticks_accounting(cpu);
  196. }
/* smp_call_function callback: resume the tick and re-seed runstate
 * accounting on the CPU it runs on. */
static void ia64_cpu_local_tick_fn(void *unused)
{
	xen_local_tick_resume();
	xen_init_missing_ticks_accounting(smp_processor_id());
}
/*
 * Resume the timer on all other online CPUs (the APs); the final
 * argument 1 makes smp_call_function wait until every CPU has
 * finished ia64_cpu_local_tick_fn.
 */
void
xen_timer_resume_on_aps(void)
{
	smp_call_function(&ia64_cpu_local_tick_fn, NULL, 1);
}