/* include/linux/context_tracking.h */
  1. #ifndef _LINUX_CONTEXT_TRACKING_H
  2. #define _LINUX_CONTEXT_TRACKING_H
  3. #include <linux/sched.h>
  4. #include <linux/vtime.h>
  5. #include <linux/context_tracking_state.h>
  6. #include <asm/ptrace.h>
  7. #ifdef CONFIG_CONTEXT_TRACKING
/* Enable context tracking on @cpu (set during boot / nohz_full setup). */
extern void context_tracking_cpu_set(int cpu);

/*
 * The double-underscore variants must be called with interrupts disabled;
 * the plain variants manage irq state themselves.
 */
extern void __context_tracking_enter(enum ctx_state state);
extern void __context_tracking_exit(enum ctx_state state);
extern void context_tracking_enter(enum ctx_state state);
extern void context_tracking_exit(enum ctx_state state);
extern void context_tracking_user_enter(void);
extern void context_tracking_user_exit(void);
  16. static inline void user_enter(void)
  17. {
  18. if (context_tracking_is_enabled())
  19. context_tracking_enter(CONTEXT_USER);
  20. }
  21. static inline void user_exit(void)
  22. {
  23. if (context_tracking_is_enabled())
  24. context_tracking_exit(CONTEXT_USER);
  25. }
  26. /* Called with interrupts disabled. */
  27. static inline void user_enter_irqoff(void)
  28. {
  29. if (context_tracking_is_enabled())
  30. __context_tracking_enter(CONTEXT_USER);
  31. }
  32. static inline void user_exit_irqoff(void)
  33. {
  34. if (context_tracking_is_enabled())
  35. __context_tracking_exit(CONTEXT_USER);
  36. }
  37. static inline enum ctx_state exception_enter(void)
  38. {
  39. enum ctx_state prev_ctx;
  40. if (!context_tracking_is_enabled())
  41. return 0;
  42. prev_ctx = this_cpu_read(context_tracking.state);
  43. if (prev_ctx != CONTEXT_KERNEL)
  44. context_tracking_exit(prev_ctx);
  45. return prev_ctx;
  46. }
  47. static inline void exception_exit(enum ctx_state prev_ctx)
  48. {
  49. if (context_tracking_is_enabled()) {
  50. if (prev_ctx != CONTEXT_KERNEL)
  51. context_tracking_enter(prev_ctx);
  52. }
  53. }
  54. /**
  55. * ct_state() - return the current context tracking state if known
  56. *
  57. * Returns the current cpu's context tracking state if context tracking
  58. * is enabled. If context tracking is disabled, returns
  59. * CONTEXT_DISABLED. This should be used primarily for debugging.
  60. */
  61. static inline enum ctx_state ct_state(void)
  62. {
  63. return context_tracking_is_enabled() ?
  64. this_cpu_read(context_tracking.state) : CONTEXT_DISABLED;
  65. }
  66. #else
/* !CONFIG_CONTEXT_TRACKING: all hooks compile away to no-ops. */
static inline void user_enter(void) { }
static inline void user_exit(void) { }
static inline void user_enter_irqoff(void) { }
static inline void user_exit_irqoff(void) { }
static inline enum ctx_state exception_enter(void) { return 0; }
static inline void exception_exit(enum ctx_state prev_ctx) { }
static inline enum ctx_state ct_state(void) { return CONTEXT_DISABLED; }
  74. #endif /* !CONFIG_CONTEXT_TRACKING */
  75. #define CT_WARN_ON(cond) WARN_ON(context_tracking_is_enabled() && (cond))
#ifdef CONFIG_CONTEXT_TRACKING_FORCE
/* Force-enable context tracking on every CPU at boot. */
extern void context_tracking_init(void);
#else
static inline void context_tracking_init(void) { }
#endif /* CONFIG_CONTEXT_TRACKING_FORCE */
  81. #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
/*
 * Enter guest mode from the host side; must be called with irqs disabled.
 *
 * Order matters: guest cputime accounting starts (or PF_VCPU is set)
 * before the context tracking state flips to CONTEXT_GUEST.
 */
static inline void guest_enter_irqoff(void)
{
	if (vtime_accounting_cpu_enabled())
		vtime_guest_enter(current);
	else
		current->flags |= PF_VCPU;
	if (context_tracking_is_enabled())
		__context_tracking_enter(CONTEXT_GUEST);
	/* KVM does not hold any references to rcu protected data when it
	 * switches CPU into a guest mode. In fact switching to a guest mode
	 * is very similar to exiting to userspace from rcu point of view. In
	 * addition CPU may stay in a guest mode for quite a long time (up to
	 * one time slice). Lets treat guest mode as quiescent state, just like
	 * we do with user-mode execution.
	 */
	if (!context_tracking_cpu_is_enabled())
		rcu_virt_note_context_switch(smp_processor_id());
}
/*
 * Leave guest mode; must be called with irqs disabled.
 *
 * Mirror of guest_enter_irqoff(): leave CONTEXT_GUEST first, then stop
 * guest cputime accounting (or clear PF_VCPU).
 */
static inline void guest_exit_irqoff(void)
{
	if (context_tracking_is_enabled())
		__context_tracking_exit(CONTEXT_GUEST);
	if (vtime_accounting_cpu_enabled())
		vtime_guest_exit(current);
	else
		current->flags &= ~PF_VCPU;
}
  110. #else
/*
 * !CONFIG_VIRT_CPU_ACCOUNTING_GEN variant: tick-based accounting only,
 * no context tracking transition.
 */
static inline void guest_enter_irqoff(void)
{
	/*
	 * This is running in ioctl context so its safe
	 * to assume that it's the stime pending cputime
	 * to flush.
	 */
	vtime_account_system(current);
	current->flags |= PF_VCPU;
	/* Guest mode counts as an RCU quiescent state, like userspace. */
	rcu_virt_note_context_switch(smp_processor_id());
}
/* !CONFIG_VIRT_CPU_ACCOUNTING_GEN variant of guest_exit_irqoff(). */
static inline void guest_exit_irqoff(void)
{
	/* Flush the guest cputime we spent on the guest */
	vtime_account_system(current);
	current->flags &= ~PF_VCPU;
}
  128. #endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */
  129. static inline void guest_enter(void)
  130. {
  131. unsigned long flags;
  132. local_irq_save(flags);
  133. guest_enter_irqoff();
  134. local_irq_restore(flags);
  135. }
  136. static inline void guest_exit(void)
  137. {
  138. unsigned long flags;
  139. local_irq_save(flags);
  140. guest_exit_irqoff();
  141. local_irq_restore(flags);
  142. }
  143. #endif