/* internal.h */

#ifndef _KERNEL_EVENTS_INTERNAL_H
#define _KERNEL_EVENTS_INTERNAL_H

#include <linux/hardirq.h>
#include <linux/uaccess.h>

/* Buffer handling */

#define RING_BUFFER_WRITABLE		0x01

struct ring_buffer {
	atomic_t			refcount;
	struct rcu_head			rcu_head;
#ifdef CONFIG_PERF_USE_VMALLOC
	struct work_struct		work;
	int				page_order;	/* allocation order */
#endif
	int				nr_pages;	/* nr of data pages */
	int				overwrite;	/* can overwrite itself */
	int				paused;		/* writes are disabled when set */

	atomic_t			poll;		/* POLL_ for wakeups */

	local_t				head;		/* write position */
	local_t				nest;		/* nested writers */
	local_t				events;		/* event limit */
	local_t				wakeup;		/* wakeup stamp */
	local_t				lost;		/* nr records lost */

	long				watermark;	/* wakeup watermark */
	long				aux_watermark;

	/* poll crap */
	spinlock_t			event_lock;
	struct list_head		event_list;

	atomic_t			mmap_count;
	unsigned long			mmap_locked;
	struct user_struct		*mmap_user;

	/* AUX area */
	local_t				aux_head;
	local_t				aux_nest;
	local_t				aux_wakeup;
	unsigned long			aux_pgoff;
	int				aux_nr_pages;
	int				aux_overwrite;
	atomic_t			aux_mmap_count;
	unsigned long			aux_mmap_locked;
	void				(*free_aux)(void *);
	atomic_t			aux_refcount;
	void				**aux_pages;
	void				*aux_priv;

	struct perf_event_mmap_page	*user_page;
	void				*data_pages[0];
};
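/*
 * data_pages[] is a C flexible array member, so the page-pointer table is
 * carved out of the same allocation as the struct itself. A minimal sketch
 * of the sizing arithmetic an allocator would do (rb_alloc() in
 * ring_buffer.c is the authoritative version; this is illustrative only):
 *
 *	size_t sz = offsetof(struct ring_buffer, data_pages) +
 *		    nr_pages * sizeof(void *);
 *	struct ring_buffer *rb = kzalloc(sz, GFP_KERNEL);
 */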
extern void rb_free(struct ring_buffer *rb);

static inline void rb_free_rcu(struct rcu_head *rcu_head)
{
	struct ring_buffer *rb;

	rb = container_of(rcu_head, struct ring_buffer, rcu_head);
	rb_free(rb);
}
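/*
 * rb_free_rcu() is shaped as an RCU callback: the final reference is
 * typically dropped with something like
 *
 *	call_rcu(&rb->rcu_head, rb_free_rcu);
 *
 * so the buffer is only freed after a grace period, once concurrent RCU
 * readers can no longer be holding a pointer to it.
 */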
static inline void rb_toggle_paused(struct ring_buffer *rb, bool pause)
{
	if (!pause && rb->nr_pages)
		rb->paused = 0;
	else
		rb->paused = 1;
}
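/*
 * Note the asymmetry above: unpausing is refused when nr_pages == 0 (for
 * example a buffer that only carries an AUX area), so such a buffer stays
 * paused no matter what the caller asks for.
 */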
extern struct ring_buffer *
rb_alloc(int nr_pages, long watermark, int cpu, int flags);
extern void perf_event_wakeup(struct perf_event *event);
extern int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
			pgoff_t pgoff, int nr_pages, long watermark, int flags);
extern void rb_free_aux(struct ring_buffer *rb);
extern struct ring_buffer *ring_buffer_get(struct perf_event *event);
extern void ring_buffer_put(struct ring_buffer *rb);

static inline bool rb_has_aux(struct ring_buffer *rb)
{
	return !!rb->aux_nr_pages;
}
void perf_event_aux_event(struct perf_event *event, unsigned long head,
			  unsigned long size, u64 flags);

extern struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff);
#ifdef CONFIG_PERF_USE_VMALLOC
/*
 * Back perf_mmap() with vmalloc memory.
 *
 * Required for architectures that have d-cache aliasing issues.
 */
static inline int page_order(struct ring_buffer *rb)
{
	return rb->page_order;
}
#else
static inline int page_order(struct ring_buffer *rb)
{
	return 0;
}
#endif
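/*
 * With CONFIG_PERF_USE_VMALLOC the data area is one vmalloc allocation
 * treated as a single data "page" of order page_order(); without it, each
 * data page stands alone and the order is 0. perf_data_size() below folds
 * this into the byte count either way.
 */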
static inline unsigned long perf_data_size(struct ring_buffer *rb)
{
	return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
}

static inline unsigned long perf_aux_size(struct ring_buffer *rb)
{
	return rb->aux_nr_pages << PAGE_SHIFT;
}
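/*
 * Worked example: with PAGE_SHIFT == 12 (4 KiB pages), nr_pages == 8 and
 * page_order() == 0, perf_data_size() is 8 << 12 = 32768 bytes (32 KiB);
 * a 16-page AUX area gives perf_aux_size() == 64 KiB.
 */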
#define __DEFINE_OUTPUT_COPY_BODY(advance_buf, memcpy_func, ...)	\
{									\
	unsigned long size, written;					\
									\
	do {								\
		size = min(handle->size, len);				\
		written = memcpy_func(__VA_ARGS__);			\
		written = size - written;				\
									\
		len -= written;						\
		handle->addr += written;				\
		if (advance_buf)					\
			buf += written;					\
		handle->size -= written;				\
		if (!handle->size) {					\
			struct ring_buffer *rb = handle->rb;		\
									\
			handle->page++;					\
			handle->page &= rb->nr_pages - 1;		\
			handle->addr = rb->data_pages[handle->page];	\
			handle->size = PAGE_SIZE << page_order(rb);	\
		}							\
	} while (len && written == size);				\
									\
	return len;							\
}
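/*
 * Contract of the copy loop above: memcpy_func must return the number of
 * bytes it did NOT copy (0 on full success), mirroring the
 * copy_from_user() convention. The body advances the output handle,
 * wrapping from page to page via "handle->page &= rb->nr_pages - 1",
 * which assumes nr_pages is a power of two. The expansion returns the
 * number of bytes left uncopied, so 0 means everything was written.
 */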
#define DEFINE_OUTPUT_COPY(func_name, memcpy_func)			\
static inline unsigned long						\
func_name(struct perf_output_handle *handle,				\
	  const void *buf, unsigned long len)				\
__DEFINE_OUTPUT_COPY_BODY(true, memcpy_func, handle->addr, buf, size)

static inline unsigned long
__output_custom(struct perf_output_handle *handle, perf_copy_f copy_func,
		const void *buf, unsigned long len)
{
	unsigned long orig_len = len;

	__DEFINE_OUTPUT_COPY_BODY(false, copy_func, handle->addr, buf,
				  orig_len - len, size)
}

static inline unsigned long
memcpy_common(void *dst, const void *src, unsigned long n)
{
	memcpy(dst, src, n);
	return 0;
}

DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)
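/*
 * The line above expands to:
 *
 *	static inline unsigned long
 *	__output_copy(struct perf_output_handle *handle,
 *		      const void *buf, unsigned long len)
 *
 * i.e. a kernel-to-buffer copy that never reports a partial chunk, since
 * memcpy_common() always returns 0 bytes missed.
 */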
static inline unsigned long
memcpy_skip(void *dst, const void *src, unsigned long n)
{
	return 0;
}

DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip)
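/*
 * __output_skip() runs the same loop with memcpy_skip(), so it only
 * advances the handle by len bytes without storing anything: useful for
 * reserving space that is filled in (or deliberately left) later.
 */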
#ifndef arch_perf_out_copy_user
#define arch_perf_out_copy_user arch_perf_out_copy_user

static inline unsigned long
arch_perf_out_copy_user(void *dst, const void *src, unsigned long n)
{
	unsigned long ret;

	pagefault_disable();
	ret = __copy_from_user_inatomic(dst, src, n);
	pagefault_enable();

	return ret;
}
#endif

DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
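/*
 * __output_copy_user() inherits __copy_from_user_inatomic()'s return
 * convention: a non-zero result is the number of bytes that could not be
 * read from user space (e.g. the page was not resident while page faults
 * are disabled), at which point the copy loop stops.
 */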
/* Callchain handling */
extern struct perf_callchain_entry *
perf_callchain(struct perf_event *event, struct pt_regs *regs);

static inline int get_recursion_context(int *recursion)
{
	int rctx;

	if (in_nmi())
		rctx = 3;
	else if (in_irq())
		rctx = 2;
	else if (in_softirq())
		rctx = 1;
	else
		rctx = 0;

	if (recursion[rctx])
		return -1;

	recursion[rctx]++;
	barrier();

	return rctx;
}
static inline void put_recursion_context(int *recursion, int rctx)
{
	barrier();
	recursion[rctx]--;
}
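/*
 * get/put_recursion_context() bracket a non-reentrant section per
 * context (task, softirq, hardirq, NMI). A hedged usage sketch; the
 * "my_recursion" array is hypothetical (callchain.c keeps a per-CPU one
 * sized PERF_NR_CONTEXTS):
 *
 *	static DEFINE_PER_CPU(int, my_recursion[4]);	// hypothetical
 *
 *	int rctx = get_recursion_context(this_cpu_ptr(my_recursion));
 *	if (rctx < 0)
 *		return;		// already active in this context
 *	// ... do the non-reentrant work ...
 *	put_recursion_context(this_cpu_ptr(my_recursion), rctx);
 */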
#ifdef CONFIG_HAVE_PERF_USER_STACK_DUMP
static inline bool arch_perf_have_user_stack_dump(void)
{
	return true;
}

#define perf_user_stack_pointer(regs) user_stack_pointer(regs)
#else
static inline bool arch_perf_have_user_stack_dump(void)
{
	return false;
}

#define perf_user_stack_pointer(regs) 0
#endif /* CONFIG_HAVE_PERF_USER_STACK_DUMP */

#endif /* _KERNEL_EVENTS_INTERNAL_H */