/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _KERNEL_EVENTS_INTERNAL_H
#define _KERNEL_EVENTS_INTERNAL_H

#include <linux/hardirq.h>
#include <linux/uaccess.h>

/* Buffer handling */

#define RING_BUFFER_WRITABLE		0x01

struct ring_buffer {
	atomic_t			refcount;
	struct rcu_head			rcu_head;
#ifdef CONFIG_PERF_USE_VMALLOC
	struct work_struct		work;
	int				page_order;	/* allocation order */
#endif
	int				nr_pages;	/* nr of data pages */
	int				overwrite;	/* can overwrite itself */
	int				paused;		/* can write into ring buffer */

	atomic_t			poll;		/* POLL_ for wakeups */

	local_t				head;		/* write position */
	local_t				nest;		/* nested writers */
	local_t				events;		/* event limit */
	local_t				wakeup;		/* wakeup stamp */
	local_t				lost;		/* nr records lost */

	long				watermark;	/* wakeup watermark */
	long				aux_watermark;
	/* poll crap */
	spinlock_t			event_lock;
	struct list_head		event_list;

	atomic_t			mmap_count;
	unsigned long			mmap_locked;
	struct user_struct		*mmap_user;

	/* AUX area */
	long				aux_head;
	local_t				aux_nest;
	long				aux_wakeup;	/* last aux_watermark boundary crossed by aux_head */
	unsigned long			aux_pgoff;
	int				aux_nr_pages;
	int				aux_overwrite;
	atomic_t			aux_mmap_count;
	unsigned long			aux_mmap_locked;
	void				(*free_aux)(void *);
	atomic_t			aux_refcount;
	void				**aux_pages;
	void				*aux_priv;

	struct perf_event_mmap_page	*user_page;
	void				*data_pages[0];
};
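
/*
 * Note on data_pages[]: it is declared as a zero-length (flexible) array,
 * i.e. the allocator is expected to size struct ring_buffer for the
 * trailing page pointers. rb_alloc() below is only declared here and
 * defined elsewhere, so this is a reading of the declaration rather than
 * a statement about the allocator.
 */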

extern void rb_free(struct ring_buffer *rb);

static inline void rb_free_rcu(struct rcu_head *rcu_head)
{
	struct ring_buffer *rb;

	rb = container_of(rcu_head, struct ring_buffer, rcu_head);
	rb_free(rb);
}

static inline void rb_toggle_paused(struct ring_buffer *rb, bool pause)
{
	if (!pause && rb->nr_pages)
		rb->paused = 0;
	else
		rb->paused = 1;
}

extern struct ring_buffer *
rb_alloc(int nr_pages, long watermark, int cpu, int flags);
extern void perf_event_wakeup(struct perf_event *event);

extern int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
			pgoff_t pgoff, int nr_pages, long watermark, int flags);
extern void rb_free_aux(struct ring_buffer *rb);
extern struct ring_buffer *ring_buffer_get(struct perf_event *event);
extern void ring_buffer_put(struct ring_buffer *rb);

static inline bool rb_has_aux(struct ring_buffer *rb)
{
	return !!rb->aux_nr_pages;
}

void perf_event_aux_event(struct perf_event *event, unsigned long head,
			  unsigned long size, u64 flags);

extern struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff);

#ifdef CONFIG_PERF_USE_VMALLOC
/*
 * Back perf_mmap() with vmalloc memory.
 *
 * Required for architectures that have d-cache aliasing issues.
 */
static inline int page_order(struct ring_buffer *rb)
{
	return rb->page_order;
}
#else
static inline int page_order(struct ring_buffer *rb)
{
	return 0;
}
#endif

static inline unsigned long perf_data_size(struct ring_buffer *rb)
{
	return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
}

static inline unsigned long perf_aux_size(struct ring_buffer *rb)
{
	return rb->aux_nr_pages << PAGE_SHIFT;
}
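
/*
 * The macro below, __DEFINE_OUTPUT_COPY_BODY(), expands to a copy loop:
 * it moves up to 'len' bytes into the ring buffer at handle->addr,
 * advances to the next data page (wrapping via the nr_pages - 1 mask,
 * so nr_pages is assumed to be a power of two) whenever the current page
 * is exhausted, and returns the number of bytes that could not be copied
 * (0 on full success).
 */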

#define __DEFINE_OUTPUT_COPY_BODY(advance_buf, memcpy_func, ...)	\
{									\
	unsigned long size, written;					\
									\
	do {								\
		size = min(handle->size, len);				\
		written = memcpy_func(__VA_ARGS__);			\
		written = size - written;				\
									\
		len -= written;						\
		handle->addr += written;				\
		if (advance_buf)					\
			buf += written;					\
		handle->size -= written;				\
		if (!handle->size) {					\
			struct ring_buffer *rb = handle->rb;		\
									\
			handle->page++;					\
			handle->page &= rb->nr_pages - 1;		\
			handle->addr = rb->data_pages[handle->page];	\
			handle->size = PAGE_SIZE << page_order(rb);	\
		}							\
	} while (len && written == size);				\
									\
	return len;							\
}

#define DEFINE_OUTPUT_COPY(func_name, memcpy_func)			\
static inline unsigned long						\
func_name(struct perf_output_handle *handle,				\
	  const void *buf, unsigned long len)				\
__DEFINE_OUTPUT_COPY_BODY(true, memcpy_func, handle->addr, buf, size)

static inline unsigned long
__output_custom(struct perf_output_handle *handle, perf_copy_f copy_func,
		const void *buf, unsigned long len)
{
	unsigned long orig_len = len;
	__DEFINE_OUTPUT_COPY_BODY(false, copy_func, handle->addr, buf,
				  orig_len - len, size)
}

static inline unsigned long
memcpy_common(void *dst, const void *src, unsigned long n)
{
	memcpy(dst, src, n);
	return 0;
}

DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)

static inline unsigned long
memcpy_skip(void *dst, const void *src, unsigned long n)
{
	return 0;
}

DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip)

#ifndef arch_perf_out_copy_user
#define arch_perf_out_copy_user arch_perf_out_copy_user

static inline unsigned long
arch_perf_out_copy_user(void *dst, const void *src, unsigned long n)
{
	unsigned long ret;

	pagefault_disable();
	ret = __copy_from_user_inatomic(dst, src, n);
	pagefault_enable();

	return ret;
}
#endif

DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
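
/*
 * All three generated helpers (__output_copy, __output_skip,
 * __output_copy_user) return the number of bytes left uncopied, so 0
 * means the whole 'len' made it into the buffer. A hypothetical caller
 * (sketch only, not part of this file) would check the result the same
 * way as copy_from_user():
 *
 *	if (__output_copy_user(handle, ubuf, len))
 *		handle the short copy (e.g. pad or abort the record);
 */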

/* Callchain handling */
extern struct perf_callchain_entry *
perf_callchain(struct perf_event *event, struct pt_regs *regs);

static inline int get_recursion_context(int *recursion)
{
	int rctx;

	if (unlikely(in_nmi()))
		rctx = 3;
	else if (in_irq())
		rctx = 2;
	else if (in_serving_softirq())
		rctx = 1;
	else
		rctx = 0;

	if (recursion[rctx])
		return -1;

	recursion[rctx]++;
	barrier();

	return rctx;
}

static inline void put_recursion_context(int *recursion, int rctx)
{
	barrier();
	recursion[rctx]--;
}

#ifdef CONFIG_HAVE_PERF_USER_STACK_DUMP
static inline bool arch_perf_have_user_stack_dump(void)
{
	return true;
}

#define perf_user_stack_pointer(regs) user_stack_pointer(regs)
#else
static inline bool arch_perf_have_user_stack_dump(void)
{
	return false;
}

#define perf_user_stack_pointer(regs) 0
#endif /* CONFIG_HAVE_PERF_USER_STACK_DUMP */

#endif /* _KERNEL_EVENTS_INTERNAL_H */