internal.h 5.1 KB

#ifndef _KERNEL_EVENTS_INTERNAL_H
#define _KERNEL_EVENTS_INTERNAL_H

#include <linux/hardirq.h>
#include <linux/uaccess.h>

/* Buffer handling */

#define RING_BUFFER_WRITABLE		0x01

struct ring_buffer {
	atomic_t			refcount;
	struct rcu_head			rcu_head;
#ifdef CONFIG_PERF_USE_VMALLOC
	struct work_struct		work;
	int				page_order;	/* allocation order  */
#endif
	int				nr_pages;	/* nr of data pages  */
	int				writable;	/* are we writable   */

	atomic_t			poll;		/* POLL_ for wakeups */

	local_t				head;		/* write position    */
	local_t				nest;		/* nested writers    */
	local_t				events;		/* event limit       */
	local_t				wakeup;		/* wakeup stamp      */
	local_t				lost;		/* nr records lost   */

	long				watermark;	/* wakeup watermark  */
	long				aux_watermark;
	/* poll crap */
	spinlock_t			event_lock;
	struct list_head		event_list;

	atomic_t			mmap_count;
	unsigned long			mmap_locked;
	struct user_struct		*mmap_user;

	/* AUX area */
	local_t				aux_wakeup;
	unsigned long			aux_pgoff;
	int				aux_nr_pages;
	int				aux_overwrite;
	atomic_t			aux_mmap_count;
	unsigned long			aux_mmap_locked;
	void				(*free_aux)(void *);
	atomic_t			aux_refcount;
	void				**aux_pages;
	void				*aux_priv;

	struct perf_event_mmap_page	*user_page;
	void				*data_pages[0];
};
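
/*
 * Note: data_pages[] is a flexible array member in the old
 * zero-length-array style; its slots are allocated together with the
 * struct itself. With CONFIG_PERF_USE_VMALLOC the buffer is one
 * contiguous vmalloc area, so a single data_pages[] slot covers
 * 2^page_order pages (see rb_alloc() in kernel/events/ring_buffer.c).
 */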

extern void rb_free(struct ring_buffer *rb);
extern struct ring_buffer *
rb_alloc(int nr_pages, long watermark, int cpu, int flags);
extern void perf_event_wakeup(struct perf_event *event);

extern int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
			pgoff_t pgoff, int nr_pages, long watermark, int flags);
extern void rb_free_aux(struct ring_buffer *rb);

static inline bool rb_has_aux(struct ring_buffer *rb)
{
	return !!rb->aux_nr_pages;
}

extern void
perf_event_header__init_id(struct perf_event_header *header,
			   struct perf_sample_data *data,
			   struct perf_event *event);
extern void
perf_event__output_id_sample(struct perf_event *event,
			     struct perf_output_handle *handle,
			     struct perf_sample_data *sample);

extern struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff);
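
/*
 * perf_mmap_to_page() translates a page offset inside the perf mmap()
 * region into its backing struct page: offset 0 is the user control
 * page (user_page), the data pages follow, and the AUX pages start at
 * aux_pgoff (implemented in kernel/events/ring_buffer.c).
 */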

#ifdef CONFIG_PERF_USE_VMALLOC
/*
 * Back perf_mmap() with vmalloc memory.
 *
 * Required for architectures that have d-cache aliasing issues.
 */
static inline int page_order(struct ring_buffer *rb)
{
	return rb->page_order;
}
#else
static inline int page_order(struct ring_buffer *rb)
{
	return 0;
}
#endif
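
/*
 * Without vmalloc backing every data page is a separate order-0
 * allocation, so the effective allocation order is always 0 and struct
 * ring_buffer does not carry the page_order field at all.
 */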

static inline unsigned long perf_data_size(struct ring_buffer *rb)
{
	return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
}

static inline unsigned long perf_aux_size(struct ring_buffer *rb)
{
	return rb->aux_nr_pages << PAGE_SHIFT;
}
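
/*
 * Worked example: with nr_pages = 8 and page_order() == 0 on a system
 * with 4 KiB pages (PAGE_SHIFT == 12), perf_data_size() returns
 * 8 << 12 = 32768 bytes.
 */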

#define DEFINE_OUTPUT_COPY(func_name, memcpy_func)			\
static inline unsigned long						\
func_name(struct perf_output_handle *handle,				\
	  const void *buf, unsigned long len)				\
{									\
	unsigned long size, written;					\
									\
	do {								\
		size = min(handle->size, len);				\
		written = memcpy_func(handle->addr, buf, size);		\
		written = size - written;				\
									\
		len -= written;						\
		handle->addr += written;				\
		buf += written;						\
		handle->size -= written;				\
		if (!handle->size) {					\
			struct ring_buffer *rb = handle->rb;		\
									\
			handle->page++;					\
			handle->page &= rb->nr_pages - 1;		\
			handle->addr = rb->data_pages[handle->page];	\
			handle->size = PAGE_SIZE << page_order(rb);	\
		}							\
	} while (len && written == size);				\
									\
	return len;							\
}
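
/*
 * The generated copy loop relies on two conventions: nr_pages is a
 * power of two, so "handle->page &= rb->nr_pages - 1" wraps around the
 * page array cheaply, and memcpy_func() returns the number of bytes it
 * failed to copy (0 on full success), mirroring
 * __copy_from_user_inatomic(). The generated function itself returns
 * how much of @len is left uncopied.
 */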

static inline unsigned long
memcpy_common(void *dst, const void *src, unsigned long n)
{
	memcpy(dst, src, n);
	return 0;
}

DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)

static inline unsigned long
memcpy_skip(void *dst, const void *src, unsigned long n)
{
	return 0;
}

DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip)
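
/*
 * __output_skip() advances the output handle without writing anything:
 * memcpy_skip() claims success for any length, so only the position
 * bookkeeping in the generated loop runs. Callers use this to leave a
 * hole or padding in the ring buffer.
 */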

#ifndef arch_perf_out_copy_user
#define arch_perf_out_copy_user arch_perf_out_copy_user

static inline unsigned long
arch_perf_out_copy_user(void *dst, const void *src, unsigned long n)
{
	unsigned long ret;

	pagefault_disable();
	ret = __copy_from_user_inatomic(dst, src, n);
	pagefault_enable();

	return ret;
}
#endif

DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
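
/*
 * The generic arch_perf_out_copy_user() brackets
 * __copy_from_user_inatomic() with pagefault_disable()/enable() so the
 * copy is safe in the IRQ/NMI output path; a faulting user address is
 * reported as a non-zero "bytes not copied" count rather than serviced.
 */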

/* Callchain handling */
extern struct perf_callchain_entry *
perf_callchain(struct perf_event *event, struct pt_regs *regs);
extern int get_callchain_buffers(void);
extern void put_callchain_buffers(void);

static inline int get_recursion_context(int *recursion)
{
	int rctx;

	if (in_nmi())
		rctx = 3;
	else if (in_irq())
		rctx = 2;
	else if (in_softirq())
		rctx = 1;
	else
		rctx = 0;

	if (recursion[rctx])
		return -1;

	recursion[rctx]++;
	barrier();

	return rctx;
}
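
/*
 * Sketch of the typical calling pattern (compare
 * perf_swevent_get_recursion_context() in kernel/events/core.c):
 *
 *	rctx = get_recursion_context(recursion);
 *	if (rctx < 0)
 *		return;		// already active in this context
 *	... emit the event ...
 *	put_recursion_context(recursion, rctx);
 *
 * The four counters correspond to task, softirq, hardirq and NMI
 * context, so each context type may be entered once; a second entry at
 * the same level is rejected with -1.
 */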

static inline void put_recursion_context(int *recursion, int rctx)
{
	barrier();
	recursion[rctx]--;
}

#ifdef CONFIG_HAVE_PERF_USER_STACK_DUMP
static inline bool arch_perf_have_user_stack_dump(void)
{
	return true;
}

#define perf_user_stack_pointer(regs) user_stack_pointer(regs)
#else
static inline bool arch_perf_have_user_stack_dump(void)
{
	return false;
}

#define perf_user_stack_pointer(regs) 0
#endif /* CONFIG_HAVE_PERF_USER_STACK_DUMP */
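
/*
 * With no architecture support, perf_user_stack_pointer() evaluates to
 * 0; the sampling code treats a zero user stack pointer as nothing to
 * dump, so PERF_SAMPLE_STACK_USER degrades gracefully.
 */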

#endif /* _KERNEL_EVENTS_INTERNAL_H */