internal.h

#ifndef _KERNEL_EVENTS_INTERNAL_H
#define _KERNEL_EVENTS_INTERNAL_H

#include <linux/hardirq.h>

/* Buffer handling */

#define RING_BUFFER_WRITABLE		0x01
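
/*
 * rb_alloc() flag. Going by the mmap path, this is set when the buffer is
 * mapped PROT_WRITE: a writable buffer honours user_page->data_tail and
 * counts records that would overwrite unread data as 'lost' instead.
 */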

struct ring_buffer {
	atomic_t			refcount;
	struct rcu_head			rcu_head;
#ifdef CONFIG_PERF_USE_VMALLOC
	struct work_struct		work;
	int				page_order;	/* allocation order  */
#endif
	int				nr_pages;	/* nr of data pages  */
	int				writable;	/* are we writable   */

	atomic_t			poll;		/* POLL_ for wakeups */

	local_t				head;		/* write position    */
	local_t				nest;		/* nested writers    */
	local_t				events;		/* event limit       */
	local_t				wakeup;		/* wakeup stamp      */
	local_t				lost;		/* nr records lost   */

	long				watermark;	/* wakeup watermark  */

	/* poll crap */
	spinlock_t			event_lock;
	struct list_head		event_list;

	atomic_t			mmap_count;
	unsigned long			mmap_locked;
	struct user_struct		*mmap_user;

	struct perf_event_mmap_page	*user_page;
	void				*data_pages[0];
};
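
/*
 * Layout, as used by perf_mmap_to_page() below: user_page (the
 * perf_event_mmap_page control page) sits at mmap offset 0, followed by
 * nr_pages data pages; data_pages[] is a flexible array of their kernel
 * addresses.
 */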

extern void rb_free(struct ring_buffer *rb);
extern struct ring_buffer *
rb_alloc(int nr_pages, long watermark, int cpu, int flags);
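
/*
 * nr_pages is assumed to be 0 or a power of two: __output_copy() below
 * wraps with 'handle->page &= rb->nr_pages - 1', and the perf mmap path
 * rejects other sizes.
 */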

extern void perf_event_wakeup(struct perf_event *event);

extern void
perf_event_header__init_id(struct perf_event_header *header,
			   struct perf_sample_data *data,
			   struct perf_event *event);
extern void
perf_event__output_id_sample(struct perf_event *event,
			     struct perf_output_handle *handle,
			     struct perf_sample_data *sample);

extern struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff);
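
/*
 * Expected mapping (an inference consistent with the layout note above):
 * pgoff 0 resolves to user_page, pgoff 1..nr_pages to data_pages[pgoff - 1],
 * and anything beyond that to NULL.
 */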

#ifdef CONFIG_PERF_USE_VMALLOC
/*
 * Back perf_mmap() with vmalloc memory.
 *
 * Required for architectures that have d-cache aliasing issues.
 */
static inline int page_order(struct ring_buffer *rb)
{
	return rb->page_order;
}
#else
static inline int page_order(struct ring_buffer *rb)
{
	return 0;
}
#endif
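
/*
 * A note on the vmalloc case (an assumption based on the vmalloc variant
 * of rb_alloc()): the whole buffer is one virtually contiguous allocation,
 * nr_pages is 1 and page_order covers the full data area, so a "page" in
 * this file is really PAGE_SIZE << page_order(rb) bytes.
 */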

static inline unsigned long perf_data_size(struct ring_buffer *rb)
{
	return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
}
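
/*
 * Example: with 4 KB pages (PAGE_SHIFT == 12), page_order(rb) == 0 and
 * nr_pages == 8, perf_data_size() returns 8 << 12 == 32 KB.
 */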

static inline void
__output_copy(struct perf_output_handle *handle,
	      const void *buf, unsigned int len)
{
	do {
		unsigned long size = min_t(unsigned long, handle->size, len);

		memcpy(handle->addr, buf, size);

		len -= size;
		handle->addr += size;
		buf += size;
		handle->size -= size;
		if (!handle->size) {
			struct ring_buffer *rb = handle->rb;

			/* current page exhausted: advance, wrapping around */
			handle->page++;
			handle->page &= rb->nr_pages - 1;
			handle->addr = rb->data_pages[handle->page];
			handle->size = PAGE_SIZE << page_order(rb);
		}
	} while (len);
}
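
/*
 * Typical writer path, a sketch only (the wrappers are declared in
 * <linux/perf_event.h> and funnel into this helper):
 *
 *	struct perf_output_handle handle;
 *
 *	if (perf_output_begin(&handle, event, size))
 *		return;				/* no room: counted in 'lost' */
 *	perf_output_put(&handle, header);	/* fixed-size struct          */
 *	perf_output_copy(&handle, buf, len);	/* variable-size payload      */
 *	perf_output_end(&handle);
 */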

/* Callchain handling */
extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs);
extern int get_callchain_buffers(void);
extern void put_callchain_buffers(void);
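
/*
 * Recursion guard: one counter per execution context (task, softirq,
 * hardirq, NMI). An event firing while another event in the same context
 * is still being handled gets -1 and is dropped, protecting the per-cpu
 * scratch buffers from reentrancy.
 */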

static inline int get_recursion_context(int *recursion)
{
	int rctx;

	if (in_nmi())
		rctx = 3;
	else if (in_irq())
		rctx = 2;
	else if (in_softirq())
		rctx = 1;
	else
		rctx = 0;

	if (recursion[rctx])
		return -1;

	recursion[rctx]++;
	barrier();

	return rctx;
}

static inline void put_recursion_context(int *recursion, int rctx)
{
	barrier();
	recursion[rctx]--;
}

#endif /* _KERNEL_EVENTS_INTERNAL_H */