/**
 * @file cpu_buffer.h
 *
 * @remark Copyright 2002-2009 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 * @author Robert Richter <robert.richter@amd.com>
 */

#ifndef OPROFILE_CPU_BUFFER_H
#define OPROFILE_CPU_BUFFER_H

#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/cache.h>
#include <linux/sched.h>
#include <linux/ring_buffer.h>

struct task_struct;

int alloc_cpu_buffers(void);
void free_cpu_buffers(void);

void start_cpu_work(void);
void end_cpu_work(void);
void flush_cpu_work(void);

/* CPU buffer is composed of such entries (which are
 * also used for context switch notes)
 */
struct op_sample {
	unsigned long eip;
	unsigned long event;
	unsigned long data[0];
};
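
/*
 * Illustrative sketch, not part of the original interface: data[0] is the
 * old zero-length-array idiom, so any extra data words follow the fixed
 * header directly in memory. The helper name op_sample_size() is invented
 * here purely to show the layout arithmetic.
 */
static inline size_t op_sample_size(unsigned long extra_words)
{
	/* fixed header (eip + event) plus the variable-length data words */
	return sizeof(struct op_sample) + extra_words * sizeof(unsigned long);
}
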
struct op_entry;

struct oprofile_cpu_buffer {
	unsigned long buffer_size;
	struct task_struct *last_task;
	int last_is_kernel;
	int tracing;
	unsigned long sample_received;
	unsigned long sample_lost_overflow;
	unsigned long backtrace_aborted;
	unsigned long sample_invalid_eip;
	int cpu;
	struct delayed_work work;
};

DECLARE_PER_CPU(struct oprofile_cpu_buffer, op_cpu_buffer);

/*
 * Resets the cpu buffer to a sane state.
 *
 * Reset these fields to invalid values; the next sample collected will
 * populate the buffer with proper values to initialize it.
 */
static inline void op_cpu_buffer_reset(int cpu)
{
	struct oprofile_cpu_buffer *cpu_buf = &per_cpu(op_cpu_buffer, cpu);

	cpu_buf->last_is_kernel = -1;
	cpu_buf->last_task = NULL;
}

/*
 * op_cpu_buffer_add_data() and op_cpu_buffer_write_commit() may be
 * called only if op_cpu_buffer_write_reserve() did not return NULL
 * (equivalently, only if entry->event != NULL); otherwise entry->size
 * or entry->event will be used uninitialized.
 */
struct op_sample
*op_cpu_buffer_write_reserve(struct op_entry *entry, unsigned long size);
int op_cpu_buffer_write_commit(struct op_entry *entry);
struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu);
unsigned long op_cpu_buffer_entries(int cpu);

/* returns the remaining free size of data in the entry */
static inline
int op_cpu_buffer_add_data(struct op_entry *entry, unsigned long val)
{
	if (!entry->size)
		return 0;
	*entry->data = val;
	entry->size--;
	entry->data++;
	return entry->size;
}
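
/*
 * Illustrative sketch only, not part of the original header: one way a
 * writer could follow the reserve/add/commit contract documented above.
 * The function name and parameters are invented for this example; it
 * assumes struct op_entry (from <linux/oprofile.h>) is complete in the
 * including translation unit, as the inline helpers here already do.
 */
static inline
void op_cpu_buffer_write_sketch(unsigned long pc, unsigned long event,
				unsigned long extra)
{
	struct op_entry entry;
	struct op_sample *sample;

	/* reserve room for the sample header plus one extra data word */
	sample = op_cpu_buffer_write_reserve(&entry, 1);
	if (!sample)
		return;			/* buffer full; the sample is dropped */

	sample->eip = pc;
	sample->event = event;

	/* safe only because the reserve above did not return NULL */
	op_cpu_buffer_add_data(&entry, extra);
	op_cpu_buffer_write_commit(&entry);
}
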
/* returns the size of data in the entry */
static inline
int op_cpu_buffer_get_size(struct op_entry *entry)
{
	return entry->size;
}

/* returns 0 if empty or the size of data including the current value */
static inline
int op_cpu_buffer_get_data(struct op_entry *entry, unsigned long *val)
{
	int size = entry->size;

	if (!size)
		return 0;
	*val = *entry->data;
	entry->size--;
	entry->data++;
	return size;
}
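
/*
 * Illustrative sketch only, not part of the original header: draining the
 * extra data words of one buffered entry on the read side. The function
 * name is invented for this example, and it assumes that
 * op_cpu_buffer_read_entry() returns NULL when the per-cpu buffer has
 * nothing to consume.
 */
static inline
void op_cpu_buffer_read_sketch(int cpu)
{
	struct op_entry entry;
	struct op_sample *sample;
	unsigned long val;

	sample = op_cpu_buffer_read_entry(&entry, cpu);
	if (!sample)
		return;			/* nothing buffered on this cpu */

	/* sample->eip and sample->event would be consumed here */

	while (op_cpu_buffer_get_data(&entry, &val)) {
		/* each val is one extra data word added by the writer */
	}
}
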
/* extra data flags */
#define KERNEL_CTX_SWITCH	(1UL << 0)
#define IS_KERNEL		(1UL << 1)
#define TRACE_BEGIN		(1UL << 2)
#define USER_CTX_SWITCH		(1UL << 3)
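
/*
 * Illustrative sketch only, not part of the original header: an assumption
 * about how a consumer might test these bits. The real decoding lives in
 * the buffer sync code, not here, and the helper name is invented.
 */
static inline int op_flags_is_kernel_switch(unsigned long flags)
{
	/* a context-switch note whose IS_KERNEL bit marks kernel mode */
	return (flags & KERNEL_CTX_SWITCH) && (flags & IS_KERNEL);
}
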
#endif /* OPROFILE_CPU_BUFFER_H */