openat-syscall-all-cpus.c

/* For the CPU_* macros */
#include <pthread.h>

/* For errno, openat(), getpid(), UINT_MAX and PRIu64 */
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <limits.h>
#include <unistd.h>

#include <api/fs/fs.h>
#include <linux/err.h>
#include "evsel.h"
#include "tests.h"
#include "thread_map.h"
#include "cpumap.h"
#include "debug.h"
#include "stat.h"
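
/*
 * Check that a "syscalls:sys_enter_openat" tracepoint event opened on
 * all CPUs sees the right per-cpu counts: the test pins itself to each
 * CPU in turn, performs a known, cpu-dependent number of openat()
 * calls there, and then reads the counter back for every CPU.
 */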
int test__openat_syscall_event_on_all_cpus(int subtest __maybe_unused)
{
        int err = -1, fd, cpu;
        struct cpu_map *cpus;
        struct perf_evsel *evsel;
        unsigned int nr_openat_calls = 111, i;
        cpu_set_t cpu_set;
        struct thread_map *threads = thread_map__new(-1, getpid(), UINT_MAX);
        char sbuf[STRERR_BUFSIZE];
        char errbuf[BUFSIZ];

        if (threads == NULL) {
                pr_debug("thread_map__new\n");
                return -1;
        }

        cpus = cpu_map__new(NULL);
        if (cpus == NULL) {
                pr_debug("cpu_map__new\n");
                goto out_thread_map_delete;
        }

        CPU_ZERO(&cpu_set);
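
        /*
         * perf_evsel__newtp() returns an ERR_PTR() on failure, hence
         * the IS_ERR() check below instead of a NULL test.
         */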
        evsel = perf_evsel__newtp("syscalls", "sys_enter_openat");
        if (IS_ERR(evsel)) {
                tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf),
                                               "syscalls", "sys_enter_openat");
                pr_debug("%s\n", errbuf);
                goto out_thread_map_delete;
        }

        if (perf_evsel__open(evsel, cpus, threads) < 0) {
                pr_debug("failed to open counter: %s, "
                         "tweak /proc/sys/kernel/perf_event_paranoid?\n",
                         str_error_r(errno, sbuf, sizeof(sbuf)));
                goto out_evsel_delete;
        }
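
        /*
         * Generate the workload: pin this thread to each CPU in turn
         * with sched_setaffinity() and do a cpu-dependent number of
         * openat() calls there, so each CPU ends up with a distinct
         * expected count.
         */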
        for (cpu = 0; cpu < cpus->nr; ++cpu) {
                unsigned int ncalls = nr_openat_calls + cpu;

                /*
                 * XXX eventually lift this restriction in a way that
                 * keeps perf building on older glibc installations
                 * without CPU_ALLOC. 1024 cpus in 2010 still seems
                 * a reasonable upper limit tho :-)
                 */
                if (cpus->map[cpu] >= CPU_SETSIZE) {
                        pr_debug("Ignoring CPU %d\n", cpus->map[cpu]);
                        continue;
                }

                CPU_SET(cpus->map[cpu], &cpu_set);
                if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
                        pr_debug("sched_setaffinity() failed on CPU %d: %s ",
                                 cpus->map[cpu],
                                 str_error_r(errno, sbuf, sizeof(sbuf)));
                        goto out_close_fd;
                }

                for (i = 0; i < ncalls; ++i) {
                        fd = openat(0, "/etc/passwd", O_RDONLY);
                        close(fd);
                }

                CPU_CLR(cpus->map[cpu], &cpu_set);
        }

        /*
         * Here we need to explicitly preallocate the counts, as if
         * we use the auto allocation it will allocate just for 1 cpu,
         * as we start by cpu 0.
         */
        if (perf_evsel__alloc_counts(evsel, cpus->nr, 1) < 0) {
                pr_debug("perf_evsel__alloc_counts(ncpus=%d)\n", cpus->nr);
                goto out_close_fd;
        }

        err = 0;
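
        /*
         * Verify: read the counter back for each CPU and compare with
         * the number of openat() calls issued while pinned there. Note
         * that "cpu" is an index into the cpu map, which is also what
         * perf_evsel__read_on_cpu() takes.
         */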
        for (cpu = 0; cpu < cpus->nr; ++cpu) {
                unsigned int expected;

                if (cpus->map[cpu] >= CPU_SETSIZE)
                        continue;

                if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) {
                        pr_debug("perf_evsel__read_on_cpu\n");
                        err = -1;
                        break;
                }

                expected = nr_openat_calls + cpu;
                if (perf_counts(evsel->counts, cpu, 0)->val != expected) {
                        pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %" PRIu64 "\n",
                                 expected, cpus->map[cpu],
                                 perf_counts(evsel->counts, cpu, 0)->val);
                        err = -1;
                }
        }
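
        /*
         * Tear down in reverse order of setup; the labels below double
         * as the error paths for the earlier setup steps.
         */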
        perf_evsel__free_counts(evsel);
out_close_fd:
        perf_evsel__close_fd(evsel, 1, threads->nr);
out_evsel_delete:
        perf_evsel__delete(evsel);
out_thread_map_delete:
        thread_map__put(threads);
        return err;
}