ring_buffer_benchmark.c

/*
 * ring buffer tester and benchmark
 *
 * Copyright (C) 2009 Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/ring_buffer.h>
#include <linux/completion.h>
#include <linux/kthread.h>
#include <uapi/linux/sched/types.h>
#include <linux/module.h>
#include <linux/ktime.h>
#include <asm/local.h>
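
/*
 * Local mirror of the ring buffer page header, so read_page() can walk
 * raw page data: a timestamp, a commit counter, then the event payload.
 * The 4080-byte data array assumes a 4096-byte page with 8 bytes each
 * for ts and commit (a 64-bit system); read_page() still bounds-checks
 * against PAGE_SIZE when walking events.
 */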
struct rb_page {
	u64		ts;
	local_t		commit;
	char		data[4080];
};

/* run time and sleep time in seconds */
#define RUN_TIME	10ULL
#define SLEEP_TIME	10
/* number of events for writer to wake up the reader */
static int wakeup_interval = 100;

static int reader_finish;
static DECLARE_COMPLETION(read_start);
static DECLARE_COMPLETION(read_done);

static struct ring_buffer *buffer;
static struct task_struct *producer;
static struct task_struct *consumer;
static unsigned long read;

static unsigned int disable_reader;
module_param(disable_reader, uint, 0644);
MODULE_PARM_DESC(disable_reader, "only run producer");

static unsigned int write_iteration = 50;
module_param(write_iteration, uint, 0644);
MODULE_PARM_DESC(write_iteration, "# of writes between timestamp readings");

static int producer_nice = MAX_NICE;
static int consumer_nice = MAX_NICE;

static int producer_fifo = -1;
static int consumer_fifo = -1;

module_param(producer_nice, int, 0644);
MODULE_PARM_DESC(producer_nice, "nice prio for producer");

module_param(consumer_nice, int, 0644);
MODULE_PARM_DESC(consumer_nice, "nice prio for consumer");

module_param(producer_fifo, int, 0644);
MODULE_PARM_DESC(producer_fifo, "fifo prio for producer");

module_param(consumer_fifo, int, 0644);
MODULE_PARM_DESC(consumer_fifo, "fifo prio for consumer");
static int read_events;

static int test_error;
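
/*
 * Flag the first failure and emit a single WARN_ON() backtrace;
 * break_test() below then makes both threads stop their loops.
 */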
#define TEST_ERROR()				\
	do {					\
		if (!test_error) {		\
			test_error = 1;		\
			WARN_ON(1);		\
		}				\
	} while (0)

enum event_status {
	EVENT_FOUND,
	EVENT_DROPPED,
};
static bool break_test(void)
{
	return test_error || kthread_should_stop();
}
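
/*
 * Consume a single event from @cpu's buffer via ring_buffer_consume()
 * and check that the payload matches the CPU the event was written on;
 * a mismatch is flagged as a test failure.
 */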
static enum event_status read_event(int cpu)
{
	struct ring_buffer_event *event;
	int *entry;
	u64 ts;

	event = ring_buffer_consume(buffer, cpu, &ts, NULL);
	if (!event)
		return EVENT_DROPPED;

	entry = ring_buffer_event_data(event);
	if (*entry != cpu) {
		TEST_ERROR();
		return EVENT_DROPPED;
	}

	read++;
	return EVENT_FOUND;
}
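
/*
 * Read events a full page at a time. The page is copied out with
 * ring_buffer_read_page() and decoded by hand: the loop steps through
 * each event header, computes the event length (inc) from its type_len
 * field, and validates both the payload and the framing. inc is reset
 * to -1 each iteration so any path that fails to set it trips the
 * inc <= 0 check at the bottom of the loop.
 */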
static enum event_status read_page(int cpu)
{
	struct ring_buffer_event *event;
	struct rb_page *rpage;
	unsigned long commit;
	void *bpage;
	int *entry;
	int ret;
	int inc;
	int i;

	bpage = ring_buffer_alloc_read_page(buffer, cpu);
	if (IS_ERR(bpage))
		return EVENT_DROPPED;

	ret = ring_buffer_read_page(buffer, &bpage, PAGE_SIZE, cpu, 1);
	if (ret >= 0) {
		rpage = bpage;
		/* The commit may have missed event flags set, clear them */
		commit = local_read(&rpage->commit) & 0xfffff;
		for (i = 0; i < commit && !test_error; i += inc) {
			if (i >= (PAGE_SIZE - offsetof(struct rb_page, data))) {
				TEST_ERROR();
				break;
			}

			inc = -1;
			event = (void *)&rpage->data[i];
			switch (event->type_len) {
			case RINGBUF_TYPE_PADDING:
				/* failed writes may be discarded events */
				if (!event->time_delta)
					TEST_ERROR();
				inc = event->array[0] + 4;
				break;
			case RINGBUF_TYPE_TIME_EXTEND:
				inc = 8;
				break;
			case 0:
				entry = ring_buffer_event_data(event);
				if (*entry != cpu) {
					TEST_ERROR();
					break;
				}
				read++;
				if (!event->array[0]) {
					TEST_ERROR();
					break;
				}
				inc = event->array[0] + 4;
				break;
			default:
				entry = ring_buffer_event_data(event);
				if (*entry != cpu) {
					TEST_ERROR();
					break;
				}
				read++;
				inc = ((event->type_len + 1) * 4);
			}
			if (test_error)
				break;

			if (inc <= 0) {
				TEST_ERROR();
				break;
			}
		}
	}
	ring_buffer_free_read_page(buffer, cpu, bpage);

	if (ret < 0)
		return EVENT_DROPPED;
	return EVENT_FOUND;
}
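
/*
 * Reader loop: alternates between the two read methods on successive
 * runs, draining every online CPU's buffer until the producer sets
 * reader_finish, then signals completion back through read_done.
 */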
static void ring_buffer_consumer(void)
{
	/* toggle between reading pages and events */
	read_events ^= 1;

	read = 0;
	/*
	 * Continue running until the producer specifically asks to stop
	 * and is ready for the completion.
	 */
	while (!READ_ONCE(reader_finish)) {
		int found = 1;

		while (found && !test_error) {
			int cpu;

			found = 0;
			for_each_online_cpu(cpu) {
				enum event_status stat;

				if (read_events)
					stat = read_event(cpu);
				else
					stat = read_page(cpu);

				if (test_error)
					break;

				if (stat == EVENT_FOUND)
					found = 1;
			}
		}

		/*
		 * Wait till the producer wakes us up when there is more data
		 * available or when the producer wants us to finish reading.
		 */
		set_current_state(TASK_INTERRUPTIBLE);
		if (reader_finish)
			break;

		schedule();
	}
	__set_current_state(TASK_RUNNING);
	reader_finish = 0;
	complete(&read_done);
}
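
/*
 * Writer loop: hammers the buffer with events for RUN_TIME seconds,
 * periodically waking the consumer, then shuts the reader down and
 * reports hit/miss counts and per-entry timing via trace_printk().
 */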
static void ring_buffer_producer(void)
{
	ktime_t start_time, end_time, timeout;
	unsigned long long time;
	unsigned long long entries;
	unsigned long long overruns;
	unsigned long missed = 0;
	unsigned long hit = 0;
	unsigned long avg;
	int cnt = 0;

	/*
	 * Hammer the buffer for 10 secs (this may
	 * make the system stall)
	 */
	trace_printk("Starting ring buffer hammer\n");
	start_time = ktime_get();
	timeout = ktime_add_ns(start_time, RUN_TIME * NSEC_PER_SEC);
	do {
		struct ring_buffer_event *event;
		int *entry;
		int i;

		for (i = 0; i < write_iteration; i++) {
			event = ring_buffer_lock_reserve(buffer, 10);
			if (!event) {
				missed++;
			} else {
				hit++;
				entry = ring_buffer_event_data(event);
				*entry = smp_processor_id();
				ring_buffer_unlock_commit(buffer, event);
			}
		}
		end_time = ktime_get();

		cnt++;
		if (consumer && !(cnt % wakeup_interval))
			wake_up_process(consumer);

#ifndef CONFIG_PREEMPT
		/*
		 * If we are a non-preempt kernel, the 10 second run will
		 * stop everything while it runs. Instead, we will call
		 * cond_resched and also add any time that was lost by a
		 * reschedule.
		 *
		 * Do a cond resched at the same frequency we would wake up
		 * the reader.
		 */
		if (cnt % wakeup_interval)
			cond_resched();
#endif
	} while (ktime_before(end_time, timeout) && !break_test());
	trace_printk("End ring buffer hammer\n");

	if (consumer) {
		/* Init both completions here to avoid races */
		init_completion(&read_start);
		init_completion(&read_done);
		/* the completions must be visible before the finish var */
		smp_wmb();
		reader_finish = 1;
		wake_up_process(consumer);
		wait_for_completion(&read_done);
	}

	time = ktime_us_delta(end_time, start_time);
	entries = ring_buffer_entries(buffer);
	overruns = ring_buffer_overruns(buffer);

	if (test_error)
		trace_printk("ERROR!\n");

	if (!disable_reader) {
		if (consumer_fifo < 0)
			trace_printk("Running Consumer at nice: %d\n",
				     consumer_nice);
		else
			trace_printk("Running Consumer at SCHED_FIFO %d\n",
				     consumer_fifo);
	}
	if (producer_fifo < 0)
		trace_printk("Running Producer at nice: %d\n",
			     producer_nice);
	else
		trace_printk("Running Producer at SCHED_FIFO %d\n",
			     producer_fifo);

	/* Let the user know that the test is running at low priority */
	if (producer_fifo < 0 && consumer_fifo < 0 &&
	    producer_nice == MAX_NICE && consumer_nice == MAX_NICE)
		trace_printk("WARNING!!! This test is running at lowest priority.\n");

	trace_printk("Time: %lld (usecs)\n", time);
	trace_printk("Overruns: %lld\n", overruns);
	if (disable_reader)
		trace_printk("Read: (reader disabled)\n");
	else
		trace_printk("Read: %ld (by %s)\n", read,
			     read_events ? "events" : "pages");
	trace_printk("Entries: %lld\n", entries);
	trace_printk("Total: %lld\n", entries + overruns + read);
	trace_printk("Missed: %ld\n", missed);
	trace_printk("Hit: %ld\n", hit);

	/* Convert time from usecs to millisecs */
	do_div(time, USEC_PER_MSEC);
	if (time)
		hit /= (long)time;
	else
		trace_printk("TIME IS ZERO??\n");

	trace_printk("Entries per millisec: %ld\n", hit);

	if (hit) {
		/* Calculate the average time in nanosecs */
		avg = NSEC_PER_MSEC / hit;
		trace_printk("%ld ns per entry\n", avg);
	}

	if (missed) {
		if (time)
			missed /= (long)time;

		trace_printk("Total iterations per millisec: %ld\n",
			     hit + missed);

		/* it is possible that hit + missed will overflow and be zero */
		if (!(hit + missed)) {
			trace_printk("hit + missed overflowed and totalled zero!\n");
			hit--; /* make it non zero */
		}

		/* Calculate the average time in nanosecs */
		avg = NSEC_PER_MSEC / (hit + missed);
		trace_printk("%ld ns per entry\n", avg);
	}
}
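
/*
 * Park the thread until kthread_stop() is called, so module unload can
 * always stop both threads cleanly even after a test error.
 */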
static void wait_to_die(void)
{
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
}
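
/*
 * Consumer kthread: announces readiness through read_start, runs one
 * consumer pass, then sleeps until the producer wakes it for the next
 * round (or until the test is stopped).
 */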
static int ring_buffer_consumer_thread(void *arg)
{
	while (!break_test()) {
		complete(&read_start);

		ring_buffer_consumer();

		set_current_state(TASK_INTERRUPTIBLE);
		if (break_test())
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);

	if (!kthread_should_stop())
		wait_to_die();

	return 0;
}
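
/*
 * Producer kthread: resets the buffer, waits for the consumer to be
 * ready, runs one RUN_TIME hammer pass, then sleeps for SLEEP_TIME
 * seconds before repeating.
 */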
static int ring_buffer_producer_thread(void *arg)
{
	while (!break_test()) {
		ring_buffer_reset(buffer);

		if (consumer) {
			wake_up_process(consumer);
			wait_for_completion(&read_start);
		}

		ring_buffer_producer();
		if (break_test())
			goto out_kill;

		trace_printk("Sleeping for 10 secs\n");
		set_current_state(TASK_INTERRUPTIBLE);
		if (break_test())
			goto out_kill;
		schedule_timeout(HZ * SLEEP_TIME);
	}

out_kill:
	__set_current_state(TASK_RUNNING);
	if (!kthread_should_stop())
		wait_to_die();

	return 0;
}
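
/*
 * Module init: allocate the buffer, start the consumer (unless
 * disable_reader is set) and the producer, then apply the requested
 * scheduling policy: SCHED_FIFO if a *_fifo priority was given,
 * otherwise the *_nice values (lowest priority by default).
 */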
static int __init ring_buffer_benchmark_init(void)
{
	int ret;

	/* make a one meg buffer in overwrite mode */
	buffer = ring_buffer_alloc(1000000, RB_FL_OVERWRITE);
	if (!buffer)
		return -ENOMEM;

	if (!disable_reader) {
		consumer = kthread_create(ring_buffer_consumer_thread,
					  NULL, "rb_consumer");
		ret = PTR_ERR(consumer);
		if (IS_ERR(consumer))
			goto out_fail;
	}

	producer = kthread_run(ring_buffer_producer_thread,
			       NULL, "rb_producer");
	ret = PTR_ERR(producer);

	if (IS_ERR(producer))
		goto out_kill;

	/*
	 * Run them as low-prio background tasks by default:
	 */
	if (!disable_reader) {
		if (consumer_fifo >= 0) {
			struct sched_param param = {
				.sched_priority = consumer_fifo
			};
			sched_setscheduler(consumer, SCHED_FIFO, &param);
		} else
			set_user_nice(consumer, consumer_nice);
	}

	if (producer_fifo >= 0) {
		struct sched_param param = {
			.sched_priority = producer_fifo
		};
		sched_setscheduler(producer, SCHED_FIFO, &param);
	} else
		set_user_nice(producer, producer_nice);

	return 0;

out_kill:
	if (consumer)
		kthread_stop(consumer);

out_fail:
	ring_buffer_free(buffer);
	return ret;
}
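
/* Module exit: stop both kthreads before freeing the buffer. */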
static void __exit ring_buffer_benchmark_exit(void)
{
	kthread_stop(producer);
	if (consumer)
		kthread_stop(consumer);
	ring_buffer_free(buffer);
}
module_init(ring_buffer_benchmark_init);
module_exit(ring_buffer_benchmark_exit);

MODULE_AUTHOR("Steven Rostedt");
MODULE_DESCRIPTION("ring_buffer_benchmark");
MODULE_LICENSE("GPL");