ring_buffer_benchmark.c

/*
 * ring buffer tester and benchmark
 *
 * Copyright (C) 2009 Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/ring_buffer.h>
#include <linux/completion.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/time.h>
#include <asm/local.h>

struct rb_page {
	u64		ts;
	local_t		commit;
	char		data[4080];
};

/* run time and sleep time in seconds */
#define RUN_TIME	10
#define SLEEP_TIME	10

/* number of events for writer to wake up the reader */
static int wakeup_interval = 100;

static int reader_finish;
static struct completion read_start;
static struct completion read_done;

static struct ring_buffer *buffer;
static struct task_struct *producer;
static struct task_struct *consumer;
static unsigned long read;

static int disable_reader;
module_param(disable_reader, uint, 0644);
MODULE_PARM_DESC(disable_reader, "only run producer");

static int write_iteration = 50;
module_param(write_iteration, uint, 0644);
MODULE_PARM_DESC(write_iteration, "# of writes between timestamp readings");

static int producer_nice = 19;
static int consumer_nice = 19;

static int producer_fifo = -1;
static int consumer_fifo = -1;

module_param(producer_nice, uint, 0644);
MODULE_PARM_DESC(producer_nice, "nice prio for producer");

module_param(consumer_nice, uint, 0644);
MODULE_PARM_DESC(consumer_nice, "nice prio for consumer");

module_param(producer_fifo, uint, 0644);
MODULE_PARM_DESC(producer_fifo, "fifo prio for producer");

module_param(consumer_fifo, uint, 0644);
MODULE_PARM_DESC(consumer_fifo, "fifo prio for consumer");
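
/*
 * Example module load (illustrative values only; every parameter above is
 * optional and defaults to the initializers given with its declaration):
 *
 *   insmod ring_buffer_benchmark.ko producer_fifo=10 write_iteration=100
 */
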
static int read_events;

static int kill_test;

#define KILL_TEST()				\
	do {					\
		if (!kill_test) {		\
			kill_test = 1;		\
			WARN_ON(1);		\
		}				\
	} while (0)

enum event_status {
	EVENT_FOUND,
	EVENT_DROPPED,
};

static enum event_status read_event(int cpu)
{
	struct ring_buffer_event *event;
	int *entry;
	u64 ts;

	event = ring_buffer_consume(buffer, cpu, &ts, NULL);
	if (!event)
		return EVENT_DROPPED;

	entry = ring_buffer_event_data(event);
	if (*entry != cpu) {
		KILL_TEST();
		return EVENT_DROPPED;
	}

	read++;
	return EVENT_FOUND;
}
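
/*
 * Read a full page worth of events from the given cpu's buffer and walk the
 * raw event headers in it, validating each payload against the cpu number
 * the producer wrote.
 */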
static enum event_status read_page(int cpu)
{
	struct ring_buffer_event *event;
	struct rb_page *rpage;
	unsigned long commit;
	void *bpage;
	int *entry;
	int ret;
	int inc;
	int i;

	bpage = ring_buffer_alloc_read_page(buffer);
	if (!bpage)
		return EVENT_DROPPED;

	ret = ring_buffer_read_page(buffer, &bpage, PAGE_SIZE, cpu, 1);
	if (ret >= 0) {
		rpage = bpage;
		/* The commit may have missed event flags set, clear them */
		commit = local_read(&rpage->commit) & 0xfffff;
		for (i = 0; i < commit && !kill_test; i += inc) {

			if (i >= (PAGE_SIZE - offsetof(struct rb_page, data))) {
				KILL_TEST();
				break;
			}

			inc = -1;
			event = (void *)&rpage->data[i];
			switch (event->type_len) {
			case RINGBUF_TYPE_PADDING:
				/* failed writes may be discarded events */
				if (!event->time_delta)
					KILL_TEST();
				inc = event->array[0] + 4;
				break;
			case RINGBUF_TYPE_TIME_EXTEND:
				inc = 8;
				break;
			case 0:
				entry = ring_buffer_event_data(event);
				if (*entry != cpu) {
					KILL_TEST();
					break;
				}
				read++;
				if (!event->array[0]) {
					KILL_TEST();
					break;
				}
				inc = event->array[0] + 4;
				break;
			default:
				entry = ring_buffer_event_data(event);
				if (*entry != cpu) {
					KILL_TEST();
					break;
				}
				read++;
				inc = ((event->type_len + 1) * 4);
			}
			if (kill_test)
				break;
			if (inc <= 0) {
				KILL_TEST();
				break;
			}
		}
	}
	ring_buffer_free_read_page(buffer, bpage);

	if (ret < 0)
		return EVENT_DROPPED;
	return EVENT_FOUND;
}
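
/*
 * Drain the buffer on every online cpu until the producer sets
 * reader_finish, alternating between event reads and page reads on each
 * invocation, then signal read_done.
 */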
static void ring_buffer_consumer(void)
{
	/* toggle between reading pages and events */
	read_events ^= 1;

	read = 0;
	while (!reader_finish && !kill_test) {
		int found;

		do {
			int cpu;

			found = 0;
			for_each_online_cpu(cpu) {
				enum event_status stat;

				if (read_events)
					stat = read_event(cpu);
				else
					stat = read_page(cpu);

				if (kill_test)
					break;
				if (stat == EVENT_FOUND)
					found = 1;
			}
		} while (found && !kill_test);

		set_current_state(TASK_INTERRUPTIBLE);
		if (reader_finish)
			break;

		schedule();
		__set_current_state(TASK_RUNNING);
	}
	reader_finish = 0;
	complete(&read_done);
}
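
/*
 * Write events as fast as possible for RUN_TIME seconds, periodically waking
 * the consumer, then report hit/miss counts and throughput statistics via
 * trace_printk.
 */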
static void ring_buffer_producer(void)
{
	struct timeval start_tv;
	struct timeval end_tv;
	unsigned long long time;
	unsigned long long entries;
	unsigned long long overruns;
	unsigned long missed = 0;
	unsigned long hit = 0;
	unsigned long avg;
	int cnt = 0;

	/*
	 * Hammer the buffer for 10 secs (this may
	 * make the system stall)
	 */
	trace_printk("Starting ring buffer hammer\n");
	do_gettimeofday(&start_tv);
	do {
		struct ring_buffer_event *event;
		int *entry;
		int i;

		for (i = 0; i < write_iteration; i++) {
			event = ring_buffer_lock_reserve(buffer, 10);
			if (!event) {
				missed++;
			} else {
				hit++;
				entry = ring_buffer_event_data(event);
				*entry = smp_processor_id();
				ring_buffer_unlock_commit(buffer, event);
			}
		}
		do_gettimeofday(&end_tv);

		cnt++;
		if (consumer && !(cnt % wakeup_interval))
			wake_up_process(consumer);

#ifndef CONFIG_PREEMPT
		/*
		 * If we are a non preempt kernel, the 10 second run will
		 * stop everything while it runs. Instead, we will call
		 * cond_resched and also add any time that was lost by a
		 * reschedule.
		 *
		 * Do a cond resched at the same frequency we would wake up
		 * the reader.
		 */
		if (cnt % wakeup_interval)
			cond_resched();
#endif
	} while (end_tv.tv_sec < (start_tv.tv_sec + RUN_TIME) && !kill_test);
	trace_printk("End ring buffer hammer\n");

	if (consumer) {
		/* Init both completions here to avoid races */
		init_completion(&read_start);
		init_completion(&read_done);
		/* the completions must be visible before the finish var */
		smp_wmb();
		reader_finish = 1;
		/* finish var visible before waking up the consumer */
		smp_wmb();
		wake_up_process(consumer);
		wait_for_completion(&read_done);
	}

	time = end_tv.tv_sec - start_tv.tv_sec;
	time *= USEC_PER_SEC;
	time += (long long)((long)end_tv.tv_usec - (long)start_tv.tv_usec);

	entries = ring_buffer_entries(buffer);
	overruns = ring_buffer_overruns(buffer);

	if (kill_test)
		trace_printk("ERROR!\n");

	if (!disable_reader) {
		if (consumer_fifo < 0)
			trace_printk("Running Consumer at nice: %d\n",
				     consumer_nice);
		else
			trace_printk("Running Consumer at SCHED_FIFO %d\n",
				     consumer_fifo);
	}
	if (producer_fifo < 0)
		trace_printk("Running Producer at nice: %d\n",
			     producer_nice);
	else
		trace_printk("Running Producer at SCHED_FIFO %d\n",
			     producer_fifo);

	/* Let the user know that the test is running at low priority */
	if (producer_fifo < 0 && consumer_fifo < 0 &&
	    producer_nice == 19 && consumer_nice == 19)
		trace_printk("WARNING!!! This test is running at lowest priority.\n");

	trace_printk("Time: %lld (usecs)\n", time);
	trace_printk("Overruns: %lld\n", overruns);
	if (disable_reader)
		trace_printk("Read: (reader disabled)\n");
	else
		trace_printk("Read: %ld (by %s)\n", read,
			     read_events ? "events" : "pages");
	trace_printk("Entries: %lld\n", entries);
	trace_printk("Total: %lld\n", entries + overruns + read);
	trace_printk("Missed: %ld\n", missed);
	trace_printk("Hit: %ld\n", hit);

	/* Convert time from usecs to millisecs */
	do_div(time, USEC_PER_MSEC);
	if (time)
		hit /= (long)time;
	else
		trace_printk("TIME IS ZERO??\n");

	trace_printk("Entries per millisec: %ld\n", hit);

	if (hit) {
		/* Calculate the average time in nanosecs */
		avg = NSEC_PER_MSEC / hit;
		trace_printk("%ld ns per entry\n", avg);
	}

	if (missed) {
		if (time)
			missed /= (long)time;

		trace_printk("Total iterations per millisec: %ld\n",
			     hit + missed);

		/* it is possible that hit + missed will overflow and be zero */
		if (!(hit + missed)) {
			trace_printk("hit + missed overflowed and totalled zero!\n");
			hit--; /* make it non zero */
		}

		/* Calculate the average time in nanosecs */
		avg = NSEC_PER_MSEC / (hit + missed);
		trace_printk("%ld ns per entry\n", avg);
	}
}
static void wait_to_die(void)
{
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
}
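
/*
 * Reader kthread: signals read_start, drains the buffer, then sleeps until
 * the producer wakes it again.
 */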
static int ring_buffer_consumer_thread(void *arg)
{
	while (!kthread_should_stop() && !kill_test) {
		complete(&read_start);

		ring_buffer_consumer();

		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_stop() || kill_test)
			break;

		schedule();
		__set_current_state(TASK_RUNNING);
	}
	__set_current_state(TASK_RUNNING);

	if (kill_test)
		wait_to_die();

	return 0;
}
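
/*
 * Writer kthread: resets the buffer, kicks the reader, runs one benchmark
 * pass, then sleeps for SLEEP_TIME seconds before repeating.
 */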
static int ring_buffer_producer_thread(void *arg)
{
	init_completion(&read_start);

	while (!kthread_should_stop() && !kill_test) {
		ring_buffer_reset(buffer);

		if (consumer) {
			smp_wmb();
			wake_up_process(consumer);
			wait_for_completion(&read_start);
		}

		ring_buffer_producer();

		trace_printk("Sleeping for 10 secs\n");
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ * SLEEP_TIME);
		__set_current_state(TASK_RUNNING);
	}

	if (kill_test)
		wait_to_die();

	return 0;
}
static int __init ring_buffer_benchmark_init(void)
{
	int ret;

	/* make a one meg buffer in overwrite mode */
	buffer = ring_buffer_alloc(1000000, RB_FL_OVERWRITE);
	if (!buffer)
		return -ENOMEM;

	if (!disable_reader) {
		consumer = kthread_create(ring_buffer_consumer_thread,
					  NULL, "rb_consumer");
		ret = PTR_ERR(consumer);
		if (IS_ERR(consumer))
			goto out_fail;
	}

	producer = kthread_run(ring_buffer_producer_thread,
			       NULL, "rb_producer");
	ret = PTR_ERR(producer);
	if (IS_ERR(producer))
		goto out_kill;

	/*
	 * Run them as low-prio background tasks by default:
	 */
	if (!disable_reader) {
		if (consumer_fifo >= 0) {
			struct sched_param param = {
				.sched_priority = consumer_fifo
			};
			sched_setscheduler(consumer, SCHED_FIFO, &param);
		} else
			set_user_nice(consumer, consumer_nice);
	}

	if (producer_fifo >= 0) {
		struct sched_param param = {
			.sched_priority = producer_fifo
		};
		sched_setscheduler(producer, SCHED_FIFO, &param);
	} else
		set_user_nice(producer, producer_nice);

	return 0;

 out_kill:
	if (consumer)
		kthread_stop(consumer);

 out_fail:
	ring_buffer_free(buffer);
	return ret;
}
static void __exit ring_buffer_benchmark_exit(void)
{
	kthread_stop(producer);
	if (consumer)
		kthread_stop(consumer);
	ring_buffer_free(buffer);
}

module_init(ring_buffer_benchmark_init);
module_exit(ring_buffer_benchmark_exit);

MODULE_AUTHOR("Steven Rostedt");
MODULE_DESCRIPTION("ring_buffer_benchmark");
MODULE_LICENSE("GPL");