/*
 * tools/testing/selftests/epoll/test_epoll.c
 *
 * Copyright 2012 Adobe Systems Incorporated
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * Paton J. Lewis <palewis@adobe.com>
 *
 */
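
/*
 * Overview: the test creates a set of non-blocking socket pairs and registers
 * the read end of each pair with an epoll set as an EPOLLIN | EPOLLONESHOT
 * item (half of the items are also edge-triggered).  A pool of read threads
 * calls epoll_wait, handles each event and re-arms the one-shot item, while a
 * single write thread keeps the sockets readable.  The main thread then
 * cancels every item and verifies that each item's private data was "deleted"
 * exactly once, i.e. that no read thread raced with the cancellation path and
 * deleted the same item twice.
 *
 * Build note (an assumption, not part of the original harness): a stand-alone
 * build along the lines of "gcc -pthread test_epoll.c -o test_epoll" should
 * suffice, since the test only depends on pthreads and the standard
 * epoll/socket headers.
 */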
#include <errno.h>
#include <fcntl.h>
#include <pthread.h>
#include <stdint.h>  /* for uint32_t */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/epoll.h>
#include <sys/socket.h>

/*
 * A pointer to an epoll_item_private structure will be stored in the epoll
 * item's event structure so that we can get access to the epoll_item_private
 * data after calling epoll_wait:
 */
struct epoll_item_private {
        int index;  /* Position of this struct within the epoll_items array. */
        int fd;
        uint32_t events;
        pthread_mutex_t mutex;  /* Guards the following variables... */
        int stop;
        int status;  /* Stores any error encountered while handling item. */
        /* The following variable allows us to test whether we have encountered
           a problem while attempting to cancel and delete the associated
           event. When the test program exits, 'deleted' should be exactly
           one. If it is greater than one, then the failed test reflects a real
           world situation where we would have tried to access the epoll item's
           private data after deleting it: */
        int deleted;
};

struct epoll_item_private *epoll_items;

/*
 * Delete the specified item from the epoll set. In a real-world scenario this
 * is where we would free the associated data structure, but in this testing
 * environment we retain the structure so that we can test for double-deletion:
 */
void delete_item(int index)
{
        __sync_fetch_and_add(&epoll_items[index].deleted, 1);
}

/*
 * A pointer to a read_thread_data structure will be passed as the argument to
 * each read thread:
 */
struct read_thread_data {
        int stop;
        int status;  /* Indicates any error encountered by the read thread. */
        int epoll_set;
};

/*
 * The function executed by the read threads:
 */
void *read_thread_function(void *function_data)
{
        struct read_thread_data *thread_data =
                (struct read_thread_data *)function_data;
        struct epoll_event event_data;
        struct epoll_item_private *item_data;
        char socket_data;

        /* Handle events until we encounter an error or this thread's 'stop'
           condition is set: */
        while (1) {
                int result = epoll_wait(thread_data->epoll_set,
                                        &event_data,
                                        1,      /* Number of desired events */
                                        1000);  /* Timeout in ms */
                if (result < 0) {
                        /* Breakpoints signal all threads. Ignore that while
                           debugging: */
                        if (errno == EINTR)
                                continue;
                        thread_data->status = errno;
                        return 0;
                } else if (thread_data->stop)
                        return 0;
                else if (result == 0)  /* Timeout */
                        continue;

                /* We need the mutex here because checking for the stop
                   condition and re-enabling the epoll item need to be done
                   together as one atomic operation when EPOLL_CTL_DISABLE is
                   available: */
                item_data = (struct epoll_item_private *)event_data.data.ptr;
                pthread_mutex_lock(&item_data->mutex);

                /* Remove the item from the epoll set if we want to stop
                   handling that event: */
                if (item_data->stop)
                        delete_item(item_data->index);
                else {
                        /* Clear the data that was written to the other end of
                           our non-blocking socket: */
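                        /* Edge-triggered (EPOLLET) items are drained until the
                           socket would block; level-triggered items only need
                           a single read before the one-shot item is re-armed
                           below: */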
                        do {
                                if (read(item_data->fd, &socket_data, 1) < 1) {
                                        if ((errno == EAGAIN) ||
                                            (errno == EWOULDBLOCK))
                                                break;
                                        else
                                                goto error_unlock;
                                }
                        } while (item_data->events & EPOLLET);

                        /* The item was one-shot, so re-enable it: */
                        event_data.events = item_data->events;
                        if (epoll_ctl(thread_data->epoll_set,
                                      EPOLL_CTL_MOD,
                                      item_data->fd,
                                      &event_data) < 0)
                                goto error_unlock;
                }
                pthread_mutex_unlock(&item_data->mutex);
        }

error_unlock:
        thread_data->status = item_data->status = errno;
        pthread_mutex_unlock(&item_data->mutex);
        return 0;
}

/*
 * A pointer to a write_thread_data structure will be passed as the argument to
 * the write thread:
 */
struct write_thread_data {
        int stop;
        int status;  /* Indicates any error encountered by the write thread. */
        int n_fds;
        int *fds;
};

/*
 * The function executed by the write thread. It writes a single byte to each
 * socket in turn until the stop condition for this thread is set. If writing to
 * a socket would block (i.e. errno was EAGAIN), we leave that socket alone for
 * the moment and just move on to the next socket in the list. We don't care
 * about the order in which we deliver events to the epoll set. In fact we don't
 * care about the data we're writing to the sockets at all; we just want to
 * trigger epoll events:
 */
void *write_thread_function(void *function_data)
{
        const char data = 'X';
        int index;
        struct write_thread_data *thread_data =
                (struct write_thread_data *)function_data;

        while (!thread_data->stop)
                for (index = 0;
                     !thread_data->stop && (index < thread_data->n_fds);
                     ++index)
                        if ((write(thread_data->fds[index], &data, 1) < 1) &&
                            (errno != EAGAIN) &&
                            (errno != EWOULDBLOCK)) {
                                thread_data->status = errno;
                                return 0;
                        }
        return 0;
}

/*
 * Arguments are currently ignored:
 */
int main(int argc, char **argv)
{
        const int n_read_threads = 100;
        const int n_epoll_items = 500;
        int index;
        int epoll_set = epoll_create1(0);
        struct write_thread_data write_thread_data = {
                0, 0, n_epoll_items, malloc(n_epoll_items * sizeof(int))
        };
        struct read_thread_data *read_thread_data =
                malloc(n_read_threads * sizeof(struct read_thread_data));
        pthread_t *read_threads = malloc(n_read_threads * sizeof(pthread_t));
        pthread_t write_thread;

        printf("------------------\n");
        printf("Running test_epoll\n");
        printf("------------------\n");

        epoll_items = malloc(n_epoll_items * sizeof(struct epoll_item_private));

        if (epoll_set < 0 || epoll_items == 0 || write_thread_data.fds == 0 ||
            read_thread_data == 0 || read_threads == 0)
                goto error;

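        /* The double-deletion race exercised below depends on the read threads
           and the cancellation loop running truly in parallel, so insist on a
           multi-core system: */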
        if (sysconf(_SC_NPROCESSORS_ONLN) < 2) {
                printf("Error: please run this test on a multi-core system.\n");
                goto error;
        }

        /* Create the socket pairs and epoll items: */
        for (index = 0; index < n_epoll_items; ++index) {
                int socket_pair[2];
                struct epoll_event event_data;
                if (socketpair(AF_UNIX,
                               SOCK_STREAM | SOCK_NONBLOCK,
                               0,
                               socket_pair) < 0)
                        goto error;
                write_thread_data.fds[index] = socket_pair[0];
                epoll_items[index].index = index;
                epoll_items[index].fd = socket_pair[1];
                if (pthread_mutex_init(&epoll_items[index].mutex, NULL) != 0)
                        goto error;
                /* We always use EPOLLONESHOT because this test is currently
                   structured to demonstrate the need for EPOLL_CTL_DISABLE,
                   which only produces useful information in the EPOLLONESHOT
                   case (without EPOLLONESHOT, calling epoll_ctl with
                   EPOLL_CTL_DISABLE will never return EBUSY). If support for
                   testing events without EPOLLONESHOT is desired, it should
                   probably be implemented in a separate unit test. */
                epoll_items[index].events = EPOLLIN | EPOLLONESHOT;
                if (index < n_epoll_items / 2)
                        epoll_items[index].events |= EPOLLET;
                epoll_items[index].stop = 0;
                epoll_items[index].status = 0;
                epoll_items[index].deleted = 0;
                event_data.events = epoll_items[index].events;
                event_data.data.ptr = &epoll_items[index];
                if (epoll_ctl(epoll_set,
                              EPOLL_CTL_ADD,
                              epoll_items[index].fd,
                              &event_data) < 0)
                        goto error;
        }

        /* Create and start the read threads: */
        for (index = 0; index < n_read_threads; ++index) {
                read_thread_data[index].stop = 0;
                read_thread_data[index].status = 0;
                read_thread_data[index].epoll_set = epoll_set;
                if (pthread_create(&read_threads[index],
                                   NULL,
                                   read_thread_function,
                                   &read_thread_data[index]) != 0)
                        goto error;
        }

        if (pthread_create(&write_thread,
                           NULL,
                           write_thread_function,
                           &write_thread_data) != 0)
                goto error;

        /* Cancel all event pollers: */
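        /* Note: EPOLL_CTL_DISABLE was proposed alongside this test but, as far
           as we know, never shipped in mainline kernel headers, so on most
           systems the #else branch below (stop flag plus a short sleep) is the
           one that gets compiled: */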
#ifdef EPOLL_CTL_DISABLE
        for (index = 0; index < n_epoll_items; ++index) {
                pthread_mutex_lock(&epoll_items[index].mutex);
                ++epoll_items[index].stop;
                if (epoll_ctl(epoll_set,
                              EPOLL_CTL_DISABLE,
                              epoll_items[index].fd,
                              NULL) == 0)
                        delete_item(index);
                else if (errno != EBUSY) {
                        pthread_mutex_unlock(&epoll_items[index].mutex);
                        goto error;
                }
                /* EBUSY means events were being handled; allow the other
                   thread to delete the item. */
                pthread_mutex_unlock(&epoll_items[index].mutex);
        }
#else
        for (index = 0; index < n_epoll_items; ++index) {
                pthread_mutex_lock(&epoll_items[index].mutex);
                ++epoll_items[index].stop;
                pthread_mutex_unlock(&epoll_items[index].mutex);
                /* Wait in case a thread running read_thread_function is
                   currently executing code between epoll_wait and
                   pthread_mutex_lock with this item. Note that a longer delay
                   would make double-deletion less likely (at the expense of
                   performance), but there is no guarantee that any delay would
                   ever be sufficient. Note also that we delete all event
                   pollers at once for testing purposes, but in a real-world
                   environment we are likely to want to be able to cancel event
                   pollers at arbitrary times. Therefore we can't improve this
                   situation by just splitting this loop into two loops
                   (i.e. signal 'stop' for all items, sleep, and then delete all
                   items). We also can't fix the problem via EPOLL_CTL_DEL
                   because that command can't prevent the case where some other
                   thread is executing read_thread_function within the region
                   mentioned above: */
                usleep(1);
                pthread_mutex_lock(&epoll_items[index].mutex);
                if (!epoll_items[index].deleted)
                        delete_item(index);
                pthread_mutex_unlock(&epoll_items[index].mutex);
        }
#endif

        /* Shut down the read threads: */
        for (index = 0; index < n_read_threads; ++index)
                __sync_fetch_and_add(&read_thread_data[index].stop, 1);
        for (index = 0; index < n_read_threads; ++index) {
                if (pthread_join(read_threads[index], NULL) != 0)
                        goto error;
                if (read_thread_data[index].status)
                        goto error;
        }

        /* Shut down the write thread: */
        __sync_fetch_and_add(&write_thread_data.stop, 1);
        if ((pthread_join(write_thread, NULL) != 0) || write_thread_data.status)
                goto error;

        /* Check for final error conditions: */
        for (index = 0; index < n_epoll_items; ++index) {
                if (epoll_items[index].status != 0)
                        goto error;
                /* pthread_mutex_destroy returns a positive error number on
                   failure rather than -1: */
                if (pthread_mutex_destroy(&epoll_items[index].mutex) != 0)
                        goto error;
        }
        for (index = 0; index < n_epoll_items; ++index)
                if (epoll_items[index].deleted != 1) {
                        printf("Error: item data deleted %1d times.\n",
                               epoll_items[index].deleted);
                        goto error;
                }

        printf("[PASS]\n");
        return 0;

error:
        printf("[FAIL]\n");
        return errno;
}