thread.c

/*
 * Server-side thread management
 *
 * Copyright (C) 1998 Alexandre Julliard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include "config.h"
#include "wine/port.h"

#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <unistd.h>
#include <time.h>

#include "windef.h"
#include "winbase.h"

#include "file.h"
#include "handle.h"
#include "process.h"
#include "thread.h"
#include "request.h"
#include "user.h"

/* thread queues */

struct thread_wait
{
    struct thread_wait     *next;       /* next wait structure for this thread */
    struct thread          *thread;     /* owner thread */
    int                     count;      /* count of objects */
    int                     flags;      /* wait flags (SELECT_*) */
    void                   *cookie;     /* magic cookie to return to client */
    struct timeval          timeout;    /* absolute timeout */
    struct timeout_user    *user;       /* timeout user registered for this wait */
    struct wait_queue_entry queues[1];  /* one entry per object waited on */
};

/* asynchronous procedure calls */

struct thread_apc
{
    struct thread_apc *next;    /* queue linked list */
    struct thread_apc *prev;
    struct object     *owner;   /* object that queued this apc */
    void              *func;    /* function to call in client */
    enum apc_type      type;    /* type of apc function */
    int                nb_args; /* number of arguments */
    void              *arg1;    /* function arguments */
    void              *arg2;
    void              *arg3;
};

/* thread operations */

static void dump_thread( struct object *obj, int verbose );
static int thread_signaled( struct object *obj, struct thread *thread );
static void thread_poll_event( struct fd *fd, int event );
static void destroy_thread( struct object *obj );
static struct thread_apc *thread_dequeue_apc( struct thread *thread, int system_only );

static const struct object_ops thread_ops =
{
    sizeof(struct thread),      /* size */
    dump_thread,                /* dump */
    add_queue,                  /* add_queue */
    remove_queue,               /* remove_queue */
    thread_signaled,            /* signaled */
    no_satisfied,               /* satisfied */
    no_get_fd,                  /* get_fd */
    destroy_thread              /* destroy */
};

static const struct fd_ops thread_fd_ops =
{
    NULL,                       /* get_poll_events */
    thread_poll_event,          /* poll_event */
    no_flush,                   /* flush */
    no_get_file_info,           /* get_file_info */
    no_queue_async,             /* queue_async */
    no_cancel_async             /* cancel_async */
};

static struct thread *first_thread;     /* head of the global thread list */
static struct thread *booting_thread;   /* first thread ever created; the master socket stays locked until it is done booting */

/* initialize the structure for a newly allocated thread */
inline static void init_thread_structure( struct thread *thread )
{
    int i;

    thread->unix_pid        = -1;  /* not known yet */
    thread->unix_tid        = -1;  /* not known yet */
    thread->context         = NULL;
    thread->teb             = NULL;
    thread->mutex           = NULL;
    thread->debug_ctx       = NULL;
    thread->debug_event     = NULL;
    thread->queue           = NULL;
    thread->wait            = NULL;
    thread->system_apc.head = NULL;
    thread->system_apc.tail = NULL;
    thread->user_apc.head   = NULL;
    thread->user_apc.tail   = NULL;
    thread->error           = 0;
    thread->req_data        = NULL;
    thread->req_toread      = 0;
    thread->reply_data      = NULL;
    thread->reply_towrite   = 0;
    thread->request_fd      = NULL;
    thread->reply_fd        = NULL;
    thread->wait_fd         = NULL;
    thread->state           = RUNNING;
    thread->attached        = 0;
    thread->exit_code       = 0;
    thread->next            = NULL;
    thread->prev            = NULL;
    thread->priority        = THREAD_PRIORITY_NORMAL;
    thread->affinity        = 1;
    thread->suspend         = 0;
    thread->creation_time   = time(NULL);
    thread->exit_time       = 0;

    for (i = 0; i < MAX_INFLIGHT_FDS; i++)
        thread->inflight[i].server = thread->inflight[i].client = -1;
}

/* create a new thread */
struct thread *create_thread( int fd, struct process *process )
{
    struct thread *thread;

    if (!(thread = alloc_object( &thread_ops ))) return NULL;

    init_thread_structure( thread );

    thread->process = (struct process *)grab_object( process );
    if (!current) current = thread;

    if (!booting_thread)  /* first thread ever */
    {
        booting_thread = thread;
        lock_master_socket(1);
    }

    if ((thread->next = first_thread) != NULL) thread->next->prev = thread;
    first_thread = thread;

    if (!(thread->id = alloc_ptid( thread )))
    {
        release_object( thread );
        return NULL;
    }
    if (!(thread->request_fd = create_anonymous_fd( &thread_fd_ops, fd, &thread->obj )))
    {
        release_object( thread );
        return NULL;
    }

    thread->token = (struct token *) grab_object( process->token );

    set_fd_events( thread->request_fd, POLLIN );  /* start listening to events */
    add_process_thread( thread->process, thread );
    return thread;
}

/* handle a client event */
static void thread_poll_event( struct fd *fd, int event )
{
    struct thread *thread = get_fd_user( fd );
    assert( thread->obj.ops == &thread_ops );

    if (event & (POLLERR | POLLHUP)) kill_thread( thread, 0 );
    else if (event & POLLIN) read_request( thread );
    else if (event & POLLOUT) write_reply( thread );
}

/* cleanup everything that is no longer needed by a dead thread */
/* used by destroy_thread and kill_thread */
static void cleanup_thread( struct thread *thread )
{
    int i;
    struct thread_apc *apc;

    while ((apc = thread_dequeue_apc( thread, 0 ))) free( apc );
    if (thread->req_data) free( thread->req_data );
    if (thread->reply_data) free( thread->reply_data );
    if (thread->request_fd) release_object( thread->request_fd );
    if (thread->reply_fd) release_object( thread->reply_fd );
    if (thread->wait_fd) release_object( thread->wait_fd );
    free_msg_queue( thread );
    cleanup_clipboard_thread( thread );
    destroy_thread_windows( thread );
    for (i = 0; i < MAX_INFLIGHT_FDS; i++)
    {
        if (thread->inflight[i].client != -1)
        {
            close( thread->inflight[i].server );
            thread->inflight[i].client = thread->inflight[i].server = -1;
        }
    }
    thread->req_data   = NULL;
    thread->reply_data = NULL;
    thread->request_fd = NULL;
    thread->reply_fd   = NULL;
    thread->wait_fd    = NULL;

    if (thread == booting_thread)  /* killing booting thread */
    {
        booting_thread = NULL;
        lock_master_socket(0);
    }
}

/* destroy a thread when its refcount is 0 */
static void destroy_thread( struct object *obj )
{
    struct thread_apc *apc;
    struct thread *thread = (struct thread *)obj;
    assert( obj->ops == &thread_ops );

    assert( !thread->debug_ctx );  /* cannot still be debugging something */
    if (thread->next) thread->next->prev = thread->prev;
    if (thread->prev) thread->prev->next = thread->next;
    else first_thread = thread->next;
    while ((apc = thread_dequeue_apc( thread, 0 ))) free( apc );
    cleanup_thread( thread );
    release_object( thread->process );
    if (thread->id) free_ptid( thread->id );
    if (thread->token) release_object( thread->token );
}

/* dump a thread on stderr for debugging purposes */
static void dump_thread( struct object *obj, int verbose )
{
    struct thread *thread = (struct thread *)obj;
    assert( obj->ops == &thread_ops );

    fprintf( stderr, "Thread id=%04x unix pid=%d unix tid=%d teb=%p state=%d\n",
             thread->id, thread->unix_pid, thread->unix_tid, thread->teb, thread->state );
}

static int thread_signaled( struct object *obj, struct thread *thread )
{
    struct thread *mythread = (struct thread *)obj;
    return (mythread->state == TERMINATED);
}

/* get a thread pointer from a thread id (and increment the refcount) */
struct thread *get_thread_from_id( thread_id_t id )
{
    struct object *obj = get_ptid_entry( id );

    if (obj && obj->ops == &thread_ops) return (struct thread *)grab_object( obj );
    set_win32_error( ERROR_INVALID_THREAD_ID );
    return NULL;
}

/* get a thread from a handle (and increment the refcount) */
struct thread *get_thread_from_handle( obj_handle_t handle, unsigned int access )
{
    return (struct thread *)get_handle_obj( current->process, handle,
                                            access, &thread_ops );
}

/* find a thread from a Unix pid */
struct thread *get_thread_from_pid( int pid )
{
    struct thread *t;

    for (t = first_thread; t; t = t->next) if (t->unix_tid == pid) return t;
    for (t = first_thread; t; t = t->next) if (t->unix_pid == pid) return t;
    return NULL;
}

/* set all information about a thread */
static void set_thread_info( struct thread *thread,
                             const struct set_thread_info_request *req )
{
    if (req->mask & SET_THREAD_INFO_PRIORITY)
        thread->priority = req->priority;
    if (req->mask & SET_THREAD_INFO_AFFINITY)
    {
        if (req->affinity != 1) set_error( STATUS_INVALID_PARAMETER );
        else thread->affinity = req->affinity;
    }
}

/* stop a thread (at the Unix level) */
void stop_thread( struct thread *thread )
{
    /* can't stop a thread while initialisation is in progress */
    if (is_process_init_done(thread->process)) send_thread_signal( thread, SIGUSR1 );
}
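
/* Note: suspend counts are kept both per-thread and per-process; a thread is
 * only actually stopped when the combined count goes from zero to non-zero,
 * and only woken again when it drops back to zero.  Both helpers below return
 * the previous per-thread suspend count, matching the convention of the Win32
 * SuspendThread/ResumeThread calls. */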
/* suspend a thread */
static int suspend_thread( struct thread *thread )
{
    int old_count = thread->suspend;
    if (thread->suspend < MAXIMUM_SUSPEND_COUNT)
    {
        if (!(thread->process->suspend + thread->suspend++)) stop_thread( thread );
    }
    else set_error( STATUS_SUSPEND_COUNT_EXCEEDED );
    return old_count;
}

/* resume a thread */
static int resume_thread( struct thread *thread )
{
    int old_count = thread->suspend;
    if (thread->suspend > 0)
    {
        if (!(--thread->suspend + thread->process->suspend)) wake_thread( thread );
    }
    return old_count;
}

/* add a thread to an object wait queue; return 1 if OK, 0 on error */
int add_queue( struct object *obj, struct wait_queue_entry *entry )
{
    grab_object( obj );
    entry->obj  = obj;
    entry->prev = obj->tail;
    entry->next = NULL;
    if (obj->tail) obj->tail->next = entry;
    else obj->head = entry;
    obj->tail = entry;
    return 1;
}

/* remove a thread from an object wait queue */
void remove_queue( struct object *obj, struct wait_queue_entry *entry )
{
    if (entry->next) entry->next->prev = entry->prev;
    else obj->tail = entry->prev;
    if (entry->prev) entry->prev->next = entry->next;
    else obj->head = entry->next;
    release_object( obj );
}

/* finish waiting */
static void end_wait( struct thread *thread )
{
    struct thread_wait *wait = thread->wait;
    struct wait_queue_entry *entry;
    int i;

    assert( wait );
    for (i = 0, entry = wait->queues; i < wait->count; i++, entry++)
        entry->obj->ops->remove_queue( entry->obj, entry );
    if (wait->user) remove_timeout_user( wait->user );
    thread->wait = wait->next;
    free( wait );
}

/* build the thread wait structure */
static int wait_on( int count, struct object *objects[], int flags, const abs_time_t *timeout )
{
    struct thread_wait *wait;
    struct wait_queue_entry *entry;
    int i;

    if (!(wait = mem_alloc( sizeof(*wait) + (count-1) * sizeof(*entry) ))) return 0;
    wait->next   = current->wait;
    wait->thread = current;
    wait->count  = count;
    wait->flags  = flags;
    wait->user   = NULL;
    current->wait = wait;
    if (flags & SELECT_TIMEOUT)
    {
        wait->timeout.tv_sec  = timeout->sec;
        wait->timeout.tv_usec = timeout->usec;
    }

    for (i = 0, entry = wait->queues; i < count; i++, entry++)
    {
        struct object *obj = objects[i];
        entry->thread = current;
        if (!obj->ops->add_queue( obj, entry ))
        {
            wait->count = i;
            end_wait( current );
            return 0;
        }
    }
    return 1;
}
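
/* Return value of check_wait(): -1 means the wait is not satisfied yet;
 * otherwise it is the status to report back to the client: the index of the
 * signaled object (possibly offset by STATUS_ABANDONED_WAIT_0), 0 or
 * STATUS_ABANDONED_WAIT_0 for a SELECT_ALL wait, STATUS_USER_APC when an APC
 * is pending on an interruptible/alertable wait, or STATUS_TIMEOUT. */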
/* check if the thread waiting condition is satisfied */
static int check_wait( struct thread *thread )
{
    int i, signaled;
    struct thread_wait *wait = thread->wait;
    struct wait_queue_entry *entry = wait->queues;

    /* Suspended threads may not acquire locks */
    if (thread->process->suspend + thread->suspend > 0) return -1;

    assert( wait );
    if (wait->flags & SELECT_ALL)
    {
        int not_ok = 0;
        /* Note: we must check them all anyway, as some objects may
         * want to do something when signaled, even if others are not */
        for (i = 0, entry = wait->queues; i < wait->count; i++, entry++)
            not_ok |= !entry->obj->ops->signaled( entry->obj, thread );
        if (not_ok) goto other_checks;
        /* Wait satisfied: tell it to all objects */
        signaled = 0;
        for (i = 0, entry = wait->queues; i < wait->count; i++, entry++)
            if (entry->obj->ops->satisfied( entry->obj, thread ))
                signaled = STATUS_ABANDONED_WAIT_0;
        return signaled;
    }
    else
    {
        for (i = 0, entry = wait->queues; i < wait->count; i++, entry++)
        {
            if (!entry->obj->ops->signaled( entry->obj, thread )) continue;
            /* Wait satisfied: tell it to the object */
            signaled = i;
            if (entry->obj->ops->satisfied( entry->obj, thread ))
                signaled = i + STATUS_ABANDONED_WAIT_0;
            return signaled;
        }
    }

other_checks:
    if ((wait->flags & SELECT_INTERRUPTIBLE) && thread->system_apc.head) return STATUS_USER_APC;
    if ((wait->flags & SELECT_ALERTABLE) && thread->user_apc.head) return STATUS_USER_APC;
    if (wait->flags & SELECT_TIMEOUT)
    {
        struct timeval now;
        gettimeofday( &now, NULL );
        if (!time_before( &now, &wait->timeout )) return STATUS_TIMEOUT;
    }
    return -1;
}
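
/* Wakeup protocol: once a wait is satisfied, the server writes a small
 * wake_up_reply structure (the cookie supplied by the client plus the
 * signaled status) on the thread's dedicated wait fd; the cookie lets the
 * client match the wakeup to its pending wait.  A short write is treated as a
 * fatal protocol error, and EPIPE simply means the thread already died. */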
/* send the wakeup signal to a thread */
static int send_thread_wakeup( struct thread *thread, void *cookie, int signaled )
{
    struct wake_up_reply reply;
    int ret;

    reply.cookie   = cookie;
    reply.signaled = signaled;
    if ((ret = write( get_unix_fd( thread->wait_fd ), &reply, sizeof(reply) )) == sizeof(reply))
        return 0;
    if (ret >= 0)
        fatal_protocol_error( thread, "partial wakeup write %d\n", ret );
    else if (errno == EPIPE)
        kill_thread( thread, 0 );  /* normal death */
    else
        fatal_protocol_perror( thread, "write" );
    return -1;
}

/* attempt to wake up a thread */
/* return >0 if OK, 0 if the wait condition is still not satisfied */
int wake_thread( struct thread *thread )
{
    int signaled, count;
    void *cookie;

    for (count = 0; thread->wait; count++)
    {
        if ((signaled = check_wait( thread )) == -1) break;

        cookie = thread->wait->cookie;
        if (debug_level) fprintf( stderr, "%04x: *wakeup* signaled=%d cookie=%p\n",
                                  thread->id, signaled, cookie );
        end_wait( thread );
        if (send_thread_wakeup( thread, cookie, signaled ) == -1)  /* error */
            break;
    }
    return count;
}

/* thread wait timeout */
static void thread_timeout( void *ptr )
{
    struct thread_wait *wait = ptr;
    struct thread *thread = wait->thread;
    void *cookie = wait->cookie;

    wait->user = NULL;
    if (thread->wait != wait) return;  /* not the top-level wait, ignore it */
    if (thread->suspend + thread->process->suspend > 0) return;  /* suspended, ignore it */

    if (debug_level) fprintf( stderr, "%04x: *wakeup* signaled=%d cookie=%p\n",
                              thread->id, STATUS_TIMEOUT, cookie );
    end_wait( thread );
    if (send_thread_wakeup( thread, cookie, STATUS_TIMEOUT ) == -1) return;
    /* check if other objects have become signaled in the meantime */
    wake_thread( thread );
}

/* select on a list of handles */
static void select_on( int count, void *cookie, const obj_handle_t *handles,
                       int flags, const abs_time_t *timeout )
{
    int ret, i;
    struct object *objects[MAXIMUM_WAIT_OBJECTS];

    if ((count < 0) || (count > MAXIMUM_WAIT_OBJECTS))
    {
        set_error( STATUS_INVALID_PARAMETER );
        return;
    }
    for (i = 0; i < count; i++)
    {
        if (!(objects[i] = get_handle_obj( current->process, handles[i], SYNCHRONIZE, NULL )))
            break;
    }

    if (i < count) goto done;
    if (!wait_on( count, objects, flags, timeout )) goto done;

    if ((ret = check_wait( current )) != -1)
    {
        /* condition is already satisfied */
        end_wait( current );
        set_error( ret );
        goto done;
    }

    /* now we need to wait */
    if (flags & SELECT_TIMEOUT)
    {
        if (!(current->wait->user = add_timeout_user( &current->wait->timeout,
                                                      thread_timeout, current->wait )))
        {
            end_wait( current );
            goto done;
        }
    }
    current->wait->cookie = cookie;
    set_error( STATUS_PENDING );

done:
    while (--i >= 0) release_object( objects[i] );
}

/* attempt to wake threads sleeping on the object wait queue */
void wake_up( struct object *obj, int max )
{
    struct wait_queue_entry *entry = obj->head;

    while (entry)
    {
        struct thread *thread = entry->thread;
        entry = entry->next;
        if (wake_thread( thread ))
        {
            if (max && !--max) break;
        }
    }
}
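
/* Each thread has two APC queues: system_apc, checked whenever the wait is
 * SELECT_INTERRUPTIBLE, and user_apc, checked only for SELECT_ALERTABLE
 * waits (see check_wait above).  Queueing an APC onto an empty queue wakes
 * the thread so that a pending select can return STATUS_USER_APC. */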
/* queue an async procedure call */
int thread_queue_apc( struct thread *thread, struct object *owner, void *func,
                      enum apc_type type, int system, void *arg1, void *arg2, void *arg3 )
{
    struct thread_apc *apc;
    struct apc_queue *queue = system ? &thread->system_apc : &thread->user_apc;

    /* cancel a possible previous APC with the same owner */
    if (owner) thread_cancel_apc( thread, owner, system );
    if (thread->state == TERMINATED) return 0;

    if (!(apc = mem_alloc( sizeof(*apc) ))) return 0;
    apc->prev  = queue->tail;
    apc->next  = NULL;
    apc->owner = owner;
    apc->func  = func;
    apc->type  = type;
    apc->arg1  = arg1;
    apc->arg2  = arg2;
    apc->arg3  = arg3;
    queue->tail = apc;
    if (!apc->prev)  /* first one */
    {
        queue->head = apc;
        wake_thread( thread );
    }
    else apc->prev->next = apc;
    return 1;
}

/* cancel the async procedure call owned by a specific object */
void thread_cancel_apc( struct thread *thread, struct object *owner, int system )
{
    struct thread_apc *apc;
    struct apc_queue *queue = system ? &thread->system_apc : &thread->user_apc;
    for (apc = queue->head; apc; apc = apc->next)
    {
        if (apc->owner != owner) continue;
        if (apc->next) apc->next->prev = apc->prev;
        else queue->tail = apc->prev;
        if (apc->prev) apc->prev->next = apc->next;
        else queue->head = apc->next;
        free( apc );
        return;
    }
}

/* remove the head apc from the queue; the returned pointer must be freed by the caller */
static struct thread_apc *thread_dequeue_apc( struct thread *thread, int system_only )
{
    struct thread_apc *apc;
    struct apc_queue *queue = &thread->system_apc;

    if (!queue->head && !system_only) queue = &thread->user_apc;
    if ((apc = queue->head))
    {
        if (apc->next) apc->next->prev = NULL;
        else queue->tail = NULL;
        queue->head = apc->next;
    }
    return apc;
}
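
/* "Inflight" fds are descriptors passed from the client to the server but not
 * yet consumed by a request.  Each thread keeps up to MAX_INFLIGHT_FDS
 * (client fd, server-side copy) pairs; thread_get_inflight_fd() looks the
 * client fd up and, if it has not arrived yet, asks receive_fd() to pull it
 * from the socket buffer. */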
/* add an fd to the inflight list */
/* return list index, or -1 on error */
int thread_add_inflight_fd( struct thread *thread, int client, int server )
{
    int i;

    if (server == -1) return -1;
    if (client == -1)
    {
        close( server );
        return -1;
    }

    /* first check if we already have an entry for this fd */
    for (i = 0; i < MAX_INFLIGHT_FDS; i++)
        if (thread->inflight[i].client == client)
        {
            close( thread->inflight[i].server );
            thread->inflight[i].server = server;
            return i;
        }

    /* now find a free spot to store it */
    for (i = 0; i < MAX_INFLIGHT_FDS; i++)
        if (thread->inflight[i].client == -1)
        {
            thread->inflight[i].client = client;
            thread->inflight[i].server = server;
            return i;
        }
    return -1;
}

/* get an inflight fd and purge it from the list */
/* the fd must be closed when no longer used */
int thread_get_inflight_fd( struct thread *thread, int client )
{
    int i, ret;

    if (client == -1) return -1;

    do
    {
        for (i = 0; i < MAX_INFLIGHT_FDS; i++)
        {
            if (thread->inflight[i].client == client)
            {
                ret = thread->inflight[i].server;
                thread->inflight[i].server = thread->inflight[i].client = -1;
                return ret;
            }
        }
    } while (!receive_fd( thread->process ));  /* in case it is still in the socket buffer */
    return -1;
}

/* retrieve an LDT selector entry */
static void get_selector_entry( struct thread *thread, int entry,
                                unsigned int *base, unsigned int *limit,
                                unsigned char *flags )
{
    if (!thread->process->ldt_copy)
    {
        set_error( STATUS_ACCESS_DENIED );
        return;
    }
    if (entry >= 8192)
    {
        set_error( STATUS_INVALID_PARAMETER );  /* FIXME */
        return;
    }
    if (suspend_for_ptrace( thread ))
    {
        unsigned char flags_buf[4];
        int *addr = (int *)thread->process->ldt_copy + entry;
        if (read_thread_int( thread, addr, base ) == -1) goto done;
        if (read_thread_int( thread, addr + 8192, limit ) == -1) goto done;
        addr = (int *)thread->process->ldt_copy + 2*8192 + (entry >> 2);
        if (read_thread_int( thread, addr, (int *)flags_buf ) == -1) goto done;
        *flags = flags_buf[entry & 3];
    done:
        resume_after_ptrace( thread );
    }
}

/* kill a thread on the spot */
void kill_thread( struct thread *thread, int violent_death )
{
    if (thread->state == TERMINATED) return;  /* already killed */
    thread->state = TERMINATED;
    thread->exit_time = time(NULL);
    if (current == thread) current = NULL;
    if (debug_level)
        fprintf( stderr, "%04x: *killed* exit_code=%d\n",
                 thread->id, thread->exit_code );
    if (thread->wait)
    {
        while (thread->wait) end_wait( thread );
        send_thread_wakeup( thread, NULL, STATUS_PENDING );
        /* if it is waiting on the socket, we don't need to send a SIGTERM */
        violent_death = 0;
    }
    kill_console_processes( thread, 0 );
    debug_exit_thread( thread );
    abandon_mutexes( thread );
    remove_process_thread( thread->process, thread );
    wake_up( &thread->obj, 0 );
    detach_thread( thread, violent_death ? SIGTERM : 0 );
    cleanup_thread( thread );
    release_object( thread );
}

/* take a snapshot of currently running threads */
struct thread_snapshot *thread_snap( int *count )
{
    struct thread_snapshot *snapshot, *ptr;
    struct thread *thread;
    int total = 0;

    for (thread = first_thread; thread; thread = thread->next)
        if (thread->state != TERMINATED) total++;
    if (!total || !(snapshot = mem_alloc( sizeof(*snapshot) * total ))) return NULL;
    ptr = snapshot;
    for (thread = first_thread; thread; thread = thread->next)
    {
        if (thread->state == TERMINATED) continue;
        ptr->thread   = thread;
        ptr->count    = thread->obj.refcount;
        ptr->priority = thread->priority;
        grab_object( thread );
        ptr++;
    }
    *count = total;
    return snapshot;
}

/* signal that we are finished booting on the client side */
DECL_HANDLER(boot_done)
{
    debug_level = max( debug_level, req->debug_level );
    if (current == booting_thread)
    {
        booting_thread = (struct thread *)~0UL;  /* make sure it doesn't match other threads */
        lock_master_socket(0);  /* allow other clients now */
    }
}

/* create a new thread */
DECL_HANDLER(new_thread)
{
    struct thread *thread;
    int request_fd = thread_get_inflight_fd( current, req->request_fd );

    if (request_fd == -1 || fcntl( request_fd, F_SETFL, O_NONBLOCK ) == -1)
    {
        if (request_fd != -1) close( request_fd );
        set_error( STATUS_INVALID_HANDLE );
        return;
    }

    if ((thread = create_thread( request_fd, current->process )))
    {
        if (req->suspend) thread->suspend++;
        reply->tid = get_thread_id( thread );
        if ((reply->handle = alloc_handle( current->process, thread,
                                           THREAD_ALL_ACCESS, req->inherit )))
        {
            /* thread object will be released when the thread gets killed */
            return;
        }
        kill_thread( thread, 1 );
    }
}

/* initialize a new thread */
DECL_HANDLER(init_thread)
{
    int reply_fd = thread_get_inflight_fd( current, req->reply_fd );
    int wait_fd  = thread_get_inflight_fd( current, req->wait_fd );

    if (current->unix_pid != -1)
    {
        fatal_protocol_error( current, "init_thread: already running\n" );
        goto error;
    }
    if (reply_fd == -1 || fcntl( reply_fd, F_SETFL, O_NONBLOCK ) == -1)
    {
        fatal_protocol_error( current, "bad reply fd\n" );
        goto error;
    }
    if (wait_fd == -1)
    {
        fatal_protocol_error( current, "bad wait fd\n" );
        goto error;
    }
    if (!(current->reply_fd = create_anonymous_fd( &thread_fd_ops, reply_fd, &current->obj )))
    {
        reply_fd = -1;
        fatal_protocol_error( current, "could not allocate reply fd\n" );
        goto error;
    }
    if (!(current->wait_fd = create_anonymous_fd( &thread_fd_ops, wait_fd, &current->obj )))
        return;

    current->unix_pid = req->unix_pid;
    current->unix_tid = req->unix_tid;
    current->teb      = req->teb;

    if (current->suspend + current->process->suspend > 0) stop_thread( current );
    if (current->process->running_threads > 1)
        generate_debug_event( current, CREATE_THREAD_DEBUG_EVENT, req->entry );

    reply->pid     = get_process_id( current->process );
    reply->tid     = get_thread_id( current );
    reply->boot    = (current == booting_thread);
    reply->version = SERVER_PROTOCOL_VERSION;
    return;

 error:
    if (reply_fd != -1) close( reply_fd );
    if (wait_fd != -1) close( wait_fd );
}

/* terminate a thread */
DECL_HANDLER(terminate_thread)
{
    struct thread *thread;

    reply->self = 0;
    reply->last = 0;
    if ((thread = get_thread_from_handle( req->handle, THREAD_TERMINATE )))
    {
        thread->exit_code = req->exit_code;
        if (thread != current) kill_thread( thread, 1 );
        else
        {
            reply->self = 1;
            reply->last = (thread->process->running_threads == 1);
        }
        release_object( thread );
    }
}

/* open a handle to a thread */
DECL_HANDLER(open_thread)
{
    struct thread *thread = get_thread_from_id( req->tid );

    reply->handle = 0;
    if (thread)
    {
        reply->handle = alloc_handle( current->process, thread, req->access, req->inherit );
        release_object( thread );
    }
}

/* fetch information about a thread */
DECL_HANDLER(get_thread_info)
{
    struct thread *thread;
    obj_handle_t handle = req->handle;

    if (!handle) thread = get_thread_from_id( req->tid_in );
    else thread = get_thread_from_handle( req->handle, THREAD_QUERY_INFORMATION );

    if (thread)
    {
        reply->pid           = get_process_id( thread->process );
        reply->tid           = get_thread_id( thread );
        reply->teb           = thread->teb;
        reply->exit_code     = (thread->state == TERMINATED) ? thread->exit_code : STILL_ACTIVE;
        reply->priority      = thread->priority;
        reply->affinity      = thread->affinity;
        reply->creation_time = thread->creation_time;
        reply->exit_time     = thread->exit_time;
        release_object( thread );
    }
}

/* set information about a thread */
DECL_HANDLER(set_thread_info)
{
    struct thread *thread;

    if ((thread = get_thread_from_handle( req->handle, THREAD_SET_INFORMATION )))
    {
        set_thread_info( thread, req );
        release_object( thread );
    }
}

/* suspend a thread */
DECL_HANDLER(suspend_thread)
{
    struct thread *thread;

    if ((thread = get_thread_from_handle( req->handle, THREAD_SUSPEND_RESUME )))
    {
        if (thread->state == TERMINATED) set_error( STATUS_ACCESS_DENIED );
        else reply->count = suspend_thread( thread );
        release_object( thread );
    }
}

/* resume a thread */
DECL_HANDLER(resume_thread)
{
    struct thread *thread;

    if ((thread = get_thread_from_handle( req->handle, THREAD_SUSPEND_RESUME )))
    {
        if (thread->state == TERMINATED) set_error( STATUS_ACCESS_DENIED );
        else reply->count = resume_thread( thread );
        release_object( thread );
    }
}

/* select on a handle list */
DECL_HANDLER(select)
{
    int count = get_req_data_size() / sizeof(int);
    select_on( count, req->cookie, get_req_data(), req->flags, &req->timeout );
}

/* queue an APC for a thread */
DECL_HANDLER(queue_apc)
{
    struct thread *thread;

    if ((thread = get_thread_from_handle( req->handle, THREAD_SET_CONTEXT )))
    {
        thread_queue_apc( thread, NULL, req->func, APC_USER, !req->user,
                          req->arg1, req->arg2, req->arg3 );
        release_object( thread );
    }
}

/* get next APC to call */
DECL_HANDLER(get_apc)
{
    struct thread_apc *apc;

    for (;;)
    {
        if (!(apc = thread_dequeue_apc( current, !req->alertable )))
        {
            /* no more APCs */
            reply->func = NULL;
            reply->type = APC_NONE;
            return;
        }
        /* Optimization: ignore APCs that have a NULL func; they are only used
         * to wake up a thread, but since we got here the thread woke up already.
         * Exception: for APC_ASYNC_IO, func == NULL is legal.
         */
        if (apc->func || apc->type == APC_ASYNC_IO) break;
        free( apc );
    }
    reply->func = apc->func;
    reply->type = apc->type;
    reply->arg1 = apc->arg1;
    reply->arg2 = apc->arg2;
    reply->arg3 = apc->arg3;
    free( apc );
}

/* fetch a selector entry for a thread */
DECL_HANDLER(get_selector_entry)
{
    struct thread *thread;

    if ((thread = get_thread_from_handle( req->handle, THREAD_QUERY_INFORMATION )))
    {
        get_selector_entry( thread, req->entry, &reply->base, &reply->limit, &reply->flags );
        release_object( thread );
    }
}