device.c 38 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511661167116811691170117111721173117411751176
  1. /*
  2. * Server-side device support
  3. *
  4. * Copyright (C) 2007 Alexandre Julliard
  5. *
  6. * This library is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU Lesser General Public
  8. * License as published by the Free Software Foundation; either
  9. * version 2.1 of the License, or (at your option) any later version.
  10. *
  11. * This library is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14. * Lesser General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU Lesser General Public
  17. * License along with this library; if not, write to the Free Software
  18. * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
  19. */
  20. #include "config.h"
  21. #include <assert.h>
  22. #include <fcntl.h>
  23. #include <stdio.h>
  24. #include <stdlib.h>
  25. #include <stdarg.h>
  26. #include "ntstatus.h"
  27. #define WIN32_NO_STATUS
  28. #include "windef.h"
  29. #include "winternl.h"
  30. #include "ddk/wdm.h"
  31. #include "wine/rbtree.h"
  32. #include "object.h"
  33. #include "file.h"
  34. #include "handle.h"
  35. #include "request.h"
  36. #include "process.h"
  37. /* IRP object */
/* a single IRP queued to a client-side driver; refcounted server object */
struct irp_call
{
    struct object          obj;           /* object header */
    struct list            dev_entry;     /* entry in device queue */
    struct list            mgr_entry;     /* entry in manager queue */
    struct device_file    *file;          /* file containing this irp (NULL once completed) */
    struct thread         *thread;        /* thread that queued the irp */
    struct async          *async;         /* pending async op */
    irp_params_t           params;        /* irp parameters */
    struct iosb           *iosb;          /* I/O status block */
    int                    canceled;      /* the call was canceled */
    client_ptr_t           user_ptr;      /* client side pointer */
};
static void irp_call_dump( struct object *obj, int verbose );
static void irp_call_destroy( struct object *obj );

/* object vtable for IRP calls; IRPs are internal objects, never waited on
 * or opened by name, so most entries are the no-op defaults */
static const struct object_ops irp_call_ops =
{
    sizeof(struct irp_call),          /* size */
    &no_type,                         /* type */
    irp_call_dump,                    /* dump */
    no_add_queue,                     /* add_queue */
    NULL,                             /* remove_queue */
    NULL,                             /* signaled */
    NULL,                             /* satisfied */
    no_signal,                        /* signal */
    no_get_fd,                        /* get_fd */
    default_map_access,               /* map_access */
    default_get_sd,                   /* get_sd */
    default_set_sd,                   /* set_sd */
    no_get_full_name,                 /* get_full_name */
    no_lookup_name,                   /* lookup_name */
    no_link_name,                     /* link_name */
    NULL,                             /* unlink_name */
    no_open_file,                     /* open_file */
    no_kernel_obj_list,               /* get_kernel_obj_list */
    no_get_fast_sync,                 /* get_fast_sync */
    no_close_handle,                  /* close_handle */
    irp_call_destroy                  /* destroy */
};
/* device manager (a list of devices managed by the same client process) */
struct device_manager
{
    struct object          obj;            /* object header */
    struct list            devices;        /* list of devices */
    struct list            requests;       /* list of pending irps across all devices */
    struct irp_call       *current_call;   /* call currently executed on client side */
    struct wine_rb_tree    kernel_objects; /* map of objects that have client side pointer associated */
    struct fast_sync      *fast_sync;      /* fast synchronization object, signaled while requests is non-empty */
};
static void device_manager_dump( struct object *obj, int verbose );
static int device_manager_signaled( struct object *obj, struct wait_queue_entry *entry );
static struct fast_sync *device_manager_get_fast_sync( struct object *obj );
static void device_manager_destroy( struct object *obj );

/* object vtable for device managers; waitable — signaled while there are
 * pending requests for the client to pick up */
static const struct object_ops device_manager_ops =
{
    sizeof(struct device_manager),    /* size */
    &no_type,                         /* type */
    device_manager_dump,              /* dump */
    add_queue,                        /* add_queue */
    remove_queue,                     /* remove_queue */
    device_manager_signaled,          /* signaled */
    no_satisfied,                     /* satisfied */
    no_signal,                        /* signal */
    no_get_fd,                        /* get_fd */
    default_map_access,               /* map_access */
    default_get_sd,                   /* get_sd */
    default_set_sd,                   /* set_sd */
    no_get_full_name,                 /* get_full_name */
    no_lookup_name,                   /* lookup_name */
    no_link_name,                     /* link_name */
    NULL,                             /* unlink_name */
    no_open_file,                     /* open_file */
    no_kernel_obj_list,               /* get_kernel_obj_list */
    device_manager_get_fast_sync,     /* get_fast_sync */
    no_close_handle,                  /* close_handle */
    device_manager_destroy            /* destroy */
};
/* device (a single device object) */

/* NT object type descriptor for Device objects (name is not NUL-terminated) */
static const WCHAR device_name[] = {'D','e','v','i','c','e'};

struct type_descr device_type =
{
    { device_name, sizeof(device_name) },   /* name */
    FILE_ALL_ACCESS,                        /* valid_access */
    {                                       /* mapping of generic to specific access rights */
        FILE_GENERIC_READ,
        FILE_GENERIC_WRITE,
        FILE_GENERIC_EXECUTE,
        FILE_ALL_ACCESS
    },
};
/* a single named device object, created on behalf of a device manager
 * (or standing alone for plain Unix devices) */
struct device
{
    struct object          obj;           /* object header */
    struct device_manager *manager;       /* manager for this device (or NULL if deleted) */
    char                  *unix_path;     /* path to unix device if any */
    struct list            kernel_object; /* list of kernel object pointers */
    struct list            entry;         /* entry in device manager list */
    struct list            files;         /* list of open files */
};
static void device_dump( struct object *obj, int verbose );
static void device_destroy( struct object *obj );
static struct object *device_open_file( struct object *obj, unsigned int access,
                                        unsigned int sharing, unsigned int options );
static struct list *device_get_kernel_obj_list( struct object *obj );

/* object vtable for devices; devices live in the object namespace
 * (directory_link_name) and opening one yields a device_file */
static const struct object_ops device_ops =
{
    sizeof(struct device),            /* size */
    &device_type,                     /* type */
    device_dump,                      /* dump */
    no_add_queue,                     /* add_queue */
    NULL,                             /* remove_queue */
    NULL,                             /* signaled */
    no_satisfied,                     /* satisfied */
    no_signal,                        /* signal */
    no_get_fd,                        /* get_fd */
    default_map_access,               /* map_access */
    default_get_sd,                   /* get_sd */
    default_set_sd,                   /* set_sd */
    default_get_full_name,            /* get_full_name */
    no_lookup_name,                   /* lookup_name */
    directory_link_name,              /* link_name */
    default_unlink_name,              /* unlink_name */
    device_open_file,                 /* open_file */
    device_get_kernel_obj_list,       /* get_kernel_obj_list */
    no_get_fast_sync,                 /* get_fast_sync */
    no_close_handle,                  /* close_handle */
    device_destroy                    /* destroy */
};
/* device file (an open file handle to a device) */
struct device_file
{
    struct object          obj;           /* object header */
    struct device         *device;        /* device for this file */
    struct fd             *fd;            /* file descriptor for irp */
    struct list            kernel_object; /* list of kernel object pointers */
    int                    closed;        /* closed file flag (IRP_CALL_CLOSE already queued) */
    struct list            entry;         /* entry in device list */
    struct list            requests;      /* list of pending irp requests */
};
static void device_file_dump( struct object *obj, int verbose );
static struct fd *device_file_get_fd( struct object *obj );
static WCHAR *device_file_get_full_name( struct object *obj, data_size_t *len );
static struct list *device_file_get_kernel_obj_list( struct object *obj );
static int device_file_close_handle( struct object *obj, struct process *process, obj_handle_t handle );
static void device_file_destroy( struct object *obj );
static enum server_fd_type device_file_get_fd_type( struct fd *fd );
static void device_file_read( struct fd *fd, struct async *async, file_pos_t pos );
static void device_file_write( struct fd *fd, struct async *async, file_pos_t pos );
static void device_file_flush( struct fd *fd, struct async *async );
static void device_file_ioctl( struct fd *fd, ioctl_code_t code, struct async *async );
static void device_file_cancel_async( struct fd *fd, struct async *async );
static void device_file_get_volume_info( struct fd *fd, struct async *async, unsigned int info_class );

/* object vtable for device files; behaves like a regular file object but
 * forwards I/O to the driver as IRPs, and needs close_handle to queue
 * IRP_CALL_CLOSE on last handle close */
static const struct object_ops device_file_ops =
{
    sizeof(struct device_file),       /* size */
    &file_type,                       /* type */
    device_file_dump,                 /* dump */
    add_queue,                        /* add_queue */
    remove_queue,                     /* remove_queue */
    default_fd_signaled,              /* signaled */
    no_satisfied,                     /* satisfied */
    no_signal,                        /* signal */
    device_file_get_fd,               /* get_fd */
    default_map_access,               /* map_access */
    default_get_sd,                   /* get_sd */
    default_set_sd,                   /* set_sd */
    device_file_get_full_name,        /* get_full_name */
    no_lookup_name,                   /* lookup_name */
    no_link_name,                     /* link_name */
    NULL,                             /* unlink_name */
    no_open_file,                     /* open_file */
    device_file_get_kernel_obj_list,  /* get_kernel_obj_list */
    default_fd_get_fast_sync,         /* get_fast_sync */
    device_file_close_handle,         /* close_handle */
    device_file_destroy               /* destroy */
};
/* fd vtable for device files; every I/O entry point turns the request
 * into an IRP queued to the device manager */
static const struct fd_ops device_file_fd_ops =
{
    default_fd_get_poll_events,       /* get_poll_events */
    default_poll_event,               /* poll_event */
    device_file_get_fd_type,          /* get_fd_type */
    device_file_read,                 /* read */
    device_file_write,                /* write */
    device_file_flush,                /* flush */
    default_fd_get_file_info,         /* get_file_info */
    device_file_get_volume_info,      /* get_volume_info */
    device_file_ioctl,                /* ioctl */
    device_file_cancel_async,         /* cancel_async */
    default_fd_queue_async,           /* queue_async */
    default_fd_reselect_async,        /* reselect_async */
};
  229. struct list *no_kernel_obj_list( struct object *obj )
  230. {
  231. return NULL;
  232. }
/* association between a server object and a client-side kernel object
 * pointer, per device manager */
struct kernel_object
{
    struct device_manager *manager;     /* manager this mapping belongs to */
    client_ptr_t           user_ptr;    /* client-side pointer value (rb-tree key) */
    struct object         *object;      /* server object */
    int                    owned;       /* do we hold a reference on object? */
    struct list            list_entry;  /* entry in the object's kernel_obj_list */
    struct wine_rb_entry   rb_entry;    /* entry in manager->kernel_objects, keyed by user_ptr */
};
  242. static int compare_kernel_object( const void *k, const struct wine_rb_entry *entry )
  243. {
  244. struct kernel_object *ptr = WINE_RB_ENTRY_VALUE( entry, struct kernel_object, rb_entry );
  245. return memcmp( k, &ptr->user_ptr, sizeof(client_ptr_t) );
  246. }
/* find the kernel object mapping of a server object for the given manager;
 * returns NULL if the object keeps no list or has no mapping for manager */
static struct kernel_object *kernel_object_from_obj( struct device_manager *manager, struct object *obj )
{
    struct kernel_object *kernel_object;
    struct list *list;

    if (!(list = obj->ops->get_kernel_obj_list( obj ))) return NULL;
    LIST_FOR_EACH_ENTRY( kernel_object, list, struct kernel_object, list_entry )
    {
        /* one object may be mapped in several managers; pick ours */
        if (kernel_object->manager != manager) continue;
        return kernel_object;
    }
    return NULL;
}
  259. static client_ptr_t get_kernel_object_ptr( struct device_manager *manager, struct object *obj )
  260. {
  261. struct kernel_object *kernel_object = kernel_object_from_obj( manager, obj );
  262. return kernel_object ? kernel_object->user_ptr : 0;
  263. }
/* associate a client-side pointer with an object for the given manager;
 * returns NULL if the object keeps no list, on allocation failure, or if
 * user_ptr is already mapped in this manager */
static struct kernel_object *set_kernel_object( struct device_manager *manager, struct object *obj, client_ptr_t user_ptr )
{
    struct kernel_object *kernel_object;
    struct list *list;

    if (!(list = obj->ops->get_kernel_obj_list( obj ))) return NULL;

    if (!(kernel_object = malloc( sizeof(*kernel_object) ))) return NULL;
    kernel_object->manager = manager;
    kernel_object->user_ptr = user_ptr;
    kernel_object->object = obj;
    kernel_object->owned = 0;   /* no reference held yet; see grab_kernel_object */

    if (wine_rb_put( &manager->kernel_objects, &user_ptr, &kernel_object->rb_entry ))
    {
        /* kernel_object pointer already set */
        free( kernel_object );
        return NULL;
    }

    list_add_head( list, &kernel_object->list_entry );
    return kernel_object;
}
  283. static struct kernel_object *kernel_object_from_ptr( struct device_manager *manager, client_ptr_t client_ptr )
  284. {
  285. struct wine_rb_entry *entry = wine_rb_get( &manager->kernel_objects, &client_ptr );
  286. return entry ? WINE_RB_ENTRY_VALUE( entry, struct kernel_object, rb_entry ) : NULL;
  287. }
  288. static void grab_kernel_object( struct kernel_object *ptr )
  289. {
  290. if (!ptr->owned)
  291. {
  292. grab_object( ptr->object );
  293. ptr->owned = 1;
  294. }
  295. }
  296. static void irp_call_dump( struct object *obj, int verbose )
  297. {
  298. struct irp_call *irp = (struct irp_call *)obj;
  299. fprintf( stderr, "IRP call file=%p\n", irp->file );
  300. }
/* destroy callback: release everything the IRP still references; a pending
 * async is terminated as cancelled before being released */
static void irp_call_destroy( struct object *obj )
{
    struct irp_call *irp = (struct irp_call *)obj;

    if (irp->async)
    {
        async_terminate( irp->async, STATUS_CANCELLED );
        release_object( irp->async );
    }
    if (irp->iosb) release_object( irp->iosb );
    if (irp->file) release_object( irp->file );
    if (irp->thread) release_object( irp->thread );
}
/* allocate an IRP call for a device file (file may be NULL for manager-level
 * calls such as cancel/free); fails with STATUS_FILE_DELETED if the device
 * has lost its manager.  The caller owns the returned reference. */
static struct irp_call *create_irp( struct device_file *file, const irp_params_t *params, struct async *async )
{
    struct irp_call *irp;

    if (file && !file->device->manager)  /* it has been deleted */
    {
        set_error( STATUS_FILE_DELETED );
        return NULL;
    }

    if ((irp = alloc_object( &irp_call_ops )))
    {
        irp->file     = file ? (struct device_file *)grab_object( file ) : NULL;
        irp->thread   = NULL;
        irp->async    = NULL;
        irp->params   = *params;
        irp->iosb     = NULL;
        irp->canceled = 0;
        irp->user_ptr = 0;

        /* note: irp->async itself is set later by queue_irp */
        if (async) irp->iosb = async_get_iosb( async );
    }
    return irp;
}
/* complete an IRP: detach it from its file's queue, complete the pending
 * async (if any) with the given status/data, and drop the queue's reference.
 * Safe to call on an already-finished IRP (irp->file is then NULL). */
static void set_irp_result( struct irp_call *irp, unsigned int status,
                            const void *out_data, data_size_t out_size, data_size_t result )
{
    struct device_file *file = irp->file;

    if (!file) return;  /* already finished */

    /* remove it from the device queue */
    list_remove( &irp->dev_entry );
    irp->file = NULL;
    if (irp->async)
    {
        /* clamp to what the client asked for */
        out_size = min( irp->iosb->out_size, out_size );
        async_request_complete_alloc( irp->async, status, result, out_size, out_data );
        release_object( irp->async );
        irp->async = NULL;
    }

    release_object( irp );  /* no longer on the device queue */
    release_object( file );
}
  352. static void device_dump( struct object *obj, int verbose )
  353. {
  354. fputs( "Device\n", stderr );
  355. }
/* destroy callback: a device can only die once all its files are gone;
 * it is unlinked from its manager's list unless already deleted */
static void device_destroy( struct object *obj )
{
    struct device *device = (struct device *)obj;

    assert( list_empty( &device->files ));

    free( device->unix_path );
    if (device->manager) list_remove( &device->entry );
}
/* queue an IRP on its file (if any) and on the manager, waking the manager
 * when the queue transitions from empty to non-empty */
static void add_irp_to_queue( struct device_manager *manager, struct irp_call *irp, struct thread *thread )
{
    grab_object( irp );  /* grab reference for queued irp */
    irp->thread = thread ? (struct thread *)grab_object( thread ) : NULL;
    if (irp->file) list_add_tail( &irp->file->requests, &irp->dev_entry );
    list_add_tail( &manager->requests, &irp->mgr_entry );
    if (list_head( &manager->requests ) == &irp->mgr_entry)
    {
        /* first one */
        wake_up( &manager->obj, 0 );
        fast_set_event( manager->fast_sync );
    }
}
/* open_file callback for devices: create a device_file, back it with either
 * a real Unix fd (when the device has a unix_path) or a pseudo fd, and
 * notify the driver with an IRP_CALL_CREATE when the device has a manager */
static struct object *device_open_file( struct object *obj, unsigned int access,
                                        unsigned int sharing, unsigned int options )
{
    struct device *device = (struct device *)obj;
    struct device_file *file;
    struct unicode_str nt_name;

    if (!(file = alloc_object( &device_file_ops ))) return NULL;

    file->device = (struct device *)grab_object( device );
    file->closed = 0;
    list_init( &file->kernel_object );
    list_init( &file->requests );
    list_add_tail( &device->files, &file->entry );
    if (device->unix_path)
    {
        mode_t mode = 0666;
        access = file->obj.ops->map_access( &file->obj, access );
        nt_name.str = device->obj.ops->get_full_name( &device->obj, &nt_name.len );
        file->fd = open_fd( NULL, device->unix_path, nt_name, O_NONBLOCK, &mode, access, sharing, options );
        if (file->fd) set_fd_user( file->fd, &device_file_fd_ops, &file->obj );
    }
    else file->fd = alloc_pseudo_fd( &device_file_fd_ops, &file->obj, options );

    if (!file->fd)
    {
        /* device_file_destroy copes with a NULL fd */
        release_object( file );
        return NULL;
    }

    allow_fd_caching( file->fd );

    if (device->manager)
    {
        struct irp_call *irp;
        irp_params_t params;

        memset( &params, 0, sizeof(params) );
        params.create.type    = IRP_CALL_CREATE;
        params.create.access  = access;
        params.create.sharing = sharing;
        params.create.options = options;
        params.create.device  = get_kernel_object_ptr( device->manager, &device->obj );

        if ((irp = create_irp( file, &params, NULL )))
        {
            add_irp_to_queue( device->manager, irp, current );
            release_object( irp );
        }
    }
    return &file->obj;
}
  421. static struct list *device_get_kernel_obj_list( struct object *obj )
  422. {
  423. struct device *device = (struct device *)obj;
  424. return &device->kernel_object;
  425. }
  426. static void device_file_dump( struct object *obj, int verbose )
  427. {
  428. struct device_file *file = (struct device_file *)obj;
  429. fprintf( stderr, "File on device %p\n", file->device );
  430. }
  431. static struct fd *device_file_get_fd( struct object *obj )
  432. {
  433. struct device_file *file = (struct device_file *)obj;
  434. return (struct fd *)grab_object( file->fd );
  435. }
  436. static WCHAR *device_file_get_full_name( struct object *obj, data_size_t *len )
  437. {
  438. struct device_file *file = (struct device_file *)obj;
  439. return file->device->obj.ops->get_full_name( &file->device->obj, len );
  440. }
  441. static struct list *device_file_get_kernel_obj_list( struct object *obj )
  442. {
  443. struct device_file *file = (struct device_file *)obj;
  444. return &file->kernel_object;
  445. }
/* close_handle callback: when the last handle to the file goes away, notify
 * the driver once with an IRP_CALL_CLOSE (guarded by file->closed).
 * Always returns 1 to allow the close. */
static int device_file_close_handle( struct object *obj, struct process *process, obj_handle_t handle )
{
    struct device_file *file = (struct device_file *)obj;

    if (!file->closed && file->device->manager && obj->handle_count == 1)  /* last handle */
    {
        struct irp_call *irp;
        irp_params_t params;

        file->closed = 1;
        memset( &params, 0, sizeof(params) );
        params.close.type = IRP_CALL_CLOSE;

        if ((irp = create_irp( file, &params, NULL )))
        {
            add_irp_to_queue( file->device->manager, irp, current );
            release_object( irp );
        }
    }
    return 1;
}
/* destroy callback: drop the queue references of any still-pending IRPs,
 * release the fd (may be NULL if open failed) and the device reference */
static void device_file_destroy( struct object *obj )
{
    struct device_file *file = (struct device_file *)obj;
    struct irp_call *irp, *next;

    LIST_FOR_EACH_ENTRY_SAFE( irp, next, &file->requests, struct irp_call, dev_entry )
    {
        list_remove( &irp->dev_entry );
        release_object( irp );  /* no longer on the device queue */
    }
    if (file->fd) release_object( file->fd );
    list_remove( &file->entry );
    release_object( file->device );
}
/* prepare the parameters handed to the client for an IRP: translate the
 * server file object into the client's kernel pointer (or, for create, into
 * a fresh handle in the client process).  Returns 0 only on handle
 * allocation failure for IRP_CALL_CREATE. */
static int fill_irp_params( struct device_manager *manager, struct irp_call *irp, irp_params_t *params )
{
    switch (irp->params.type)
    {
    case IRP_CALL_NONE:
    case IRP_CALL_FREE:
    case IRP_CALL_CANCEL:
        /* no file associated with these calls */
        break;
    case IRP_CALL_CREATE:
        /* the client gets a real handle so it can build its file object */
        irp->params.create.file = alloc_handle( current->process, irp->file,
                                                irp->params.create.access, 0 );
        if (!irp->params.create.file) return 0;
        break;
    case IRP_CALL_CLOSE:
        irp->params.close.file = get_kernel_object_ptr( manager, &irp->file->obj );
        break;
    case IRP_CALL_READ:
        irp->params.read.file = get_kernel_object_ptr( manager, &irp->file->obj );
        irp->params.read.out_size = irp->iosb->out_size;
        break;
    case IRP_CALL_WRITE:
        irp->params.write.file = get_kernel_object_ptr( manager, &irp->file->obj );
        break;
    case IRP_CALL_FLUSH:
        irp->params.flush.file = get_kernel_object_ptr( manager, &irp->file->obj );
        break;
    case IRP_CALL_IOCTL:
        irp->params.ioctl.file = get_kernel_object_ptr( manager, &irp->file->obj );
        irp->params.ioctl.out_size = irp->iosb->out_size;
        break;
    case IRP_CALL_VOLUME:
        irp->params.volume.file = get_kernel_object_ptr( manager, &irp->file->obj );
        irp->params.volume.out_size = irp->iosb->out_size;
        break;
    }

    *params = irp->params;
    return 1;
}
  515. static void free_irp_params( struct irp_call *irp )
  516. {
  517. switch (irp->params.type)
  518. {
  519. case IRP_CALL_CREATE:
  520. close_handle( current->process, irp->params.create.file );
  521. break;
  522. default:
  523. break;
  524. }
  525. }
/* queue an irp to the device */
static void queue_irp( struct device_file *file, const irp_params_t *params, struct async *async )
{
    struct irp_call *irp = create_irp( file, params, async );

    if (!irp) return;  /* create_irp already set the error */

    /* register the async as waiting on the device; the IRP keeps its own
     * reference until it is completed or cancelled */
    fd_queue_async( file->fd, async, ASYNC_TYPE_WAIT );
    irp->async = (struct async *)grab_object( async );
    add_irp_to_queue( file->device->manager, irp, current );
    release_object( irp );

    /* the result will come back asynchronously from the driver */
    async_set_unknown_status( async );
}
  537. static enum server_fd_type device_file_get_fd_type( struct fd *fd )
  538. {
  539. return FD_TYPE_DEVICE;
  540. }
  541. static void device_file_get_volume_info( struct fd *fd, struct async *async, unsigned int info_class )
  542. {
  543. struct device_file *file = get_fd_user( fd );
  544. irp_params_t params;
  545. memset( &params, 0, sizeof(params) );
  546. params.volume.type = IRP_CALL_VOLUME;
  547. params.volume.info_class = info_class;
  548. queue_irp( file, &params, async );
  549. }
  550. static void device_file_read( struct fd *fd, struct async *async, file_pos_t pos )
  551. {
  552. struct device_file *file = get_fd_user( fd );
  553. irp_params_t params;
  554. memset( &params, 0, sizeof(params) );
  555. params.read.type = IRP_CALL_READ;
  556. params.read.key = 0;
  557. params.read.pos = pos;
  558. queue_irp( file, &params, async );
  559. }
  560. static void device_file_write( struct fd *fd, struct async *async, file_pos_t pos )
  561. {
  562. struct device_file *file = get_fd_user( fd );
  563. irp_params_t params;
  564. memset( &params, 0, sizeof(params) );
  565. params.write.type = IRP_CALL_WRITE;
  566. params.write.key = 0;
  567. params.write.pos = pos;
  568. queue_irp( file, &params, async );
  569. }
  570. static void device_file_flush( struct fd *fd, struct async *async )
  571. {
  572. struct device_file *file = get_fd_user( fd );
  573. irp_params_t params;
  574. memset( &params, 0, sizeof(params) );
  575. params.flush.type = IRP_CALL_FLUSH;
  576. queue_irp( file, &params, async );
  577. }
  578. static void device_file_ioctl( struct fd *fd, ioctl_code_t code, struct async *async )
  579. {
  580. struct device_file *file = get_fd_user( fd );
  581. irp_params_t params;
  582. memset( &params, 0, sizeof(params) );
  583. params.ioctl.type = IRP_CALL_IOCTL;
  584. params.ioctl.code = code;
  585. queue_irp( file, &params, async );
  586. }
/* mark an IRP as canceled and, if the client already knows about it
 * (user_ptr set) and the device still has a manager, queue a manager-level
 * IRP_CALL_CANCEL so the driver can cancel it on its side */
static void cancel_irp_call( struct irp_call *irp )
{
    struct irp_call *cancel_irp;
    irp_params_t params;

    irp->canceled = 1;
    if (!irp->user_ptr || !irp->file || !irp->file->device->manager) return;

    memset( &params, 0, sizeof(params) );
    params.cancel.type = IRP_CALL_CANCEL;
    params.cancel.irp  = irp->user_ptr;

    if ((cancel_irp = create_irp( NULL, &params, NULL )))
    {
        /* no originating thread: this is an internal notification */
        add_irp_to_queue( irp->file->device->manager, cancel_irp, NULL );
        release_object( cancel_irp );
    }
}
  602. static void device_file_cancel_async( struct fd *fd, struct async *async )
  603. {
  604. struct device_file *file = get_fd_user( fd );
  605. struct irp_call *irp;
  606. LIST_FOR_EACH_ENTRY( irp, &file->requests, struct irp_call, dev_entry )
  607. {
  608. if (irp->async == async)
  609. {
  610. cancel_irp_call( irp );
  611. return;
  612. }
  613. }
  614. }
/* create a named device attached to a device manager; the manager's device
 * list holds its own reference (the extra grab_object) */
static struct device *create_device( struct object *root, const struct unicode_str *name,
                                     struct device_manager *manager )
{
    struct device *device;

    if ((device = create_named_object( root, &device_ops, name, 0, NULL )))
    {
        device->unix_path = NULL;
        device->manager = manager;
        grab_object( device );  /* reference owned by manager->devices */
        list_add_tail( &manager->devices, &device->entry );
        list_init( &device->kernel_object );
        list_init( &device->files );
    }
    return device;
}
  630. struct object *create_unix_device( struct object *root, const struct unicode_str *name,
  631. unsigned int attr, const struct security_descriptor *sd,
  632. const char *unix_path )
  633. {
  634. struct device *device;
  635. if ((device = create_named_object( root, &device_ops, name, attr, sd )))
  636. {
  637. device->unix_path = strdup( unix_path );
  638. device->manager = NULL; /* no manager, requests go straight to the Unix device */
  639. list_init( &device->kernel_object );
  640. list_init( &device->files );
  641. }
  642. return &device->obj;
  643. }
/* terminate requests when the underlying device is deleted */
static void delete_file( struct device_file *file )
{
    struct irp_call *irp, *next;

    /* the pending requests may be the only thing holding a reference to the file */
    grab_object( file );

    /* terminate all pending requests */
    LIST_FOR_EACH_ENTRY_SAFE( irp, next, &file->requests, struct irp_call, dev_entry )
    {
        list_remove( &irp->mgr_entry );
        set_irp_result( irp, STATUS_FILE_DELETED, NULL, 0, 0 );
    }

    /* the manager queue may now be empty: clear the fast sync event */
    if (list_empty( &file->device->manager->requests ))
        fast_reset_event( file->device->manager->fast_sync );

    release_object( file );
}
/* detach a device from its manager: fail all open files, unlink it from the
 * namespace and drop the manager's reference; idempotent */
static void delete_device( struct device *device )
{
    struct device_file *file, *next;

    if (!device->manager) return;  /* already deleted */

    LIST_FOR_EACH_ENTRY_SAFE( file, next, &device->files, struct device_file, entry )
        delete_file( file );

    unlink_named_object( &device->obj );
    list_remove( &device->entry );
    device->manager = NULL;
    release_object( device );  /* drop the reference held by manager->devices */
}
  671. static void device_manager_dump( struct object *obj, int verbose )
  672. {
  673. fprintf( stderr, "Device manager\n" );
  674. }
  675. static int device_manager_signaled( struct object *obj, struct wait_queue_entry *entry )
  676. {
  677. struct device_manager *manager = (struct device_manager *)obj;
  678. return !list_empty( &manager->requests );
  679. }
/* lazily create the manager's fast-sync event, initialized to the current
 * signaled state; returns a new reference (or NULL if creation failed) */
static struct fast_sync *device_manager_get_fast_sync( struct object *obj )
{
    struct device_manager *manager = (struct device_manager *)obj;

    if (!manager->fast_sync)
        manager->fast_sync = fast_create_event( FAST_SYNC_MANUAL_SERVER, !list_empty( &manager->requests ) );
    if (manager->fast_sync) grab_object( manager->fast_sync );
    return manager->fast_sync;
}
/* destroy callback: tear down everything the manager owns — the in-progress
 * call, all kernel object mappings, all devices (which fails their pending
 * IRPs), and finally any manager-level IRPs left in the queue */
static void device_manager_destroy( struct object *obj )
{
    struct device_manager *manager = (struct device_manager *)obj;
    struct kernel_object *kernel_object;
    struct list *ptr;

    if (manager->current_call)
    {
        release_object( manager->current_call );
        manager->current_call = NULL;
    }

    while (manager->kernel_objects.root)
    {
        kernel_object = WINE_RB_ENTRY_VALUE( manager->kernel_objects.root, struct kernel_object, rb_entry );
        wine_rb_remove( &manager->kernel_objects, &kernel_object->rb_entry );
        list_remove( &kernel_object->list_entry );
        if (kernel_object->owned) release_object( kernel_object->object );
        free( kernel_object );
    }

    while ((ptr = list_head( &manager->devices )))
    {
        struct device *device = LIST_ENTRY( ptr, struct device, entry );
        delete_device( device );
    }

    while ((ptr = list_head( &manager->requests )))
    {
        struct irp_call *irp = LIST_ENTRY( ptr, struct irp_call, mgr_entry );
        list_remove( &irp->mgr_entry );
        /* only file-less internal IRPs (cancel/free) can remain here */
        assert( !irp->file && !irp->async );
        release_object( irp );
    }

    if (manager->fast_sync) release_object( manager->fast_sync );
}
  720. static struct device_manager *create_device_manager(void)
  721. {
  722. struct device_manager *manager;
  723. if ((manager = alloc_object( &device_manager_ops )))
  724. {
  725. manager->current_call = NULL;
  726. manager->fast_sync = NULL;
  727. list_init( &manager->devices );
  728. list_init( &manager->requests );
  729. wine_rb_init( &manager->kernel_objects, compare_kernel_object );
  730. }
  731. return manager;
  732. }
/* detach and free all kernel objects attached to a server object that is being
 * destroyed, queuing an IRP_CALL_FREE request so each client driver can drop
 * its kernel-side pointer */
void free_kernel_objects( struct object *obj )
{
    struct list *ptr, *list;

    if (!(list = obj->ops->get_kernel_obj_list( obj ))) return;  /* object type has no kernel objects */

    while ((ptr = list_head( list )))
    {
        struct kernel_object *kernel_object = LIST_ENTRY( ptr, struct kernel_object, list_entry );
        struct irp_call *irp;
        irp_params_t params;

        /* an owned entry would still hold a reference on obj, so obj could not
         * be getting destroyed now */
        assert( !kernel_object->owned );

        /* notify the driver that its pointer is going away */
        memset( &params, 0, sizeof(params) );
        params.free.type = IRP_CALL_FREE;
        params.free.obj = kernel_object->user_ptr;

        if ((irp = create_irp( NULL, &params, NULL )))
        {
            add_irp_to_queue( kernel_object->manager, irp, NULL );
            release_object( irp );
        }

        /* unlink from both the per-object list and the manager's rb-tree */
        list_remove( &kernel_object->list_entry );
        wine_rb_remove( &kernel_object->manager->kernel_objects, &kernel_object->rb_entry );
        free( kernel_object );
    }
}
  756. /* create a device manager */
  757. DECL_HANDLER(create_device_manager)
  758. {
  759. struct device_manager *manager = create_device_manager();
  760. if (manager)
  761. {
  762. reply->handle = alloc_handle( current->process, manager, req->access, req->attributes );
  763. release_object( manager );
  764. }
  765. }
  766. /* create a device */
  767. DECL_HANDLER(create_device)
  768. {
  769. struct device *device;
  770. struct unicode_str name = get_req_unicode_str();
  771. struct device_manager *manager;
  772. struct object *root = NULL;
  773. if (!(manager = (struct device_manager *)get_handle_obj( current->process, req->manager,
  774. 0, &device_manager_ops )))
  775. return;
  776. if (req->rootdir && !(root = get_directory_obj( current->process, req->rootdir )))
  777. {
  778. release_object( manager );
  779. return;
  780. }
  781. if ((device = create_device( root, &name, manager )))
  782. {
  783. struct kernel_object *ptr = set_kernel_object( manager, &device->obj, req->user_ptr );
  784. if (ptr)
  785. grab_kernel_object( ptr );
  786. else
  787. set_error( STATUS_NO_MEMORY );
  788. release_object( device );
  789. }
  790. if (root) release_object( root );
  791. release_object( manager );
  792. }
  793. /* delete a device */
  794. DECL_HANDLER(delete_device)
  795. {
  796. struct device_manager *manager;
  797. struct kernel_object *ref;
  798. struct device *device;
  799. if (!(manager = (struct device_manager *)get_handle_obj( current->process, req->manager,
  800. 0, &device_manager_ops )))
  801. return;
  802. if ((ref = kernel_object_from_ptr( manager, req->device )) && ref->object->ops == &device_ops)
  803. {
  804. device = (struct device *)grab_object( ref->object );
  805. delete_device( device );
  806. release_object( device );
  807. }
  808. else set_error( STATUS_INVALID_HANDLE );
  809. release_object( manager );
  810. }
/* retrieve the next pending device irp request: first commit the result of the
 * previous call (if any), then hand out the next queued irp to the driver */
DECL_HANDLER(get_next_device_request)
{
    struct irp_call *irp;
    struct device_manager *manager;
    struct list *ptr;
    struct iosb *iosb;

    if (!(manager = (struct device_manager *)get_handle_obj( current->process, req->manager,
                                                             0, &device_manager_ops )))
        return;

    /* process result of previous call */
    if (manager->current_call)
    {
        irp = manager->current_call;
        irp->user_ptr = req->user_ptr;  /* record the client's kernel pointer for this irp */

        if (irp->async)
        {
            if (req->pending)
                set_async_pending( irp->async );
            async_set_initial_status( irp->async, req->status );

            if (req->prev)
            {
                /* previous irp completed synchronously: store its final result */
                set_irp_result( irp, req->iosb_status, get_req_data(), get_req_data_size(), req->result );
            }
            else
            {
                async_wake_obj( irp->async );
                if (irp->canceled)
                {
                    /* if it was canceled during dispatch, we couldn't queue cancel
                     * call without client pointer, so we need to do it now */
                    cancel_irp_call( irp );
                }
            }
        }
        else
        {
            /* no async attached: the call status is the whole result */
            set_irp_result( irp, req->status, NULL, 0, 0 );
        }

        if (req->prev)
            close_handle( current->process, req->prev );  /* avoid an extra round-trip for close */
        free_irp_params( irp );
        release_object( irp );
        manager->current_call = NULL;
    }
    clear_error();  /* the result processing above may have set an error we must not leak */

    if ((ptr = list_head( &manager->requests )))
    {
        struct thread *thread;

        irp = LIST_ENTRY( ptr, struct irp_call, mgr_entry );

        /* report the originating client thread (or the calling thread if none) */
        thread = irp->thread ? irp->thread : current;
        reply->client_thread = get_kernel_object_ptr( manager, &thread->obj );
        reply->client_tid = get_thread_id( thread );

        iosb = irp->iosb;
        if (iosb)
            reply->in_size = iosb->in_size;  /* always report size so the client can retry */

        if (iosb && iosb->in_size > get_reply_max_size())
            set_error( STATUS_BUFFER_OVERFLOW );  /* client buffer too small; irp stays queued */
        else if (!irp->file || (reply->next = alloc_handle( current->process, irp, 0, 0 )))
        {
            if (fill_irp_params( manager, irp, &reply->params ))
            {
                if (iosb)
                {
                    /* transfer the input buffer ownership to the reply */
                    set_reply_data_ptr( iosb->in_data, iosb->in_size );
                    iosb->in_data = NULL;
                    iosb->in_size = 0;
                }
                list_remove( &irp->mgr_entry );
                list_init( &irp->mgr_entry );
                if (list_empty( &manager->requests ))
                    fast_reset_event( manager->fast_sync );
                /* we already own the object if it's only on manager queue */
                if (irp->file) grab_object( irp );
                manager->current_call = irp;
            }
            else close_handle( current->process, reply->next );  /* undo the handle on failure */
        }
    }
    else set_error( STATUS_PENDING );  /* nothing queued: tell the driver to wait */

    release_object( manager );
}
  893. /* store results of an async irp */
  894. DECL_HANDLER(set_irp_result)
  895. {
  896. struct irp_call *irp;
  897. if ((irp = (struct irp_call *)get_handle_obj( current->process, req->handle, 0, &irp_call_ops )))
  898. {
  899. set_irp_result( irp, req->status, get_req_data(), get_req_data_size(), req->size );
  900. close_handle( current->process, req->handle ); /* avoid an extra round-trip for close */
  901. release_object( irp );
  902. }
  903. }
  904. /* get kernel pointer from server object */
  905. DECL_HANDLER(get_kernel_object_ptr)
  906. {
  907. struct device_manager *manager;
  908. struct object *object = NULL;
  909. if (!(manager = (struct device_manager *)get_handle_obj( current->process, req->manager,
  910. 0, &device_manager_ops )))
  911. return;
  912. if ((object = get_handle_obj( current->process, req->handle, 0, NULL )))
  913. {
  914. reply->user_ptr = get_kernel_object_ptr( manager, object );
  915. release_object( object );
  916. }
  917. release_object( manager );
  918. }
  919. /* associate kernel pointer with server object */
  920. DECL_HANDLER(set_kernel_object_ptr)
  921. {
  922. struct device_manager *manager;
  923. struct object *object = NULL;
  924. if (!(manager = (struct device_manager *)get_handle_obj( current->process, req->manager,
  925. 0, &device_manager_ops )))
  926. return;
  927. if (!(object = get_handle_obj( current->process, req->handle, 0, NULL )))
  928. {
  929. release_object( manager );
  930. return;
  931. }
  932. if (!set_kernel_object( manager, object, req->user_ptr ))
  933. set_error( STATUS_INVALID_HANDLE );
  934. release_object( object );
  935. release_object( manager );
  936. }
  937. /* grab server object reference from kernel object pointer */
  938. DECL_HANDLER(grab_kernel_object)
  939. {
  940. struct device_manager *manager;
  941. struct kernel_object *ref;
  942. if (!(manager = (struct device_manager *)get_handle_obj( current->process, req->manager,
  943. 0, &device_manager_ops )))
  944. return;
  945. if ((ref = kernel_object_from_ptr( manager, req->user_ptr )) && !ref->owned)
  946. grab_kernel_object( ref );
  947. else
  948. set_error( STATUS_INVALID_HANDLE );
  949. release_object( manager );
  950. }
  951. /* release server object reference from kernel object pointer */
  952. DECL_HANDLER(release_kernel_object)
  953. {
  954. struct device_manager *manager;
  955. struct kernel_object *ref;
  956. if (!(manager = (struct device_manager *)get_handle_obj( current->process, req->manager,
  957. 0, &device_manager_ops )))
  958. return;
  959. if ((ref = kernel_object_from_ptr( manager, req->user_ptr )) && ref->owned)
  960. {
  961. ref->owned = 0;
  962. release_object( ref->object );
  963. }
  964. else set_error( STATUS_INVALID_HANDLE );
  965. release_object( manager );
  966. }
  967. /* get handle from kernel object pointer */
  968. DECL_HANDLER(get_kernel_object_handle)
  969. {
  970. struct device_manager *manager;
  971. struct kernel_object *ref;
  972. if (!(manager = (struct device_manager *)get_handle_obj( current->process, req->manager,
  973. 0, &device_manager_ops )))
  974. return;
  975. if ((ref = kernel_object_from_ptr( manager, req->user_ptr )))
  976. reply->handle = alloc_handle( current->process, ref->object, req->access, 0 );
  977. else
  978. set_error( STATUS_INVALID_HANDLE );
  979. release_object( manager );
  980. }