/*
 * Server-side device support
 *
 * Copyright (C) 2007 Alexandre Julliard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */

#include "config.h"
#include "wine/port.h"
#include "wine/rbtree.h"

#include <assert.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>

#include "ntstatus.h"
#define WIN32_NO_STATUS
#include "windef.h"
#include "winternl.h"
#include "ddk/wdm.h"

#include "object.h"
#include "file.h"
#include "handle.h"
#include "request.h"
#include "process.h"

/* IRP object */

struct irp_call
{
    struct object          obj;           /* object header */
    struct list            dev_entry;     /* entry in device queue */
    struct list            mgr_entry;     /* entry in manager queue */
    struct device_file    *file;          /* file containing this irp */
    struct thread         *thread;        /* thread that queued the irp */
    struct async          *async;         /* pending async op */
    irp_params_t           params;        /* irp parameters */
    struct iosb           *iosb;          /* I/O status block */
    int                    canceled;      /* the call was canceled */
    client_ptr_t           user_ptr;      /* client side pointer */
};

static void irp_call_dump( struct object *obj, int verbose );
static int irp_call_signaled( struct object *obj, struct wait_queue_entry *entry );
static void irp_call_destroy( struct object *obj );

static const struct object_ops irp_call_ops =
{
    sizeof(struct irp_call),          /* size */
    &no_type,                         /* type */
    irp_call_dump,                    /* dump */
    add_queue,                        /* add_queue */
    remove_queue,                     /* remove_queue */
    irp_call_signaled,                /* signaled */
    no_satisfied,                     /* satisfied */
    no_signal,                        /* signal */
    no_get_fd,                        /* get_fd */
    default_map_access,               /* map_access */
    default_get_sd,                   /* get_sd */
    default_set_sd,                   /* set_sd */
    no_get_full_name,                 /* get_full_name */
    no_lookup_name,                   /* lookup_name */
    no_link_name,                     /* link_name */
    NULL,                             /* unlink_name */
    no_open_file,                     /* open_file */
    no_kernel_obj_list,               /* get_kernel_obj_list */
    no_close_handle,                  /* close_handle */
    irp_call_destroy                  /* destroy */
};

/* device manager (a list of devices managed by the same client process) */

struct device_manager
{
    struct object          obj;             /* object header */
    struct list            devices;         /* list of devices */
    struct list            requests;        /* list of pending irps across all devices */
    struct irp_call       *current_call;    /* call currently executed on client side */
    struct wine_rb_tree    kernel_objects;  /* map of objects that have client side pointer associated */
};

static void device_manager_dump( struct object *obj, int verbose );
static int device_manager_signaled( struct object *obj, struct wait_queue_entry *entry );
static void device_manager_destroy( struct object *obj );

static const struct object_ops device_manager_ops =
{
    sizeof(struct device_manager),    /* size */
    &no_type,                         /* type */
    device_manager_dump,              /* dump */
    add_queue,                        /* add_queue */
    remove_queue,                     /* remove_queue */
    device_manager_signaled,          /* signaled */
    no_satisfied,                     /* satisfied */
    no_signal,                        /* signal */
    no_get_fd,                        /* get_fd */
    default_map_access,               /* map_access */
    default_get_sd,                   /* get_sd */
    default_set_sd,                   /* set_sd */
    no_get_full_name,                 /* get_full_name */
    no_lookup_name,                   /* lookup_name */
    no_link_name,                     /* link_name */
    NULL,                             /* unlink_name */
    no_open_file,                     /* open_file */
    no_kernel_obj_list,               /* get_kernel_obj_list */
    no_close_handle,                  /* close_handle */
    device_manager_destroy            /* destroy */
};

/* device (a single device object) */

static const WCHAR device_name[] = {'D','e','v','i','c','e'};

struct type_descr device_type =
{
    { device_name, sizeof(device_name) },   /* name */
    FILE_ALL_ACCESS,                        /* valid_access */
    {                                       /* mapping */
        FILE_GENERIC_READ,
        FILE_GENERIC_WRITE,
        FILE_GENERIC_EXECUTE,
        FILE_ALL_ACCESS
    },
};

struct device
{
    struct object          obj;            /* object header */
    struct device_manager *manager;        /* manager for this device (or NULL if deleted) */
    char                  *unix_path;      /* path to unix device if any */
    struct list            kernel_object;  /* list of kernel object pointers */
    struct list            entry;          /* entry in device manager list */
    struct list            files;          /* list of open files */
};

static void device_dump( struct object *obj, int verbose );
static void device_destroy( struct object *obj );
static struct object *device_open_file( struct object *obj, unsigned int access,
                                        unsigned int sharing, unsigned int options );
static struct list *device_get_kernel_obj_list( struct object *obj );

static const struct object_ops device_ops =
{
    sizeof(struct device),            /* size */
    &device_type,                     /* type */
    device_dump,                      /* dump */
    no_add_queue,                     /* add_queue */
    NULL,                             /* remove_queue */
    NULL,                             /* signaled */
    no_satisfied,                     /* satisfied */
    no_signal,                        /* signal */
    no_get_fd,                        /* get_fd */
    default_map_access,               /* map_access */
    default_get_sd,                   /* get_sd */
    default_set_sd,                   /* set_sd */
    default_get_full_name,            /* get_full_name */
    no_lookup_name,                   /* lookup_name */
    directory_link_name,              /* link_name */
    default_unlink_name,              /* unlink_name */
    device_open_file,                 /* open_file */
    device_get_kernel_obj_list,       /* get_kernel_obj_list */
    no_close_handle,                  /* close_handle */
    device_destroy                    /* destroy */
};

/* device file (an open file handle to a device) */

struct device_file
{
    struct object          obj;            /* object header */
    struct device         *device;         /* device for this file */
    struct fd             *fd;             /* file descriptor for irp */
    struct list            kernel_object;  /* list of kernel object pointers */
    int                    closed;         /* closed file flag */
    struct list            entry;          /* entry in device list */
    struct list            requests;       /* list of pending irp requests */
};

static void device_file_dump( struct object *obj, int verbose );
static struct fd *device_file_get_fd( struct object *obj );
static WCHAR *device_file_get_full_name( struct object *obj, data_size_t *len );
static struct list *device_file_get_kernel_obj_list( struct object *obj );
static int device_file_close_handle( struct object *obj, struct process *process, obj_handle_t handle );
static void device_file_destroy( struct object *obj );
static enum server_fd_type device_file_get_fd_type( struct fd *fd );
static int device_file_read( struct fd *fd, struct async *async, file_pos_t pos );
static int device_file_write( struct fd *fd, struct async *async, file_pos_t pos );
static int device_file_flush( struct fd *fd, struct async *async );
static int device_file_ioctl( struct fd *fd, ioctl_code_t code, struct async *async );
static void device_file_reselect_async( struct fd *fd, struct async_queue *queue );
static int device_file_get_volume_info( struct fd *fd, struct async *async, unsigned int info_class );

static const struct object_ops device_file_ops =
{
    sizeof(struct device_file),       /* size */
    &file_type,                       /* type */
    device_file_dump,                 /* dump */
    add_queue,                        /* add_queue */
    remove_queue,                     /* remove_queue */
    default_fd_signaled,              /* signaled */
    no_satisfied,                     /* satisfied */
    no_signal,                        /* signal */
    device_file_get_fd,               /* get_fd */
    default_map_access,               /* map_access */
    default_get_sd,                   /* get_sd */
    default_set_sd,                   /* set_sd */
    device_file_get_full_name,        /* get_full_name */
    no_lookup_name,                   /* lookup_name */
    no_link_name,                     /* link_name */
    NULL,                             /* unlink_name */
    no_open_file,                     /* open_file */
    device_file_get_kernel_obj_list,  /* get_kernel_obj_list */
    device_file_close_handle,         /* close_handle */
    device_file_destroy               /* destroy */
};

static const struct fd_ops device_file_fd_ops =
{
    default_fd_get_poll_events,       /* get_poll_events */
    default_poll_event,               /* poll_event */
    device_file_get_fd_type,          /* get_fd_type */
    device_file_read,                 /* read */
    device_file_write,                /* write */
    device_file_flush,                /* flush */
    default_fd_get_file_info,         /* get_file_info */
    device_file_get_volume_info,      /* get_volume_info */
    device_file_ioctl,                /* ioctl */
    default_fd_queue_async,           /* queue_async */
    device_file_reselect_async        /* reselect_async */
};

struct list *no_kernel_obj_list( struct object *obj )
{
    return NULL;
}

struct kernel_object
{
    struct device_manager *manager;
    client_ptr_t           user_ptr;
    struct object         *object;
    int                    owned;
    struct list            list_entry;
    struct wine_rb_entry   rb_entry;
};
static int compare_kernel_object( const void *k, const struct wine_rb_entry *entry )
{
    struct kernel_object *ptr = WINE_RB_ENTRY_VALUE( entry, struct kernel_object, rb_entry );
    return memcmp( k, &ptr->user_ptr, sizeof(client_ptr_t) );
}
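
/* return the kernel object entry that the given manager has associated with a server object, if any */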
static struct kernel_object *kernel_object_from_obj( struct device_manager *manager, struct object *obj )
{
    struct kernel_object *kernel_object;
    struct list *list;

    if (!(list = obj->ops->get_kernel_obj_list( obj ))) return NULL;
    LIST_FOR_EACH_ENTRY( kernel_object, list, struct kernel_object, list_entry )
    {
        if (kernel_object->manager != manager) continue;
        return kernel_object;
    }
    return NULL;
}

static client_ptr_t get_kernel_object_ptr( struct device_manager *manager, struct object *obj )
{
    struct kernel_object *kernel_object = kernel_object_from_obj( manager, obj );
    return kernel_object ? kernel_object->user_ptr : 0;
}
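
/* associate a client-side pointer with a server object for this manager */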
static struct kernel_object *set_kernel_object( struct device_manager *manager, struct object *obj, client_ptr_t user_ptr )
{
    struct kernel_object *kernel_object;
    struct list *list;

    if (!(list = obj->ops->get_kernel_obj_list( obj ))) return NULL;

    if (!(kernel_object = malloc( sizeof(*kernel_object) ))) return NULL;
    kernel_object->manager  = manager;
    kernel_object->user_ptr = user_ptr;
    kernel_object->object   = obj;
    kernel_object->owned    = 0;

    if (wine_rb_put( &manager->kernel_objects, &user_ptr, &kernel_object->rb_entry ))
    {
        /* kernel_object pointer already set */
        free( kernel_object );
        return NULL;
    }

    list_add_head( list, &kernel_object->list_entry );
    return kernel_object;
}

static struct kernel_object *kernel_object_from_ptr( struct device_manager *manager, client_ptr_t client_ptr )
{
    struct wine_rb_entry *entry = wine_rb_get( &manager->kernel_objects, &client_ptr );
    return entry ? WINE_RB_ENTRY_VALUE( entry, struct kernel_object, rb_entry ) : NULL;
}
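
/* grab a reference to the underlying server object the first time it is needed */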
static void grab_kernel_object( struct kernel_object *ptr )
{
    if (!ptr->owned)
    {
        grab_object( ptr->object );
        ptr->owned = 1;
    }
}

static void irp_call_dump( struct object *obj, int verbose )
{
    struct irp_call *irp = (struct irp_call *)obj;
    fprintf( stderr, "IRP call file=%p\n", irp->file );
}

static int irp_call_signaled( struct object *obj, struct wait_queue_entry *entry )
{
    struct irp_call *irp = (struct irp_call *)obj;

    return !irp->file;  /* file is cleared once the irp has completed */
}

static void irp_call_destroy( struct object *obj )
{
    struct irp_call *irp = (struct irp_call *)obj;

    if (irp->async)
    {
        async_terminate( irp->async, STATUS_CANCELLED );
        release_object( irp->async );
    }
    if (irp->iosb) release_object( irp->iosb );
    if (irp->file) release_object( irp->file );
    if (irp->thread) release_object( irp->thread );
}
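
/* allocate an irp for the given file; the iosb is taken from the async, or an empty one is created */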
static struct irp_call *create_irp( struct device_file *file, const irp_params_t *params, struct async *async )
{
    struct irp_call *irp;

    if (file && !file->device->manager)  /* it has been deleted */
    {
        set_error( STATUS_FILE_DELETED );
        return NULL;
    }

    if ((irp = alloc_object( &irp_call_ops )))
    {
        irp->file     = file ? (struct device_file *)grab_object( file ) : NULL;
        irp->thread   = NULL;
        irp->async    = NULL;
        irp->params   = *params;
        irp->iosb     = NULL;
        irp->canceled = 0;
        irp->user_ptr = 0;

        if (async) irp->iosb = async_get_iosb( async );
        if (!irp->iosb && !(irp->iosb = create_iosb( NULL, 0, 0 )))
        {
            release_object( irp );
            irp = NULL;
        }
    }
    return irp;
}
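
/* complete an irp: store the result in the iosb, wake up waiters and remove it from the device queue */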
static void set_irp_result( struct irp_call *irp, unsigned int status,
                            const void *out_data, data_size_t out_size, data_size_t result )
{
    struct device_file *file = irp->file;
    struct iosb *iosb = irp->iosb;

    if (!file) return;  /* already finished */

    /* FIXME: handle the STATUS_PENDING case */
    iosb->status = status;
    iosb->result = result;
    iosb->out_size = min( iosb->out_size, out_size );
    if (iosb->out_size && !(iosb->out_data = memdup( out_data, iosb->out_size )))
        iosb->out_size = 0;

    /* remove it from the device queue */
    list_remove( &irp->dev_entry );
    irp->file = NULL;
    if (irp->async)
    {
        if (result) status = STATUS_ALERTED;
        async_terminate( irp->async, status );
        release_object( irp->async );
        irp->async = NULL;
    }
    wake_up( &irp->obj, 0 );

    release_object( irp );  /* no longer on the device queue */
    release_object( file );
}

static void device_dump( struct object *obj, int verbose )
{
    fputs( "Device\n", stderr );
}

static void device_destroy( struct object *obj )
{
    struct device *device = (struct device *)obj;

    assert( list_empty( &device->files ));

    free( device->unix_path );
    if (device->manager) list_remove( &device->entry );
}
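
/* queue an irp on its file and on the manager, waking up the manager if it was idle */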
static void add_irp_to_queue( struct device_manager *manager, struct irp_call *irp, struct thread *thread )
{
    grab_object( irp );  /* grab reference for queued irp */
    irp->thread = thread ? (struct thread *)grab_object( thread ) : NULL;
    if (irp->file) list_add_tail( &irp->file->requests, &irp->dev_entry );
    list_add_tail( &manager->requests, &irp->mgr_entry );
    if (list_head( &manager->requests ) == &irp->mgr_entry) wake_up( &manager->obj, 0 );  /* first one */
}

static struct object *device_open_file( struct object *obj, unsigned int access,
                                        unsigned int sharing, unsigned int options )
{
    struct device *device = (struct device *)obj;
    struct device_file *file;
    struct unicode_str nt_name;

    if (!(file = alloc_object( &device_file_ops ))) return NULL;

    file->device = (struct device *)grab_object( device );
    file->closed = 0;
    list_init( &file->kernel_object );
    list_init( &file->requests );
    list_add_tail( &device->files, &file->entry );
    if (device->unix_path)
    {
        mode_t mode = 0666;
        access = file->obj.ops->map_access( &file->obj, access );
        nt_name.str = device->obj.ops->get_full_name( &device->obj, &nt_name.len );
        file->fd = open_fd( NULL, device->unix_path, nt_name, O_NONBLOCK | O_LARGEFILE,
                            &mode, access, sharing, options );
        if (file->fd) set_fd_user( file->fd, &device_file_fd_ops, &file->obj );
    }
    else file->fd = alloc_pseudo_fd( &device_file_fd_ops, &file->obj, options );

    if (!file->fd)
    {
        release_object( file );
        return NULL;
    }

    allow_fd_caching( file->fd );

    if (device->manager)
    {
        struct irp_call *irp;
        irp_params_t params;

        memset( &params, 0, sizeof(params) );
        params.create.type    = IRP_CALL_CREATE;
        params.create.access  = access;
        params.create.sharing = sharing;
        params.create.options = options;
        params.create.device  = get_kernel_object_ptr( device->manager, &device->obj );

        if ((irp = create_irp( file, &params, NULL )))
        {
            add_irp_to_queue( device->manager, irp, current );
            release_object( irp );
        }
    }
    return &file->obj;
}

static struct list *device_get_kernel_obj_list( struct object *obj )
{
    struct device *device = (struct device *)obj;
    return &device->kernel_object;
}

static void device_file_dump( struct object *obj, int verbose )
{
    struct device_file *file = (struct device_file *)obj;

    fprintf( stderr, "File on device %p\n", file->device );
}

static struct fd *device_file_get_fd( struct object *obj )
{
    struct device_file *file = (struct device_file *)obj;

    return (struct fd *)grab_object( file->fd );
}

static WCHAR *device_file_get_full_name( struct object *obj, data_size_t *len )
{
    struct device_file *file = (struct device_file *)obj;
    return file->device->obj.ops->get_full_name( &file->device->obj, len );
}

static struct list *device_file_get_kernel_obj_list( struct object *obj )
{
    struct device_file *file = (struct device_file *)obj;
    return &file->kernel_object;
}
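
/* queue a close irp to the device manager when the last handle to the file is closed */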
static int device_file_close_handle( struct object *obj, struct process *process, obj_handle_t handle )
{
    struct device_file *file = (struct device_file *)obj;

    if (!file->closed && file->device->manager && obj->handle_count == 1)  /* last handle */
    {
        struct irp_call *irp;
        irp_params_t params;

        file->closed = 1;
        memset( &params, 0, sizeof(params) );
        params.close.type = IRP_CALL_CLOSE;

        if ((irp = create_irp( file, &params, NULL )))
        {
            add_irp_to_queue( file->device->manager, irp, current );
            release_object( irp );
        }
    }
    return 1;
}

static void device_file_destroy( struct object *obj )
{
    struct device_file *file = (struct device_file *)obj;
    struct irp_call *irp, *next;

    LIST_FOR_EACH_ENTRY_SAFE( irp, next, &file->requests, struct irp_call, dev_entry )
    {
        list_remove( &irp->dev_entry );
        release_object( irp );  /* no longer on the device queue */
    }
    if (file->fd) release_object( file->fd );
    list_remove( &file->entry );
    release_object( file->device );
}
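
/* fill in the client-side parameters of an irp before handing it to the device manager */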
static int fill_irp_params( struct device_manager *manager, struct irp_call *irp, irp_params_t *params )
{
    switch (irp->params.type)
    {
    case IRP_CALL_NONE:
    case IRP_CALL_FREE:
    case IRP_CALL_CANCEL:
        break;
    case IRP_CALL_CREATE:
        irp->params.create.file = alloc_handle( current->process, irp->file,
                                                irp->params.create.access, 0 );
        if (!irp->params.create.file) return 0;
        break;
    case IRP_CALL_CLOSE:
        irp->params.close.file = get_kernel_object_ptr( manager, &irp->file->obj );
        break;
    case IRP_CALL_READ:
        irp->params.read.file     = get_kernel_object_ptr( manager, &irp->file->obj );
        irp->params.read.out_size = irp->iosb->out_size;
        break;
    case IRP_CALL_WRITE:
        irp->params.write.file = get_kernel_object_ptr( manager, &irp->file->obj );
        break;
    case IRP_CALL_FLUSH:
        irp->params.flush.file = get_kernel_object_ptr( manager, &irp->file->obj );
        break;
    case IRP_CALL_IOCTL:
        irp->params.ioctl.file     = get_kernel_object_ptr( manager, &irp->file->obj );
        irp->params.ioctl.out_size = irp->iosb->out_size;
        break;
    case IRP_CALL_VOLUME:
        irp->params.volume.file     = get_kernel_object_ptr( manager, &irp->file->obj );
        irp->params.volume.out_size = irp->iosb->out_size;
        break;
    }

    *params = irp->params;
    return 1;
}

static void free_irp_params( struct irp_call *irp )
{
    switch (irp->params.type)
    {
    case IRP_CALL_CREATE:
        close_handle( current->process, irp->params.create.file );
        break;
    default:
        break;
    }
}

/* queue an irp to the device */
static int queue_irp( struct device_file *file, const irp_params_t *params, struct async *async )
{
    struct irp_call *irp = create_irp( file, params, async );
    if (!irp) return 0;

    fd_queue_async( file->fd, async, ASYNC_TYPE_WAIT );
    irp->async = (struct async *)grab_object( async );
    add_irp_to_queue( file->device->manager, irp, current );
    release_object( irp );
    set_error( STATUS_PENDING );
    return 1;  /* irp successfully queued; the async remains pending */
}

static enum server_fd_type device_file_get_fd_type( struct fd *fd )
{
    return FD_TYPE_DEVICE;
}

static int device_file_get_volume_info( struct fd *fd, struct async *async, unsigned int info_class )
{
    struct device_file *file = get_fd_user( fd );
    irp_params_t params;

    memset( &params, 0, sizeof(params) );
    params.volume.type       = IRP_CALL_VOLUME;
    params.volume.info_class = info_class;
    return queue_irp( file, &params, async );
}

static int device_file_read( struct fd *fd, struct async *async, file_pos_t pos )
{
    struct device_file *file = get_fd_user( fd );
    irp_params_t params;

    memset( &params, 0, sizeof(params) );
    params.read.type = IRP_CALL_READ;
    params.read.key  = 0;
    params.read.pos  = pos;
    return queue_irp( file, &params, async );
}

static int device_file_write( struct fd *fd, struct async *async, file_pos_t pos )
{
    struct device_file *file = get_fd_user( fd );
    irp_params_t params;

    memset( &params, 0, sizeof(params) );
    params.write.type = IRP_CALL_WRITE;
    params.write.key  = 0;
    params.write.pos  = pos;
    return queue_irp( file, &params, async );
}

static int device_file_flush( struct fd *fd, struct async *async )
{
    struct device_file *file = get_fd_user( fd );
    irp_params_t params;

    memset( &params, 0, sizeof(params) );
    params.flush.type = IRP_CALL_FLUSH;
    return queue_irp( file, &params, async );
}

static int device_file_ioctl( struct fd *fd, ioctl_code_t code, struct async *async )
{
    struct device_file *file = get_fd_user( fd );
    irp_params_t params;

    memset( &params, 0, sizeof(params) );
    params.ioctl.type = IRP_CALL_IOCTL;
    params.ioctl.code = code;
    return queue_irp( file, &params, async );
}
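
/* cancel an irp, queuing a cancel call to the client side if it is already being dispatched there */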
static void cancel_irp_call( struct irp_call *irp )
{
    struct irp_call *cancel_irp;
    irp_params_t params;

    irp->canceled = 1;
    if (!irp->user_ptr || !irp->file || !irp->file->device->manager) return;

    memset( &params, 0, sizeof(params) );
    params.cancel.type = IRP_CALL_CANCEL;
    params.cancel.irp  = irp->user_ptr;

    if ((cancel_irp = create_irp( NULL, &params, NULL )))
    {
        add_irp_to_queue( irp->file->device->manager, cancel_irp, NULL );
        release_object( cancel_irp );
    }

    set_irp_result( irp, STATUS_CANCELLED, NULL, 0, 0 );
}

static void device_file_reselect_async( struct fd *fd, struct async_queue *queue )
{
    struct device_file *file = get_fd_user( fd );
    struct irp_call *irp;

    LIST_FOR_EACH_ENTRY( irp, &file->requests, struct irp_call, dev_entry )
        if (irp->iosb->status != STATUS_PENDING)
        {
            cancel_irp_call( irp );
            return;
        }
}

static struct device *create_device( struct object *root, const struct unicode_str *name,
                                     struct device_manager *manager )
{
    struct device *device;

    if ((device = create_named_object( root, &device_ops, name, 0, NULL )))
    {
        device->unix_path = NULL;
        device->manager = manager;
        grab_object( device );
        list_add_tail( &manager->devices, &device->entry );
        list_init( &device->kernel_object );
        list_init( &device->files );
    }
    return device;
}

struct object *create_unix_device( struct object *root, const struct unicode_str *name,
                                   unsigned int attr, const struct security_descriptor *sd,
                                   const char *unix_path )
{
    struct device *device;

    if ((device = create_named_object( root, &device_ops, name, attr, sd )))
    {
        device->unix_path = strdup( unix_path );
        device->manager = NULL;  /* no manager, requests go straight to the Unix device */
        list_init( &device->kernel_object );
        list_init( &device->files );
    }
    return &device->obj;
}

/* terminate requests when the underlying device is deleted */
static void delete_file( struct device_file *file )
{
    struct irp_call *irp, *next;

    /* the pending requests may be the only thing holding a reference to the file */
    grab_object( file );

    /* terminate all pending requests */
    LIST_FOR_EACH_ENTRY_SAFE( irp, next, &file->requests, struct irp_call, dev_entry )
    {
        list_remove( &irp->mgr_entry );
        set_irp_result( irp, STATUS_FILE_DELETED, NULL, 0, 0 );
    }

    release_object( file );
}
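
/* detach a device from its manager, terminating all pending requests on its open files */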
static void delete_device( struct device *device )
{
    struct device_file *file, *next;

    if (!device->manager) return;  /* already deleted */

    LIST_FOR_EACH_ENTRY_SAFE( file, next, &device->files, struct device_file, entry )
        delete_file( file );

    unlink_named_object( &device->obj );
    list_remove( &device->entry );
    device->manager = NULL;
    release_object( device );
}

static void device_manager_dump( struct object *obj, int verbose )
{
    fprintf( stderr, "Device manager\n" );
}

static int device_manager_signaled( struct object *obj, struct wait_queue_entry *entry )
{
    struct device_manager *manager = (struct device_manager *)obj;

    return !list_empty( &manager->requests );
}

static void device_manager_destroy( struct object *obj )
{
    struct device_manager *manager = (struct device_manager *)obj;
    struct kernel_object *kernel_object;
    struct list *ptr;

    if (manager->current_call)
    {
        release_object( manager->current_call );
        manager->current_call = NULL;
    }

    while (manager->kernel_objects.root)
    {
        kernel_object = WINE_RB_ENTRY_VALUE( manager->kernel_objects.root, struct kernel_object, rb_entry );
        wine_rb_remove( &manager->kernel_objects, &kernel_object->rb_entry );
        list_remove( &kernel_object->list_entry );
        if (kernel_object->owned) release_object( kernel_object->object );
        free( kernel_object );
    }

    while ((ptr = list_head( &manager->devices )))
    {
        struct device *device = LIST_ENTRY( ptr, struct device, entry );
        delete_device( device );
    }

    while ((ptr = list_head( &manager->requests )))
    {
        struct irp_call *irp = LIST_ENTRY( ptr, struct irp_call, mgr_entry );
        list_remove( &irp->mgr_entry );
        assert( !irp->file && !irp->async );
        release_object( irp );
    }
}

static struct device_manager *create_device_manager(void)
{
    struct device_manager *manager;

    if ((manager = alloc_object( &device_manager_ops )))
    {
        manager->current_call = NULL;
        list_init( &manager->devices );
        list_init( &manager->requests );
        wine_rb_init( &manager->kernel_objects, compare_kernel_object );
    }
    return manager;
}
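
/* free all kernel object entries attached to a server object, queuing a free irp for each client-side pointer */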
void free_kernel_objects( struct object *obj )
{
    struct list *ptr, *list;

    if (!(list = obj->ops->get_kernel_obj_list( obj ))) return;

    while ((ptr = list_head( list )))
    {
        struct kernel_object *kernel_object = LIST_ENTRY( ptr, struct kernel_object, list_entry );
        struct irp_call *irp;
        irp_params_t params;

        assert( !kernel_object->owned );

        memset( &params, 0, sizeof(params) );
        params.free.type = IRP_CALL_FREE;
        params.free.obj  = kernel_object->user_ptr;

        if ((irp = create_irp( NULL, &params, NULL )))
        {
            add_irp_to_queue( kernel_object->manager, irp, NULL );
            release_object( irp );
        }

        list_remove( &kernel_object->list_entry );
        wine_rb_remove( &kernel_object->manager->kernel_objects, &kernel_object->rb_entry );
        free( kernel_object );
    }
}

/* create a device manager */
DECL_HANDLER(create_device_manager)
{
    struct device_manager *manager = create_device_manager();

    if (manager)
    {
        reply->handle = alloc_handle( current->process, manager, req->access, req->attributes );
        release_object( manager );
    }
}

/* create a device */
DECL_HANDLER(create_device)
{
    struct device *device;
    struct unicode_str name = get_req_unicode_str();
    struct device_manager *manager;
    struct object *root = NULL;

    if (!(manager = (struct device_manager *)get_handle_obj( current->process, req->manager,
                                                             0, &device_manager_ops )))
        return;

    if (req->rootdir && !(root = get_directory_obj( current->process, req->rootdir )))
    {
        release_object( manager );
        return;
    }

    if ((device = create_device( root, &name, manager )))
    {
        struct kernel_object *ptr = set_kernel_object( manager, &device->obj, req->user_ptr );
        if (ptr)
            grab_kernel_object( ptr );
        else
            set_error( STATUS_NO_MEMORY );
        release_object( device );
    }

    if (root) release_object( root );
    release_object( manager );
}

/* delete a device */
DECL_HANDLER(delete_device)
{
    struct device_manager *manager;
    struct kernel_object *ref;
    struct device *device;

    if (!(manager = (struct device_manager *)get_handle_obj( current->process, req->manager,
                                                             0, &device_manager_ops )))
        return;

    if ((ref = kernel_object_from_ptr( manager, req->device )) && ref->object->ops == &device_ops)
    {
        device = (struct device *)grab_object( ref->object );
        delete_device( device );
        release_object( device );
    }
    else set_error( STATUS_INVALID_HANDLE );

    release_object( manager );
}

/* retrieve the next pending device irp request */
DECL_HANDLER(get_next_device_request)
{
    struct irp_call *irp;
    struct device_manager *manager;
    struct list *ptr;
    struct iosb *iosb;

    if (!(manager = (struct device_manager *)get_handle_obj( current->process, req->manager,
                                                             0, &device_manager_ops )))
        return;

    if (req->prev) close_handle( current->process, req->prev );  /* avoid an extra round-trip for close */

    /* process result of previous call */
    if (manager->current_call)
    {
        irp = manager->current_call;
        irp->user_ptr = req->user_ptr;

        if (req->status)
            set_irp_result( irp, req->status, NULL, 0, 0 );
        if (irp->canceled)
            /* if it was canceled during dispatch, we couldn't queue cancel call without client pointer,
             * so we need to do it now */
            cancel_irp_call( irp );
        else if (irp->async)
            set_async_pending( irp->async, irp->file && is_fd_overlapped( irp->file->fd ) );

        free_irp_params( irp );
        release_object( irp );
        manager->current_call = NULL;
    }

    clear_error();

    if ((ptr = list_head( &manager->requests )))
    {
        struct thread *thread;

        irp = LIST_ENTRY( ptr, struct irp_call, mgr_entry );

        thread = irp->thread ? irp->thread : current;
        reply->client_thread = get_kernel_object_ptr( manager, &thread->obj );
        reply->client_tid    = get_thread_id( thread );

        iosb = irp->iosb;
        reply->in_size = iosb->in_size;
        if (iosb->in_size > get_reply_max_size()) set_error( STATUS_BUFFER_OVERFLOW );
        else if (!irp->file || (reply->next = alloc_handle( current->process, irp, 0, 0 )))
        {
            if (fill_irp_params( manager, irp, &reply->params ))
            {
                set_reply_data_ptr( iosb->in_data, iosb->in_size );
                iosb->in_data = NULL;
                iosb->in_size = 0;
                list_remove( &irp->mgr_entry );
                list_init( &irp->mgr_entry );
                /* we already own the object if it's only on manager queue */
                if (irp->file) grab_object( irp );
                manager->current_call = irp;
            }
            else close_handle( current->process, reply->next );
        }
    }
    else set_error( STATUS_PENDING );

    release_object( manager );
}

/* store results of an async irp */
DECL_HANDLER(set_irp_result)
{
    struct irp_call *irp;

    if ((irp = (struct irp_call *)get_handle_obj( current->process, req->handle, 0, &irp_call_ops )))
    {
        if (!irp->canceled)
            set_irp_result( irp, req->status, get_req_data(), get_req_data_size(), req->size );
        else if (irp->user_ptr)  /* cancel already queued */
            set_error( STATUS_MORE_PROCESSING_REQUIRED );
        else  /* we may be still dispatching the IRP. don't bother queuing cancel if it's already complete */
            irp->canceled = 0;
        close_handle( current->process, req->handle );  /* avoid an extra round-trip for close */
        release_object( irp );
    }
}

/* get kernel pointer from server object */
DECL_HANDLER(get_kernel_object_ptr)
{
    struct device_manager *manager;
    struct object *object = NULL;

    if (!(manager = (struct device_manager *)get_handle_obj( current->process, req->manager,
                                                             0, &device_manager_ops )))
        return;

    if ((object = get_handle_obj( current->process, req->handle, 0, NULL )))
    {
        reply->user_ptr = get_kernel_object_ptr( manager, object );
        release_object( object );
    }

    release_object( manager );
}

/* associate kernel pointer with server object */
DECL_HANDLER(set_kernel_object_ptr)
{
    struct device_manager *manager;
    struct object *object = NULL;

    if (!(manager = (struct device_manager *)get_handle_obj( current->process, req->manager,
                                                             0, &device_manager_ops )))
        return;

    if (!(object = get_handle_obj( current->process, req->handle, 0, NULL )))
    {
        release_object( manager );
        return;
    }

    if (!set_kernel_object( manager, object, req->user_ptr ))
        set_error( STATUS_INVALID_HANDLE );

    release_object( object );
    release_object( manager );
}

/* grab server object reference from kernel object pointer */
DECL_HANDLER(grab_kernel_object)
{
    struct device_manager *manager;
    struct kernel_object *ref;

    if (!(manager = (struct device_manager *)get_handle_obj( current->process, req->manager,
                                                             0, &device_manager_ops )))
        return;

    if ((ref = kernel_object_from_ptr( manager, req->user_ptr )) && !ref->owned)
        grab_kernel_object( ref );
    else
        set_error( STATUS_INVALID_HANDLE );

    release_object( manager );
}

/* release server object reference from kernel object pointer */
DECL_HANDLER(release_kernel_object)
{
    struct device_manager *manager;
    struct kernel_object *ref;

    if (!(manager = (struct device_manager *)get_handle_obj( current->process, req->manager,
                                                             0, &device_manager_ops )))
        return;

    if ((ref = kernel_object_from_ptr( manager, req->user_ptr )) && ref->owned)
    {
        ref->owned = 0;
        release_object( ref->object );
    }
    else set_error( STATUS_INVALID_HANDLE );

    release_object( manager );
}

/* get handle from kernel object pointer */
DECL_HANDLER(get_kernel_object_handle)
{
    struct device_manager *manager;
    struct kernel_object *ref;

    if (!(manager = (struct device_manager *)get_handle_obj( current->process, req->manager,
                                                             0, &device_manager_ops )))
        return;

    if ((ref = kernel_object_from_ptr( manager, req->user_ptr )))
        reply->handle = alloc_handle( current->process, ref->object, req->access, 0 );
    else
        set_error( STATUS_INVALID_HANDLE );

    release_object( manager );
}