/*
 * Server-side file mapping management
 *
 * Copyright (C) 1999 Alexandre Julliard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */

#include "config.h"

#include <assert.h>
#include <fcntl.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <unistd.h>

#include "ntstatus.h"
#define WIN32_NO_STATUS
#include "windef.h"
#include "winternl.h"
#include "ddk/wdm.h"

#include "file.h"
#include "handle.h"
#include "thread.h"
#include "process.h"
#include "request.h"
#include "security.h"

/* list of memory ranges, used to store committed info */
struct ranges
{
    struct object obj; /* object header */
    unsigned int count; /* number of used ranges */
    unsigned int max; /* number of allocated ranges */
    struct range
    {
        file_pos_t start;
        file_pos_t end;
    } *ranges;
};

static void ranges_dump( struct object *obj, int verbose );
static void ranges_destroy( struct object *obj );

static const struct object_ops ranges_ops =
{
    sizeof(struct ranges),     /* size */
    &no_type,                  /* type */
    ranges_dump,               /* dump */
    no_add_queue,              /* add_queue */
    NULL,                      /* remove_queue */
    NULL,                      /* signaled */
    NULL,                      /* satisfied */
    no_signal,                 /* signal */
    no_get_fd,                 /* get_fd */
    default_map_access,        /* map_access */
    default_get_sd,            /* get_sd */
    default_set_sd,            /* set_sd */
    no_get_full_name,          /* get_full_name */
    no_lookup_name,            /* lookup_name */
    no_link_name,              /* link_name */
    NULL,                      /* unlink_name */
    no_open_file,              /* open_file */
    no_kernel_obj_list,        /* get_kernel_obj_list */
    no_get_fast_sync,          /* get_fast_sync */
    no_close_handle,           /* close_handle */
    ranges_destroy             /* destroy */
};

/* file backing the shared sections of a PE image mapping */
struct shared_map
{
    struct object obj; /* object header */
    struct fd *fd; /* file descriptor of the mapped PE file */
    struct file *file; /* temp file holding the shared data */
    struct list entry; /* entry in global shared maps list */
};

static void shared_map_dump( struct object *obj, int verbose );
static void shared_map_destroy( struct object *obj );

static const struct object_ops shared_map_ops =
{
    sizeof(struct shared_map), /* size */
    &no_type,                  /* type */
    shared_map_dump,           /* dump */
    no_add_queue,              /* add_queue */
    NULL,                      /* remove_queue */
    NULL,                      /* signaled */
    NULL,                      /* satisfied */
    no_signal,                 /* signal */
    no_get_fd,                 /* get_fd */
    default_map_access,        /* map_access */
    default_get_sd,            /* get_sd */
    default_set_sd,            /* set_sd */
    no_get_full_name,          /* get_full_name */
    no_lookup_name,            /* lookup_name */
    no_link_name,              /* link_name */
    NULL,                      /* unlink_name */
    no_open_file,              /* open_file */
    no_kernel_obj_list,        /* get_kernel_obj_list */
    no_get_fast_sync,          /* get_fast_sync */
    no_close_handle,           /* close_handle */
    shared_map_destroy         /* destroy */
};

static struct list shared_map_list = LIST_INIT( shared_map_list );

/* memory view mapped in client address space */
struct memory_view
{
    struct list entry; /* entry in per-process view list */
    struct fd *fd; /* fd for mapped file */
    struct ranges *committed; /* list of committed ranges in this mapping */
    struct shared_map *shared; /* temp file for shared PE mapping */
    pe_image_info_t image; /* image info (for PE image mapping) */
    unsigned int flags; /* SEC_* flags */
    client_ptr_t base; /* view base address (in process addr space) */
    mem_size_t size; /* view size */
    file_pos_t start; /* start offset in mapping */
    data_size_t namelen; /* length of name in bytes */
    WCHAR name[1]; /* filename for .so dll image views */
};

static const WCHAR mapping_name[] = {'S','e','c','t','i','o','n'};

struct type_descr mapping_type =
{
    { mapping_name, sizeof(mapping_name) },   /* name */
    SECTION_ALL_ACCESS | SYNCHRONIZE,         /* valid_access */
    {                                         /* mapping */
        STANDARD_RIGHTS_READ | SECTION_QUERY | SECTION_MAP_READ,
        STANDARD_RIGHTS_WRITE | SECTION_MAP_WRITE,
        STANDARD_RIGHTS_EXECUTE | SECTION_MAP_EXECUTE,
        SECTION_ALL_ACCESS
    },
};

struct mapping
{
    struct object obj; /* object header */
    mem_size_t size; /* mapping size */
    unsigned int flags; /* SEC_* flags */
    struct fd *fd; /* fd for mapped file */
    pe_image_info_t image; /* image info (for PE image mapping) */
    struct ranges *committed; /* list of committed ranges in this mapping */
    struct shared_map *shared; /* temp file for shared PE mapping */
};

static void mapping_dump( struct object *obj, int verbose );
static struct fd *mapping_get_fd( struct object *obj );
static void mapping_destroy( struct object *obj );
static enum server_fd_type mapping_get_fd_type( struct fd *fd );

static const struct object_ops mapping_ops =
{
    sizeof(struct mapping),    /* size */
    &mapping_type,             /* type */
    mapping_dump,              /* dump */
    no_add_queue,              /* add_queue */
    NULL,                      /* remove_queue */
    NULL,                      /* signaled */
    NULL,                      /* satisfied */
    no_signal,                 /* signal */
    mapping_get_fd,            /* get_fd */
    default_map_access,        /* map_access */
    default_get_sd,            /* get_sd */
    default_set_sd,            /* set_sd */
    default_get_full_name,     /* get_full_name */
    no_lookup_name,            /* lookup_name */
    directory_link_name,       /* link_name */
    default_unlink_name,       /* unlink_name */
    no_open_file,              /* open_file */
    no_kernel_obj_list,        /* get_kernel_obj_list */
    no_get_fast_sync,          /* get_fast_sync */
    no_close_handle,           /* close_handle */
    mapping_destroy            /* destroy */
};

static const struct fd_ops mapping_fd_ops =
{
    default_fd_get_poll_events, /* get_poll_events */
    default_poll_event,         /* poll_event */
    mapping_get_fd_type,        /* get_fd_type */
    no_fd_read,                 /* read */
    no_fd_write,                /* write */
    no_fd_flush,                /* flush */
    no_fd_get_file_info,        /* get_file_info */
    no_fd_get_volume_info,      /* get_volume_info */
    no_fd_ioctl,                /* ioctl */
    default_fd_cancel_async,    /* cancel_async */
    no_fd_queue_async,          /* queue_async */
    default_fd_reselect_async   /* reselect_async */
};

static size_t page_mask;

#define ROUND_SIZE(size) (((size) + page_mask) & ~page_mask)
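
/* Editor's note: ROUND_SIZE() rounds a byte count up to a whole number of pages.
 * For example, assuming a 4KB page size (page_mask = 0xfff), ROUND_SIZE(0x1234)
 * yields 0x2000, and an already page-aligned value is returned unchanged.
 * page_mask is initialized lazily from sysconf(_SC_PAGESIZE). */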
  196. static void ranges_dump( struct object *obj, int verbose )
  197. {
  198. struct ranges *ranges = (struct ranges *)obj;
  199. fprintf( stderr, "Memory ranges count=%u\n", ranges->count );
  200. }
  201. static void ranges_destroy( struct object *obj )
  202. {
  203. struct ranges *ranges = (struct ranges *)obj;
  204. free( ranges->ranges );
  205. }
  206. static void shared_map_dump( struct object *obj, int verbose )
  207. {
  208. struct shared_map *shared = (struct shared_map *)obj;
  209. fprintf( stderr, "Shared mapping fd=%p file=%p\n", shared->fd, shared->file );
  210. }
  211. static void shared_map_destroy( struct object *obj )
  212. {
  213. struct shared_map *shared = (struct shared_map *)obj;
  214. release_object( shared->fd );
  215. release_object( shared->file );
  216. list_remove( &shared->entry );
  217. }
  218. /* extend a file beyond the current end of file */
  219. int grow_file( int unix_fd, file_pos_t new_size )
  220. {
  221. static const char zero;
  222. off_t size = new_size;
  223. if (sizeof(new_size) > sizeof(size) && size != new_size)
  224. {
  225. set_error( STATUS_INVALID_PARAMETER );
  226. return 0;
  227. }
  228. /* extend the file one byte beyond the requested size and then truncate it */
  229. /* this should work around ftruncate implementations that can't extend files */
  230. if (pwrite( unix_fd, &zero, 1, size ) != -1)
  231. {
  232. ftruncate( unix_fd, size );
  233. return 1;
  234. }
  235. file_set_error();
  236. return 0;
  237. }
  238. /* simplified version of mkstemps() */
  239. static int make_temp_file( char name[16] )
  240. {
  241. static unsigned int value;
  242. int i, fd = -1;
  243. value += (current_time >> 16) + current_time;
  244. for (i = 0; i < 0x8000 && fd < 0; i++, value += 7777)
  245. {
  246. sprintf( name, "tmpmap-%08x", value );
  247. fd = open( name, O_RDWR | O_CREAT | O_EXCL, 0600 );
  248. }
  249. return fd;
  250. }
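
/* Editor's note: the 16-byte name buffer is sized exactly for the generated
 * names: "tmpmap-" (7 chars) + 8 hex digits + terminating NUL = 16 bytes.
 * O_EXCL makes open() fail if the name already exists, so the loop simply
 * retries with the next pseudo-random value. */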

/* check if the current directory allows exec mappings */
static int check_current_dir_for_exec(void)
{
    int fd;
    char tmpfn[16];
    void *ret = MAP_FAILED;

    fd = make_temp_file( tmpfn );
    if (fd == -1) return 0;
    if (grow_file( fd, 1 ))
    {
        ret = mmap( NULL, get_page_size(), PROT_READ | PROT_EXEC, MAP_PRIVATE, fd, 0 );
        if (ret != MAP_FAILED) munmap( ret, get_page_size() );
    }
    close( fd );
    unlink( tmpfn );
    return (ret != MAP_FAILED);
}

/* create a temp file for anonymous mappings */
static int create_temp_file( file_pos_t size )
{
    static int temp_dir_fd = -1;
    char tmpfn[16];
    int fd;

    if (temp_dir_fd == -1)
    {
        temp_dir_fd = server_dir_fd;
        if (!check_current_dir_for_exec())
        {
            /* the server dir is noexec, try the config dir instead */
            fchdir( config_dir_fd );
            if (check_current_dir_for_exec())
                temp_dir_fd = config_dir_fd;
            else  /* neither works, fall back to server dir */
                fchdir( server_dir_fd );
        }
    }
    else if (temp_dir_fd != server_dir_fd) fchdir( temp_dir_fd );

    fd = make_temp_file( tmpfn );
    if (fd != -1)
    {
        if (!grow_file( fd, size ))
        {
            close( fd );
            fd = -1;
        }
        unlink( tmpfn );
    }
    else file_set_error();

    if (temp_dir_fd != server_dir_fd) fchdir( server_dir_fd );
    return fd;
}
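
/* Editor's note: the exec check matters because the anonymous files created
 * here back section objects that clients may later map with PROT_EXEC; on a
 * filesystem mounted noexec such an mmap() would fail, hence the fallback
 * from the server directory to the config directory. */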

/* find a memory view from its base address */
struct memory_view *find_mapped_view( struct process *process, client_ptr_t base )
{
    struct memory_view *view;

    LIST_FOR_EACH_ENTRY( view, &process->views, struct memory_view, entry )
        if (view->base == base) return view;

    set_error( STATUS_NOT_MAPPED_VIEW );
    return NULL;
}

/* find a memory view from any address inside it */
static struct memory_view *find_mapped_addr( struct process *process, client_ptr_t addr )
{
    struct memory_view *view;

    LIST_FOR_EACH_ENTRY( view, &process->views, struct memory_view, entry )
        if (addr >= view->base && addr < view->base + view->size) return view;

    set_error( STATUS_NOT_MAPPED_VIEW );
    return NULL;
}

/* get the main exe memory view */
struct memory_view *get_exe_view( struct process *process )
{
    return LIST_ENTRY( list_head( &process->views ), struct memory_view, entry );
}

static void set_process_machine( struct process *process, struct memory_view *view )
{
    unsigned short machine = view->image.machine;

    if (machine == IMAGE_FILE_MACHINE_I386 && (view->image.image_flags & IMAGE_FLAGS_ComPlusNativeReady))
    {
        if (is_machine_supported( IMAGE_FILE_MACHINE_AMD64 )) machine = IMAGE_FILE_MACHINE_AMD64;
        else if (is_machine_supported( IMAGE_FILE_MACHINE_ARM64 )) machine = IMAGE_FILE_MACHINE_ARM64;
    }
    process->machine = machine;
}

static int generate_dll_event( struct thread *thread, int code, struct memory_view *view )
{
    unsigned short process_machine = thread->process->machine;

    if (!(view->flags & SEC_IMAGE)) return 0;
    if (process_machine != native_machine && process_machine != view->image.machine) return 0;
    generate_debug_event( thread, code, view );
    return 1;
}

/* add a view to the process list */
static void add_process_view( struct thread *thread, struct memory_view *view )
{
    struct process *process = thread->process;
    struct unicode_str name;

    if (view->flags & SEC_IMAGE)
    {
        if (is_process_init_done( process ))
        {
            generate_dll_event( thread, DbgLoadDllStateChange, view );
        }
        else if (!(view->image.image_charact & IMAGE_FILE_DLL))
        {
            /* main exe */
            set_process_machine( process, view );
            list_add_head( &process->views, &view->entry );

            free( process->image );
            process->image = NULL;
            if (get_view_nt_name( view, &name ) && (process->image = memdup( name.str, name.len )))
                process->imagelen = name.len;
            process->image_info = view->image;
            return;
        }
    }
    list_add_tail( &process->views, &view->entry );
}

static void free_memory_view( struct memory_view *view )
{
    if (view->fd) release_object( view->fd );
    if (view->committed) release_object( view->committed );
    if (view->shared) release_object( view->shared );
    list_remove( &view->entry );
    free( view );
}

/* free all mapped views at process exit */
void free_mapped_views( struct process *process )
{
    struct list *ptr;

    while ((ptr = list_head( &process->views )))
        free_memory_view( LIST_ENTRY( ptr, struct memory_view, entry ));
}

/* find the shared PE mapping for a given mapping */
static struct shared_map *get_shared_file( struct fd *fd )
{
    struct shared_map *ptr;

    LIST_FOR_EACH_ENTRY( ptr, &shared_map_list, struct shared_map, entry )
        if (is_same_file_fd( ptr->fd, fd ))
            return (struct shared_map *)grab_object( ptr );
    return NULL;
}

/* return the size of the memory mapping and file range of a given section */
static inline void get_section_sizes( const IMAGE_SECTION_HEADER *sec, size_t *map_size,
                                      off_t *file_start, size_t *file_size )
{
    static const unsigned int sector_align = 0x1ff;

    if (!sec->Misc.VirtualSize) *map_size = ROUND_SIZE( sec->SizeOfRawData );
    else *map_size = ROUND_SIZE( sec->Misc.VirtualSize );

    *file_start = sec->PointerToRawData & ~sector_align;
    *file_size = (sec->SizeOfRawData + (sec->PointerToRawData & sector_align) + sector_align) & ~sector_align;
    if (*file_size > *map_size) *file_size = *map_size;
}
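
/* Editor's note, with illustrative numbers: for a section with
 * PointerToRawData = 0x1265 and SizeOfRawData = 0x300, the raw data is read
 * from the 512-byte sector boundary file_start = 0x1200, and file_size becomes
 * (0x300 + 0x65 + 0x1ff) & ~0x1ff = 0x400, i.e. the whole-sector span covering
 * the raw data, clamped to map_size if larger. */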

/* add a range to the committed list */
static void add_committed_range( struct memory_view *view, file_pos_t start, file_pos_t end )
{
    unsigned int i, j;
    struct ranges *committed = view->committed;
    struct range *ranges;

    if ((start & page_mask) || (end & page_mask) ||
        start >= view->size || end >= view->size ||
        start >= end)
    {
        set_error( STATUS_INVALID_PARAMETER );
        return;
    }

    if (!committed) return;  /* everything committed already */

    start += view->start;
    end += view->start;

    for (i = 0, ranges = committed->ranges; i < committed->count; i++)
    {
        if (ranges[i].start > end) break;
        if (ranges[i].end < start) continue;
        if (ranges[i].start > start) ranges[i].start = start;  /* extend downwards */
        if (ranges[i].end < end)  /* extend upwards and maybe merge with next */
        {
            for (j = i + 1; j < committed->count; j++)
            {
                if (ranges[j].start > end) break;
                if (ranges[j].end > end) end = ranges[j].end;
            }
            if (j > i + 1)
            {
                memmove( &ranges[i + 1], &ranges[j], (committed->count - j) * sizeof(*ranges) );
                committed->count -= j - (i + 1);
            }
            ranges[i].end = end;
        }
        return;
    }

    /* now add a new range */
    if (committed->count == committed->max)
    {
        unsigned int new_size = committed->max * 2;
        struct range *new_ptr = realloc( committed->ranges, new_size * sizeof(*new_ptr) );
        if (!new_ptr) return;
        committed->max = new_size;
        ranges = committed->ranges = new_ptr;
    }
    memmove( &ranges[i + 1], &ranges[i], (committed->count - i) * sizeof(*ranges) );
    ranges[i].start = start;
    ranges[i].end = end;
    committed->count++;
}
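
/* Editor's note, with illustrative offsets (shown after the view->start shift):
 * given committed ranges [0x1000,0x3000) and [0x5000,0x6000), adding
 * [0x2000,0x5000) extends the first range upwards, absorbs the second one, and
 * leaves the single entry [0x1000,0x6000).  Ranges stay sorted and
 * non-overlapping; if the realloc fails the new range is silently dropped. */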

/* find the range containing start and return whether it's committed */
static int find_committed_range( struct memory_view *view, file_pos_t start, mem_size_t *size )
{
    unsigned int i;
    struct ranges *committed = view->committed;
    struct range *ranges;

    if ((start & page_mask) || start >= view->size)
    {
        set_error( STATUS_INVALID_PARAMETER );
        return 0;
    }
    if (!committed)  /* everything is committed */
    {
        *size = view->size - start;
        return 1;
    }
    for (i = 0, ranges = committed->ranges; i < committed->count; i++)
    {
        if (ranges[i].start > view->start + start)
        {
            *size = min( ranges[i].start, view->start + view->size ) - (view->start + start);
            return 0;
        }
        if (ranges[i].end > view->start + start)
        {
            *size = min( ranges[i].end, view->start + view->size ) - (view->start + start);
            return 1;
        }
    }
    *size = view->size - start;
    return 0;
}
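
/* Editor's note: *size receives the length of the run starting at 'start'
 * whose pages all share the same state, and the return value is that state
 * (1 = committed, 0 = reserved only).  A view without a committed ranges
 * object is treated as fully committed. */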

/* allocate and fill the temp file for a shared PE image mapping */
static int build_shared_mapping( struct mapping *mapping, int fd,
                                 IMAGE_SECTION_HEADER *sec, unsigned int nb_sec )
{
    struct shared_map *shared;
    struct file *file;
    unsigned int i;
    mem_size_t total_size;
    size_t file_size, map_size, max_size;
    off_t shared_pos, read_pos, write_pos;
    char *buffer = NULL;
    int shared_fd;
    long toread;

    /* compute the total size of the shared mapping */
    total_size = max_size = 0;
    for (i = 0; i < nb_sec; i++)
    {
        if ((sec[i].Characteristics & IMAGE_SCN_MEM_SHARED) &&
            (sec[i].Characteristics & IMAGE_SCN_MEM_WRITE))
        {
            get_section_sizes( &sec[i], &map_size, &read_pos, &file_size );
            if (file_size > max_size) max_size = file_size;
            total_size += map_size;
        }
    }
    if (!total_size) return 1;  /* nothing to do */

    if ((mapping->shared = get_shared_file( mapping->fd ))) return 1;

    /* create a temp file for the mapping */
    if ((shared_fd = create_temp_file( total_size )) == -1) return 0;
    if (!(file = create_file_for_fd( shared_fd, FILE_GENERIC_READ|FILE_GENERIC_WRITE, 0 ))) return 0;

    if (!(buffer = malloc( max_size ))) goto error;

    /* copy the shared sections data into the temp file */
    shared_pos = 0;
    for (i = 0; i < nb_sec; i++)
    {
        if (!(sec[i].Characteristics & IMAGE_SCN_MEM_SHARED)) continue;
        if (!(sec[i].Characteristics & IMAGE_SCN_MEM_WRITE)) continue;

        get_section_sizes( &sec[i], &map_size, &read_pos, &file_size );
        write_pos = shared_pos;
        shared_pos += map_size;
        if (!sec[i].PointerToRawData || !file_size) continue;

        toread = file_size;
        while (toread)
        {
            long res = pread( fd, buffer + file_size - toread, toread, read_pos );
            if (!res && toread < 0x200)  /* partial sector at EOF is not an error */
            {
                file_size -= toread;
                break;
            }
            if (res <= 0) goto error;
            toread -= res;
            read_pos += res;
        }
        if (pwrite( shared_fd, buffer, file_size, write_pos ) != file_size) goto error;
    }
    if (!(shared = alloc_object( &shared_map_ops ))) goto error;
    shared->fd = (struct fd *)grab_object( mapping->fd );
    shared->file = file;
    list_add_head( &shared_map_list, &shared->entry );
    mapping->shared = shared;
    free( buffer );
    return 1;

 error:
    release_object( file );
    free( buffer );
    return 0;
}

/* load the CLR header from its section */
static int load_clr_header( IMAGE_COR20_HEADER *hdr, size_t va, size_t size, int unix_fd,
                            IMAGE_SECTION_HEADER *sec, unsigned int nb_sec )
{
    ssize_t ret;
    size_t map_size, file_size;
    off_t file_start;
    unsigned int i;

    if (!va || !size) return 0;

    for (i = 0; i < nb_sec; i++)
    {
        if (va < sec[i].VirtualAddress) continue;
        if (sec[i].Misc.VirtualSize && va - sec[i].VirtualAddress >= sec[i].Misc.VirtualSize) continue;
        get_section_sizes( &sec[i], &map_size, &file_start, &file_size );
        if (size >= map_size) continue;
        if (va - sec[i].VirtualAddress >= map_size - size) continue;
        file_size = min( file_size, map_size );
        size = min( size, sizeof(*hdr) );
        ret = pread( unix_fd, hdr, min( size, file_size ), file_start + va - sec[i].VirtualAddress );
        if (ret <= 0) break;
        if (ret < sizeof(*hdr)) memset( (char *)hdr + ret, 0, sizeof(*hdr) - ret );
        return (hdr->MajorRuntimeVersion > COR_VERSION_MAJOR_V2 ||
                (hdr->MajorRuntimeVersion == COR_VERSION_MAJOR_V2 &&
                 hdr->MinorRuntimeVersion >= COR_VERSION_MINOR));
    }
    return 0;
}

/* retrieve the mapping parameters for an executable (PE) image */
static unsigned int get_image_params( struct mapping *mapping, file_pos_t file_size, int unix_fd )
{
    static const char builtin_signature[] = "Wine builtin DLL";
    static const char fakedll_signature[] = "Wine placeholder DLL";

    IMAGE_COR20_HEADER clr;
    IMAGE_SECTION_HEADER sec[96];
    struct
    {
        IMAGE_DOS_HEADER dos;
        char buffer[32];
    } mz;
    struct
    {
        DWORD Signature;
        IMAGE_FILE_HEADER FileHeader;
        union
        {
            IMAGE_OPTIONAL_HEADER32 hdr32;
            IMAGE_OPTIONAL_HEADER64 hdr64;
        } opt;
    } nt;
    off_t pos;
    int size, opt_size;
    size_t mz_size, clr_va, clr_size;
    unsigned int i;

    /* load the headers */
    if (!file_size) return STATUS_INVALID_FILE_FOR_SECTION;
    size = pread( unix_fd, &mz, sizeof(mz), 0 );
    if (size < sizeof(mz.dos)) return STATUS_INVALID_IMAGE_NOT_MZ;
    if (mz.dos.e_magic != IMAGE_DOS_SIGNATURE) return STATUS_INVALID_IMAGE_NOT_MZ;
    mz_size = size;
    pos = mz.dos.e_lfanew;

    size = pread( unix_fd, &nt, sizeof(nt), pos );
    if (size < sizeof(nt.Signature) + sizeof(nt.FileHeader)) return STATUS_INVALID_IMAGE_PROTECT;
    /* zero out Optional header in the case it's not present or partial */
    opt_size = max( nt.FileHeader.SizeOfOptionalHeader, offsetof( IMAGE_OPTIONAL_HEADER32, CheckSum ));
    size = min( size, sizeof(nt.Signature) + sizeof(nt.FileHeader) + opt_size );
    if (size < sizeof(nt)) memset( (char *)&nt + size, 0, sizeof(nt) - size );
    if (nt.Signature != IMAGE_NT_SIGNATURE)
    {
        IMAGE_OS2_HEADER *os2 = (IMAGE_OS2_HEADER *)&nt;
        if (os2->ne_magic != IMAGE_OS2_SIGNATURE) return STATUS_INVALID_IMAGE_PROTECT;
        if (os2->ne_exetyp == 2) return STATUS_INVALID_IMAGE_WIN_16;
        if (os2->ne_exetyp == 5) return STATUS_INVALID_IMAGE_PROTECT;
        return STATUS_INVALID_IMAGE_NE_FORMAT;
    }

    switch (nt.opt.hdr32.Magic)
    {
    case IMAGE_NT_OPTIONAL_HDR32_MAGIC:
        if (!is_machine_32bit( nt.FileHeader.Machine )) return STATUS_INVALID_IMAGE_FORMAT;
        if (!is_machine_supported( nt.FileHeader.Machine )) return STATUS_INVALID_IMAGE_FORMAT;
        clr_va = nt.opt.hdr32.DataDirectory[IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR].VirtualAddress;
        clr_size = nt.opt.hdr32.DataDirectory[IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR].Size;

        mapping->image.base = nt.opt.hdr32.ImageBase;
        mapping->image.entry_point = nt.opt.hdr32.AddressOfEntryPoint;
        mapping->image.map_size = ROUND_SIZE( nt.opt.hdr32.SizeOfImage );
        mapping->image.stack_size = nt.opt.hdr32.SizeOfStackReserve;
        mapping->image.stack_commit = nt.opt.hdr32.SizeOfStackCommit;
        mapping->image.subsystem = nt.opt.hdr32.Subsystem;
        mapping->image.subsystem_minor = nt.opt.hdr32.MinorSubsystemVersion;
        mapping->image.subsystem_major = nt.opt.hdr32.MajorSubsystemVersion;
        mapping->image.osversion_minor = nt.opt.hdr32.MinorOperatingSystemVersion;
        mapping->image.osversion_major = nt.opt.hdr32.MajorOperatingSystemVersion;
        mapping->image.dll_charact = nt.opt.hdr32.DllCharacteristics;
        mapping->image.contains_code = (nt.opt.hdr32.SizeOfCode ||
                                        nt.opt.hdr32.AddressOfEntryPoint ||
                                        nt.opt.hdr32.SectionAlignment & page_mask);
        mapping->image.header_size = nt.opt.hdr32.SizeOfHeaders;
        mapping->image.checksum = nt.opt.hdr32.CheckSum;
        mapping->image.image_flags = 0;
        if (nt.opt.hdr32.SectionAlignment & page_mask)
            mapping->image.image_flags |= IMAGE_FLAGS_ImageMappedFlat;
        if ((nt.opt.hdr32.DllCharacteristics & IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE) &&
            mapping->image.contains_code && !(clr_va && clr_size))
            mapping->image.image_flags |= IMAGE_FLAGS_ImageDynamicallyRelocated;
        break;

    case IMAGE_NT_OPTIONAL_HDR64_MAGIC:
        if (!is_machine_64bit( native_machine )) return STATUS_INVALID_IMAGE_WIN_64;
        if (!is_machine_64bit( nt.FileHeader.Machine )) return STATUS_INVALID_IMAGE_FORMAT;
        if (!is_machine_supported( nt.FileHeader.Machine )) return STATUS_INVALID_IMAGE_FORMAT;
        clr_va = nt.opt.hdr64.DataDirectory[IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR].VirtualAddress;
        clr_size = nt.opt.hdr64.DataDirectory[IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR].Size;

        mapping->image.base = nt.opt.hdr64.ImageBase;
        mapping->image.entry_point = nt.opt.hdr64.AddressOfEntryPoint;
        mapping->image.map_size = ROUND_SIZE( nt.opt.hdr64.SizeOfImage );
        mapping->image.stack_size = nt.opt.hdr64.SizeOfStackReserve;
        mapping->image.stack_commit = nt.opt.hdr64.SizeOfStackCommit;
        mapping->image.subsystem = nt.opt.hdr64.Subsystem;
        mapping->image.subsystem_minor = nt.opt.hdr64.MinorSubsystemVersion;
        mapping->image.subsystem_major = nt.opt.hdr64.MajorSubsystemVersion;
        mapping->image.osversion_minor = nt.opt.hdr64.MinorOperatingSystemVersion;
        mapping->image.osversion_major = nt.opt.hdr64.MajorOperatingSystemVersion;
        mapping->image.dll_charact = nt.opt.hdr64.DllCharacteristics;
        mapping->image.contains_code = (nt.opt.hdr64.SizeOfCode ||
                                        nt.opt.hdr64.AddressOfEntryPoint ||
                                        nt.opt.hdr64.SectionAlignment & page_mask);
        mapping->image.header_size = nt.opt.hdr64.SizeOfHeaders;
        mapping->image.checksum = nt.opt.hdr64.CheckSum;
        mapping->image.image_flags = 0;
        if (nt.opt.hdr64.SectionAlignment & page_mask)
            mapping->image.image_flags |= IMAGE_FLAGS_ImageMappedFlat;
        if ((nt.opt.hdr64.DllCharacteristics & IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE) &&
            mapping->image.contains_code && !(clr_va && clr_size))
            mapping->image.image_flags |= IMAGE_FLAGS_ImageDynamicallyRelocated;
        break;

    default:
        return STATUS_INVALID_IMAGE_FORMAT;
    }

    mapping->image.image_charact = nt.FileHeader.Characteristics;
    mapping->image.machine = nt.FileHeader.Machine;
    mapping->image.dbg_offset = nt.FileHeader.PointerToSymbolTable;
    mapping->image.dbg_size = nt.FileHeader.NumberOfSymbols;
    mapping->image.zerobits = 0; /* FIXME */
    mapping->image.file_size = file_size;
    mapping->image.loader_flags = clr_va && clr_size;
    if (mz_size == sizeof(mz) && !memcmp( mz.buffer, builtin_signature, sizeof(builtin_signature) ))
        mapping->image.image_flags |= IMAGE_FLAGS_WineBuiltin;
    else if (mz_size == sizeof(mz) && !memcmp( mz.buffer, fakedll_signature, sizeof(fakedll_signature) ))
        mapping->image.image_flags |= IMAGE_FLAGS_WineFakeDll;

    /* load the section headers */
    pos += sizeof(nt.Signature) + sizeof(nt.FileHeader) + nt.FileHeader.SizeOfOptionalHeader;
    if (nt.FileHeader.NumberOfSections > ARRAY_SIZE( sec )) return STATUS_INVALID_IMAGE_FORMAT;
    size = sizeof(*sec) * nt.FileHeader.NumberOfSections;
    if (!mapping->size) mapping->size = mapping->image.map_size;
    else if (mapping->size > mapping->image.map_size) return STATUS_SECTION_TOO_BIG;
    if (pos + size > mapping->image.map_size) return STATUS_INVALID_FILE_FOR_SECTION;
    if (pos + size > mapping->image.header_size) mapping->image.header_size = pos + size;
    if (pread( unix_fd, sec, size, pos ) != size) return STATUS_INVALID_FILE_FOR_SECTION;

    for (i = 0; i < nt.FileHeader.NumberOfSections && !mapping->image.contains_code; i++)
        if (sec[i].Characteristics & IMAGE_SCN_MEM_EXECUTE) mapping->image.contains_code = 1;

    if (load_clr_header( &clr, clr_va, clr_size, unix_fd, sec, nt.FileHeader.NumberOfSections ) &&
        (clr.Flags & COMIMAGE_FLAGS_ILONLY))
    {
        mapping->image.image_flags |= IMAGE_FLAGS_ComPlusILOnly;
        if (nt.opt.hdr32.Magic == IMAGE_NT_OPTIONAL_HDR32_MAGIC)
        {
            if (!(clr.Flags & COMIMAGE_FLAGS_32BITREQUIRED))
                mapping->image.image_flags |= IMAGE_FLAGS_ComPlusNativeReady;
            if (clr.Flags & COMIMAGE_FLAGS_32BITPREFERRED)
                mapping->image.image_flags |= IMAGE_FLAGS_ComPlusPrefer32bit;
        }
    }

    if (!build_shared_mapping( mapping, unix_fd, sec, nt.FileHeader.NumberOfSections ))
        return STATUS_INVALID_FILE_FOR_SECTION;

    return STATUS_SUCCESS;
}

static struct ranges *create_ranges(void)
{
    struct ranges *ranges = alloc_object( &ranges_ops );

    if (!ranges) return NULL;
    ranges->count = 0;
    ranges->max = 8;
    if (!(ranges->ranges = mem_alloc( ranges->max * sizeof(*ranges->ranges) )))
    {
        release_object( ranges );
        return NULL;
    }
    return ranges;
}

static unsigned int get_mapping_flags( obj_handle_t handle, unsigned int flags )
{
    switch (flags & (SEC_IMAGE | SEC_RESERVE | SEC_COMMIT | SEC_FILE))
    {
    case SEC_IMAGE:
        if (flags & (SEC_WRITECOMBINE | SEC_LARGE_PAGES)) break;
        if (handle) return SEC_FILE | SEC_IMAGE;
        set_error( STATUS_INVALID_FILE_FOR_SECTION );
        return 0;
    case SEC_COMMIT:
        if (!handle) return flags;
        /* fall through */
    case SEC_RESERVE:
        if (flags & SEC_LARGE_PAGES) break;
        if (handle) return SEC_FILE | (flags & (SEC_NOCACHE | SEC_WRITECOMBINE));
        return flags;
    }
    set_error( STATUS_INVALID_PARAMETER );
    return 0;
}
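
/* Editor's note: the result normalizes the caller's flags.  SEC_IMAGE requires
 * a file handle and becomes SEC_FILE | SEC_IMAGE; file-backed SEC_COMMIT or
 * SEC_RESERVE sections become SEC_FILE plus any SEC_NOCACHE/SEC_WRITECOMBINE
 * bits; purely anonymous sections keep the flags as passed in.  Invalid
 * combinations fail with STATUS_INVALID_PARAMETER, or
 * STATUS_INVALID_FILE_FOR_SECTION for SEC_IMAGE without a file. */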

static struct mapping *create_mapping( struct object *root, const struct unicode_str *name,
                                       unsigned int attr, mem_size_t size, unsigned int flags,
                                       obj_handle_t handle, unsigned int file_access,
                                       const struct security_descriptor *sd )
{
    struct mapping *mapping;
    struct file *file;
    struct fd *fd;
    int unix_fd;
    struct stat st;

    if (!page_mask) page_mask = sysconf( _SC_PAGESIZE ) - 1;

    if (!(mapping = create_named_object( root, &mapping_ops, name, attr, sd )))
        return NULL;
    if (get_error() == STATUS_OBJECT_NAME_EXISTS)
        return mapping;  /* Nothing else to do */

    mapping->size = size;
    mapping->fd = NULL;
    mapping->shared = NULL;
    mapping->committed = NULL;

    if (!(mapping->flags = get_mapping_flags( handle, flags ))) goto error;

    if (handle)
    {
        const unsigned int sharing = FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE;
        unsigned int mapping_access = FILE_MAPPING_ACCESS;

        if (!(file = get_file_obj( current->process, handle, file_access ))) goto error;
        fd = get_obj_fd( (struct object *)file );

        /* file sharing rules for mappings are different, so we use magic access rights */
        if (flags & SEC_IMAGE) mapping_access |= FILE_MAPPING_IMAGE;
        else if (file_access & FILE_WRITE_DATA) mapping_access |= FILE_MAPPING_WRITE;

        if (!(mapping->fd = get_fd_object_for_mapping( fd, mapping_access, sharing )))
        {
            mapping->fd = dup_fd_object( fd, mapping_access, sharing, FILE_SYNCHRONOUS_IO_NONALERT );
            if (mapping->fd) set_fd_user( mapping->fd, &mapping_fd_ops, NULL );
        }
        release_object( file );
        release_object( fd );
        if (!mapping->fd) goto error;

        if ((unix_fd = get_unix_fd( mapping->fd )) == -1) goto error;
        if (fstat( unix_fd, &st ) == -1)
        {
            file_set_error();
            goto error;
        }
        if (flags & SEC_IMAGE)
        {
            unsigned int err = get_image_params( mapping, st.st_size, unix_fd );
            if (!err) return mapping;
            set_error( err );
            goto error;
        }
        if (!mapping->size)
        {
            if (!(mapping->size = st.st_size))
            {
                set_error( STATUS_MAPPED_FILE_SIZE_ZERO );
                goto error;
            }
        }
        else if (st.st_size < mapping->size)
        {
            if (!(file_access & FILE_WRITE_DATA))
            {
                set_error( STATUS_SECTION_TOO_BIG );
                goto error;
            }
            if (!grow_file( unix_fd, mapping->size )) goto error;
        }
    }
    else  /* Anonymous mapping (no associated file) */
    {
        if (!mapping->size)
        {
            set_error( STATUS_INVALID_PARAMETER );
            goto error;
        }
        if ((flags & SEC_RESERVE) && !(mapping->committed = create_ranges())) goto error;
        mapping->size = (mapping->size + page_mask) & ~((mem_size_t)page_mask);
        if ((unix_fd = create_temp_file( mapping->size )) == -1) goto error;
        if (!(mapping->fd = create_anonymous_fd( &mapping_fd_ops, unix_fd, &mapping->obj,
                                                 FILE_SYNCHRONOUS_IO_NONALERT ))) goto error;
        allow_fd_caching( mapping->fd );
    }
    return mapping;

 error:
    release_object( mapping );
    return NULL;
}

/* create a read-only file mapping for the specified fd */
struct mapping *create_fd_mapping( struct object *root, const struct unicode_str *name,
                                   struct fd *fd, unsigned int attr, const struct security_descriptor *sd )
{
    struct mapping *mapping;
    int unix_fd;
    struct stat st;

    if (!(mapping = create_named_object( root, &mapping_ops, name, attr, sd ))) return NULL;
    if (get_error() == STATUS_OBJECT_NAME_EXISTS) return mapping;  /* Nothing else to do */

    mapping->shared = NULL;
    mapping->committed = NULL;
    mapping->flags = SEC_FILE;
    mapping->fd = (struct fd *)grab_object( fd );
    set_fd_user( mapping->fd, &mapping_fd_ops, NULL );

    if ((unix_fd = get_unix_fd( mapping->fd )) == -1) goto error;
    if (fstat( unix_fd, &st ) == -1)
    {
        file_set_error();
        goto error;
    }
    if (!(mapping->size = st.st_size))
    {
        set_error( STATUS_MAPPED_FILE_SIZE_ZERO );
        goto error;
    }
    return mapping;

 error:
    release_object( mapping );
    return NULL;
}

static struct mapping *get_mapping_obj( struct process *process, obj_handle_t handle, unsigned int access )
{
    return (struct mapping *)get_handle_obj( process, handle, access, &mapping_ops );
}

/* open a new file for the file descriptor backing the view */
struct file *get_view_file( const struct memory_view *view, unsigned int access, unsigned int sharing )
{
    if (!view->fd) return NULL;
    return create_file_for_fd_obj( view->fd, access, sharing );
}

/* get the image info for a SEC_IMAGE mapped view */
const pe_image_info_t *get_view_image_info( const struct memory_view *view, client_ptr_t *base )
{
    if (!(view->flags & SEC_IMAGE)) return NULL;
    *base = view->base;
    return &view->image;
}

/* get the file name for a mapped view */
int get_view_nt_name( const struct memory_view *view, struct unicode_str *name )
{
    if (view->namelen)  /* .so builtin */
    {
        name->str = view->name;
        name->len = view->namelen;
        return 1;
    }
    if (!view->fd) return 0;
    get_nt_name( view->fd, name );
    return 1;
}

/* generate all startup events of a given process */
void generate_startup_debug_events( struct process *process )
{
    struct memory_view *view;
    struct list *ptr = list_head( &process->views );
    struct thread *thread, *first_thread = get_process_first_thread( process );

    if (!ptr) return;
    view = LIST_ENTRY( ptr, struct memory_view, entry );
    generate_debug_event( first_thread, DbgCreateProcessStateChange, view );

    /* generate ntdll.dll load event */
    while (ptr && (ptr = list_next( &process->views, ptr )))
    {
        view = LIST_ENTRY( ptr, struct memory_view, entry );
        if (generate_dll_event( first_thread, DbgLoadDllStateChange, view )) break;
    }

    /* generate creation events */
    LIST_FOR_EACH_ENTRY( thread, &process->thread_list, struct thread, proc_entry )
    {
        if (thread != first_thread)
            generate_debug_event( thread, DbgCreateThreadStateChange, NULL );
    }

    /* generate dll events (in loading order) */
    while (ptr && (ptr = list_next( &process->views, ptr )))
    {
        view = LIST_ENTRY( ptr, struct memory_view, entry );
        generate_dll_event( first_thread, DbgLoadDllStateChange, view );
    }
}

static void mapping_dump( struct object *obj, int verbose )
{
    struct mapping *mapping = (struct mapping *)obj;
    assert( obj->ops == &mapping_ops );
    fprintf( stderr, "Mapping size=%08x%08x flags=%08x fd=%p shared=%p\n",
             (unsigned int)(mapping->size >> 32), (unsigned int)mapping->size,
             mapping->flags, mapping->fd, mapping->shared );
}

static struct fd *mapping_get_fd( struct object *obj )
{
    struct mapping *mapping = (struct mapping *)obj;
    return (struct fd *)grab_object( mapping->fd );
}

static void mapping_destroy( struct object *obj )
{
    struct mapping *mapping = (struct mapping *)obj;
    assert( obj->ops == &mapping_ops );
    if (mapping->fd) release_object( mapping->fd );
    if (mapping->committed) release_object( mapping->committed );
    if (mapping->shared) release_object( mapping->shared );
}

static enum server_fd_type mapping_get_fd_type( struct fd *fd )
{
    return FD_TYPE_FILE;
}

int get_page_size(void)
{
    if (!page_mask) page_mask = sysconf( _SC_PAGESIZE ) - 1;
    return page_mask + 1;
}

struct object *create_user_data_mapping( struct object *root, const struct unicode_str *name,
                                         unsigned int attr, const struct security_descriptor *sd )
{
    void *ptr;
    struct mapping *mapping;

    if (!(mapping = create_mapping( root, name, attr, sizeof(KSHARED_USER_DATA),
                                    SEC_COMMIT, 0, FILE_READ_DATA | FILE_WRITE_DATA, sd ))) return NULL;
    ptr = mmap( NULL, mapping->size, PROT_WRITE, MAP_SHARED, get_unix_fd( mapping->fd ), 0 );
    if (ptr != MAP_FAILED)
    {
        user_shared_data = ptr;
        user_shared_data->SystemCall = 1;
    }
    return &mapping->obj;
}

/* create a file mapping */
DECL_HANDLER(create_mapping)
{
    struct object *root;
    struct mapping *mapping;
    struct unicode_str name;
    const struct security_descriptor *sd;
    const struct object_attributes *objattr = get_req_object_attributes( &sd, &name, &root );

    if (!objattr) return;

    if ((mapping = create_mapping( root, &name, objattr->attributes, req->size, req->flags,
                                   req->file_handle, req->file_access, sd )))
    {
        if (get_error() == STATUS_OBJECT_NAME_EXISTS)
            reply->handle = alloc_handle( current->process, &mapping->obj, req->access, objattr->attributes );
        else
            reply->handle = alloc_handle_no_access_check( current->process, &mapping->obj,
                                                          req->access, objattr->attributes );
        release_object( mapping );
    }

    if (root) release_object( root );
}

/* open a handle to a mapping */
DECL_HANDLER(open_mapping)
{
    struct unicode_str name = get_req_unicode_str();

    reply->handle = open_object( current->process, req->rootdir, req->access,
                                 &mapping_ops, &name, req->attributes );
}

/* get information about a mapping */
DECL_HANDLER(get_mapping_info)
{
    struct mapping *mapping;

    if (!(mapping = get_mapping_obj( current->process, req->handle, req->access ))) return;

    reply->size = mapping->size;
    reply->flags = mapping->flags;

    if (mapping->flags & SEC_IMAGE)
    {
        struct unicode_str name = { NULL, 0 };
        data_size_t size;
        void *data;

        if (mapping->fd) get_nt_name( mapping->fd, &name );
        size = min( sizeof(pe_image_info_t) + name.len, get_reply_max_size() );
        if ((data = set_reply_data_size( size )))
        {
            memcpy( data, &mapping->image, min( sizeof(pe_image_info_t), size ));
            if (size > sizeof(pe_image_info_t))
                memcpy( (pe_image_info_t *)data + 1, name.str, size - sizeof(pe_image_info_t) );
        }
        reply->total = sizeof(pe_image_info_t) + name.len;
    }

    if (!(req->access & (SECTION_MAP_READ | SECTION_MAP_WRITE)))  /* query only */
    {
        release_object( mapping );
        return;
    }

    if (mapping->shared)
        reply->shared_file = alloc_handle( current->process, mapping->shared->file,
                                           GENERIC_READ|GENERIC_WRITE, 0 );
    release_object( mapping );
}
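
/* Editor's note: for SEC_IMAGE mappings the reply data is a pe_image_info_t
 * followed immediately by the NT path of the backing file; reply->total always
 * reports the full (untruncated) size even when the copied data was clipped
 * to get_reply_max_size(). */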

/* add a memory view in the current process */
DECL_HANDLER(map_view)
{
    struct mapping *mapping = NULL;
    struct memory_view *view;
    data_size_t namelen = 0;

    if (!req->size || (req->base & page_mask) || req->base + req->size < req->base)  /* overflow */
    {
        set_error( STATUS_INVALID_PARAMETER );
        return;
    }

    /* make sure we don't already have an overlapping view */
    LIST_FOR_EACH_ENTRY( view, &current->process->views, struct memory_view, entry )
    {
        if (view->base + view->size <= req->base) continue;
        if (view->base >= req->base + req->size) continue;
        set_error( STATUS_INVALID_PARAMETER );
        return;
    }

    if (!req->mapping)  /* image mapping for a .so dll */
    {
        if (get_req_data_size() > sizeof(view->image)) namelen = get_req_data_size() - sizeof(view->image);
        if (!(view = mem_alloc( offsetof( struct memory_view, name[namelen] )))) return;
        memset( view, 0, sizeof(*view) );
        view->base = req->base;
        view->size = req->size;
        view->start = req->start;
        view->flags = SEC_IMAGE;
        view->namelen = namelen;
        memcpy( &view->image, get_req_data(), min( sizeof(view->image), get_req_data_size() ));
        memcpy( view->name, (pe_image_info_t *)get_req_data() + 1, namelen );
        add_process_view( current, view );
        return;
    }

    if (!(mapping = get_mapping_obj( current->process, req->mapping, req->access ))) return;

    if (mapping->flags & SEC_IMAGE)
    {
        if (req->start || req->size > mapping->image.map_size)
        {
            set_error( STATUS_INVALID_PARAMETER );
            goto done;
        }
    }
    else if (req->start >= mapping->size ||
             req->start + req->size < req->start ||
             req->start + req->size > ((mapping->size + page_mask) & ~(mem_size_t)page_mask))
    {
        set_error( STATUS_INVALID_PARAMETER );
        goto done;
    }

    if ((view = mem_alloc( offsetof( struct memory_view, name[namelen] ))))
    {
        view->base = req->base;
        view->size = req->size;
        view->start = req->start;
        view->flags = mapping->flags;
        view->namelen = namelen;
        view->fd = !is_fd_removable( mapping->fd ) ? (struct fd *)grab_object( mapping->fd ) : NULL;
        view->committed = mapping->committed ? (struct ranges *)grab_object( mapping->committed ) : NULL;
        view->shared = mapping->shared ? (struct shared_map *)grab_object( mapping->shared ) : NULL;
        if (view->flags & SEC_IMAGE) view->image = mapping->image;
        add_process_view( current, view );
        if (view->flags & SEC_IMAGE && view->base != mapping->image.base)
            set_error( STATUS_IMAGE_NOT_AT_BASE );
    }

done:
    release_object( mapping );
}

/* unmap a memory view from the current process */
DECL_HANDLER(unmap_view)
{
    struct memory_view *view = find_mapped_view( current->process, req->base );

    if (!view) return;
    generate_dll_event( current, DbgUnloadDllStateChange, view );
    free_memory_view( view );
}

/* get a range of committed pages in a file mapping */
DECL_HANDLER(get_mapping_committed_range)
{
    struct memory_view *view = find_mapped_view( current->process, req->base );

    if (view) reply->committed = find_committed_range( view, req->offset, &reply->size );
}

/* add a range to the committed pages in a file mapping */
DECL_HANDLER(add_mapping_committed_range)
{
    struct memory_view *view = find_mapped_view( current->process, req->base );

    if (view) add_committed_range( view, req->offset, req->offset + req->size );
}

/* check if two memory maps are for the same file */
DECL_HANDLER(is_same_mapping)
{
    struct memory_view *view1 = find_mapped_view( current->process, req->base1 );
    struct memory_view *view2 = find_mapped_view( current->process, req->base2 );

    if (!view1 || !view2) return;
    if (!view1->fd || !view2->fd || !(view1->flags & SEC_IMAGE) || !is_same_file_fd( view1->fd, view2->fd ))
        set_error( STATUS_NOT_SAME_DEVICE );
}

/* get the filename of a mapping */
DECL_HANDLER(get_mapping_filename)
{
    struct process *process;
    struct memory_view *view;
    struct unicode_str name;

    if (!(process = get_process_from_handle( req->process, PROCESS_QUERY_INFORMATION ))) return;

    if ((view = find_mapped_addr( process, req->addr )) && get_view_nt_name( view, &name ))
    {
        reply->len = name.len;
        if (name.len > get_reply_max_size()) set_error( STATUS_BUFFER_OVERFLOW );
        else if (!name.len) set_error( STATUS_FILE_INVALID );
        else set_reply_data( name.str, name.len );
    }
    else set_error( STATUS_INVALID_ADDRESS );

    release_object( process );
}