ump_kernel_api.c 16 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535
  1. /*
  2. * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
  3. *
  4. * This program is free software and is provided to you under the terms of the GNU General Public License version 2
  5. * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
  6. *
  7. * A copy of the licence is included with the program, and can also be obtained from Free Software
  8. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
  9. */
  10. #include "mali_osk.h"
  11. #include "mali_osk_list.h"
  12. #include "ump_osk.h"
  13. #include "ump_uk_types.h"
  14. #include "ump_kernel_interface.h"
  15. #include "ump_kernel_common.h"
  16. /* ---------------- UMP kernel space API functions follows ---------------- */
  17. UMP_KERNEL_API_EXPORT ump_secure_id ump_dd_secure_id_get(ump_dd_handle memh)
  18. {
  19. ump_dd_mem * mem = (ump_dd_mem *)memh;
  20. DEBUG_ASSERT_POINTER(mem);
  21. DBG_MSG(5, ("Returning secure ID. ID: %u\n", mem->secure_id));
  22. return mem->secure_id;
  23. }
  24. UMP_KERNEL_API_EXPORT ump_dd_handle ump_dd_handle_create_from_secure_id(ump_secure_id secure_id)
  25. {
  26. ump_dd_mem * mem;
  27. _mali_osk_lock_wait(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
  28. DBG_MSG(5, ("Getting handle from secure ID. ID: %u\n", secure_id));
  29. if (0 != ump_descriptor_mapping_get(device.secure_id_map, (int)secure_id, (void**)&mem))
  30. {
  31. _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
  32. DBG_MSG(1, ("Secure ID not found. ID: %u\n", secure_id));
  33. return UMP_DD_HANDLE_INVALID;
  34. }
  35. ump_dd_reference_add(mem);
  36. _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
  37. return (ump_dd_handle)mem;
  38. }
  39. UMP_KERNEL_API_EXPORT unsigned long ump_dd_phys_block_count_get(ump_dd_handle memh)
  40. {
  41. ump_dd_mem * mem = (ump_dd_mem*) memh;
  42. DEBUG_ASSERT_POINTER(mem);
  43. return mem->nr_blocks;
  44. }
  45. UMP_KERNEL_API_EXPORT ump_dd_status_code ump_dd_phys_blocks_get(ump_dd_handle memh, ump_dd_physical_block * blocks, unsigned long num_blocks)
  46. {
  47. ump_dd_mem * mem = (ump_dd_mem *)memh;
  48. DEBUG_ASSERT_POINTER(mem);
  49. if (blocks == NULL)
  50. {
  51. DBG_MSG(1, ("NULL parameter in ump_dd_phys_blocks_get()\n"));
  52. return UMP_DD_INVALID;
  53. }
  54. if (mem->nr_blocks != num_blocks)
  55. {
  56. DBG_MSG(1, ("Specified number of blocks do not match actual number of blocks\n"));
  57. return UMP_DD_INVALID;
  58. }
  59. DBG_MSG(5, ("Returning physical block information. ID: %u\n", mem->secure_id));
  60. _mali_osk_memcpy(blocks, mem->block_array, sizeof(ump_dd_physical_block) * mem->nr_blocks);
  61. return UMP_DD_SUCCESS;
  62. }
  63. UMP_KERNEL_API_EXPORT ump_dd_status_code ump_dd_phys_block_get(ump_dd_handle memh, unsigned long index, ump_dd_physical_block * block)
  64. {
  65. ump_dd_mem * mem = (ump_dd_mem *)memh;
  66. DEBUG_ASSERT_POINTER(mem);
  67. if (block == NULL)
  68. {
  69. DBG_MSG(1, ("NULL parameter in ump_dd_phys_block_get()\n"));
  70. return UMP_DD_INVALID;
  71. }
  72. if (index >= mem->nr_blocks)
  73. {
  74. DBG_MSG(5, ("Invalid index specified in ump_dd_phys_block_get()\n"));
  75. return UMP_DD_INVALID;
  76. }
  77. DBG_MSG(5, ("Returning physical block information. ID: %u, index: %lu\n", mem->secure_id, index));
  78. *block = mem->block_array[index];
  79. return UMP_DD_SUCCESS;
  80. }
  81. UMP_KERNEL_API_EXPORT unsigned long ump_dd_size_get(ump_dd_handle memh)
  82. {
  83. ump_dd_mem * mem = (ump_dd_mem*)memh;
  84. DEBUG_ASSERT_POINTER(mem);
  85. DBG_MSG(5, ("Returning size. ID: %u, size: %lu\n", mem->secure_id, mem->size_bytes));
  86. return mem->size_bytes;
  87. }
  88. UMP_KERNEL_API_EXPORT void ump_dd_reference_add(ump_dd_handle memh)
  89. {
  90. ump_dd_mem * mem = (ump_dd_mem*)memh;
  91. int new_ref;
  92. DEBUG_ASSERT_POINTER(mem);
  93. new_ref = _ump_osk_atomic_inc_and_read(&mem->ref_count);
  94. DBG_MSG(5, ("Memory reference incremented. ID: %u, new value: %d\n", mem->secure_id, new_ref));
  95. }
/**
 * Drop one reference on a UMP allocation.
 *
 * On the final release the allocation is removed from the secure ID map,
 * its release callback is invoked, and the descriptor is freed.
 */
UMP_KERNEL_API_EXPORT void ump_dd_reference_release(ump_dd_handle memh)
{
	int new_ref;
	ump_dd_mem * mem = (ump_dd_mem*)memh;

	DEBUG_ASSERT_POINTER(mem);

	/* We must hold this mutex while doing the atomic_dec_and_read, to protect
	   that elements in the ump_descriptor_mapping table is always valid. If they
	   are not, userspace may accidentally map in this secure_id right before it is
	   freed, giving a mapped backdoor into unallocated memory. */
	_mali_osk_lock_wait(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
	new_ref = _ump_osk_atomic_dec_and_read(&mem->ref_count);
	DBG_MSG(5, ("Memory reference decremented. ID: %u, new value: %d\n", mem->secure_id, new_ref));
	if (0 == new_ref)
	{
		DBG_MSG(3, ("Final release of memory. ID: %u\n", mem->secure_id));
		/* Remove the ID from the map while still holding the lock so no new
		   lookup can resurrect the allocation; the lock is dropped before the
		   release callback runs. */
		ump_descriptor_mapping_free(device.secure_id_map, (int)mem->secure_id);
		_mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
		mem->release_func(mem->ctx, mem);
		_mali_osk_free(mem);
	}
	else
	{
		_mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
	}
}
  121. /* --------------- Handling of user space requests follows --------------- */
  122. _mali_osk_errcode_t _ump_uku_get_api_version( _ump_uk_api_version_s *args )
  123. {
  124. ump_session_data * session_data;
  125. DEBUG_ASSERT_POINTER( args );
  126. DEBUG_ASSERT_POINTER( args->ctx );
  127. session_data = (ump_session_data *)args->ctx;
  128. /* check compatability */
  129. if (args->version == UMP_IOCTL_API_VERSION)
  130. {
  131. DBG_MSG(3, ("API version set to newest %d (compatible)\n", GET_VERSION(args->version)));
  132. args->compatible = 1;
  133. session_data->api_version = args->version;
  134. }
  135. else if (args->version == MAKE_VERSION_ID(1))
  136. {
  137. DBG_MSG(2, ("API version set to depricated: %d (compatible)\n", GET_VERSION(args->version)));
  138. args->compatible = 1;
  139. session_data->api_version = args->version;
  140. }
  141. else
  142. {
  143. DBG_MSG(2, ("API version set to %d (incompatible with client version %d)\n", GET_VERSION(UMP_IOCTL_API_VERSION), GET_VERSION(args->version)));
  144. args->compatible = 0;
  145. args->version = UMP_IOCTL_API_VERSION; /* report our version */
  146. }
  147. return _MALI_OSK_ERR_OK;
  148. }
  149. _mali_osk_errcode_t _ump_ukk_release( _ump_uk_release_s *release_info )
  150. {
  151. ump_session_memory_list_element * session_memory_element;
  152. ump_session_memory_list_element * tmp;
  153. ump_session_data * session_data;
  154. _mali_osk_errcode_t ret = _MALI_OSK_ERR_INVALID_FUNC;
  155. int secure_id;
  156. DEBUG_ASSERT_POINTER( release_info );
  157. DEBUG_ASSERT_POINTER( release_info->ctx );
  158. /* Retreive the session data */
  159. session_data = (ump_session_data*)release_info->ctx;
  160. /* If there are many items in the memory session list we
  161. * could be de-referencing this pointer a lot so keep a local copy
  162. */
  163. secure_id = release_info->secure_id;
  164. DBG_MSG(4, ("Releasing memory with IOCTL, ID: %u\n", secure_id));
  165. /* Iterate through the memory list looking for the requested secure ID */
  166. _mali_osk_lock_wait(session_data->lock, _MALI_OSK_LOCKMODE_RW);
  167. _MALI_OSK_LIST_FOREACHENTRY(session_memory_element, tmp, &session_data->list_head_session_memory_list, ump_session_memory_list_element, list)
  168. {
  169. if ( session_memory_element->mem->secure_id == secure_id)
  170. {
  171. ump_dd_mem *release_mem;
  172. release_mem = session_memory_element->mem;
  173. _mali_osk_list_del(&session_memory_element->list);
  174. ump_dd_reference_release(release_mem);
  175. _mali_osk_free(session_memory_element);
  176. ret = _MALI_OSK_ERR_OK;
  177. break;
  178. }
  179. }
  180. _mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);
  181. DBG_MSG_IF(1, _MALI_OSK_ERR_OK != ret, ("UMP memory with ID %u does not belong to this session.\n", secure_id));
  182. DBG_MSG(4, ("_ump_ukk_release() returning 0x%x\n", ret));
  183. return ret;
  184. }
  185. _mali_osk_errcode_t _ump_ukk_size_get( _ump_uk_size_get_s *user_interaction )
  186. {
  187. ump_dd_mem * mem;
  188. _mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT;
  189. DEBUG_ASSERT_POINTER( user_interaction );
  190. /* We lock the mappings so things don't get removed while we are looking for the memory */
  191. _mali_osk_lock_wait(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
  192. if (0 == ump_descriptor_mapping_get(device.secure_id_map, (int)user_interaction->secure_id, (void**)&mem))
  193. {
  194. user_interaction->size = mem->size_bytes;
  195. DBG_MSG(4, ("Returning size. ID: %u, size: %lu ", (ump_secure_id)user_interaction->secure_id, (unsigned long)user_interaction->size));
  196. ret = _MALI_OSK_ERR_OK;
  197. }
  198. else
  199. {
  200. user_interaction->size = 0;
  201. DBG_MSG(1, ("Failed to look up mapping in ump_ioctl_size_get(). ID: %u\n", (ump_secure_id)user_interaction->secure_id));
  202. }
  203. _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
  204. return ret;
  205. }
/**
 * Handle a user-space cache maintenance (msync) request.
 *
 * Looks up the allocation by args->secure_id, reports its cache setting
 * back through args->is_cached, and — unless the request is readout-only
 * or the memory is uncached — performs the cache operation over the
 * requested range (or the whole mapping when address/size are zero) via
 * the per-OS _ump_osk_msync() implementation.
 */
void _ump_ukk_msync( _ump_uk_msync_s *args )
{
	ump_dd_mem * mem = NULL;
	void *virtual = NULL;
	u32 size = 0;
	u32 offset = 0;

	/* Resolve the secure ID under the map lock. */
	_mali_osk_lock_wait(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
	ump_descriptor_mapping_get(device.secure_id_map, (int)args->secure_id, (void**)&mem);
	if (NULL == mem)
	{
		_mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
		DBG_MSG(1, ("Failed to look up mapping in _ump_ukk_msync(). ID: %u\n", (ump_secure_id)args->secure_id));
		return;
	}
	/* Ensure the memory doesn't disappear while we are flushing it. */
	ump_dd_reference_add(mem);
	_mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
	/* Return the cache setting back to user space. */
	args->is_cached=mem->is_cached;
	/* If this flag is the only one set, we should not do the actual flush, only the readout. */
	if ( _UMP_UK_MSYNC_READOUT_CACHE_ENABLED==args->op )
	{
		DBG_MSG(3, ("_ump_ukk_msync READOUT ID: %u Enabled: %d\n", (ump_secure_id)args->secure_id, mem->is_cached));
		goto msync_release_and_return;
	}
	/* Nothing to do if the memory is not cached. */
	if ( 0==mem->is_cached )
	{
		DBG_MSG(3, ("_ump_ukk_msync IGNORING ID: %u Enabled: %d OP: %d\n", (ump_secure_id)args->secure_id, mem->is_cached, args->op));
		goto msync_release_and_return;
	}
	DBG_MSG(3, ("UMP[%02u] _ump_ukk_msync Flush OP: %d Address: 0x%08x Mapping: 0x%08x\n",
	            (ump_secure_id)args->secure_id, args->op, args->address, args->mapping));
	if ( args->address )
	{
		virtual = (void *)((u32)args->address);
		/* Offset of the requested address within the user-space mapping. */
		offset = (u32)((args->address) - (args->mapping));
	} else {
		/* Flush entire mapping when no address is specified. */
		virtual = args->mapping;
	}
	if ( args->size )
	{
		size = args->size;
	} else {
		/* Flush entire mapping when no size is specified. */
		size = mem->size_bytes - offset;
	}
	/* Reject ranges that run past the end of the allocation. */
	if ( (offset + size) > mem->size_bytes )
	{
		DBG_MSG(1, ("Trying to flush more than the entire UMP allocation: offset: %u + size: %u > %u\n", offset, size, mem->size_bytes));
		goto msync_release_and_return;
	}
	/* The actual cache flush - implemented for each OS. */
	_ump_osk_msync( mem, virtual, offset, size, args->op, NULL);
msync_release_and_return:
	/* Balance the reference taken after the lookup above. */
	ump_dd_reference_release(mem);
	return;
}
/**
 * Bracket a batch of cache operations for a session.
 *
 * _UMP_UK_CACHE_OP_START increments the session's ongoing-operations
 * counter; _UMP_UK_CACHE_OP_FINISH decrements it and unconditionally
 * flushes the L1 cache. Any other op is logged as an illegal call.
 * The counter and the flush are protected by the session lock.
 */
void _ump_ukk_cache_operations_control(_ump_uk_cache_operations_control_s* args)
{
	ump_session_data * session_data;
	ump_uk_cache_op_control op;

	DEBUG_ASSERT_POINTER( args );
	DEBUG_ASSERT_POINTER( args->ctx );

	op = args->op;
	session_data = (ump_session_data *)args->ctx;

	_mali_osk_lock_wait(session_data->lock, _MALI_OSK_LOCKMODE_RW);
	if ( op== _UMP_UK_CACHE_OP_START )
	{
		session_data->cache_operations_ongoing++;
		DBG_MSG(4, ("Cache ops start\n" ));
		/* Nested/overlapping batches are allowed but logged for visibility. */
		if ( session_data->cache_operations_ongoing != 1 )
		{
			DBG_MSG(2, ("UMP: Number of simultanious cache control ops: %d\n", session_data->cache_operations_ongoing) );
		}
	}
	else if ( op== _UMP_UK_CACHE_OP_FINISH )
	{
		DBG_MSG(4, ("Cache ops finish\n"));
		session_data->cache_operations_ongoing--;
#if 0
		/* Disabled: conditional flush based on pending-flush bookkeeping;
		   superseded by the unconditional flush below. */
		if ( session_data->has_pending_level1_cache_flush)
		{
			/* This function will set has_pending_level1_cache_flush=0 */
			_ump_osk_msync( NULL, NULL, 0, 0, _UMP_UK_MSYNC_FLUSH_L1, session_data);
		}
#endif
		/* To be on the safe side: always flush L1 cache when cache operations are done. */
		_ump_osk_msync( NULL, NULL, 0, 0, _UMP_UK_MSYNC_FLUSH_L1, session_data);
		DBG_MSG(4, ("Cache ops finish end\n" ));
	}
	else
	{
		DBG_MSG(1, ("Illegal call to %s at line %d\n", __FUNCTION__, __LINE__));
	}
	_mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);
}
/**
 * Switch the hardware user (CPU vs. other device) of a UMP allocation and
 * perform the cache maintenance that the ownership change requires.
 *
 * Flushing is skipped when the memory is uncached, when the user does not
 * actually change, or when neither the old nor the new user is the CPU.
 * Otherwise a clean+invalidate is performed, downgraded to invalidate-only
 * when the CPU is taking over from another device.
 */
void _ump_ukk_switch_hw_usage(_ump_uk_switch_hw_usage_s *args )
{
	ump_dd_mem * mem = NULL;
	ump_uk_user old_user;
	ump_uk_msync_op cache_op = _UMP_UK_MSYNC_CLEAN_AND_INVALIDATE;
	ump_session_data *session_data;

	DEBUG_ASSERT_POINTER( args );
	DEBUG_ASSERT_POINTER( args->ctx );

	session_data = (ump_session_data *)args->ctx;

	/* Resolve the secure ID under the map lock. */
	_mali_osk_lock_wait(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
	ump_descriptor_mapping_get(device.secure_id_map, (int)args->secure_id, (void**)&mem);
	if (NULL == mem)
	{
		_mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
		DBG_MSG(1, ("Failed to look up mapping in _ump_ukk_switch_hw_usage(). ID: %u\n", (ump_secure_id)args->secure_id));
		return;
	}

	/* Record the ownership change before deciding whether to flush. */
	old_user = mem->hw_device;
	mem->hw_device = args->new_user;

	DBG_MSG(3, ("UMP[%02u] Switch usage Start New: %s Prev: %s.\n", (ump_secure_id)args->secure_id, args->new_user?"MALI":"CPU",old_user?"MALI":"CPU"));

	if ( ! mem->is_cached )
	{
		_mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
		DBG_MSG(3, ("UMP[%02u] Changing owner of uncached memory. Cache flushing not needed.\n", (ump_secure_id)args->secure_id));
		return;
	}
	if ( old_user == args->new_user)
	{
		_mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
		DBG_MSG(4, ("UMP[%02u] Setting the new_user equal to previous for. Cache flushing not needed.\n", (ump_secure_id)args->secure_id));
		return;
	}
	if (
	    /* Previous AND new is both different from CPU */
	    (old_user != _UMP_UK_USED_BY_CPU) && (args->new_user != _UMP_UK_USED_BY_CPU )
	   )
	{
		_mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
		DBG_MSG(4, ("UMP[%02u] Previous and new user is not CPU. Cache flushing not needed.\n", (ump_secure_id)args->secure_id));
		return;
	}

	if ( (old_user != _UMP_UK_USED_BY_CPU ) && (args->new_user==_UMP_UK_USED_BY_CPU) )
	{
		/* CPU taking over: only an invalidate is needed. */
		cache_op =_UMP_UK_MSYNC_INVALIDATE;
		DBG_MSG(4, ("UMP[%02u] Cache invalidation needed\n", (ump_secure_id)args->secure_id));
#ifdef UMP_SKIP_INVALIDATION
		/* NOTE(review): #error makes UMP_SKIP_INVALIDATION a compile-time
		   failure — presumably to flag this path as unsupported; confirm. */
#error
		_mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
		DBG_MSG(4, ("UMP[%02u] Performing Cache invalidation SKIPPED\n", (ump_secure_id)args->secure_id));
		return;
#endif
	}

	/* Ensure the memory doesn't disappear while we are flushing it. */
	ump_dd_reference_add(mem);
	_mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);

	/* Take lock to protect: session->cache_operations_ongoing and
	   session->has_pending_level1_cache_flush. */
	_mali_osk_lock_wait(session_data->lock, _MALI_OSK_LOCKMODE_RW);

	/* Actual cache flush over the whole allocation. */
	_ump_osk_msync( mem, NULL, 0, mem->size_bytes, cache_op, session_data);

	_mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);

	ump_dd_reference_release(mem);
	DBG_MSG(4, ("UMP[%02u] Switch usage Finish\n", (ump_secure_id)args->secure_id));
	return;
}
  368. void _ump_ukk_lock(_ump_uk_lock_s *args )
  369. {
  370. ump_dd_mem * mem = NULL;
  371. _mali_osk_lock_wait(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
  372. ump_descriptor_mapping_get(device.secure_id_map, (int)args->secure_id, (void**)&mem);
  373. if (NULL == mem)
  374. {
  375. _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
  376. DBG_MSG(1, ("UMP[%02u] Failed to look up mapping in _ump_ukk_lock(). ID: %u\n", (ump_secure_id)args->secure_id));
  377. return;
  378. }
  379. ump_dd_reference_add(mem);
  380. _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
  381. DBG_MSG(1, ("UMP[%02u] Lock. New lock flag: %d. Old Lock flag:\n", (u32)args->secure_id, (u32)args->lock_usage, (u32) mem->lock_usage ));
  382. mem->lock_usage = (ump_lock_usage) args->lock_usage;
  383. /** TODO: TAKE LOCK HERE */
  384. ump_dd_reference_release(mem);
  385. }
  386. void _ump_ukk_unlock(_ump_uk_unlock_s *args )
  387. {
  388. ump_dd_mem * mem = NULL;
  389. _mali_osk_lock_wait(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
  390. ump_descriptor_mapping_get(device.secure_id_map, (int)args->secure_id, (void**)&mem);
  391. if (NULL == mem)
  392. {
  393. _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
  394. DBG_MSG(1, ("Failed to look up mapping in _ump_ukk_unlock(). ID: %u\n", (ump_secure_id)args->secure_id));
  395. return;
  396. }
  397. ump_dd_reference_add(mem);
  398. _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
  399. DBG_MSG(1, ("UMP[%02u] Unlocking. Old Lock flag:\n", (u32)args->secure_id, (u32) mem->lock_usage ));
  400. mem->lock_usage = (ump_lock_usage) UMP_NOT_LOCKED;
  401. /** TODO: RELEASE LOCK HERE */
  402. ump_dd_reference_release(mem);
  403. }