ump_osk_low_level_mem.c
/*
 * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained from Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

/**
 * @file ump_osk_low_level_mem.c
 * Implementation of the OS abstraction layer for the kernel device driver
 */

/* needed to detect kernel version specific code */
#include <linux/version.h>

#include "ump_osk.h"
#include "ump_uk_types.h"
#include "ump_ukk.h"
#include "ump_kernel_common.h"

#include <linux/module.h>   /* kernel module definitions */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <asm/memory.h>
#include <asm/uaccess.h>    /* to verify pointers from user space */
#include <asm/cacheflush.h>
#include <linux/dma-mapping.h>

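/* Per-VMA usage tracker. The reference count starts at 1 when the region is
 * initialised, is incremented in ump_vma_open() (e.g. when the process forks)
 * and decremented in ump_vma_close(); when it drops to zero the backing UMP
 * allocation is released via _ump_ukk_unmap_mem(). */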
typedef struct ump_vma_usage_tracker
{
    atomic_t references;
    ump_memory_allocation *descriptor;
} ump_vma_usage_tracker;

static void ump_vma_open(struct vm_area_struct * vma);
static void ump_vma_close(struct vm_area_struct * vma);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
static int ump_cpu_page_fault_handler(struct vm_area_struct *vma, struct vm_fault *vmf);
#else
static unsigned long ump_cpu_page_fault_handler(struct vm_area_struct * vma, unsigned long address);
#endif

static struct vm_operations_struct ump_vm_ops =
{
    .open = ump_vma_open,
    .close = ump_vma_close,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
    .fault = ump_cpu_page_fault_handler
#else
    .nopfn = ump_cpu_page_fault_handler
#endif
};

/*
 * Page fault for VMA region
 * This should never happen since we always map in the entire virtual memory range.
 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
static int ump_cpu_page_fault_handler(struct vm_area_struct *vma, struct vm_fault *vmf)
#else
static unsigned long ump_cpu_page_fault_handler(struct vm_area_struct * vma, unsigned long address)
#endif
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
    void __user * address;
    address = vmf->virtual_address;
#endif

    MSG_ERR(("Page-fault in UMP memory region caused by the CPU\n"));
    MSG_ERR(("VMA: 0x%08lx, virtual address: 0x%08lx\n", (unsigned long)vma, address));

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
    return VM_FAULT_SIGBUS;
#else
    return NOPFN_SIGBUS;
#endif
}

static void ump_vma_open(struct vm_area_struct * vma)
{
    ump_vma_usage_tracker * vma_usage_tracker;
    int new_val;

    vma_usage_tracker = (ump_vma_usage_tracker*)vma->vm_private_data;
    BUG_ON(NULL == vma_usage_tracker);

    new_val = atomic_inc_return(&vma_usage_tracker->references);

    DBG_MSG(4, ("VMA open, VMA reference count incremented. VMA: 0x%08lx, reference count: %d\n", (unsigned long)vma, new_val));
}

static void ump_vma_close(struct vm_area_struct * vma)
{
    ump_vma_usage_tracker * vma_usage_tracker;
    _ump_uk_unmap_mem_s args;
    int new_val;

    vma_usage_tracker = (ump_vma_usage_tracker*)vma->vm_private_data;
    BUG_ON(NULL == vma_usage_tracker);

    new_val = atomic_dec_return(&vma_usage_tracker->references);

    DBG_MSG(4, ("VMA close, VMA reference count decremented. VMA: 0x%08lx, reference count: %d\n", (unsigned long)vma, new_val));

    if (0 == new_val)
    {
        ump_memory_allocation * descriptor;

        descriptor = vma_usage_tracker->descriptor;

        args.ctx = descriptor->ump_session;
        args.cookie = descriptor->cookie;
        args.mapping = descriptor->mapping;
        args.size = descriptor->size;
        args._ukk_private = NULL; /** @note unused */

        DBG_MSG(4, ("No more VMA references left, releasing UMP memory\n"));
        _ump_ukk_unmap_mem( & args );

        /* vma_usage_tracker is free()d by _ump_osk_mem_mapregion_term() */
    }
}

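/* Lifecycle of a mapped region (a sketch inferred from this file, not from the
 * callers): _ump_osk_mem_mapregion_init() is expected to run from the driver's
 * mmap() path and attaches the usage tracker to the new VMA,
 * _ump_osk_mem_mapregion_map() is then called once per physically contiguous
 * block to populate the page tables, and _ump_osk_mem_mapregion_term() frees
 * the tracker after munmap() has torn the mapping down, roughly:
 *
 *     _ump_osk_mem_mapregion_init(descriptor);
 *     for each physical block:
 *         _ump_osk_mem_mapregion_map(descriptor, offset, &block_addr, block_size);
 *     ...
 *     _ump_osk_mem_mapregion_term(descriptor);
 */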
_mali_osk_errcode_t _ump_osk_mem_mapregion_init( ump_memory_allocation * descriptor )
{
    ump_vma_usage_tracker * vma_usage_tracker;
    struct vm_area_struct *vma;

    if (NULL == descriptor) return _MALI_OSK_ERR_FAULT;

    vma_usage_tracker = kmalloc(sizeof(ump_vma_usage_tracker), GFP_KERNEL);
    if (NULL == vma_usage_tracker)
    {
        DBG_MSG(1, ("Failed to allocate memory for ump_vma_usage_tracker in _ump_osk_mem_mapregion_init\n"));
        return _MALI_OSK_ERR_FAULT;
    }

    vma = (struct vm_area_struct*)descriptor->process_mapping_info;
    if (NULL == vma)
    {
        kfree(vma_usage_tracker);
        return _MALI_OSK_ERR_FAULT;
    }

    vma->vm_private_data = vma_usage_tracker;
    vma->vm_flags |= VM_IO;
    vma->vm_flags |= VM_RESERVED;

    if (0 == descriptor->is_cached)
    {
        vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
    }
    DBG_MSG(3, ("Mapping with page_prot: 0x%x\n", vma->vm_page_prot));

    /* Setup the functions which handle further VMA handling */
    vma->vm_ops = &ump_vm_ops;

    /* Do the va range allocation - in this case, it was done earlier, so we copy in that information */
    descriptor->mapping = (void __user*)vma->vm_start;

    atomic_set(&vma_usage_tracker->references, 1); /* this can later be increased if the process is forked, see ump_vma_open() */
    vma_usage_tracker->descriptor = descriptor;

    return _MALI_OSK_ERR_OK;
}

void _ump_osk_mem_mapregion_term( ump_memory_allocation * descriptor )
{
    struct vm_area_struct* vma;
    ump_vma_usage_tracker * vma_usage_tracker;

    if (NULL == descriptor) return;

    /* Linux does the right thing as part of munmap to remove the mapping.
     * All that remains is that we remove the vma_usage_tracker set up in init() */
    vma = (struct vm_area_struct*)descriptor->process_mapping_info;
    vma_usage_tracker = vma->vm_private_data;

    /* We only get called if mem_mapregion_init succeeded */
    kfree(vma_usage_tracker);
    return;
}

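/* Map 'size' bytes of physically contiguous memory, starting at *phys_addr,
 * into the user VMA at 'offset' bytes from its start. descriptor->mapping was
 * set to vma->vm_start in _ump_osk_mem_mapregion_init(), so the target user
 * address passed to remap_pfn_range() is effectively vma->vm_start + offset. */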
_mali_osk_errcode_t _ump_osk_mem_mapregion_map( ump_memory_allocation * descriptor, u32 offset, u32 * phys_addr, unsigned long size )
{
    struct vm_area_struct *vma;
    _mali_osk_errcode_t retval;

    if (NULL == descriptor) return _MALI_OSK_ERR_FAULT;

    vma = (struct vm_area_struct*)descriptor->process_mapping_info;

    if (NULL == vma) return _MALI_OSK_ERR_FAULT;

    retval = remap_pfn_range( vma, ((u32)descriptor->mapping) + offset, (*phys_addr) >> PAGE_SHIFT, size, vma->vm_page_prot) ? _MALI_OSK_ERR_FAULT : _MALI_OSK_ERR_OK;

    DBG_MSG(4, ("Mapping virtual to physical memory. ID: %u, vma: 0x%08lx, virtual addr: 0x%08lx, physical addr: 0x%08lx, size: %lu, prot: 0x%x, vm_flags: 0x%x, RETVAL: 0x%x\n",
                ump_dd_secure_id_get(descriptor->handle),
                (unsigned long)vma,
                (unsigned long)(vma->vm_start + offset),
                (unsigned long)*phys_addr,
                size,
                (unsigned int)vma->vm_page_prot, vma->vm_flags, retval));

    return retval;
}

static void level1_cache_flush_all(void)
{
    DBG_MSG(4, ("UMP[xx] Flushing complete L1 cache\n"));
    __cpuc_flush_kern_all();
}

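/* Cache maintenance for a UMP allocation. The L1 is handled by virtual
 * address: if the caller supplied a valid, writable user mapping, the whole
 * range is flushed in one dmac_flush_range() call; otherwise the flush is
 * either performed immediately as a full L1 flush or recorded as pending while
 * a batch of cache operations is ongoing. The L2 (outer cache) is then
 * cleaned/invalidated by physical address, one ump_dd_physical_block at a
 * time, in the loop below. A full clean of a cached allocation might look like
 * the following (a sketch only; the real callers live in the UK/UKK layer):
 *
 *     _ump_osk_msync(mem, mapping, 0, mem->size_bytes, _UMP_UK_MSYNC_CLEAN, session);
 */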
void _ump_osk_msync( ump_dd_mem * mem, void * virt, u32 offset, u32 size, ump_uk_msync_op op, ump_session_data * session_data )
{
    int i;
    const void *start_v, *end_v;

    /* Flush L1 using virtual addresses, the entire range in one go.
     * Only flush if the user space process has a valid write mapping on the given address. */
    if( (mem) && (virt != NULL) && (access_ok(VERIFY_WRITE, virt, size)) )
    {
        start_v = (void *)virt;
        end_v   = (void *)(start_v + size - 1);
        /* There is no dmac_clean_range, so the L1 is always flushed,
         * also for UMP_MSYNC_CLEAN. */
        dmac_flush_range(start_v, end_v);
        DBG_MSG(3, ("UMP[%02u] Flushing CPU L1 Cache. Cpu address: %x-%x\n", mem->secure_id, start_v, end_v));
    }
    else
    {
        if (session_data)
        {
            if (op == _UMP_UK_MSYNC_FLUSH_L1)
            {
                DBG_MSG(4, ("UMP Pending L1 cache flushes: %d\n", session_data->has_pending_level1_cache_flush));
                session_data->has_pending_level1_cache_flush = 0;
                level1_cache_flush_all();
                return;
            }
            else
            {
                if (session_data->cache_operations_ongoing)
                {
                    session_data->has_pending_level1_cache_flush++;
                    DBG_MSG(4, ("UMP[%02u] Deferring the L1 flush. Nr pending: %d\n", mem->secure_id, session_data->has_pending_level1_cache_flush));
                }
                else
                {
                    /* Flush the L1 cache on each switch_user() if ump_cache_operations_control(START) is not called */
                    level1_cache_flush_all();
                }
            }
        }
        else
        {
            DBG_MSG(4, ("Unknown state %s %d\n", __FUNCTION__, __LINE__));
            level1_cache_flush_all();
        }
    }

    if ( NULL == mem ) return;

    if ( mem->size_bytes == size)
    {
        DBG_MSG(3, ("UMP[%02u] Flushing CPU L2 Cache\n", mem->secure_id));
    }
    else
    {
        DBG_MSG(3, ("UMP[%02u] Flushing CPU L2 Cache. Blocks:%u, TotalSize:%u. FlushSize:%u Offset:0x%x FirstPaddr:0x%08x\n",
                    mem->secure_id, mem->nr_blocks, mem->size_bytes, size, offset, mem->block_array[0].addr));
    }

    /* Flush L2 using physical addresses, block for block. */
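    /* Worked example with hypothetical numbers: two 4096-byte blocks,
     * offset = 6144, size = 1024. Block 0 is skipped entirely (offset >=
     * block->size, so offset becomes 2048). In block 1 the flush starts at
     * block->addr + 2048; since the remaining size (1024) fits inside the rest
     * of the block, end_p is start_p + 1023, size drops to 0 and the loop
     * terminates after the cache operation. */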
    for (i = 0; i < mem->nr_blocks; i++)
    {
        u32 start_p, end_p;
        ump_dd_physical_block *block;

        block = &mem->block_array[i];

        if(offset >= block->size)
        {
            offset -= block->size;
            continue;
        }

        if(offset)
        {
            start_p = (u32)block->addr + offset;
            /* We'll zero the offset later, after using it to calculate end_p. */
        }
        else
        {
            start_p = (u32)block->addr;
        }

        if(size < block->size - offset)
        {
            end_p = start_p + size - 1;
            size = 0;
        }
        else
        {
            if(offset)
            {
                end_p = start_p + (block->size - offset - 1);
                size -= block->size - offset;
                offset = 0;
            }
            else
            {
                end_p = start_p + block->size - 1;
                size -= block->size;
            }
        }

        switch(op)
        {
            case _UMP_UK_MSYNC_CLEAN:
                outer_clean_range(start_p, end_p);
                break;
            case _UMP_UK_MSYNC_CLEAN_AND_INVALIDATE:
                outer_flush_range(start_p, end_p);
                break;
            case _UMP_UK_MSYNC_INVALIDATE:
                outer_inv_range(start_p, end_p);
                break;
            default:
                break;
        }

        if(0 == size)
        {
            /* Nothing left to flush. */
            break;
        }
    }

    return;
}