ion_cp_common.c

/*
 * Copyright (C) 2011 Google, Inc
 * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/memory_alloc.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <mach/scm.h>
#include <linux/highmem.h>

#include "../ion_priv.h"
#include "ion_cp_common.h"

#define MEM_PROTECT_LOCK_ID	0x05
#define MEM_PROTECT_LOCK_ID2	0x0A

struct cp2_mem_chunks {
	unsigned int *chunk_list;
	unsigned int chunk_list_size;
	unsigned int chunk_size;
} __attribute__ ((__packed__));

struct cp2_lock2_req {
	struct cp2_mem_chunks chunks;
	unsigned int mem_usage;
	unsigned int lock;
	unsigned int flags;
} __attribute__ ((__packed__));

/* SCM related code for locking down memory for content protection */

#define SCM_CP_LOCK_CMD_ID	0x1
#define SCM_CP_PROTECT		0x1
#define SCM_CP_UNPROTECT	0x0

struct cp_lock_msg {
	unsigned int start;
	unsigned int end;
	unsigned int permission_type;
	unsigned char lock;
} __attribute__ ((__packed__));
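
/*
 * V1 content protection: a single SCM call describes the whole physical
 * range [phy_base, phy_base + size) and either locks it (SCM_CP_PROTECT)
 * or unlocks it (SCM_CP_UNPROTECT).
 */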
static int ion_cp_protect_mem_v1(unsigned int phy_base, unsigned int size,
				unsigned int permission_type)
{
	struct cp_lock_msg cmd;
	cmd.start = phy_base;
	cmd.end = phy_base + size;
	cmd.permission_type = permission_type;
	cmd.lock = SCM_CP_PROTECT;

	return scm_call(SCM_SVC_MP, SCM_CP_LOCK_CMD_ID,
			&cmd, sizeof(cmd), NULL, 0);
}

static int ion_cp_unprotect_mem_v1(unsigned int phy_base, unsigned int size,
				unsigned int permission_type)
{
	struct cp_lock_msg cmd;
	cmd.start = phy_base;
	cmd.end = phy_base + size;
	cmd.permission_type = permission_type;
	cmd.lock = SCM_CP_UNPROTECT;

	return scm_call(SCM_SVC_MP, SCM_CP_LOCK_CMD_ID,
			&cmd, sizeof(cmd), NULL, 0);
}
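
/*
 * V2 content protection: the region is described to the secure
 * environment as a list of physically contiguous chunks. The chunk
 * list itself lives in kernel memory, so it is flushed out of the
 * caches before the SCM call reads it from RAM.
 */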
#define V2_CHUNK_SIZE	SZ_1M

static int ion_cp_change_mem_v2(unsigned int phy_base, unsigned int size,
				void *data, int lock)
{
	enum cp_mem_usage usage = (enum cp_mem_usage) data;
	unsigned long *chunk_list;
	int nchunks;
	int ret;
	int i;
	int chunk_list_len;
	phys_addr_t chunk_list_phys;

	if (usage < 0 || usage >= MAX_USAGE)
		return -EINVAL;

	if (!IS_ALIGNED(size, V2_CHUNK_SIZE)) {
		pr_err("%s: heap size is not aligned to %x\n",
			__func__, V2_CHUNK_SIZE);
		return -EINVAL;
	}

	nchunks = size / V2_CHUNK_SIZE;
	chunk_list_len = sizeof(unsigned long)*nchunks;

	chunk_list = kmalloc(chunk_list_len, GFP_KERNEL);
	if (!chunk_list)
		return -ENOMEM;

	chunk_list_phys = virt_to_phys(chunk_list);
	for (i = 0; i < nchunks; i++)
		chunk_list[i] = phy_base + i * V2_CHUNK_SIZE;

	/*
	 * Flush the chunk list before sending the memory to the
	 * secure environment to ensure the data is actually present
	 * in RAM
	 */
	dmac_flush_range(chunk_list, chunk_list + chunk_list_len);
	outer_flush_range(chunk_list_phys,
			chunk_list_phys + chunk_list_len);

	ret = ion_cp_change_chunks_state(chunk_list_phys,
					nchunks, V2_CHUNK_SIZE, usage, lock);

	kfree(chunk_list);
	return ret;
}
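
/*
 * Version dispatch: route protect/unprotect requests to the V1
 * range-based call or the V2 chunk-list call, depending on the
 * content-protection version in use.
 */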
int ion_cp_protect_mem(unsigned int phy_base, unsigned int size,
			unsigned int permission_type, int version,
			void *data)
{
	switch (version) {
	case ION_CP_V1:
		return ion_cp_protect_mem_v1(phy_base, size, permission_type);
	case ION_CP_V2:
		return ion_cp_change_mem_v2(phy_base, size, data,
						SCM_CP_PROTECT);
	default:
		return -EINVAL;
	}
}

int ion_cp_unprotect_mem(unsigned int phy_base, unsigned int size,
			unsigned int permission_type, int version,
			void *data)
{
	switch (version) {
	case ION_CP_V1:
		return ion_cp_unprotect_mem_v1(phy_base, size, permission_type);
	case ION_CP_V2:
		return ion_cp_change_mem_v2(phy_base, size, data,
						SCM_CP_UNPROTECT);
	default:
		return -EINVAL;
	}
}
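
/*
 * Issue the MEM_PROTECT_LOCK_ID2 SCM call that locks or unlocks the
 * chunks described by @chunks (physical address of the chunk list).
 * Unused kmap/kmap_atomic slots are flushed first so that stale kernel
 * aliases of the pages being secured are torn down.
 */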
int ion_cp_change_chunks_state(unsigned long chunks, unsigned int nchunks,
				unsigned int chunk_size,
				enum cp_mem_usage usage,
				int lock)
{
	struct cp2_lock2_req request;
	u32 resp;

	request.mem_usage = usage;
	request.lock = lock;
	request.flags = 0;
	request.chunks.chunk_list = (unsigned int *)chunks;
	request.chunks.chunk_list_size = nchunks;
	request.chunks.chunk_size = chunk_size;

	kmap_flush_unused();
	kmap_atomic_flush_unused();
	return scm_call(SCM_SVC_MP, MEM_PROTECT_LOCK_ID2,
			&request, sizeof(request), &resp, sizeof(resp));
}
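
/*
 * Reference-counted protect: only the transition of secure_cnt from 0
 * to 1 triggers the SCM protect call; later callers just take an extra
 * reference. On failure the count is dropped back down.
 */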
/* Must be protected by ion_cp_buffer lock */
static int __ion_cp_protect_buffer(struct ion_buffer *buffer, int version,
					void *data, int flags)
{
	struct ion_cp_buffer *buf = buffer->priv_virt;
	int ret_value = 0;

	if (atomic_inc_return(&buf->secure_cnt) == 1) {
		ret_value = ion_cp_protect_mem(buf->buffer,
				buffer->size, 0,
				version, data);

		if (ret_value) {
			pr_err("Failed to secure buffer %p, error %d\n",
				buffer, ret_value);
			atomic_dec(&buf->secure_cnt);
		} else {
			pr_debug("Protected buffer %p from %pa (size %x)\n",
				buffer, &buf->buffer,
				buffer->size);
			buf->want_delayed_unsecure |=
				flags & ION_UNSECURE_DELAYED ? 1 : 0;
			buf->data = data;
			buf->version = version;
		}
	}
	pr_debug("buffer %p protect count %d\n", buffer,
		atomic_read(&buf->secure_cnt));
	BUG_ON(atomic_read(&buf->secure_cnt) < 0);
	return ret_value;
}
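
/*
 * Reference-counted unprotect: the SCM unprotect call is only made when
 * the last secure reference is dropped. force_unsecure is used while
 * the buffer is being destroyed and resets an unbalanced count to 1 so
 * the memory is always handed back unsecured.
 */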
/* Must be protected by ion_cp_buffer lock */
static int __ion_cp_unprotect_buffer(struct ion_buffer *buffer, int version,
					void *data, int force_unsecure)
{
	struct ion_cp_buffer *buf = buffer->priv_virt;
	int ret_value = 0;

	if (force_unsecure) {
		if (!buf->is_secure || atomic_read(&buf->secure_cnt) == 0)
			return 0;

		if (atomic_read(&buf->secure_cnt) != 1) {
			WARN(1, "Forcing unsecure of buffer with outstanding secure count %d!\n",
				atomic_read(&buf->secure_cnt));
			atomic_set(&buf->secure_cnt, 1);
		}
	}

	if (atomic_dec_and_test(&buf->secure_cnt)) {
		ret_value = ion_cp_unprotect_mem(
			buf->buffer, buffer->size,
			0, version, data);

		if (ret_value) {
			pr_err("Failed to unsecure buffer %p, error %d\n",
				buffer, ret_value);
			/*
			 * If the force unsecure is happening, the buffer
			 * is being destroyed. We failed to unsecure the
			 * buffer even though the memory is given back.
			 * Just die now rather than discovering later what
			 * happens when trying to use the secured memory as
			 * unsecured...
			 */
			BUG_ON(force_unsecure);
			/* Bump the count back up one to try again later */
			atomic_inc(&buf->secure_cnt);
		} else {
			buf->version = -1;
			buf->data = NULL;
		}
	}
	pr_debug("buffer %p unprotect count %d\n", buffer,
		atomic_read(&buf->secure_cnt));
	BUG_ON(atomic_read(&buf->secure_cnt) < 0);
	return ret_value;
}
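
/*
 * Public secure entry point. A buffer can only be secured if it was
 * allocated from a secure heap, is uncached, has no outstanding
 * mappings (map_cnt must be zero), and, when already secured, is being
 * re-secured with the same version/data it was secured with before.
 */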
int ion_cp_secure_buffer(struct ion_buffer *buffer, int version, void *data,
				int flags)
{
	int ret_value;
	struct ion_cp_buffer *buf = buffer->priv_virt;

	mutex_lock(&buf->lock);
	if (!buf->is_secure) {
		pr_err("%s: buffer %p was not allocated as secure\n",
			__func__, buffer);
		ret_value = -EINVAL;
		goto out_unlock;
	}

	if (ION_IS_CACHED(buffer->flags)) {
		pr_err("%s: buffer %p was allocated as cached\n",
			__func__, buffer);
		ret_value = -EINVAL;
		goto out_unlock;
	}

	if (atomic_read(&buf->map_cnt)) {
		pr_err("%s: cannot secure buffer %p with outstanding mappings. Total count: %d\n",
			__func__, buffer, atomic_read(&buf->map_cnt));
		ret_value = -EINVAL;
		goto out_unlock;
	}

	if (atomic_read(&buf->secure_cnt) && !buf->ignore_check) {
		if (buf->version != version || buf->data != data) {
			pr_err("%s: Trying to re-secure buffer with different values\n",
				__func__);
			pr_err("Last secured version: %d Current %d\n",
				buf->version, version);
			pr_err("Last secured data: %p current %p\n",
				buf->data, data);
			ret_value = -EINVAL;
			goto out_unlock;
		}
	}
	ret_value = __ion_cp_protect_buffer(buffer, version, data, flags);

out_unlock:
	mutex_unlock(&buf->lock);
	return ret_value;
}
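
/*
 * Public unsecure entry point: drops a secure reference (or forces the
 * buffer back to the unsecured state) under the buffer lock, using the
 * version/data recorded when the buffer was secured.
 */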
int ion_cp_unsecure_buffer(struct ion_buffer *buffer, int force_unsecure)
{
	int ret_value = 0;
	struct ion_cp_buffer *buf = buffer->priv_virt;

	mutex_lock(&buf->lock);
	ret_value = __ion_cp_unprotect_buffer(buffer, buf->version, buf->data,
						force_unsecure);
	mutex_unlock(&buf->lock);
	return ret_value;
}