uaccess_std.c 7.9 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320
  1. /*
  2. * arch/s390/lib/uaccess_std.c
  3. *
  4. * Standard user space access functions based on mvcp/mvcs and doing
  5. * interesting things in the secondary space mode.
  6. *
  7. * Copyright (C) IBM Corp. 2006
  8. * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
  9. * Gerald Schaefer (gerald.schaefer@de.ibm.com)
  10. */
  11. #include <linux/errno.h>
  12. #include <linux/mm.h>
  13. #include <linux/uaccess.h>
  14. #include <asm/futex.h>
  15. #include "uaccess.h"
/*
 * Select the 32 bit instruction mnemonics on 31 bit kernels and the
 * 64 bit ("g" form) mnemonics on 64 bit kernels, so the asm bodies
 * below can be written once for both configurations.
 */
#ifndef __s390x__
#define AHI	"ahi"	/* add halfword immediate */
#define ALR	"alr"	/* add logical register */
#define CLR	"clr"	/* compare logical register */
#define LHI	"lhi"	/* load halfword immediate */
#define SLR	"slr"	/* subtract logical register */
#else
#define AHI	"aghi"
#define ALR	"algr"
#define CLR	"clgr"
#define LHI	"lghi"
#define SLR	"slgr"
#endif
/*
 * copy_from_user_std - copy data from user space to kernel space.
 * @size: number of bytes to copy
 * @ptr:  source address in user (secondary) space
 * @x:    destination address in kernel space
 *
 * Uses the mvcp instruction, which moves up to 256 bytes per execution
 * from the secondary address space.  On a fault the copy is retried up
 * to the next page boundary and the rest of the kernel buffer is
 * cleared (memset loop at labels 5..7) so no stale kernel data is
 * exposed to the caller.
 *
 * Returns the number of bytes that could NOT be copied (0 on success).
 */
size_t copy_from_user_std(size_t size, const void __user *ptr, void *x)
{
	unsigned long tmp1, tmp2;

	tmp1 = -256UL;	/* mvcp chunk length; also key operand (key 0) */
	asm volatile(
		/* Fast path: copy in 256-byte chunks until size is exhausted. */
		"0: mvcp 0(%0,%2),0(%1),%3\n"
		"10:jz 8f\n"
		"1:"ALR" %0,%3\n"	/* adding -256 == subtract 256 */
		" la %1,256(%1)\n"
		" la %2,256(%2)\n"
		"2: mvcp 0(%0,%2),0(%1),%3\n"
		"11:jnz 1b\n"
		" j 8f\n"
		/*
		 * Fault fixup: retry the copy up to the next page
		 * boundary, then fall through to clear the remainder.
		 */
		"3: la %4,255(%1)\n" /* %4 = ptr + 255 */
		" "LHI" %3,-4096\n"
		" nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */
		" "SLR" %4,%1\n"
		" "CLR" %0,%4\n" /* copy crosses next page boundary? */
		" jnh 5f\n"
		"4: mvcp 0(%4,%2),0(%1),%3\n"
		"12:"SLR" %0,%4\n"
		" "ALR" %2,%4\n"
		/* Zero the uncopied tail of the kernel buffer via ex'd xc. */
		"5:"LHI" %4,-1\n"
		" "ALR" %4,%0\n" /* copy remaining size, subtract 1 */
		" bras %3,7f\n" /* memset loop */
		" xc 0(1,%2),0(%2)\n"
		"6: xc 0(256,%2),0(%2)\n"
		" la %2,256(%2)\n"
		"7:"AHI" %4,-256\n"
		" jnm 6b\n"
		" ex %4,0(%3)\n"	/* clear last 1..256 bytes */
		" j 9f\n"
		"8:"SLR" %0,%0\n"	/* full success: return 0 */
		"9: \n"
		EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,5b)
		EX_TABLE(10b,3b) EX_TABLE(11b,3b) EX_TABLE(12b,5b)
		: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
		: : "cc", "memory");
	return size;
}
  69. static size_t copy_from_user_std_check(size_t size, const void __user *ptr,
  70. void *x)
  71. {
  72. if (size <= 1024)
  73. return copy_from_user_std(size, ptr, x);
  74. return copy_from_user_pt(size, ptr, x);
  75. }
/*
 * copy_to_user_std - copy data from kernel space to user space.
 * @size: number of bytes to copy
 * @ptr:  destination address in user (secondary) space
 * @x:    source address in kernel space
 *
 * Uses the mvcs instruction, which moves up to 256 bytes per execution
 * to the secondary address space.  On a fault the store is retried up
 * to the next page boundary so the exact uncopied count is reported.
 *
 * Returns the number of bytes that could NOT be copied (0 on success).
 */
size_t copy_to_user_std(size_t size, void __user *ptr, const void *x)
{
	unsigned long tmp1, tmp2;

	tmp1 = -256UL;	/* mvcs chunk length; also key operand (key 0) */
	asm volatile(
		/* Fast path: store in 256-byte chunks until size is exhausted. */
		"0: mvcs 0(%0,%1),0(%2),%3\n"
		"7: jz 5f\n"
		"1:"ALR" %0,%3\n"	/* adding -256 == subtract 256 */
		" la %1,256(%1)\n"
		" la %2,256(%2)\n"
		"2: mvcs 0(%0,%1),0(%2),%3\n"
		"8: jnz 1b\n"
		" j 5f\n"
		/* Fault fixup: retry the store up to the next page boundary. */
		"3: la %4,255(%1)\n" /* %4 = ptr + 255 */
		" "LHI" %3,-4096\n"
		" nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */
		" "SLR" %4,%1\n"
		" "CLR" %0,%4\n" /* copy crosses next page boundary? */
		" jnh 6f\n"
		"4: mvcs 0(%4,%1),0(%2),%3\n"
		"9:"SLR" %0,%4\n"
		" j 6f\n"
		"5:"SLR" %0,%0\n"	/* full success: return 0 */
		"6: \n"
		EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,6b)
		EX_TABLE(7b,3b) EX_TABLE(8b,3b) EX_TABLE(9b,6b)
		: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
		: : "cc", "memory");
	return size;
}
  106. static size_t copy_to_user_std_check(size_t size, void __user *ptr,
  107. const void *x)
  108. {
  109. if (size <= 1024)
  110. return copy_to_user_std(size, ptr, x);
  111. return copy_to_user_pt(size, ptr, x);
  112. }
/*
 * copy_in_user_std - copy data from user space to user space.
 * @size: number of bytes to copy
 * @to:   destination address in user space
 * @from: source address in user space
 *
 * Switches to the secondary address space (sacf 256) so that mvc can
 * copy directly between the two user buffers.  After a fault in the
 * 256-byte loop the copy is redone byte by byte (label 1) to pinpoint
 * the exact failing address, so the precise remaining count is
 * returned.
 *
 * Returns the number of bytes that could NOT be copied (0 on success).
 */
static size_t copy_in_user_std(size_t size, void __user *to,
			       const void __user *from)
{
	unsigned long tmp1;

	asm volatile(
		" sacf 256\n"	/* address user space via secondary mode */
		" "AHI" %0,-1\n"
		" jo 5f\n"	/* NOTE(review): taken on 16-bit add overflow only;
				   size == 0 appears to fall through — verify
				   callers never pass 0 */
		" bras %3,3f\n"	/* %3 = base for the ex'd mvc below */
		"0:"AHI" %0,257\n"	/* fault fixup: restore full remaining size */
		/* Byte-wise copy, also used to locate the faulting byte. */
		"1: mvc 0(1,%1),0(%2)\n"
		" la %1,1(%1)\n"
		" la %2,1(%2)\n"
		" "AHI" %0,-1\n"
		" jnz 1b\n"
		" j 5f\n"
		/* Main loop: 256 bytes per mvc. */
		"2: mvc 0(256,%1),0(%2)\n"
		" la %1,256(%1)\n"
		" la %2,256(%2)\n"
		"3:"AHI" %0,-256\n"
		" jnm 2b\n"
		"4: ex %0,1b-0b(%3)\n"	/* copy the trailing 1..256 bytes */
		"5: "SLR" %0,%0\n"	/* success: return 0 */
		"6: sacf 0\n"	/* back to primary mode */
		EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
		: "+a" (size), "+a" (to), "+a" (from), "=a" (tmp1)
		: : "cc", "memory");
	return size;
}
/*
 * clear_user_std - zero a block of memory in user space.
 * @size: number of bytes to clear
 * @to:   destination address in user space
 *
 * Switches to the secondary address space and zeroes the buffer with
 * xc (exclusive-or of an area with itself).  After a fault in the
 * 256-byte loop the clear is retried only up to the next page boundary
 * (labels 0..1) so the number of bytes left uncleared can be reported.
 *
 * Returns the number of bytes that could NOT be cleared (0 on success).
 */
static size_t clear_user_std(size_t size, void __user *to)
{
	unsigned long tmp1, tmp2;

	asm volatile(
		" sacf 256\n"	/* address user space via secondary mode */
		" "AHI" %0,-1\n"
		" jo 5f\n"	/* NOTE(review): taken on 16-bit add overflow only;
				   size == 0 appears to fall through — verify
				   callers never pass 0 */
		" bras %3,3f\n"	/* %3 = address of 1-byte xc template */
		" xc 0(1,%1),0(%1)\n"
		/* Fault fixup: clear only up to the next page boundary. */
		"0:"AHI" %0,257\n"	/* restore full remaining size */
		" la %2,255(%1)\n" /* %2 = ptr + 255 */
		" srl %2,12\n"
		" sll %2,12\n" /* %2 = (ptr + 255) & -4096 */
		" "SLR" %2,%1\n"
		" "CLR" %0,%2\n" /* clear crosses next page boundary? */
		" jnh 5f\n"
		" "AHI" %2,-1\n"
		"1: ex %2,0(%3)\n"	/* clear up to the page boundary */
		" "AHI" %2,1\n"
		" "SLR" %0,%2\n"	/* %0 = bytes still uncleared */
		" j 5f\n"
		/* Main loop: 256 bytes per xc. */
		"2: xc 0(256,%1),0(%1)\n"
		" la %1,256(%1)\n"
		"3:"AHI" %0,-256\n"
		" jnm 2b\n"
		"4: ex %0,0(%3)\n"	/* clear the trailing 1..256 bytes */
		"5: "SLR" %0,%0\n"	/* success: return 0 */
		"6: sacf 0\n"	/* back to primary mode */
		EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
		: "+a" (size), "+a" (to), "=a" (tmp1), "=a" (tmp2)
		: : "cc", "memory");
	return size;
}
/*
 * strnlen_user_std - get the length of a string in user space.
 * @size: maximum number of bytes to scan
 * @src:  string address in user space
 *
 * Scans for the terminating zero byte with srst (search string) in the
 * secondary address space.  The returned length includes the
 * terminating '\0'.  Returns 0 if a fault occurs during the scan.
 *
 * NOTE(review): if no '\0' is found within @size bytes, %3 ends up at
 * src + size and the result is size + 1 — confirm callers expect that
 * convention for the "no terminator" case.
 */
size_t strnlen_user_std(size_t size, const char __user *src)
{
	register unsigned long reg0 asm("0") = 0UL;	/* srst: byte to search for */
	unsigned long tmp1, tmp2;

	asm volatile(
		" la %2,0(%1)\n"	/* %2 = scan start */
		" la %3,0(%0,%1)\n"	/* %3 = scan end (src + size) */
		" "SLR" %0,%0\n"	/* preset 0: returned on fault */
		" sacf 256\n"
		"0: srst %3,%2\n"
		" jo 0b\n"		/* cc 3: srst interrupted, resume */
		" la %0,1(%3)\n" /* strnlen_user results includes \0 */
		" "SLR" %0,%1\n"
		"1: sacf 0\n"
		EX_TABLE(0b,1b)
		: "+a" (size), "+a" (src), "=a" (tmp1), "=a" (tmp2)
		: "d" (reg0) : "cc", "memory");
	return size;
}
/*
 * strncpy_from_user_std - copy a string from user space.
 * @size: maximum number of bytes to copy
 * @src:  source string in user space
 * @dst:  destination buffer in kernel space
 *
 * First locates the terminating zero byte with srst in the secondary
 * address space, then copies the string with mvcp, including the '\0'
 * if one was found within @size bytes.
 *
 * Returns the length of the copied string without the terminating
 * '\0', or -EFAULT if a fault occurs.
 */
size_t strncpy_from_user_std(size_t size, const char __user *src, char *dst)
{
	register unsigned long reg0 asm("0") = 0UL;	/* srst: byte to search for */
	unsigned long tmp1, tmp2;

	asm volatile(
		" la %3,0(%1)\n"	/* %3 = scan start */
		" la %4,0(%0,%1)\n"	/* %4 = scan end (src + size) */
		" sacf 256\n"
		"0: srst %4,%3\n"
		" jo 0b\n"		/* cc 3: srst interrupted, resume */
		" sacf 0\n"
		" la %0,0(%4)\n"
		" jh 1f\n" /* found \0 in string ? */
		" "AHI" %4,1\n" /* include \0 in copy */
		"1:"SLR" %0,%1\n" /* %0 = return length (without \0) */
		" "SLR" %4,%1\n" /* %4 = copy length (including \0) */
		/* Copy the located string in 256-byte mvcp chunks. */
		"2: mvcp 0(%4,%2),0(%1),%5\n"
		" jz 9f\n"
		"3:"AHI" %4,-256\n"
		" la %1,256(%1)\n"
		" la %2,256(%2)\n"
		"4: mvcp 0(%4,%2),0(%1),%5\n"
		" jnz 3b\n"
		" j 9f\n"
		/* Fault handlers: leave secondary mode, return -EFAULT. */
		"7: sacf 0\n"
		"8:"LHI" %0,%6\n"
		"9:\n"
		EX_TABLE(0b,7b) EX_TABLE(2b,8b) EX_TABLE(4b,8b)
		: "+a" (size), "+a" (src), "+d" (dst), "=a" (tmp1), "=a" (tmp2)
		: "d" (reg0), "K" (-EFAULT) : "cc", "memory");
	return size;
}
/*
 * __futex_atomic_op - atomically apply an operation to a futex word in
 * user space.  Loads the old value into %1, computes the new value in
 * %2 with @insn (operand in %5), then retries compare-and-swap until
 * it succeeds.  @ret is preset to -EFAULT (the "0" input constraint)
 * and only cleared to 0 after a successful cs, so a fault at any of
 * the EX_TABLE sites leaves -EFAULT in place.
 */
#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg) \
	asm volatile( \
		" sacf 256\n" \
		"0: l %1,0(%6)\n" \
		"1:"insn \
		"2: cs %1,%2,0(%6)\n" \
		"3: jl 1b\n" \
		" lhi %0,0\n" \
		"4: sacf 0\n" \
		EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b) \
		: "=d" (ret), "=&d" (oldval), "=&d" (newval), \
		  "=m" (*uaddr) \
		: "0" (-EFAULT), "d" (oparg), "a" (uaddr), \
		  "m" (*uaddr) : "cc");

/*
 * futex_atomic_op_std - execute a futex operation atomically on a
 * futex word in user space.
 * @op:    FUTEX_OP_* operation code
 * @uaddr: futex word in user space
 * @oparg: operand for the operation
 * @old:   receives the previous value of the futex word
 *
 * Returns 0 on success, -EFAULT on fault, -ENOSYS for unknown @op.
 */
int futex_atomic_op_std(int op, u32 __user *uaddr, int oparg, int *old)
{
	int oldval = 0, newval, ret;

	switch (op) {
	case FUTEX_OP_SET:	/* new = oparg */
		__futex_atomic_op("lr %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	case FUTEX_OP_ADD:	/* new = old + oparg */
		__futex_atomic_op("lr %2,%1\nar %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	case FUTEX_OP_OR:	/* new = old | oparg */
		__futex_atomic_op("lr %2,%1\nor %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	case FUTEX_OP_ANDN:	/* NOTE(review): nr is AND, not AND-NOT —
				   assumes caller passes ~oparg; confirm
				   against the generic futex code */
		__futex_atomic_op("lr %2,%1\nnr %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	case FUTEX_OP_XOR:	/* new = old ^ oparg */
		__futex_atomic_op("lr %2,%1\nxr %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	default:
		ret = -ENOSYS;
	}
	*old = oldval;
	return ret;
}
/*
 * futex_atomic_cmpxchg_std - compare and exchange a futex word in
 * user space.
 * @uval:   receives the value read from the futex word
 * @uaddr:  futex word in user space
 * @oldval: expected value
 * @newval: replacement value
 *
 * Returns 0 when the cs instruction executed (whether or not it
 * swapped — *uval carries the observed value), -EFAULT on a fault.
 */
int futex_atomic_cmpxchg_std(u32 *uval, u32 __user *uaddr,
			     u32 oldval, u32 newval)
{
	int ret;

	asm volatile(
		" sacf 256\n"	/* address user space via secondary mode */
		"0: cs %1,%4,0(%5)\n"
		"1: la %0,0\n"	/* cs executed: clear preset -EFAULT */
		"2: sacf 0\n"
		EX_TABLE(0b,2b) EX_TABLE(1b,2b)
		: "=d" (ret), "+d" (oldval), "=m" (*uaddr)
		: "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
		: "cc", "memory" );
	*uval = oldval;	/* value actually found at the futex word */
	return ret;
}
/* Dispatch table for the standard (mvcp/mvcs based) uaccess variant. */
struct uaccess_ops uaccess_std = {
	.copy_from_user = copy_from_user_std_check,	/* size-dispatched */
	.copy_from_user_small = copy_from_user_std,	/* mvcp path only */
	.copy_to_user = copy_to_user_std_check,		/* size-dispatched */
	.copy_to_user_small = copy_to_user_std,		/* mvcs path only */
	.copy_in_user = copy_in_user_std,
	.clear_user = clear_user_std,
	.strnlen_user = strnlen_user_std,
	.strncpy_from_user = strncpy_from_user_std,
	.futex_atomic_op = futex_atomic_op_std,
	.futex_atomic_cmpxchg = futex_atomic_cmpxchg_std,
};