uaccess_mvcos.c

/*
 *  arch/s390/lib/uaccess_mvcos.c
 *
 *  Optimized user space access functions based on mvcos.
 *
 *  Copyright (C) IBM Corp. 2006
 *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *             Gerald Schaefer (gerald.schaefer@de.ibm.com)
 */

#include <linux/errno.h>
#include <linux/mm.h>
#include <asm/uaccess.h>
#include <asm/futex.h>
#include "uaccess.h"
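
/*
 * The macros below select the 31-bit instruction mnemonics on 31-bit
 * kernels and the corresponding 64-bit ("g") forms on 64-bit kernels,
 * so the inline assembly further down only has to be written once.
 */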
#ifndef __s390x__
#define AHI	"ahi"
#define ALR	"alr"
#define CLR	"clr"
#define LHI	"lhi"
#define SLR	"slr"
#else
#define AHI	"aghi"
#define ALR	"algr"
#define CLR	"clgr"
#define LHI	"lghi"
#define SLR	"slgr"
#endif
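
/*
 * The routines below are built around MVCOS ("move with optional
 * specifications"), emitted via .insn so that assemblers without the
 * mnemonic can still build this file.  General register 0 holds the
 * operand-access-control word: 0x81 in the low halfword fetches the
 * source operand from the secondary (user) address space, 0x810000
 * stores the destination operand into user space, and 0x810081 does
 * both (copy within user space).  MVCOS moves at most 4096 bytes per
 * execution, hence the loops that advance the pointers by 4K until
 * the condition code signals completion.
 */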
static size_t copy_from_user_mvcos(size_t size, const void __user *ptr, void *x)
{
        register unsigned long reg0 asm("0") = 0x81UL;
        unsigned long tmp1, tmp2;

        tmp1 = -4096UL;
        asm volatile(
                "0: .insn ss,0xc80000000000,0(%0,%2),0(%1),0\n"
                "9: jz    7f\n"
                "1:"ALR"  %0,%3\n"
                "  "SLR"  %1,%3\n"
                "  "SLR"  %2,%3\n"
                "   j     0b\n"
                "2: la    %4,4095(%1)\n"        /* %4 = ptr + 4095 */
                "   nr    %4,%3\n"              /* %4 = (ptr + 4095) & -4096 */
                "  "SLR"  %4,%1\n"
                "  "CLR"  %0,%4\n"              /* copy crosses next page boundary? */
                "   jnh   4f\n"
                "3: .insn ss,0xc80000000000,0(%4,%2),0(%1),0\n"
                "10:"SLR" %0,%4\n"
                "   "ALR" %2,%4\n"
                "4:"LHI"  %4,-1\n"
                "   "ALR" %4,%0\n"              /* copy remaining size, subtract 1 */
                "   bras  %3,6f\n"              /* memset loop */
                "   xc    0(1,%2),0(%2)\n"
                "5: xc    0(256,%2),0(%2)\n"
                "   la    %2,256(%2)\n"
                "6:"AHI"  %4,-256\n"
                "   jnm   5b\n"
                "   ex    %4,0(%3)\n"
                "   j     8f\n"
                "7:"SLR"  %0,%0\n"
                "8: \n"
                EX_TABLE(0b,2b) EX_TABLE(3b,4b) EX_TABLE(9b,2b) EX_TABLE(10b,4b)
                : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
                : "d" (reg0) : "cc", "memory");
        return size;
}
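
/*
 * The *_check wrappers divert copies of up to 256 bytes to the
 * standard (non-mvcos) routines, which tend to be cheaper for small
 * sizes; larger copies go through MVCOS.
 */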
static size_t copy_from_user_mvcos_check(size_t size, const void __user *ptr, void *x)
{
        if (size <= 256)
                return copy_from_user_std(size, ptr, x);
        return copy_from_user_mvcos(size, ptr, x);
}

static size_t copy_to_user_mvcos(size_t size, void __user *ptr, const void *x)
{
        register unsigned long reg0 asm("0") = 0x810000UL;
        unsigned long tmp1, tmp2;

        tmp1 = -4096UL;
        asm volatile(
                "0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n"
                "6: jz    4f\n"
                "1:"ALR"  %0,%3\n"
                "  "SLR"  %1,%3\n"
                "  "SLR"  %2,%3\n"
                "   j     0b\n"
                "2: la    %4,4095(%1)\n"        /* %4 = ptr + 4095 */
                "   nr    %4,%3\n"              /* %4 = (ptr + 4095) & -4096 */
                "  "SLR"  %4,%1\n"
                "  "CLR"  %0,%4\n"              /* copy crosses next page boundary? */
                "   jnh   5f\n"
                "3: .insn ss,0xc80000000000,0(%4,%1),0(%2),0\n"
                "7:"SLR"  %0,%4\n"
                "   j     5f\n"
                "4:"SLR"  %0,%0\n"
                "5: \n"
                EX_TABLE(0b,2b) EX_TABLE(3b,5b) EX_TABLE(6b,2b) EX_TABLE(7b,5b)
                : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
                : "d" (reg0) : "cc", "memory");
        return size;
}

static size_t copy_to_user_mvcos_check(size_t size, void __user *ptr,
                                       const void *x)
{
        if (size <= 256)
                return copy_to_user_std(size, ptr, x);
        return copy_to_user_mvcos(size, ptr, x);
}

static size_t copy_in_user_mvcos(size_t size, void __user *to,
                                 const void __user *from)
{
        register unsigned long reg0 asm("0") = 0x810081UL;
        unsigned long tmp1, tmp2;

        tmp1 = -4096UL;
        /* FIXME: copy with reduced length. */
        asm volatile(
                "0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n"
                "   jz    2f\n"
                "1:"ALR"  %0,%3\n"
                "  "SLR"  %1,%3\n"
                "  "SLR"  %2,%3\n"
                "   j     0b\n"
                "2:"SLR"  %0,%0\n"
                "3: \n"
                EX_TABLE(0b,3b)
                : "+a" (size), "+a" (to), "+a" (from), "+a" (tmp1), "=a" (tmp2)
                : "d" (reg0) : "cc", "memory");
        return size;
}
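
/*
 * clear_user_mvcos zeroes user memory by copying from empty_zero_page
 * into the user buffer with MVCOS, at most one page per iteration; the
 * fault handling mirrors copy_to_user_mvcos.
 */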
static size_t clear_user_mvcos(size_t size, void __user *to)
{
        register unsigned long reg0 asm("0") = 0x810000UL;
        unsigned long tmp1, tmp2;

        tmp1 = -4096UL;
        asm volatile(
                "0: .insn ss,0xc80000000000,0(%0,%1),0(%4),0\n"
                "   jz    4f\n"
                "1:"ALR"  %0,%2\n"
                "  "SLR"  %1,%2\n"
                "   j     0b\n"
                "2: la    %3,4095(%1)\n"        /* %3 = to + 4095 */
                "   nr    %3,%2\n"              /* %3 = (to + 4095) & -4096 */
                "  "SLR"  %3,%1\n"
                "  "CLR"  %0,%3\n"              /* copy crosses next page boundary? */
                "   jnh   5f\n"
                "3: .insn ss,0xc80000000000,0(%3,%1),0(%4),0\n"
                "  "SLR"  %0,%3\n"
                "   j     5f\n"
                "4:"SLR"  %0,%0\n"
                "5: \n"
                EX_TABLE(0b,2b) EX_TABLE(3b,5b)
                : "+a" (size), "+a" (to), "+a" (tmp1), "=a" (tmp2)
                : "a" (empty_zero_page), "d" (reg0) : "cc", "memory");
        return size;
}
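
/*
 * The string helpers below copy the user string in chunks via
 * uaccess.copy_from_user and scan each chunk with strnlen.
 * strnlen_user_mvcos returns the string length including the
 * terminating zero byte, or 0 on fault; strncpy_from_user_mvcos
 * returns the length of the copied string (without the terminating
 * zero), or -EFAULT on fault.
 */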
static size_t strnlen_user_mvcos(size_t count, const char __user *src)
{
        char buf[256];
        int rc;
        size_t done, len, len_str;

        done = 0;
        do {
                len = min(count - done, (size_t) 256);
                rc = uaccess.copy_from_user(len, src + done, buf);
                if (unlikely(rc == len))
                        return 0;
                len -= rc;
                len_str = strnlen(buf, len);
                done += len_str;
        } while ((len_str == len) && (done < count));
        return done + 1;
}

static size_t strncpy_from_user_mvcos(size_t count, const char __user *src,
                                      char *dst)
{
        int rc;
        size_t done, len, len_str;

        done = 0;
        do {
                len = min(count - done, (size_t) 4096);
                rc = uaccess.copy_from_user(len, src + done, dst + done);
                if (unlikely(rc == len))
                        return -EFAULT;
                len -= rc;
                len_str = strnlen(dst + done, len);
                done += len_str;
        } while ((len_str == len) && (done < count));
        return done;
}

struct uaccess_ops uaccess_mvcos = {
        .copy_from_user = copy_from_user_mvcos_check,
        .copy_from_user_small = copy_from_user_std,
        .copy_to_user = copy_to_user_mvcos_check,
        .copy_to_user_small = copy_to_user_std,
        .copy_in_user = copy_in_user_mvcos,
        .clear_user = clear_user_mvcos,
        .strnlen_user = strnlen_user_std,
        .strncpy_from_user = strncpy_from_user_std,
        .futex_atomic_op = futex_atomic_op_std,
        .futex_atomic_cmpxchg = futex_atomic_cmpxchg_std,
};

struct uaccess_ops uaccess_mvcos_switch = {
        .copy_from_user = copy_from_user_mvcos,
        .copy_from_user_small = copy_from_user_mvcos,
        .copy_to_user = copy_to_user_mvcos,
        .copy_to_user_small = copy_to_user_mvcos,
        .copy_in_user = copy_in_user_mvcos,
        .clear_user = clear_user_mvcos,
        .strnlen_user = strnlen_user_mvcos,
        .strncpy_from_user = strncpy_from_user_mvcos,
        .futex_atomic_op = futex_atomic_op_pt,
        .futex_atomic_cmpxchg = futex_atomic_cmpxchg_pt,
};
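
/*
 * Illustrative sketch only, not part of this file: one way the ops
 * table might be installed during architecture setup.  The facility
 * check MACHINE_HAS_MVCOS, the fallback table uaccess_std and the
 * global uaccess structure are assumed to be declared elsewhere
 * (asm/setup.h and uaccess.h); the function name is hypothetical and
 * memcpy would need <linux/string.h>.
 */
#if 0
static void __init uaccess_init_sketch(void)
{
        if (MACHINE_HAS_MVCOS)
                /* machine provides MVCOS: use the optimized table */
                memcpy(&uaccess, &uaccess_mvcos, sizeof(uaccess));
        else
                /* fall back to the standard routines */
                memcpy(&uaccess, &uaccess_std, sizeof(uaccess));
}
#endif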