#ifndef _ASM_X86_UACCESS_64_H
#define _ASM_X86_UACCESS_64_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/lockdep.h>
#include <asm/alternative.h>
#include <asm/cpufeature.h>
#include <asm/page.h>

/*
 * Copy To/From Userspace
 */
/* Handles exceptions in both to and from, but doesn't do access_ok */
__must_check unsigned long
copy_user_generic_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_unrolled(void *to, const void *from, unsigned len);
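
/*
 * copy_user_generic() dispatches between the two implementations above.
 * alternative_call() patches the call site at boot: CPUs that advertise
 * X86_FEATURE_REP_GOOD (a fast "rep movs") get the string variant,
 * everything else gets the unrolled one. The output/input constraints
 * match the custom calling convention of the assembly routines, and the
 * extra clobbers cover the scratch registers those routines use.
 */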
static __always_inline __must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned len)
{
	unsigned ret;

	alternative_call(copy_user_generic_unrolled,
			 copy_user_generic_string,
			 X86_FEATURE_REP_GOOD,
			 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
				     "=d" (len)),
			 "1" (to), "2" (from), "3" (len)
			 : "memory", "rcx", "r8", "r9", "r10", "r11");
	return ret;
}

__must_check unsigned long
_copy_to_user(void __user *to, const void *from, unsigned len);
__must_check unsigned long
_copy_from_user(void *to, const void __user *from, unsigned len);
__must_check unsigned long
copy_in_user(void __user *to, const void __user *from, unsigned len);
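
/*
 * copy_from_user() is the checked entry point. __compiletime_object_size()
 * (gcc's __builtin_object_size) reports the size of *to when the compiler
 * can prove it, or -1 when it cannot. A copy that is known at compile time
 * to overflow the destination is skipped, and with CONFIG_DEBUG_VM it also
 * warns. The return value is 0 on success, otherwise the number of bytes
 * that could not be copied.
 *
 * Typical usage (hypothetical caller, not part of this header):
 *
 *	struct foo kbuf;
 *
 *	if (copy_from_user(&kbuf, user_ptr, sizeof(kbuf)))
 *		return -EFAULT;
 */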
static inline unsigned long __must_check copy_from_user(void *to,
					  const void __user *from,
					  unsigned long n)
{
	int sz = __compiletime_object_size(to);

	might_fault();
	if (likely(sz == -1 || sz >= n))
		n = _copy_from_user(to, from, n);
#ifdef CONFIG_DEBUG_VM
	else
		WARN(1, "Buffer overflow detected!\n");
#endif
	return n;
}
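
/*
 * copy_to_user() only annotates that it may fault (and therefore sleep)
 * before handing off to the out-of-line _copy_to_user(), which is
 * expected to validate the user range before copying.
 */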
static __always_inline __must_check
int copy_to_user(void __user *dst, const void *src, unsigned size)
{
	might_fault();
	return _copy_to_user(dst, src, size);
}
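
/*
 * The __copy_* variants below skip the access_ok() range check; the
 * caller must have validated the user pointer already. For sizes known
 * at compile time they inline a single __get_user_asm()/__put_user_asm()
 * move instead of calling out to copy_user_generic(). The 10- and
 * 16-byte cases are split into an 8-byte move plus a 2- or 8-byte move,
 * bailing out early if the first move faults.
 */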
static __always_inline __must_check
int __copy_from_user(void *dst, const void __user *src, unsigned size)
{
	int ret = 0;

	might_fault();
	if (!__builtin_constant_p(size))
		return copy_user_generic(dst, (__force void *)src, size);
	switch (size) {
	case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
			      ret, "b", "b", "=q", 1);
		return ret;
	case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
			      ret, "w", "w", "=r", 2);
		return ret;
	case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
			      ret, "l", "k", "=r", 4);
		return ret;
	case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			      ret, "q", "", "=r", 8);
		return ret;
	case 10:
		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 10);
		if (unlikely(ret))
			return ret;
		__get_user_asm(*(u16 *)(8 + (char *)dst),
			       (u16 __user *)(8 + (char __user *)src),
			       ret, "w", "w", "=r", 2);
		return ret;
	case 16:
		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 16);
		if (unlikely(ret))
			return ret;
		__get_user_asm(*(u64 *)(8 + (char *)dst),
			       (u64 __user *)(8 + (char __user *)src),
			       ret, "q", "", "=r", 8);
		return ret;
	default:
		return copy_user_generic(dst, (__force void *)src, size);
	}
}
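
/*
 * __copy_to_user() mirrors __copy_from_user() with __put_user_asm().
 * The bare asm("":::"memory") between the two halves of the 10- and
 * 16-byte cases is a compiler barrier, presumably so gcc cannot reorder
 * or merge the second store with the first and a fault in the first
 * move is reported before the second is attempted. "4[(u16 *)src]" is
 * ordinary C array indexing, equivalent to ((u16 *)src)[4], i.e. the
 * u16 at byte offset 8.
 */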
static __always_inline __must_check
int __copy_to_user(void __user *dst, const void *src, unsigned size)
{
	int ret = 0;

	might_fault();
	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst, src, size);
	switch (size) {
	case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
			      ret, "b", "b", "iq", 1);
		return ret;
	case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
			      ret, "w", "w", "ir", 2);
		return ret;
	case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
			      ret, "l", "k", "ir", 4);
		return ret;
	case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			      ret, "q", "", "er", 8);
		return ret;
	case 10:
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "er", 10);
		if (unlikely(ret))
			return ret;
		asm("":::"memory");
		__put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
			       ret, "w", "w", "ir", 2);
		return ret;
	case 16:
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "er", 16);
		if (unlikely(ret))
			return ret;
		asm("":::"memory");
		__put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
			       ret, "q", "", "er", 8);
		return ret;
	default:
		return copy_user_generic((__force void *)dst, src, size);
	}
}
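
/*
 * __copy_in_user() copies user space to user space: each constant-size
 * case bounces the value through a kernel temporary with a get/put
 * pair, attempting the store only if the load succeeded.
 */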
static __always_inline __must_check
int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
{
	int ret = 0;

	might_fault();
	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst,
					 (__force void *)src, size);
	switch (size) {
	case 1: {
		u8 tmp;
		__get_user_asm(tmp, (u8 __user *)src,
			       ret, "b", "b", "=q", 1);
		if (likely(!ret))
			__put_user_asm(tmp, (u8 __user *)dst,
				       ret, "b", "b", "iq", 1);
		return ret;
	}
	case 2: {
		u16 tmp;
		__get_user_asm(tmp, (u16 __user *)src,
			       ret, "w", "w", "=r", 2);
		if (likely(!ret))
			__put_user_asm(tmp, (u16 __user *)dst,
				       ret, "w", "w", "ir", 2);
		return ret;
	}
	case 4: {
		u32 tmp;
		__get_user_asm(tmp, (u32 __user *)src,
			       ret, "l", "k", "=r", 4);
		if (likely(!ret))
			__put_user_asm(tmp, (u32 __user *)dst,
				       ret, "l", "k", "ir", 4);
		return ret;
	}
	case 8: {
		u64 tmp;
		__get_user_asm(tmp, (u64 __user *)src,
			       ret, "q", "", "=r", 8);
		if (likely(!ret))
			__put_user_asm(tmp, (u64 __user *)dst,
				       ret, "q", "", "er", 8);
		return ret;
	}
	default:
		return copy_user_generic((__force void *)dst,
					 (__force void *)src, size);
	}
}

__must_check long strnlen_user(const char __user *str, long n);
__must_check long __strnlen_user(const char __user *str, long n);
__must_check long strlen_user(const char __user *str);
__must_check unsigned long clear_user(void __user *mem, unsigned long len);
__must_check unsigned long __clear_user(void __user *mem, unsigned long len);
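
/*
 * The *_inatomic variants omit the might_fault() annotation: they are
 * intended for contexts where page faults are disabled and the caller
 * handles a short copy instead of sleeping, so they go straight to
 * copy_user_generic().
 */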
static __must_check __always_inline int
__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
{
	return copy_user_generic(dst, (__force const void *)src, size);
}

static __must_check __always_inline int
__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
{
	return copy_user_generic((__force void *)dst, src, size);
}
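
/*
 * The nocache variants copy with non-temporal stores so a large copy
 * does not evict the CPU cache. The zerorest argument selects whether
 * the unwritten tail of the destination is zeroed after a fault: 1 for
 * the may-sleep variant, 0 for the inatomic one.
 */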
extern long __copy_user_nocache(void *dst, const void __user *src,
				unsigned size, int zerorest);

static inline int
__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
{
	might_sleep();
	return __copy_user_nocache(dst, src, size, 1);
}

static inline int
__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
				  unsigned size)
{
	return __copy_user_nocache(dst, src, size, 0);
}
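
/*
 * copy_user_handle_tail() is the shared fixup path for the copy
 * routines: after a fault it retries the remainder byte by byte to
 * find the exact failure point and returns the number of bytes left
 * uncopied, zeroing that tail when zerorest is set.
 */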
unsigned long
copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);

#endif /* _ASM_X86_UACCESS_64_H */