  1. /*
  2. * S390 version
  3. * Copyright IBM Corp. 1999, 2000
  4. * Author(s): Hartmut Penner (hp@de.ibm.com),
  5. * Martin Schwidefsky (schwidefsky@de.ibm.com)
  6. *
  7. * Derived from "include/asm-i386/uaccess.h"
  8. */
  9. #ifndef __S390_UACCESS_H
  10. #define __S390_UACCESS_H
  11. /*
  12. * User space memory access functions
  13. */
  14. #include <linux/sched.h>
  15. #include <linux/errno.h>
  16. #include <asm/ctl_reg.h>
#define VERIFY_READ	0
#define VERIFY_WRITE	1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not. If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */
#define MAKE_MM_SEG(a)	((mm_segment_t) { (a) })

#define KERNEL_DS	MAKE_MM_SEG(0)
#define USER_DS		MAKE_MM_SEG(1)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current->thread.mm_segment)

/*
 * set_fs - switch the current task between the kernel and user
 * address space.  Besides recording the new segment, reload control
 * register 7 with either the user or the kernel ASCE, depending on
 * the segment's ar4 flag; this is what actually changes which address
 * space the uaccess primitives operate on.
 */
#define set_fs(x) \
({									\
	unsigned long __pto;						\
	current->thread.mm_segment = (x);				\
	__pto = current->thread.mm_segment.ar4 ?			\
		S390_lowcore.user_asce : S390_lowcore.kernel_asce;	\
	__ctl_load(__pto, 7, 7);					\
})

/* Two segments are equal iff they select the same address space. */
#define segment_eq(a,b)	((a).ar4 == (b).ar4)
/*
 * On s390 user and kernel memory are accessed through separate address
 * spaces (selected via set_fs() above), so there is no software range
 * check to perform: every address/size combination is accepted.
 */
static inline int __range_ok(unsigned long addr, unsigned long size)
{
	return 1;
}

/* Type-check the user pointer, then do the (trivial) range check. */
#define __access_ok(addr, size)				\
({							\
	__chk_user_ptr(addr);				\
	__range_ok((unsigned long)(addr), (size));	\
})

/* 'type' (VERIFY_READ/VERIFY_WRITE) is ignored on this architecture. */
#define access_ok(type, addr, size) __access_ok(addr, size)
  50. /*
  51. * The exception table consists of pairs of addresses: the first is the
  52. * address of an instruction that is allowed to fault, and the second is
  53. * the address at which the program should continue. No registers are
  54. * modified, so it is entirely up to the continuation code to figure out
  55. * what to do.
  56. *
  57. * All the routines below use bits of fixup code that are out of line
  58. * with the main instruction path. This means when everything is well,
  59. * we don't even have to jump over them. Further, they do not intrude
  60. * on our cache or tlb entries.
  61. */
/*
 * Both fields are offsets relative to their own location (see
 * extable_fixup() and ARCH_HAS_RELATIVE_EXTABLE below): 'insn' locates
 * the faulting instruction, 'fixup' the continuation code.
 */
struct exception_table_entry
{
	int insn, fixup;
};
  66. static inline unsigned long extable_fixup(const struct exception_table_entry *x)
  67. {
  68. return (unsigned long)&x->fixup + x->fixup;
  69. }
  70. #define ARCH_HAS_RELATIVE_EXTABLE
  71. /**
  72. * __copy_from_user: - Copy a block of data from user space, with less checking.
  73. * @to: Destination address, in kernel space.
  74. * @from: Source address, in user space.
  75. * @n: Number of bytes to copy.
  76. *
  77. * Context: User context only. This function may sleep if pagefaults are
  78. * enabled.
  79. *
  80. * Copy data from user space to kernel space. Caller must check
  81. * the specified block with access_ok() before calling this function.
  82. *
  83. * Returns number of bytes that could not be copied.
  84. * On success, this will be zero.
  85. *
  86. * If some data could not be copied, this function will pad the copied
  87. * data to the requested size using zero bytes.
  88. */
  89. unsigned long __must_check __copy_from_user(void *to, const void __user *from,
  90. unsigned long n);
  91. /**
  92. * __copy_to_user: - Copy a block of data into user space, with less checking.
  93. * @to: Destination address, in user space.
  94. * @from: Source address, in kernel space.
  95. * @n: Number of bytes to copy.
  96. *
  97. * Context: User context only. This function may sleep if pagefaults are
  98. * enabled.
  99. *
  100. * Copy data from kernel space to user space. Caller must check
  101. * the specified block with access_ok() before calling this function.
  102. *
  103. * Returns number of bytes that could not be copied.
  104. * On success, this will be zero.
  105. */
  106. unsigned long __must_check __copy_to_user(void __user *to, const void *from,
  107. unsigned long n);
  108. #define __copy_to_user_inatomic __copy_to_user
  109. #define __copy_from_user_inatomic __copy_from_user
  110. #ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
/*
 * Single-instruction user copy using MVCOS ("move with optional
 * specifications"), available with the z10 facilities.  The 'spec'
 * value is passed in general register 0 and selects which operand is
 * accessed in the user address space.  Evaluates to 0 on success; a
 * faulting access is redirected by the exception table to label 3,
 * which returns -EFAULT.
 */
#define __put_get_user_asm(to, from, size, spec)		\
({								\
	register unsigned long __reg0 asm("0") = spec;		\
	int __rc;						\
								\
	asm volatile(						\
		"0: mvcos %1,%3,%2\n"				\
		"1: xr %0,%0\n"					\
		"2:\n"						\
		".pushsection .fixup, \"ax\"\n"			\
		"3: lhi %0,%5\n"				\
		" jg 2b\n"					\
		".popsection\n"					\
		EX_TABLE(0b,3b) EX_TABLE(1b,3b)			\
		: "=d" (__rc), "+Q" (*(to))			\
		: "d" (size), "Q" (*(from)),			\
		  "d" (__reg0), "K" (-EFAULT)			\
		: "cc");					\
	__rc;							\
})
  131. static inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
  132. {
  133. unsigned long spec = 0x810000UL;
  134. int rc;
  135. switch (size) {
  136. case 1:
  137. rc = __put_get_user_asm((unsigned char __user *)ptr,
  138. (unsigned char *)x,
  139. size, spec);
  140. break;
  141. case 2:
  142. rc = __put_get_user_asm((unsigned short __user *)ptr,
  143. (unsigned short *)x,
  144. size, spec);
  145. break;
  146. case 4:
  147. rc = __put_get_user_asm((unsigned int __user *)ptr,
  148. (unsigned int *)x,
  149. size, spec);
  150. break;
  151. case 8:
  152. rc = __put_get_user_asm((unsigned long __user *)ptr,
  153. (unsigned long *)x,
  154. size, spec);
  155. break;
  156. };
  157. return rc;
  158. }
  159. static inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
  160. {
  161. unsigned long spec = 0x81UL;
  162. int rc;
  163. switch (size) {
  164. case 1:
  165. rc = __put_get_user_asm((unsigned char *)x,
  166. (unsigned char __user *)ptr,
  167. size, spec);
  168. break;
  169. case 2:
  170. rc = __put_get_user_asm((unsigned short *)x,
  171. (unsigned short __user *)ptr,
  172. size, spec);
  173. break;
  174. case 4:
  175. rc = __put_get_user_asm((unsigned int *)x,
  176. (unsigned int __user *)ptr,
  177. size, spec);
  178. break;
  179. case 8:
  180. rc = __put_get_user_asm((unsigned long *)x,
  181. (unsigned long __user *)ptr,
  182. size, spec);
  183. break;
  184. };
  185. return rc;
  186. }
  187. #else /* CONFIG_HAVE_MARCH_Z10_FEATURES */
  188. static inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
  189. {
  190. size = __copy_to_user(ptr, x, size);
  191. return size ? -EFAULT : 0;
  192. }
  193. static inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
  194. {
  195. size = __copy_from_user(x, ptr, size);
  196. return size ? -EFAULT : 0;
  197. }
  198. #endif /* CONFIG_HAVE_MARCH_Z10_FEATURES */
  199. /*
  200. * These are the main single-value transfer routines. They automatically
  201. * use the right size if we just have the right pointer type.
  202. */
/*
 * __put_user - store a single value to user space, without access_ok().
 * @x:   value to store (evaluated exactly once, via the __x local)
 * @ptr: destination address in user space
 *
 * Evaluates to 0 on success or -EFAULT on fault.  Sizes other than
 * 1, 2, 4 and 8 bytes are diagnosed through __put_user_bad().
 */
#define __put_user(x, ptr)					\
({								\
	__typeof__(*(ptr)) __x = (x);				\
	int __pu_err = -EFAULT;					\
	__chk_user_ptr(ptr);					\
	switch (sizeof (*(ptr))) {				\
	case 1:							\
	case 2:							\
	case 4:							\
	case 8:							\
		__pu_err = __put_user_fn(&__x, ptr,		\
					 sizeof(*(ptr)));	\
		break;						\
	default:						\
		__put_user_bad();				\
		break;						\
	}							\
	__builtin_expect(__pu_err, 0);				\
})
/*
 * put_user - store a single value to user space.
 * Like __put_user(), but may sleep servicing a page fault
 * (might_fault() annotates/checks that context).
 */
#define put_user(x, ptr)					\
({								\
	might_fault();						\
	__put_user(x, ptr);					\
})
  227. int __put_user_bad(void) __attribute__((noreturn));
/*
 * __get_user - fetch a single value from user space, without access_ok().
 * @x:   kernel lvalue receiving the value
 * @ptr: source address in user space
 *
 * Each size case reads into a zero-initialized local of matching width
 * and always assigns it to @x, so on fault @x is set to zero rather
 * than left undefined.  Evaluates to 0 on success or -EFAULT on fault;
 * sizes other than 1, 2, 4 and 8 bytes are diagnosed through
 * __get_user_bad().
 */
#define __get_user(x, ptr)					\
({								\
	int __gu_err = -EFAULT;					\
	__chk_user_ptr(ptr);					\
	switch (sizeof(*(ptr))) {				\
	case 1: {						\
		unsigned char __x = 0;				\
		__gu_err = __get_user_fn(&__x, ptr,		\
					 sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 2: {						\
		unsigned short __x = 0;				\
		__gu_err = __get_user_fn(&__x, ptr,		\
					 sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 4: {						\
		unsigned int __x = 0;				\
		__gu_err = __get_user_fn(&__x, ptr,		\
					 sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 8: {						\
		unsigned long long __x = 0;			\
		__gu_err = __get_user_fn(&__x, ptr,		\
					 sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	default:						\
		__get_user_bad();				\
		break;						\
	}							\
	__builtin_expect(__gu_err, 0);				\
})
/*
 * get_user - fetch a single value from user space.
 * Like __get_user(), but may sleep servicing a page fault
 * (might_fault() annotates/checks that context).
 */
#define get_user(x, ptr)					\
({								\
	might_fault();						\
	__get_user(x, ptr);					\
})
  272. int __get_user_bad(void) __attribute__((noreturn));
  273. #define __put_user_unaligned __put_user
  274. #define __get_user_unaligned __get_user
  275. extern void __compiletime_error("usercopy buffer size is too small")
  276. __bad_copy_user(void);
/*
 * Runtime warning for a detected usercopy overflow when the copy
 * length is not a compile-time constant (the constant case is a hard
 * build error via __bad_copy_user() above).
 */
static inline void copy_user_overflow(int size, unsigned long count)
{
	WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
}
  281. /**
  282. * copy_to_user: - Copy a block of data into user space.
  283. * @to: Destination address, in user space.
  284. * @from: Source address, in kernel space.
  285. * @n: Number of bytes to copy.
  286. *
  287. * Context: User context only. This function may sleep if pagefaults are
  288. * enabled.
  289. *
  290. * Copy data from kernel space to user space.
  291. *
  292. * Returns number of bytes that could not be copied.
  293. * On success, this will be zero.
  294. */
  295. static inline unsigned long __must_check
  296. copy_to_user(void __user *to, const void *from, unsigned long n)
  297. {
  298. might_fault();
  299. return __copy_to_user(to, from, n);
  300. }
  301. /**
  302. * copy_from_user: - Copy a block of data from user space.
  303. * @to: Destination address, in kernel space.
  304. * @from: Source address, in user space.
  305. * @n: Number of bytes to copy.
  306. *
  307. * Context: User context only. This function may sleep if pagefaults are
  308. * enabled.
  309. *
  310. * Copy data from user space to kernel space.
  311. *
  312. * Returns number of bytes that could not be copied.
  313. * On success, this will be zero.
  314. *
  315. * If some data could not be copied, this function will pad the copied
  316. * data to the requested size using zero bytes.
  317. */
static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
	/* Compile-time known size of the destination object, or -1. */
	unsigned int sz = __compiletime_object_size(to);
	might_fault();
	/*
	 * Hardening: refuse a copy that would overflow the kernel
	 * destination buffer.  A constant oversized length is a build
	 * error; otherwise warn at runtime and copy nothing.
	 */
	if (unlikely(sz != -1 && sz < n)) {
		if (!__builtin_constant_p(n))
			copy_user_overflow(sz, n);
		else
			__bad_copy_user();
		return n;	/* report everything as uncopied */
	}
	return __copy_from_user(to, from, n);
}
unsigned long __must_check
__copy_in_user(void __user *to, const void __user *from, unsigned long n);

/*
 * copy_in_user - copy a block between two user-space addresses.
 * May sleep on page faults; returns the number of bytes not copied
 * (zero on success).
 */
static inline unsigned long __must_check
copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	might_fault();
	return __copy_in_user(to, from, n);
}
  340. /*
  341. * Copy a null terminated string from userspace.
  342. */
long __strncpy_from_user(char *dst, const char __user *src, long count);

/*
 * strncpy_from_user - copy a NUL-terminated string from user space.
 * Copies at most @count bytes from @src to @dst; may sleep on page
 * faults.  Result semantics are those of __strncpy_from_user().
 */
static inline long __must_check
strncpy_from_user(char *dst, const char __user *src, long count)
{
	might_fault();
	return __strncpy_from_user(dst, src, count);
}
unsigned long __must_check __strnlen_user(const char __user *src, unsigned long count);

/*
 * strnlen_user - get the length of a user-space string, bounded by @n.
 * May sleep on page faults.  Per the strlen_user() documentation
 * below, the result includes the terminating NUL; 0 means exception.
 */
static inline unsigned long strnlen_user(const char __user *src, unsigned long n)
{
	might_fault();
	return __strnlen_user(src, n);
}
  356. /**
  357. * strlen_user: - Get the size of a string in user space.
  358. * @str: The string to measure.
  359. *
  360. * Context: User context only. This function may sleep if pagefaults are
  361. * enabled.
  362. *
  363. * Get the size of a NUL-terminated string in user space.
  364. *
  365. * Returns the size of the string INCLUDING the terminating NUL.
  366. * On exception, returns 0.
  367. *
  368. * If there is a limit on the length of a valid string, you may wish to
  369. * consider using strnlen_user() instead.
  370. */
  371. #define strlen_user(str) strnlen_user(str, ~0UL)
  372. /*
  373. * Zero Userspace
  374. */
unsigned long __must_check __clear_user(void __user *to, unsigned long size);

/*
 * clear_user - zero @n bytes of user-space memory at @to.
 * May sleep on page faults.  Returns the number of bytes that could
 * not be cleared (zero on success).
 */
static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
	might_fault();
	return __clear_user(to, n);
}
  381. int copy_to_user_real(void __user *dest, void *src, unsigned long count);
  382. void s390_kernel_write(void *dst, const void *src, size_t size);
  383. #endif /* __S390_UACCESS_H */