uaccess.h

/*
 * Copyright 2004-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 *
 * Based on: include/asm-m68knommu/uaccess.h
 */

#ifndef __BLACKFIN_UACCESS_H
#define __BLACKFIN_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/string.h>

#include <asm/segment.h>
#include <asm/sections.h>

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)

static inline void set_fs(mm_segment_t fs)
{
	current_thread_info()->addr_limit = fs;
}

#define segment_eq(a, b) ((a) == (b))

#define VERIFY_READ	0
#define VERIFY_WRITE	1

#define access_ok(type, addr, size) _access_ok((unsigned long)(addr), (size))

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed;
 * with get_fs() == KERNEL_DS, checking is bypassed.
 */
#ifndef CONFIG_ACCESS_CHECK
static inline int _access_ok(unsigned long addr, unsigned long size) { return 1; }
#else
extern int _access_ok(unsigned long addr, unsigned long size);
#endif
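
/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * the usual kernel pattern for temporarily widening the address limit so
 * that routines taking __user pointers can be pointed at kernel memory.
 * The helper name and callback are hypothetical.
 */
static inline void example_with_kernel_ds(void (*fn)(void))
{
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);	/* access_ok()/_access_ok() checks now pass */
	fn();			/* code that dereferences __user pointers */
	set_fs(old_fs);		/* always restore the previous limit */
}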

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */
struct exception_table_entry {
	unsigned long insn, fixup;
};
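
/*
 * Illustrative sketch (editor's addition): given a faulting instruction
 * address, a fixup handler would scan the table and resume execution at the
 * matching continuation address.  The linear search and the names used here
 * are hypothetical; the kernel proper uses search_exception_tables().
 */
static inline unsigned long example_find_fixup(const struct exception_table_entry *tbl,
					       int entries, unsigned long fault_addr)
{
	int i;

	for (i = 0; i < entries; i++)
		if (tbl[i].insn == fault_addr)
			return tbl[i].fixup;	/* continue execution here */
	return 0;				/* no fixup: genuine fault */
}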

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 */

#define put_user(x, p)						\
({								\
	int _err = 0;						\
	typeof(*(p)) _x = (x);					\
	typeof(*(p)) __user *_p = (p);				\
	if (!access_ok(VERIFY_WRITE, _p, sizeof(*(_p)))) {	\
		_err = -EFAULT;					\
	}							\
	else {							\
		switch (sizeof (*(_p))) {			\
		case 1:						\
			__put_user_asm(_x, _p, B);		\
			break;					\
		case 2:						\
			__put_user_asm(_x, _p, W);		\
			break;					\
		case 4:						\
			__put_user_asm(_x, _p,  );		\
			break;					\
		case 8: {					\
			long _xl, _xh;				\
			_xl = ((__force long *)&_x)[0];		\
			_xh = ((__force long *)&_x)[1];		\
			__put_user_asm(_xl, ((__force long __user *)_p)+0, );	\
			__put_user_asm(_xh, ((__force long __user *)_p)+1, );	\
		} break;					\
		default:					\
			_err = __put_user_bad();		\
			break;					\
		}						\
	}							\
	_err;							\
})

#define __put_user(x, p) put_user(x, p)

static inline int bad_user_access_length(void)
{
	panic("bad_user_access_length");
	return -1;
}

#define __put_user_bad() (printk(KERN_INFO "put_user_bad %s:%d %s\n",	\
			  __FILE__, __LINE__, __func__),		\
			  bad_user_access_length(), (-EFAULT))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __ptr(x) ((unsigned long __force *)(x))

#define __put_user_asm(x, p, bhw)		\
	__asm__ (#bhw"[%1] = %0;\n\t"		\
		 : /* no outputs */		\
		 : "d" (x), "a" (__ptr(p)) : "memory")

#define get_user(x, ptr)					\
({								\
	int _err = 0;						\
	unsigned long _val = 0;					\
	const typeof(*(ptr)) __user *_p = (ptr);		\
	const size_t ptr_size = sizeof(*(_p));			\
	if (likely(access_ok(VERIFY_READ, _p, ptr_size))) {	\
		BUILD_BUG_ON(ptr_size >= 8);			\
		switch (ptr_size) {				\
		case 1:						\
			__get_user_asm(_val, _p, B, (Z));	\
			break;					\
		case 2:						\
			__get_user_asm(_val, _p, W, (Z));	\
			break;					\
		case 4:						\
			__get_user_asm(_val, _p,  ,  );		\
			break;					\
		}						\
	} else							\
		_err = -EFAULT;					\
	x = (__force typeof(*(ptr)))_val;			\
	_err;							\
})

#define __get_user(x, p) get_user(x, p)

#define __get_user_bad() (bad_user_access_length(), (-EFAULT))

#define __get_user_asm(x, ptr, bhw, option)	\
({						\
	__asm__ __volatile__ (			\
		"%0 =" #bhw "[%1]" #option ";"	\
		: "=d" (x)			\
		: "a" (__ptr(ptr)));		\
})
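
/*
 * Illustrative sketch (editor's addition): fetching a scalar argument from
 * user space with get_user().  The names are hypothetical; note that 8-byte
 * reads are rejected at compile time by the BUILD_BUG_ON() above.
 */
static inline int example_fetch_arg(unsigned short __user *uptr, unsigned short *out)
{
	unsigned short val;

	if (get_user(val, uptr))	/* 2-byte load via __get_user_asm(..., W, (Z)) */
		return -EFAULT;
	*out = val;
	return 0;
}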

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

static inline unsigned long __must_check
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
	memcpy(to, (const void __force *)from, n);
	return 0;
}

static inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
	memcpy((void __force *)to, from, n);
	SSYNC();
	return 0;
}

static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
	if (likely(access_ok(VERIFY_READ, from, n)))
		return __copy_from_user(to, from, n);
	memset(to, 0, n);
	return n;
}

static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
	if (likely(access_ok(VERIFY_WRITE, to, n)))
		return __copy_to_user(to, from, n);
	return n;
}
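
/*
 * Illustrative sketch (editor's addition): a read()-style copy-out.  The
 * function and buffer names are hypothetical; copy_to_user() returns the
 * number of bytes that could NOT be copied, so 0 means success.
 */
static inline ssize_t example_copy_out(void __user *ubuf, const void *kbuf, size_t len)
{
	if (copy_to_user(ubuf, kbuf, len))
		return -EFAULT;
	return len;
}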

/*
 * Copy a null terminated string from userspace.
 */
static inline long __must_check
strncpy_from_user(char *dst, const char __user *src, long count)
{
	char *tmp;
	if (!access_ok(VERIFY_READ, src, 1))
		return -EFAULT;
	strncpy(dst, (const char __force *)src, count);
	for (tmp = dst; *tmp && count > 0; tmp++, count--)
		;
	return (tmp - dst);
}

/*
 * Get the size of a string in user space.
 *  src: The string to measure
 *    n: The maximum valid length
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 * If the string is too long, returns a value greater than n.
 */
static inline long __must_check strnlen_user(const char __user *src, long n)
{
	if (!access_ok(VERIFY_READ, src, 1))
		return 0;
	return strnlen((const char __force *)src, n) + 1;
}

static inline long __must_check strlen_user(const char __user *src)
{
	if (!access_ok(VERIFY_READ, src, 1))
		return 0;
	return strlen((const char __force *)src) + 1;
}
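
/*
 * Illustrative sketch (editor's addition): pulling a NUL-terminated name
 * from user space into a fixed-size kernel buffer.  The names and the
 * 32-byte limit are hypothetical; dst is assumed to hold at least 32 bytes.
 */
static inline long example_get_name(char *dst, const char __user *uname)
{
	long len = strnlen_user(uname, 32);	/* includes the trailing NUL */

	if (len == 0)				/* 0 means faulting address */
		return -EFAULT;
	if (len > 32)				/* longer than the buffer allows */
		return -ENAMETOOLONG;
	return strncpy_from_user(dst, uname, len);
}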

/*
 * Zero Userspace
 */
static inline unsigned long __must_check
__clear_user(void __user *to, unsigned long n)
{
	if (!access_ok(VERIFY_WRITE, to, n))
		return n;
	memset((void __force *)to, 0, n);
	return 0;
}

#define clear_user(to, n) __clear_user(to, n)
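
/*
 * Illustrative sketch (editor's addition): zeroing the unwritten tail of a
 * user buffer, e.g. after a short copy-out.  The names are hypothetical.
 */
static inline int example_zero_tail(void __user *ubuf, size_t copied, size_t total)
{
	if (copied < total &&
	    clear_user((char __user *)ubuf + copied, total - copied))
		return -EFAULT;
	return 0;
}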

/* How to interpret these return values:
 *	CORE:      can be accessed by core load or dma memcpy
 *	CORE_ONLY: can only be accessed by core load
 *	DMA:       can only be accessed by dma memcpy
 *	IDMA:      can only be accessed by interprocessor dma memcpy (BF561)
 *	ITEST:     can be accessed by isram memcpy or dma memcpy
 */
enum {
	BFIN_MEM_ACCESS_CORE = 0,
	BFIN_MEM_ACCESS_CORE_ONLY,
	BFIN_MEM_ACCESS_DMA,
	BFIN_MEM_ACCESS_IDMA,
	BFIN_MEM_ACCESS_ITEST,
};

/**
 * bfin_mem_access_type() - what kind of memory access is required
 * @addr: the address to check
 * @size: number of bytes needed
 * @return: <0 is error, >=0 is BFIN_MEM_ACCESS_xxx enum (see above)
 */
int bfin_mem_access_type(unsigned long addr, unsigned long size);
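
/*
 * Illustrative sketch (editor's addition): dispatching on the return value
 * of bfin_mem_access_type().  The helper name is hypothetical, and the DMA
 * paths are only noted in comments since the DMA/isram copy routines are
 * declared elsewhere.
 */
static inline int example_copy_by_type(void *dst, const void *src, size_t size)
{
	int type = bfin_mem_access_type((unsigned long)src, size);

	if (type < 0)
		return type;			/* error code from the check */

	switch (type) {
	case BFIN_MEM_ACCESS_CORE:
	case BFIN_MEM_ACCESS_CORE_ONLY:
		memcpy(dst, src, size);		/* plain core load/store is fine */
		return 0;
	case BFIN_MEM_ACCESS_DMA:
	case BFIN_MEM_ACCESS_IDMA:
	case BFIN_MEM_ACCESS_ITEST:
		/* would need a dma/isram memcpy routine; omitted here */
		return -EINVAL;
	default:
		return -EINVAL;
	}
}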

#endif	/* __BLACKFIN_UACCESS_H */