uaccess.h
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 1997, 1998, 1999, 2000, 03, 04 by Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2007 Maciej W. Rozycki
 */
#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/thread_info.h>

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */
#ifdef CONFIG_32BIT

#define __UA_LIMIT      0x80000000UL

#define __UA_ADDR       ".word"
#define __UA_LA         "la"
#define __UA_ADDU       "addu"
#define __UA_t0         "$8"
#define __UA_t1         "$9"

#endif /* CONFIG_32BIT */

#ifdef CONFIG_64BIT

extern u64 __ua_limit;

#define __UA_LIMIT      __ua_limit

#define __UA_ADDR       ".dword"
#define __UA_LA         "dla"
#define __UA_ADDU       "daddu"
#define __UA_t0         "$12"
#define __UA_t1         "$13"

#endif /* CONFIG_64BIT */

/*
 * USER_DS is a bitmask that has the bits set that may not be set in a valid
 * userspace address.  Note that we limit 32-bit userspace to 0x7fff8000 but
 * the arithmetic we're doing only works if the limit is a power of two, so
 * we use 0x80000000 here on 32-bit kernels.  If a process passes an invalid
 * address in this range it's the process's problem, not ours :-)
 */

#define KERNEL_DS       ((mm_segment_t) { 0UL })
#define USER_DS         ((mm_segment_t) { __UA_LIMIT })

#define VERIFY_READ     0
#define VERIFY_WRITE    1

#define get_ds()        (KERNEL_DS)
#define get_fs()        (current_thread_info()->addr_limit)
#define set_fs(x)       (current_thread_info()->addr_limit = (x))

#define segment_eq(a, b)        ((a).seg == (b).seg)

/*
 * Is an address valid?  This does a straightforward calculation rather
 * than tests.
 *
 * Address valid if:
 *  - "addr" doesn't have any high-bits set
 *  - AND "size" doesn't have any high-bits set
 *  - AND "addr+size" doesn't have any high-bits set
 *  - OR we are in kernel mode.
 *
 * __ua_size() is a trick to avoid runtime checking of positive constant
 * sizes; for those we already know at compile time that the size is ok.
 */
#define __ua_size(size) \
        ((__builtin_constant_p(size) && (signed long) (size) > 0) ? 0 : (size))

/*
 * access_ok: - Checks if a user space pointer is valid
 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
 *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
 *        to write to a block, it is always safe to read from it.
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only.  This function may sleep.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */
#define __access_mask get_fs().seg

#define __access_ok(addr, size, mask) \
({ \
        unsigned long __addr = (unsigned long) (addr); \
        unsigned long __size = size; \
        unsigned long __mask = mask; \
        unsigned long __ok; \
        \
        __chk_user_ptr(addr); \
        __ok = (signed long)(__mask & (__addr | (__addr + __size) | \
                __ua_size(__size))); \
        __ok == 0; \
})

#define access_ok(type, addr, size) \
        likely(__access_ok((addr), (size), __access_mask))
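
/*
 * Illustrative sketch (not part of this header; `ubuf' and `len' are
 * hypothetical): a typical caller validates a user buffer once, then
 * uses the unchecked __-prefixed accessors on it.
 *
 *	if (!access_ok(VERIFY_WRITE, ubuf, len))
 *		return -EFAULT;
 *	// ubuf may now be passed to __copy_to_user() and friends.
 */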

/*
 * put_user: - Write a simple value into user space.
 * @x: Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user(x, ptr) \
        __put_user_check((x), (ptr), sizeof(*(ptr)))
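
/*
 * Illustrative sketch (hypothetical `uptr'): returning a single int
 * result to user space, propagating -EFAULT on failure.
 *
 *	int __user *uptr;
 *	int value = 42;
 *
 *	if (put_user(value, uptr))
 *		return -EFAULT;
 */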

/*
 * get_user: - Get a simple variable from user space.
 * @x: Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define get_user(x, ptr) \
        __get_user_check((x), (ptr), sizeof(*(ptr)))
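
/*
 * Illustrative sketch (hypothetical `uptr'): fetching a single int
 * argument from user space; -EFAULT is returned on a fault.
 *
 *	int value;
 *
 *	if (get_user(value, uptr))
 *		return -EFAULT;
 */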

/*
 * __put_user: - Write a simple value into user space, with less checking.
 * @x: Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user(x, ptr) \
        __put_user_nocheck((x), (ptr), sizeof(*(ptr)))

/*
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x: Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user(x, ptr) \
        __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
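
/*
 * Illustrative sketch for the unchecked variants (hypothetical `uaddr',
 * `a', `b'): one access_ok() check can cover several __get_user() /
 * __put_user() calls on the same block, since %VERIFY_WRITE is a
 * superset of %VERIFY_READ.
 *
 *	int a, b = 1, err;
 *
 *	if (!access_ok(VERIFY_WRITE, uaddr, 2 * sizeof(int)))
 *		return -EFAULT;
 *	err  = __get_user(a, &uaddr[0]);
 *	err |= __put_user(b, &uaddr[1]);
 *	if (err)
 *		return -EFAULT;
 */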

struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Yuck.  We need two variants, one for 64-bit operation and one
 * for 32-bit mode and old iron.
 */
#ifdef CONFIG_32BIT
#define __GET_USER_DW(val, ptr) __get_user_asm_ll32(val, ptr)
#endif
#ifdef CONFIG_64BIT
#define __GET_USER_DW(val, ptr) __get_user_asm(val, "ld", ptr)
#endif

extern void __get_user_unknown(void);

#define __get_user_common(val, size, ptr) \
do { \
        switch (size) { \
        case 1: __get_user_asm(val, "lb", ptr); break; \
        case 2: __get_user_asm(val, "lh", ptr); break; \
        case 4: __get_user_asm(val, "lw", ptr); break; \
        case 8: __GET_USER_DW(val, ptr); break; \
        default: __get_user_unknown(); break; \
        } \
} while (0)

#define __get_user_nocheck(x, ptr, size) \
({ \
        int __gu_err; \
        \
        __chk_user_ptr(ptr); \
        __get_user_common((x), size, ptr); \
        __gu_err; \
})

#define __get_user_check(x, ptr, size) \
({ \
        int __gu_err = -EFAULT; \
        const __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \
        \
        might_fault(); \
        if (likely(access_ok(VERIFY_READ, __gu_ptr, size))) \
                __get_user_common((x), size, __gu_ptr); \
        \
        __gu_err; \
})

#define __get_user_asm(val, insn, addr) \
{ \
        long __gu_tmp; \
        \
        __asm__ __volatile__( \
        "1:     " insn " %1, %3                         \n" \
        "2:                                             \n" \
        "       .section .fixup,\"ax\"                  \n" \
        "3:     li %0, %4                               \n" \
        "       j 2b                                    \n" \
        "       .previous                               \n" \
        "       .section __ex_table,\"a\"               \n" \
        "       " __UA_ADDR "\t1b, 3b                   \n" \
        "       .previous                               \n" \
        : "=r" (__gu_err), "=r" (__gu_tmp) \
        : "0" (0), "o" (__m(addr)), "i" (-EFAULT)); \
        \
        (val) = (__typeof__(*(addr))) __gu_tmp; \
}

/*
 * Get a 64-bit long long using 32-bit registers.
 */
#define __get_user_asm_ll32(val, addr) \
{ \
        union { \
                unsigned long long l; \
                __typeof__(*(addr)) t; \
        } __gu_tmp; \
        \
        __asm__ __volatile__( \
        "1:     lw %1, (%3)                             \n" \
        "2:     lw %D1, 4(%3)                           \n" \
        "3:     .section .fixup,\"ax\"                  \n" \
        "4:     li %0, %4                               \n" \
        "       move %1, $0                             \n" \
        "       move %D1, $0                            \n" \
        "       j 3b                                    \n" \
        "       .previous                               \n" \
        "       .section __ex_table,\"a\"               \n" \
        "       " __UA_ADDR " 1b, 4b                    \n" \
        "       " __UA_ADDR " 2b, 4b                    \n" \
        "       .previous                               \n" \
        : "=r" (__gu_err), "=&r" (__gu_tmp.l) \
        : "0" (0), "r" (addr), "i" (-EFAULT)); \
        \
        (val) = __gu_tmp.t; \
}

/*
 * Yuck.  We need two variants, one for 64-bit operation and one
 * for 32-bit mode and old iron.
 */
#ifdef CONFIG_32BIT
#define __PUT_USER_DW(ptr) __put_user_asm_ll32(ptr)
#endif
#ifdef CONFIG_64BIT
#define __PUT_USER_DW(ptr) __put_user_asm("sd", ptr)
#endif

#define __put_user_nocheck(x, ptr, size) \
({ \
        __typeof__(*(ptr)) __pu_val; \
        int __pu_err = 0; \
        \
        __chk_user_ptr(ptr); \
        __pu_val = (x); \
        switch (size) { \
        case 1: __put_user_asm("sb", ptr); break; \
        case 2: __put_user_asm("sh", ptr); break; \
        case 4: __put_user_asm("sw", ptr); break; \
        case 8: __PUT_USER_DW(ptr); break; \
        default: __put_user_unknown(); break; \
        } \
        __pu_err; \
})

#define __put_user_check(x, ptr, size) \
({ \
        __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
        __typeof__(*(ptr)) __pu_val = (x); \
        int __pu_err = -EFAULT; \
        \
        might_fault(); \
        if (likely(access_ok(VERIFY_WRITE, __pu_addr, size))) { \
                switch (size) { \
                case 1: __put_user_asm("sb", __pu_addr); break; \
                case 2: __put_user_asm("sh", __pu_addr); break; \
                case 4: __put_user_asm("sw", __pu_addr); break; \
                case 8: __PUT_USER_DW(__pu_addr); break; \
                default: __put_user_unknown(); break; \
                } \
        } \
        __pu_err; \
})

#define __put_user_asm(insn, ptr) \
{ \
        __asm__ __volatile__( \
        "1:     " insn " %z2, %3        # __put_user_asm\n" \
        "2:                                             \n" \
        "       .section .fixup,\"ax\"                  \n" \
        "3:     li %0, %4                               \n" \
        "       j 2b                                    \n" \
        "       .previous                               \n" \
        "       .section __ex_table,\"a\"               \n" \
        "       " __UA_ADDR " 1b, 3b                    \n" \
        "       .previous                               \n" \
        : "=r" (__pu_err) \
        : "0" (0), "Jr" (__pu_val), "o" (__m(ptr)), \
          "i" (-EFAULT)); \
}

#define __put_user_asm_ll32(ptr) \
{ \
        __asm__ __volatile__( \
        "1:     sw %2, (%3)     # __put_user_asm_ll32   \n" \
        "2:     sw %D2, 4(%3)                           \n" \
        "3:                                             \n" \
        "       .section .fixup,\"ax\"                  \n" \
        "4:     li %0, %4                               \n" \
        "       j 3b                                    \n" \
        "       .previous                               \n" \
        "       .section __ex_table,\"a\"               \n" \
        "       " __UA_ADDR " 1b, 4b                    \n" \
        "       " __UA_ADDR " 2b, 4b                    \n" \
        "       .previous" \
        : "=r" (__pu_err) \
        : "0" (0), "r" (__pu_val), "r" (ptr), \
          "i" (-EFAULT)); \
}

extern void __put_user_unknown(void);

/*
 * put_user_unaligned: - Write a simple value into user space.
 * @x: Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user_unaligned(x, ptr) \
        __put_user_unaligned_check((x), (ptr), sizeof(*(ptr)))

/*
 * get_user_unaligned: - Get a simple variable from user space.
 * @x: Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define get_user_unaligned(x, ptr) \
        __get_user_unaligned_check((x), (ptr), sizeof(*(ptr)))
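
/*
 * Illustrative sketch (hypothetical `ubase'): the unaligned variants are
 * for user addresses that may not be naturally aligned, e.g. a 32-bit
 * quantity at an odd byte offset inside a packed record.
 *
 *	u32 value;
 *
 *	if (get_user_unaligned(value, (u32 __user *)(ubase + 1)))
 *		return -EFAULT;
 */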

/*
 * __put_user_unaligned: - Write a simple value into user space, with less checking.
 * @x: Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user_unaligned(x, ptr) \
        __put_user_unaligned_nocheck((x), (ptr), sizeof(*(ptr)))

/*
 * __get_user_unaligned: - Get a simple variable from user space, with less checking.
 * @x: Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user_unaligned(x, ptr) \
        __get_user_unaligned_nocheck((x), (ptr), sizeof(*(ptr)))

/*
 * Yuck.  We need two variants, one for 64-bit operation and one
 * for 32-bit mode and old iron.
 */
#ifdef CONFIG_32BIT
#define __GET_USER_UNALIGNED_DW(val, ptr) \
        __get_user_unaligned_asm_ll32(val, ptr)
#endif
#ifdef CONFIG_64BIT
#define __GET_USER_UNALIGNED_DW(val, ptr) \
        __get_user_unaligned_asm(val, "uld", ptr)
#endif

extern void __get_user_unaligned_unknown(void);

#define __get_user_unaligned_common(val, size, ptr) \
do { \
        switch (size) { \
        case 1: __get_user_asm(val, "lb", ptr); break; \
        case 2: __get_user_unaligned_asm(val, "ulh", ptr); break; \
        case 4: __get_user_unaligned_asm(val, "ulw", ptr); break; \
        case 8: __GET_USER_UNALIGNED_DW(val, ptr); break; \
        default: __get_user_unaligned_unknown(); break; \
        } \
} while (0)

#define __get_user_unaligned_nocheck(x, ptr, size) \
({ \
        int __gu_err; \
        \
        __get_user_unaligned_common((x), size, ptr); \
        __gu_err; \
})

#define __get_user_unaligned_check(x, ptr, size) \
({ \
        int __gu_err = -EFAULT; \
        const __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \
        \
        if (likely(access_ok(VERIFY_READ, __gu_ptr, size))) \
                __get_user_unaligned_common((x), size, __gu_ptr); \
        \
        __gu_err; \
})

#define __get_user_unaligned_asm(val, insn, addr) \
{ \
        long __gu_tmp; \
        \
        __asm__ __volatile__( \
        "1:     " insn " %1, %3                         \n" \
        "2:                                             \n" \
        "       .section .fixup,\"ax\"                  \n" \
        "3:     li %0, %4                               \n" \
        "       j 2b                                    \n" \
        "       .previous                               \n" \
        "       .section __ex_table,\"a\"               \n" \
        "       " __UA_ADDR "\t1b, 3b                   \n" \
        "       " __UA_ADDR "\t1b + 4, 3b               \n" \
        "       .previous                               \n" \
        : "=r" (__gu_err), "=r" (__gu_tmp) \
        : "0" (0), "o" (__m(addr)), "i" (-EFAULT)); \
        \
        (val) = (__typeof__(*(addr))) __gu_tmp; \
}

/*
 * Get a 64-bit long long using 32-bit registers.
 */
#define __get_user_unaligned_asm_ll32(val, addr) \
{ \
        unsigned long long __gu_tmp; \
        \
        __asm__ __volatile__( \
        "1:     ulw %1, (%3)                            \n" \
        "2:     ulw %D1, 4(%3)                          \n" \
        "       move %0, $0                             \n" \
        "3:     .section .fixup,\"ax\"                  \n" \
        "4:     li %0, %4                               \n" \
        "       move %1, $0                             \n" \
        "       move %D1, $0                            \n" \
        "       j 3b                                    \n" \
        "       .previous                               \n" \
        "       .section __ex_table,\"a\"               \n" \
        "       " __UA_ADDR " 1b, 4b                    \n" \
        "       " __UA_ADDR " 1b + 4, 4b                \n" \
        "       " __UA_ADDR " 2b, 4b                    \n" \
        "       " __UA_ADDR " 2b + 4, 4b                \n" \
        "       .previous                               \n" \
        : "=r" (__gu_err), "=&r" (__gu_tmp) \
        : "0" (0), "r" (addr), "i" (-EFAULT)); \
        \
        (val) = (__typeof__(*(addr))) __gu_tmp; \
}

/*
 * Yuck.  We need two variants, one for 64-bit operation and one
 * for 32-bit mode and old iron.
 */
#ifdef CONFIG_32BIT
#define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm_ll32(ptr)
#endif
#ifdef CONFIG_64BIT
#define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm("usd", ptr)
#endif

#define __put_user_unaligned_nocheck(x, ptr, size) \
({ \
        __typeof__(*(ptr)) __pu_val; \
        int __pu_err = 0; \
        \
        __pu_val = (x); \
        switch (size) { \
        case 1: __put_user_asm("sb", ptr); break; \
        case 2: __put_user_unaligned_asm("ush", ptr); break; \
        case 4: __put_user_unaligned_asm("usw", ptr); break; \
        case 8: __PUT_USER_UNALIGNED_DW(ptr); break; \
        default: __put_user_unaligned_unknown(); break; \
        } \
        __pu_err; \
})

#define __put_user_unaligned_check(x, ptr, size) \
({ \
        __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
        __typeof__(*(ptr)) __pu_val = (x); \
        int __pu_err = -EFAULT; \
        \
        if (likely(access_ok(VERIFY_WRITE, __pu_addr, size))) { \
                switch (size) { \
                case 1: __put_user_asm("sb", __pu_addr); break; \
                case 2: __put_user_unaligned_asm("ush", __pu_addr); break; \
                case 4: __put_user_unaligned_asm("usw", __pu_addr); break; \
                case 8: __PUT_USER_UNALIGNED_DW(__pu_addr); break; \
                default: __put_user_unaligned_unknown(); break; \
                } \
        } \
        __pu_err; \
})

#define __put_user_unaligned_asm(insn, ptr) \
{ \
        __asm__ __volatile__( \
        "1:     " insn " %z2, %3        # __put_user_unaligned_asm\n" \
        "2:                                             \n" \
        "       .section .fixup,\"ax\"                  \n" \
        "3:     li %0, %4                               \n" \
        "       j 2b                                    \n" \
        "       .previous                               \n" \
        "       .section __ex_table,\"a\"               \n" \
        "       " __UA_ADDR " 1b, 3b                    \n" \
        "       .previous                               \n" \
        : "=r" (__pu_err) \
        : "0" (0), "Jr" (__pu_val), "o" (__m(ptr)), \
          "i" (-EFAULT)); \
}

#define __put_user_unaligned_asm_ll32(ptr) \
{ \
        __asm__ __volatile__( \
        "1:     sw %2, (%3)     # __put_user_unaligned_asm_ll32 \n" \
        "2:     sw %D2, 4(%3)                           \n" \
        "3:                                             \n" \
        "       .section .fixup,\"ax\"                  \n" \
        "4:     li %0, %4                               \n" \
        "       j 3b                                    \n" \
        "       .previous                               \n" \
        "       .section __ex_table,\"a\"               \n" \
        "       " __UA_ADDR " 1b, 4b                    \n" \
        "       " __UA_ADDR " 1b + 4, 4b                \n" \
        "       " __UA_ADDR " 2b, 4b                    \n" \
        "       " __UA_ADDR " 2b + 4, 4b                \n" \
        "       .previous" \
        : "=r" (__pu_err) \
        : "0" (0), "r" (__pu_val), "r" (ptr), \
          "i" (-EFAULT)); \
}

extern void __put_user_unaligned_unknown(void);

/*
 * We're generating jumps to subroutines which will be outside the range of
 * jump instructions.
 */
#ifdef MODULE
#define __MODULE_JAL(destination) \
        ".set\tnoat\n\t" \
        __UA_LA "\t$1, " #destination "\n\t" \
        "jalr\t$1\n\t" \
        ".set\tat\n\t"
#else
#define __MODULE_JAL(destination) \
        "jal\t" #destination "\n\t"
#endif
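
/*
 * Orientation sketch (assumption-level commentary, not new behaviour):
 * jal encodes only a 28-bit region-relative target, so module text loaded
 * far from the kernel's copy routines cannot reach them directly.  In
 * modules the macro therefore expands to roughly:
 *
 *	.set	noat
 *	la	$1, __copy_user		# (dla on 64-bit) full address in $at
 *	jalr	$1			# register call, no range limit
 *	.set	at
 */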

#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
#define DADDI_SCRATCH "$0"
#else
#define DADDI_SCRATCH "$3"
#endif

extern size_t __copy_user(void *__to, const void *__from, size_t __n);

#define __invoke_copy_to_user(to, from, n) \
({ \
        register void __user *__cu_to_r __asm__("$4"); \
        register const void *__cu_from_r __asm__("$5"); \
        register long __cu_len_r __asm__("$6"); \
        \
        __cu_to_r = (to); \
        __cu_from_r = (from); \
        __cu_len_r = (n); \
        __asm__ __volatile__( \
        __MODULE_JAL(__copy_user) \
        : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
        : \
        : "$8", "$9", "$10", "$11", "$12", "$15", "$24", "$31", \
          DADDI_SCRATCH, "memory"); \
        __cu_len_r; \
})

/*
 * __copy_to_user: - Copy a block of data into user space, with less checking.
 * @to: Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n: Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
#define __copy_to_user(to, from, n) \
({ \
        void __user *__cu_to; \
        const void *__cu_from; \
        long __cu_len; \
        \
        __cu_to = (to); \
        __cu_from = (from); \
        __cu_len = (n); \
        might_fault(); \
        __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len); \
        __cu_len; \
})
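
/*
 * Illustrative sketch (hypothetical `ubuf'/`kbuf'/`len'): the caller has
 * already validated the destination, so the unchecked variant is used.
 *
 *	if (__copy_to_user(ubuf, kbuf, len))
 *		return -EFAULT;		// some bytes were not copied
 */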

extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);

#define __copy_to_user_inatomic(to, from, n) \
({ \
        void __user *__cu_to; \
        const void *__cu_from; \
        long __cu_len; \
        \
        __cu_to = (to); \
        __cu_from = (from); \
        __cu_len = (n); \
        __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len); \
        __cu_len; \
})

#define __copy_from_user_inatomic(to, from, n) \
({ \
        void *__cu_to; \
        const void __user *__cu_from; \
        long __cu_len; \
        \
        __cu_to = (to); \
        __cu_from = (from); \
        __cu_len = (n); \
        __cu_len = __invoke_copy_from_user_inatomic(__cu_to, __cu_from, \
                                                    __cu_len); \
        __cu_len; \
})

/*
 * copy_to_user: - Copy a block of data into user space.
 * @to: Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n: Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from kernel space to user space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
#define copy_to_user(to, from, n) \
({ \
        void __user *__cu_to; \
        const void *__cu_from; \
        long __cu_len; \
        \
        __cu_to = (to); \
        __cu_from = (from); \
        __cu_len = (n); \
        if (access_ok(VERIFY_WRITE, __cu_to, __cu_len)) { \
                might_fault(); \
                __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, \
                                                 __cu_len); \
        } \
        __cu_len; \
})
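
/*
 * Illustrative sketch (hypothetical `ubuf'/`result'): note that the copy
 * routines return the number of bytes NOT copied, not an error code.
 *
 *	if (copy_to_user(ubuf, &result, sizeof(result)))
 *		return -EFAULT;
 */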

#define __invoke_copy_from_user(to, from, n) \
({ \
        register void *__cu_to_r __asm__("$4"); \
        register const void __user *__cu_from_r __asm__("$5"); \
        register long __cu_len_r __asm__("$6"); \
        \
        __cu_to_r = (to); \
        __cu_from_r = (from); \
        __cu_len_r = (n); \
        __asm__ __volatile__( \
        ".set\tnoreorder\n\t" \
        __MODULE_JAL(__copy_user) \
        ".set\tnoat\n\t" \
        __UA_ADDU "\t$1, %1, %2\n\t" \
        ".set\tat\n\t" \
        ".set\treorder" \
        : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
        : \
        : "$8", "$9", "$10", "$11", "$12", "$15", "$24", "$31", \
          DADDI_SCRATCH, "memory"); \
        __cu_len_r; \
})

#define __invoke_copy_from_user_inatomic(to, from, n) \
({ \
        register void *__cu_to_r __asm__("$4"); \
        register const void __user *__cu_from_r __asm__("$5"); \
        register long __cu_len_r __asm__("$6"); \
        \
        __cu_to_r = (to); \
        __cu_from_r = (from); \
        __cu_len_r = (n); \
        __asm__ __volatile__( \
        ".set\tnoreorder\n\t" \
        __MODULE_JAL(__copy_user_inatomic) \
        ".set\tnoat\n\t" \
        __UA_ADDU "\t$1, %1, %2\n\t" \
        ".set\tat\n\t" \
        ".set\treorder" \
        : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
        : \
        : "$8", "$9", "$10", "$11", "$12", "$15", "$24", "$31", \
          DADDI_SCRATCH, "memory"); \
        __cu_len_r; \
})

/*
 * __copy_from_user: - Copy a block of data from user space, with less checking.
 * @to: Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n: Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from user space to kernel space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
#define __copy_from_user(to, from, n) \
({ \
        void *__cu_to; \
        const void __user *__cu_from; \
        long __cu_len; \
        \
        __cu_to = (to); \
        __cu_from = (from); \
        __cu_len = (n); \
        might_fault(); \
        __cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \
                                           __cu_len); \
        __cu_len; \
})

/*
 * copy_from_user: - Copy a block of data from user space.
 * @to: Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n: Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from user space to kernel space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
#define copy_from_user(to, from, n) \
({ \
        void *__cu_to; \
        const void __user *__cu_from; \
        long __cu_len; \
        \
        __cu_to = (to); \
        __cu_from = (from); \
        __cu_len = (n); \
        if (access_ok(VERIFY_READ, __cu_from, __cu_len)) { \
                might_fault(); \
                __cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \
                                                   __cu_len); \
        } \
        __cu_len; \
})
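
/*
 * Illustrative sketch (hypothetical `struct some_args', `uarg'): pulling
 * a structure from user space; a short copy shows up as a nonzero return.
 *
 *	struct some_args karg;
 *
 *	if (copy_from_user(&karg, uarg, sizeof(karg)))
 *		return -EFAULT;
 */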

#define __copy_in_user(to, from, n) \
({ \
        void __user *__cu_to; \
        const void __user *__cu_from; \
        long __cu_len; \
        \
        __cu_to = (to); \
        __cu_from = (from); \
        __cu_len = (n); \
        might_fault(); \
        __cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \
                                           __cu_len); \
        __cu_len; \
})

#define copy_in_user(to, from, n) \
({ \
        void __user *__cu_to; \
        const void __user *__cu_from; \
        long __cu_len; \
        \
        __cu_to = (to); \
        __cu_from = (from); \
        __cu_len = (n); \
        if (likely(access_ok(VERIFY_READ, __cu_from, __cu_len) && \
                   access_ok(VERIFY_WRITE, __cu_to, __cu_len))) { \
                might_fault(); \
                __cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \
                                                   __cu_len); \
        } \
        __cu_len; \
})

/*
 * __clear_user: - Zero a block of memory in user space, with less checking.
 * @to: Destination address, in user space.
 * @n: Number of bytes to zero.
 *
 * Zero a block of memory in user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be cleared.
 * On success, this will be zero.
 */
static inline __kernel_size_t
__clear_user(void __user *addr, __kernel_size_t size)
{
        __kernel_size_t res;

        might_fault();
        __asm__ __volatile__(
                "move\t$4, %1\n\t"
                "move\t$5, $0\n\t"
                "move\t$6, %2\n\t"
                __MODULE_JAL(__bzero)
                "move\t%0, $6"
                : "=r" (res)
                : "r" (addr), "r" (size)
                : "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");

        return res;
}

#define clear_user(addr, n) \
({ \
        void __user *__cl_addr = (addr); \
        unsigned long __cl_size = (n); \
        \
        if (__cl_size && access_ok(VERIFY_WRITE, \
                                   __cl_addr, __cl_size)) \
                __cl_size = __clear_user(__cl_addr, __cl_size); \
        __cl_size; \
})
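
/*
 * Illustrative sketch (hypothetical `ubuf'/`used'/`len'): zeroing the
 * unwritten tail of a user buffer, e.g. after a partially filled read.
 *
 *	if (clear_user(ubuf + used, len - used))
 *		return -EFAULT;
 */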

/*
 * __strncpy_from_user: - Copy a NUL terminated string from userspace, with less checking.
 * @dst: Destination address, in kernel space.  This buffer must be at
 *       least @count bytes long.
 * @src: Source address, in user space.
 * @count: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from userspace to kernel space.
 * Caller must check the specified block with access_ok() before calling
 * this function.
 *
 * On success, returns the length of the string (not including the trailing
 * NUL).
 *
 * If access to userspace fails, returns -EFAULT (some data may have been
 * copied).
 *
 * If @count is smaller than the length of the string, copies @count bytes
 * and returns @count.
 */
static inline long
__strncpy_from_user(char *__to, const char __user *__from, long __len)
{
        long res;

        might_fault();
        __asm__ __volatile__(
                "move\t$4, %1\n\t"
                "move\t$5, %2\n\t"
                "move\t$6, %3\n\t"
                __MODULE_JAL(__strncpy_from_user_nocheck_asm)
                "move\t%0, $2"
                : "=r" (res)
                : "r" (__to), "r" (__from), "r" (__len)
                : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");

        return res;
}

/*
 * strncpy_from_user: - Copy a NUL terminated string from userspace.
 * @dst: Destination address, in kernel space.  This buffer must be at
 *       least @count bytes long.
 * @src: Source address, in user space.
 * @count: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from userspace to kernel space.
 *
 * On success, returns the length of the string (not including the trailing
 * NUL).
 *
 * If access to userspace fails, returns -EFAULT (some data may have been
 * copied).
 *
 * If @count is smaller than the length of the string, copies @count bytes
 * and returns @count.
 */
static inline long
strncpy_from_user(char *__to, const char __user *__from, long __len)
{
        long res;

        might_fault();
        __asm__ __volatile__(
                "move\t$4, %1\n\t"
                "move\t$5, %2\n\t"
                "move\t$6, %3\n\t"
                __MODULE_JAL(__strncpy_from_user_asm)
                "move\t%0, $2"
                : "=r" (res)
                : "r" (__to), "r" (__from), "r" (__len)
                : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");

        return res;
}
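
/*
 * Illustrative sketch (hypothetical `uname'): copying a user-supplied
 * name into a fixed kernel buffer, rejecting faults and over-long input.
 *
 *	char name[32];
 *	long len = strncpy_from_user(name, uname, sizeof(name));
 *
 *	if (len < 0)
 *		return len;			// -EFAULT
 *	if (len == sizeof(name))
 *		return -ENAMETOOLONG;		// no NUL within the buffer
 */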

/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
static inline long __strlen_user(const char __user *s)
{
        long res;

        might_fault();
        __asm__ __volatile__(
                "move\t$4, %1\n\t"
                __MODULE_JAL(__strlen_user_nocheck_asm)
                "move\t%0, $2"
                : "=r" (res)
                : "r" (s)
                : "$2", "$4", __UA_t0, "$31");

        return res;
}

/*
 * strlen_user: - Get the size of a string in user space.
 * @str: The string to measure.
 *
 * Context: User context only.  This function may sleep.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 *
 * If there is a limit on the length of a valid string, you may wish to
 * consider using strnlen_user() instead.
 */
static inline long strlen_user(const char __user *s)
{
        long res;

        might_fault();
        __asm__ __volatile__(
                "move\t$4, %1\n\t"
                __MODULE_JAL(__strlen_user_asm)
                "move\t%0, $2"
                : "=r" (res)
                : "r" (s)
                : "$2", "$4", __UA_t0, "$31");

        return res;
}

/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
static inline long __strnlen_user(const char __user *s, long n)
{
        long res;

        might_fault();
        __asm__ __volatile__(
                "move\t$4, %1\n\t"
                "move\t$5, %2\n\t"
                __MODULE_JAL(__strnlen_user_nocheck_asm)
                "move\t%0, $2"
                : "=r" (res)
                : "r" (s), "r" (n)
                : "$2", "$4", "$5", __UA_t0, "$31");

        return res;
}

/*
 * strnlen_user: - Get the size of a string in user space, with a limit.
 * @str: The string to measure.
 * @n: The maximum valid length.
 *
 * Context: User context only.  This function may sleep.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 * If the string is too long, returns a value greater than @n.
 */
static inline long strnlen_user(const char __user *s, long n)
{
        long res;

        might_fault();
        __asm__ __volatile__(
                "move\t$4, %1\n\t"
                "move\t$5, %2\n\t"
                __MODULE_JAL(__strnlen_user_asm)
                "move\t%0, $2"
                : "=r" (res)
                : "r" (s), "r" (n)
                : "$2", "$4", "$5", __UA_t0, "$31");

        return res;
}
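
/*
 * Illustrative sketch (hypothetical `ustr'; assumes a PATH_MAX-style
 * bound): sizing a user string before copying it.  A return of 0 means
 * the access faulted; the result counts the terminating NUL.
 *
 *	long size = strnlen_user(ustr, PATH_MAX);
 *
 *	if (!size)
 *		return -EFAULT;
 *	if (size > PATH_MAX)
 *		return -ENAMETOOLONG;
 */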

struct exception_table_entry
{
        unsigned long insn;
        unsigned long nextinsn;
};

extern int fixup_exception(struct pt_regs *regs);
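
/*
 * Orientation note (a sketch of the fixup flow, not new behaviour): each
 * "1b, 3b"-style __ex_table entry above records a potentially faulting
 * user access (insn) and its fixup stub (nextinsn).  When such an access
 * faults, the fault handler calls fixup_exception(), which searches
 * __ex_table for the faulting address and, on a hit, redirects execution
 * to the fixup stub so the macro returns -EFAULT instead of oopsing.
 */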

#endif /* _ASM_UACCESS_H */