/* usercopy_64.S */
/*
 * Copyright 2011 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */
#include <linux/linkage.h>
#include <asm/errno.h>
#include <asm/cache.h>
#include <arch/chip.h>

/* Access user memory, but use MMU to avoid propagating kernel exceptions. */
/*
 * Shared fault handlers for the __get_user_N / __put_user_N routines
 * below.  Their __ex_table entries point here, so a faulting user
 * access resumes in one of these stubs instead of oopsing the kernel.
 */
	.pushsection .fixup,"ax"
get_user_fault:
	/* Failure: r1 = -EFAULT, and zero r0 (the would-be value). */
	{ movei r1, -EFAULT; move r0, zero }
	jrp lr
	ENDPROC(get_user_fault)
put_user_fault:
	/* Failure: r0 = -EFAULT; return in the same bundle. */
	{ movei r0, -EFAULT; jrp lr }
	ENDPROC(put_user_fault)
	.popsection
/*
 * __get_user_N functions take a pointer in r0, and return 0 in r1
 * on success, with the value in r0; or else -EFAULT in r1.
 *
 * The load at local label "1:" is the only instruction that can fault;
 * the __ex_table entry pairs it with get_user_fault (above), which
 * rewrites r0/r1 for the error return.
 */
#define __get_user_N(bytes, LOAD) \
	STD_ENTRY(__get_user_##bytes); \
1:	{ LOAD r0, r0; move r1, zero }; \
	jrp lr; \
	STD_ENDPROC(__get_user_##bytes); \
	.pushsection __ex_table,"a"; \
	.quad 1b, get_user_fault; \
	.popsection

/* One entry point per access size: 1, 2, 4, and 8 bytes. */
__get_user_N(1, ld1u)
__get_user_N(2, ld2u)
__get_user_N(4, ld4u)
__get_user_N(8, ld)
/*
 * __put_user_N functions take a value in r0 and a pointer in r1,
 * and return 0 in r0 on success or -EFAULT on failure.
 *
 * The store at local label "1:" is the only instruction that can fault;
 * the __ex_table entry pairs it with put_user_fault (above).
 */
#define __put_user_N(bytes, STORE) \
	STD_ENTRY(__put_user_##bytes); \
1:	{ STORE r1, r0; move r0, zero }; \
	jrp lr; \
	STD_ENDPROC(__put_user_##bytes); \
	.pushsection __ex_table,"a"; \
	.quad 1b, put_user_fault; \
	.popsection

/* One entry point per access size: 1, 2, 4, and 8 bytes. */
__put_user_N(1, st1)
__put_user_N(2, st2)
__put_user_N(4, st4)
__put_user_N(8, st)
/*
 * strnlen_user_asm takes the pointer in r0, and the length bound in r1.
 * It returns the length, including the terminating NUL, or zero on exception.
 * If length is greater than the bound, returns one plus the bound.
 *
 * Register use: r0 = scan cursor, r1 = remaining bound,
 * r3 = start - 1 (so end - r3 counts the NUL), r4 = current byte.
 */
STD_ENTRY(strnlen_user_asm)
	{ beqz r1, 2f; addi r3, r0, -1 }	/* bias down to include NUL */
1:	{ ld1u r4, r0; addi r1, r1, -1 }	/* user load; may fault (see __ex_table) */
	beqz r4, 2f				/* found the NUL terminator */
	{ bnezt r1, 1b; addi r0, r0, 1 }	/* advance while bound remains */
2:	{ sub r0, r0, r3; jrp lr }		/* length = cursor - (start - 1) */
	STD_ENDPROC(strnlen_user_asm)
	.pushsection .fixup,"ax"
strnlen_user_fault:
	/* Faulted reading user memory: report length zero. */
	{ move r0, zero; jrp lr }
	ENDPROC(strnlen_user_fault)
	.section __ex_table,"a"
	.quad 1b, strnlen_user_fault
	.popsection
/*
 * strncpy_from_user_asm takes the kernel target pointer in r0,
 * the userspace source pointer in r1, and the length bound (including
 * the trailing NUL) in r2. On success, it returns the string length
 * (not including the trailing NUL), or -EFAULT on failure.
 *
 * Register use: r0 = dst cursor, r1 = src cursor, r2 = remaining bound,
 * r3 = original dst (for the final length computation), r4 = current byte.
 */
STD_ENTRY(strncpy_from_user_asm)
	{ beqz r2, 2f; move r3, r0 }		/* zero bound: return 0 */
1:	{ ld1u r4, r1; addi r1, r1, 1; addi r2, r2, -1 }  /* user load; may fault */
	{ st1 r0, r4; addi r0, r0, 1 }		/* store byte (NUL included) */
	beqz r2, 2f				/* bound exhausted: count all bytes */
	bnezt r4, 1b				/* keep copying until NUL stored */
	addi r0, r0, -1				/* don't count the trailing NUL */
2:	{ sub r0, r0, r3; jrp lr }		/* length = dst cursor - dst start */
	STD_ENDPROC(strncpy_from_user_asm)
	.pushsection .fixup,"ax"
strncpy_from_user_fault:
	/* Faulted reading user memory: report -EFAULT. */
	{ movei r0, -EFAULT; jrp lr }
	ENDPROC(strncpy_from_user_fault)
	.section __ex_table,"a"
	.quad 1b, strncpy_from_user_fault
	.popsection
/*
 * clear_user_asm takes the user target address in r0 and the
 * number of bytes to zero in r1.
 * It returns the number of uncopiable bytes (hopefully zero) in r0.
 * Note that we don't use a separate .fixup section here since we fall
 * through into the "fixup" code as the last straight-line bundle anyway.
 *
 * A store that faults resumes at the matching "2:" label, where r1
 * still holds the count of bytes not yet cleared.
 */
STD_ENTRY(clear_user_asm)
	{ beqz r1, 2f; or r2, r0, r1 }
	andi r2, r2, 7			/* are both address and length 8-aligned? */
	beqzt r2, .Lclear_aligned_user_asm
	/* Unaligned path: clear one byte per iteration. */
1:	{ st1 r0, zero; addi r0, r0, 1; addi r1, r1, -1 }
	bnezt r1, 1b
2:	{ move r0, r1; jrp lr }		/* return bytes remaining (0 if done) */
	.pushsection __ex_table,"a"
	.quad 1b, 2b
	.popsection

	/* Aligned path: clear eight bytes per iteration. */
.Lclear_aligned_user_asm:
1:	{ st r0, zero; addi r0, r0, 8; addi r1, r1, -8 }
	bnezt r1, 1b
2:	{ move r0, r1; jrp lr }		/* return bytes remaining (0 if done) */
	STD_ENDPROC(clear_user_asm)
	.pushsection __ex_table,"a"
	.quad 1b, 2b
	.popsection
/*
 * flush_user_asm takes the user target address in r0 and the
 * number of bytes to flush in r1.
 * It returns the number of unflushable bytes (hopefully zero) in r0.
 *
 * The [r0, r0+r1) range is widened to whole L2 cache lines: start is
 * rounded down and end rounded up with the -L2_CACHE_BYTES mask in r2,
 * then r1 becomes the byte count of lines still to flush.  A faulting
 * flush resumes at "2:", returning that remaining count.
 */
STD_ENTRY(flush_user_asm)
	beqz r1, 2f				/* nothing to do for zero length */
	{ movei r2, L2_CACHE_BYTES; add r1, r0, r1 }	/* r1 = end address */
	{ sub r2, zero, r2; addi r1, r1, L2_CACHE_BYTES-1 }  /* r2 = line mask */
	{ and r0, r0, r2; and r1, r1, r2 }	/* align start down, end up */
	{ sub r1, r1, r0 }			/* r1 = bytes left to flush */
1:	{ flush r0; addi r1, r1, -CHIP_FLUSH_STRIDE() }	/* may fault */
	{ addi r0, r0, CHIP_FLUSH_STRIDE(); bnezt r1, 1b }
2:	{ move r0, r1; jrp lr }			/* return unflushed byte count */
	STD_ENDPROC(flush_user_asm)
	.pushsection __ex_table,"a"
	.quad 1b, 2b
	.popsection
/*
 * inv_user_asm takes the user target address in r0 and the
 * number of bytes to invalidate in r1.
 * It returns the number of not inv'able bytes (hopefully zero) in r0.
 *
 * Same line-alignment scheme as flush_user_asm above: round the range
 * out to L2 cache lines, then invalidate one stride per iteration.
 * A faulting inv resumes at "2:", returning the remaining count.
 */
STD_ENTRY(inv_user_asm)
	beqz r1, 2f				/* nothing to do for zero length */
	{ movei r2, L2_CACHE_BYTES; add r1, r0, r1 }	/* r1 = end address */
	{ sub r2, zero, r2; addi r1, r1, L2_CACHE_BYTES-1 }  /* r2 = line mask */
	{ and r0, r0, r2; and r1, r1, r2 }	/* align start down, end up */
	{ sub r1, r1, r0 }			/* r1 = bytes left to invalidate */
1:	{ inv r0; addi r1, r1, -CHIP_INV_STRIDE() }	/* may fault */
	{ addi r0, r0, CHIP_INV_STRIDE(); bnezt r1, 1b }
2:	{ move r0, r1; jrp lr }			/* return remaining byte count */
	STD_ENDPROC(inv_user_asm)
	.pushsection __ex_table,"a"
	.quad 1b, 2b
	.popsection
/*
 * finv_user_asm takes the user target address in r0 and the
 * number of bytes to flush-invalidate in r1.
 * It returns the number of not finv'able bytes (hopefully zero) in r0.
 *
 * Same line-alignment scheme as flush_user_asm above: round the range
 * out to L2 cache lines, then flush-and-invalidate one stride per
 * iteration.  A faulting finv resumes at "2:", returning the
 * remaining count.
 */
STD_ENTRY(finv_user_asm)
	beqz r1, 2f				/* nothing to do for zero length */
	{ movei r2, L2_CACHE_BYTES; add r1, r0, r1 }	/* r1 = end address */
	{ sub r2, zero, r2; addi r1, r1, L2_CACHE_BYTES-1 }  /* r2 = line mask */
	{ and r0, r0, r2; and r1, r1, r2 }	/* align start down, end up */
	{ sub r1, r1, r0 }			/* r1 = bytes left to finv */
1:	{ finv r0; addi r1, r1, -CHIP_FINV_STRIDE() }	/* may fault */
	{ addi r0, r0, CHIP_FINV_STRIDE(); bnezt r1, 1b }
2:	{ move r0, r1; jrp lr }			/* return remaining byte count */
	STD_ENDPROC(finv_user_asm)
	.pushsection __ex_table,"a"
	.quad 1b, 2b
	.popsection