/*
 * Copyright (C) 2013 ARM Ltd.
 * Copyright (C) 2013 Linaro.
 *
 * This code is based on glibc cortex strings work originally authored by Linaro
 * and re-licensed under GPLv2 for the Linux kernel. The original code can
 * be found @
 *
 * http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/
 * files/head:/src/aarch64/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/cache.h>

/*
 * Move a buffer from src to dst (alignment handled by the hardware).
 * If dest <= src, call memcpy, otherwise copy in reverse order.
 *
 * Parameters:
 *	x0 - dest
 *	x1 - src
 *	x2 - n
 * Returns:
 *	x0 - dest
 */
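/*
 * A rough C-level sketch of the strategy used below, for orientation only
 * (an illustrative assumption, not part of the build: the real fallback is
 * a tail-call to __memcpy, and the bulk path moves 64-byte blocks rather
 * than single bytes):
 *
 *	void *memmove(void *dest, const void *src, size_t n)
 *	{
 *		char *d = dest;
 *		const char *s = src;
 *
 *		if (d < s || d >= s + n)
 *			return memcpy(dest, src, n);
 *		while (n--)
 *			d[n] = s[n];
 *		return dest;
 *	}
 */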
dstin	.req	x0
src	.req	x1
count	.req	x2
tmp1	.req	x3
tmp1w	.req	w3
tmp2	.req	x4
tmp2w	.req	w4
tmp3	.req	x5
tmp3w	.req	w5
dst	.req	x6

A_l	.req	x7
A_h	.req	x8
B_l	.req	x9
B_h	.req	x10
C_l	.req	x11
C_h	.req	x12
D_l	.req	x13
D_h	.req	x14
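
/*
 * The A/B/C/D register pairs together hold one 64-byte block while the
 * bulk copy loop below is in flight.
 */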
	.weak memmove
ENTRY(__memmove)
ENTRY(memmove)
	cmp	dstin, src
	b.lo	__memcpy
	add	tmp1, src, count
	cmp	dstin, tmp1
	b.hs	__memcpy		/* No overlap.  */

	add	dst, dstin, count
	add	src, src, count
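	/*
	 * dst and src now point one byte past the end of their buffers; the
	 * copy below works backwards using negative offsets and pre-decrement
	 * writeback, so overlapping bytes are read before they are written.
	 */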
	cmp	count, #16
	b.lo	.Ltail15		/* likely an unaligned access */

	ands	tmp2, src, #15		/* Bytes to reach alignment.  */
	b.eq	.LSrcAligned
	sub	count, count, tmp2
	/*
	 * Copy the unaligned head first so that src becomes aligned. The
	 * cost of these extra instructions is acceptable, and it means the
	 * accesses that follow operate on aligned addresses.
	 */
	tbz	tmp2, #0, 1f
	ldrb	tmp1w, [src, #-1]!
	strb	tmp1w, [dst, #-1]!
1:
	tbz	tmp2, #1, 2f
	ldrh	tmp1w, [src, #-2]!
	strh	tmp1w, [dst, #-2]!
2:
	tbz	tmp2, #2, 3f
	ldr	tmp1w, [src, #-4]!
	str	tmp1w, [dst, #-4]!
3:
	tbz	tmp2, #3, .LSrcAligned
	ldr	tmp1, [src, #-8]!
	str	tmp1, [dst, #-8]!

.LSrcAligned:
	cmp	count, #64
	b.ge	.Lcpy_over64
	/*
	 * Deal with small copies quickly by dropping straight into the
	 * exit block.
	 */
.Ltail63:
	/*
	 * Copy up to 48 bytes of data. At this point we only need the
	 * bottom 6 bits of count to be accurate.
	 */
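	/*
	 * Dispatch on count & 0x30: 0x30 falls through all three 16-byte
	 * copies, 0x20 enters at 1: for two, 0x10 enters at 2: for one,
	 * and zero skips straight to the sub-16-byte tail.
	 */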
	ands	tmp1, count, #0x30
	b.eq	.Ltail15
	cmp	tmp1w, #0x20
	b.eq	1f
	b.lt	2f
	ldp	A_l, A_h, [src, #-16]!
	stp	A_l, A_h, [dst, #-16]!
1:
	ldp	A_l, A_h, [src, #-16]!
	stp	A_l, A_h, [dst, #-16]!
2:
	ldp	A_l, A_h, [src, #-16]!
	stp	A_l, A_h, [dst, #-16]!
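
	/*
	 * Copy the remaining 0-15 bytes: 8, 4, 2 and finally 1 byte, as
	 * selected by the low four bits of count.
	 */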
.Ltail15:
	tbz	count, #3, 1f
	ldr	tmp1, [src, #-8]!
	str	tmp1, [dst, #-8]!
1:
	tbz	count, #2, 2f
	ldr	tmp1w, [src, #-4]!
	str	tmp1w, [dst, #-4]!
2:
	tbz	count, #1, 3f
	ldrh	tmp1w, [src, #-2]!
	strh	tmp1w, [dst, #-2]!
3:
	tbz	count, #0, .Lexitfunc
	ldrb	tmp1w, [src, #-1]
	strb	tmp1w, [dst, #-1]

.Lexitfunc:
	ret

.Lcpy_over64:
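	/*
	 * Bias count by -128 up front: the bulk loop then only needs a
	 * subs/b.ge per 64-byte block, and a negative result here means
	 * 64..127 bytes remain, which are handled without the loop.
	 */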
	subs	count, count, #128
	b.ge	.Lcpy_body_large
	/*
	 * Less than 128 bytes to copy, so handle 64 bytes here and then jump
	 * to the tail.
	 */
	ldp	A_l, A_h, [src, #-16]
	stp	A_l, A_h, [dst, #-16]
	ldp	B_l, B_h, [src, #-32]
	ldp	C_l, C_h, [src, #-48]
	stp	B_l, B_h, [dst, #-32]
	stp	C_l, C_h, [dst, #-48]
	ldp	D_l, D_h, [src, #-64]!
	stp	D_l, D_h, [dst, #-64]!
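
	/* Anything left over (0-63 bytes) shares the .Ltail63 exit path. */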
	tst	count, #0x3f
	b.ne	.Ltail63
	ret

	/*
	 * Critical loop. Start at a new cache line boundary. Assuming
	 * 64 bytes per line this ensures the entire loop is in one line.
	 */
	.p2align	L1_CACHE_SHIFT
.Lcpy_body_large:
	/* pre-load 64 bytes of data. */
	ldp	A_l, A_h, [src, #-16]
	ldp	B_l, B_h, [src, #-32]
	ldp	C_l, C_h, [src, #-48]
	ldp	D_l, D_h, [src, #-64]!
1:
	/*
	 * Interleave the loads of the next 64-byte block with the stores
	 * of the 64 bytes loaded in the previous iteration.
	 */
	stp	A_l, A_h, [dst, #-16]
	ldp	A_l, A_h, [src, #-16]
	stp	B_l, B_h, [dst, #-32]
	ldp	B_l, B_h, [src, #-32]
	stp	C_l, C_h, [dst, #-48]
	ldp	C_l, C_h, [src, #-48]
	stp	D_l, D_h, [dst, #-64]!
	ldp	D_l, D_h, [src, #-64]!
	subs	count, count, #64
	b.ge	1b
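	/* Drain the pipeline: store the 64 bytes loaded in the final iteration. */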
	stp	A_l, A_h, [dst, #-16]
	stp	B_l, B_h, [dst, #-32]
	stp	C_l, C_h, [dst, #-48]
	stp	D_l, D_h, [dst, #-64]!

	tst	count, #0x3f
	b.ne	.Ltail63
	ret
ENDPIPROC(memmove)
ENDPROC(__memmove)