/* copy_template.S */
/*
 * Copyright (C) 2013 ARM Ltd.
 * Copyright (C) 2013 Linaro.
 *
 * This code is based on glibc cortex strings work originally authored by Linaro
 * and re-licensed under GPLv2 for the Linux kernel. The original code can
 * be found @
 *
 * http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/
 * files/head:/src/aarch64/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
  24. /*
  25. * Copy a buffer from src to dest (alignment handled by the hardware)
  26. *
  27. * Parameters:
  28. * x0 - dest
  29. * x1 - src
  30. * x2 - n
  31. * Returns:
  32. * x0 - dest
  33. */
  34. dstin .req x0
  35. src .req x1
  36. count .req x2
  37. tmp1 .req x3
  38. tmp1w .req w3
  39. tmp2 .req x4
  40. tmp2w .req w4
  41. dst .req x6
  42. A_l .req x7
  43. A_h .req x8
  44. B_l .req x9
  45. B_h .req x10
  46. C_l .req x11
  47. C_h .req x12
  48. D_l .req x13
  49. D_h .req x14
  50. mov dst, dstin
  51. cmp count, #16
  52. /*When memory length is less than 16, the accessed are not aligned.*/
  53. b.lo .Ltiny15
  54. neg tmp2, src
  55. ands tmp2, tmp2, #15/* Bytes to reach alignment. */
  56. b.eq .LSrcAligned
  57. sub count, count, tmp2
  58. /*
  59. * Copy the leading memory data from src to dst in an increasing
  60. * address order.By this way,the risk of overwritting the source
  61. * memory data is eliminated when the distance between src and
  62. * dst is less than 16. The memory accesses here are alignment.
  63. */
  64. tbz tmp2, #0, 1f
  65. ldrb1 tmp1w, src, #1
  66. strb1 tmp1w, dst, #1
  67. 1:
  68. tbz tmp2, #1, 2f
  69. ldrh1 tmp1w, src, #2
  70. strh1 tmp1w, dst, #2
  71. 2:
  72. tbz tmp2, #2, 3f
  73. ldr1 tmp1w, src, #4
  74. str1 tmp1w, dst, #4
  75. 3:
  76. tbz tmp2, #3, .LSrcAligned
  77. ldr1 tmp1, src, #8
  78. str1 tmp1, dst, #8
  79. .LSrcAligned:
  80. cmp count, #64
  81. b.ge .Lcpy_over64
  82. /*
  83. * Deal with small copies quickly by dropping straight into the
  84. * exit block.
  85. */
  86. .Ltail63:
  87. /*
  88. * Copy up to 48 bytes of data. At this point we only need the
  89. * bottom 6 bits of count to be accurate.
  90. */
  91. ands tmp1, count, #0x30
  92. b.eq .Ltiny15
  93. cmp tmp1w, #0x20
  94. b.eq 1f
  95. b.lt 2f
  96. ldp1 A_l, A_h, src, #16
  97. stp1 A_l, A_h, dst, #16
  98. 1:
  99. ldp1 A_l, A_h, src, #16
  100. stp1 A_l, A_h, dst, #16
  101. 2:
  102. ldp1 A_l, A_h, src, #16
  103. stp1 A_l, A_h, dst, #16
  104. .Ltiny15:
  105. /*
  106. * Prefer to break one ldp/stp into several load/store to access
  107. * memory in an increasing address order,rather than to load/store 16
  108. * bytes from (src-16) to (dst-16) and to backward the src to aligned
  109. * address,which way is used in original cortex memcpy. If keeping
  110. * the original memcpy process here, memmove need to satisfy the
  111. * precondition that src address is at least 16 bytes bigger than dst
  112. * address,otherwise some source data will be overwritten when memove
  113. * call memcpy directly. To make memmove simpler and decouple the
  114. * memcpy's dependency on memmove, withdrew the original process.
  115. */
  116. tbz count, #3, 1f
  117. ldr1 tmp1, src, #8
  118. str1 tmp1, dst, #8
  119. 1:
  120. tbz count, #2, 2f
  121. ldr1 tmp1w, src, #4
  122. str1 tmp1w, dst, #4
  123. 2:
  124. tbz count, #1, 3f
  125. ldrh1 tmp1w, src, #2
  126. strh1 tmp1w, dst, #2
  127. 3:
  128. tbz count, #0, .Lexitfunc
  129. ldrb1 tmp1w, src, #1
  130. strb1 tmp1w, dst, #1
  131. b .Lexitfunc
  132. .Lcpy_over64:
  133. subs count, count, #128
  134. b.ge .Lcpy_body_large
  135. /*
  136. * Less than 128 bytes to copy, so handle 64 here and then jump
  137. * to the tail.
  138. */
  139. ldp1 A_l, A_h, src, #16
  140. stp1 A_l, A_h, dst, #16
  141. ldp1 B_l, B_h, src, #16
  142. ldp1 C_l, C_h, src, #16
  143. stp1 B_l, B_h, dst, #16
  144. stp1 C_l, C_h, dst, #16
  145. ldp1 D_l, D_h, src, #16
  146. stp1 D_l, D_h, dst, #16
  147. tst count, #0x3f
  148. b.ne .Ltail63
  149. b .Lexitfunc
  150. /*
  151. * Critical loop. Start at a new cache line boundary. Assuming
  152. * 64 bytes per line this ensures the entire loop is in one line.
  153. */
  154. .p2align L1_CACHE_SHIFT
  155. .Lcpy_body_large:
  156. /* pre-get 64 bytes data. */
  157. ldp1 A_l, A_h, src, #16
  158. ldp1 B_l, B_h, src, #16
  159. ldp1 C_l, C_h, src, #16
  160. ldp1 D_l, D_h, src, #16
  161. 1:
  162. /*
  163. * interlace the load of next 64 bytes data block with store of the last
  164. * loaded 64 bytes data.
  165. */
  166. stp1 A_l, A_h, dst, #16
  167. ldp1 A_l, A_h, src, #16
  168. stp1 B_l, B_h, dst, #16
  169. ldp1 B_l, B_h, src, #16
  170. stp1 C_l, C_h, dst, #16
  171. ldp1 C_l, C_h, src, #16
  172. stp1 D_l, D_h, dst, #16
  173. ldp1 D_l, D_h, src, #16
  174. subs count, count, #64
  175. b.ge 1b
  176. stp1 A_l, A_h, dst, #16
  177. stp1 B_l, B_h, dst, #16
  178. stp1 C_l, C_h, dst, #16
  179. stp1 D_l, D_h, dst, #16
  180. tst count, #0x3f
  181. b.ne .Ltail63
  182. .Lexitfunc: