/* Copyright 2002 Andi Kleen */

#include <linux/linkage.h>
#include <asm/cpufeature.h>
#include <asm/dwarf2.h>
#include <asm/alternative-asm.h>

/*
 * memcpy - Copy a memory block.
 *
 * Input:
 *  rdi destination
 *  rsi source
 *  rdx count
 *
 * Output:
 * rax original destination
 */
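
/*
 * For orientation, a rough C prototype of the entry points below; the
 * register mapping above is just the standard x86-64 calling convention:
 *
 *	void *memcpy(void *dst, const void *src, size_t n);
 *
 * i.e. rdi = dst, rsi = src, rdx = n, and dst is returned in rax.
 */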

/*
 * memcpy_c() - fast string ops (REP MOVSQ) based variant.
 *
 * This gets patched over the unrolled variant (below) via the
 * alternative instructions framework:
 */
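
/*
 * Roughly equivalent C, for illustration only (dst/src viewed both as
 * qword and as byte buffers):
 *
 *	void *memcpy_c(void *dst, const void *src, size_t n)
 *	{
 *		unsigned long *d = dst;
 *		const unsigned long *s = src;
 *		size_t i;
 *
 *		for (i = 0; i < (n >> 3); i++)
 *			d[i] = s[i];
 *		for (i = n & ~7UL; i < n; i++)
 *			((char *)dst)[i] = ((const char *)src)[i];
 *		return dst;
 *	}
 *
 * The first loop is the rep movsq (n/8 qwords), the second loop is the
 * trailing rep movsb (n & 7 bytes).
 */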
	.section .altinstr_replacement, "ax", @progbits
.Lmemcpy_c:
	movq %rdi, %rax
	movq %rdx, %rcx
	shrq $3, %rcx
	andl $7, %edx
	rep movsq
	movl %edx, %ecx
	rep movsb
	ret
.Lmemcpy_e:
	.previous

/*
 * memcpy_c_e() - enhanced fast string memcpy. This is faster and
 * simpler than memcpy_c. Use memcpy_c_e when possible.
 *
 * This gets patched over the unrolled variant (below) via the
 * alternative instructions framework:
 */
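
/*
 * Roughly equivalent C, for illustration only: with ERMS the whole count
 * is handed to a single rep movsb and the CPU's string-copy microcode
 * chooses the copy strategy internally:
 *
 *	void *memcpy_c_e(void *dst, const void *src, size_t n)
 *	{
 *		char *d = dst;
 *		const char *s = src;
 *
 *		while (n--)
 *			*d++ = *s++;
 *		return dst;
 *	}
 */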
	.section .altinstr_replacement, "ax", @progbits
.Lmemcpy_c_e:
	movq %rdi, %rax
	movq %rdx, %rcx
	rep movsb
	ret
.Lmemcpy_e_e:
	.previous

ENTRY(__memcpy)
ENTRY(memcpy)
	CFI_STARTPROC
	movq %rdi, %rax

	cmpq $0x20, %rdx
	jb .Lhandle_tail

	/*
	 * We check whether memory false dependence could occur,
	 * then jump to corresponding copy mode.
	 */
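	/*
	 * Roughly, in C (a sketch): only the low byte of each pointer is
	 * compared, which is cheap and good enough as a heuristic:
	 *
	 *	if ((signed char)(unsigned long)src < (signed char)(unsigned long)dst)
	 *		goto copy_backward;
	 *
	 * When the low bits of src sit below those of dst, a forward copy
	 * can make each iteration's loads falsely appear to depend on the
	 * previous iteration's stores (address aliasing), so the backward
	 * path is taken instead.
	 */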
	cmp %dil, %sil
	jl .Lcopy_backward
	subq $0x20, %rdx
.Lcopy_forward_loop:
	subq $0x20, %rdx

	/*
	 * Move in blocks of 4x8 bytes:
	 */
	movq 0*8(%rsi), %r8
	movq 1*8(%rsi), %r9
	movq 2*8(%rsi), %r10
	movq 3*8(%rsi), %r11
	leaq 4*8(%rsi), %rsi

	movq %r8, 0*8(%rdi)
	movq %r9, 1*8(%rdi)
	movq %r10, 2*8(%rdi)
	movq %r11, 3*8(%rdi)
	leaq 4*8(%rdi), %rdi

	jae .Lcopy_forward_loop
	addl $0x20, %edx
	jmp .Lhandle_tail

.Lcopy_backward:
	/*
	 * Calculate copy position to tail.
	 */
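	/*
	 * In rough C terms (a sketch; the 32-byte move stands for the four
	 * qword loads and stores of the loop below):
	 *
	 *	s += n;
	 *	d += n;
	 *	while (n >= 32) {
	 *		s -= 32;
	 *		d -= 32;
	 *		n -= 32;
	 *		move 32 bytes from s to d;
	 *	}
	 *	s -= n;
	 *	d -= n;
	 *
	 * i.e. the bulk is copied from the tail downwards, then the
	 * pointers are reset so the remaining head bytes fall through to
	 * .Lhandle_tail.
	 */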
	addq %rdx, %rsi
	addq %rdx, %rdi
	subq $0x20, %rdx
	/*
	 * At most 3 ALU operations in one cycle,
	 * so append NOPs in the same 16-byte chunk.
	 */
	.p2align 4
.Lcopy_backward_loop:
	subq $0x20, %rdx
	movq -1*8(%rsi), %r8
	movq -2*8(%rsi), %r9
	movq -3*8(%rsi), %r10
	movq -4*8(%rsi), %r11
	leaq -4*8(%rsi), %rsi
	movq %r8, -1*8(%rdi)
	movq %r9, -2*8(%rdi)
	movq %r10, -3*8(%rdi)
	movq %r11, -4*8(%rdi)
	leaq -4*8(%rdi), %rdi
	jae .Lcopy_backward_loop

	/*
	 * Calculate copy position to head.
	 */
	addl $0x20, %edx
	subq %rdx, %rsi
	subq %rdx, %rdi
.Lhandle_tail:
	cmpl $16, %edx
	jb .Lless_16bytes

	/*
	 * Move data from 16 bytes to 31 bytes.
	 */
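	/*
	 * Overlapping-copy trick, roughly in C (a sketch; d/s are the char
	 * views of rdi/rsi, and n is 16..31 here):
	 *
	 *	memcpy(d,          s,          16);
	 *	memcpy(d + n - 16, s + n - 16, 16);
	 *
	 * The two 16-byte moves overlap in the middle, so all n bytes are
	 * covered without a loop; the code below does this with four qword
	 * loads followed by four qword stores.
	 */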
	movq 0*8(%rsi), %r8
	movq 1*8(%rsi), %r9
	movq -2*8(%rsi, %rdx), %r10
	movq -1*8(%rsi, %rdx), %r11
	movq %r8, 0*8(%rdi)
	movq %r9, 1*8(%rdi)
	movq %r10, -2*8(%rdi, %rdx)
	movq %r11, -1*8(%rdi, %rdx)
	retq
	.p2align 4
.Lless_16bytes:
	cmpl $8, %edx
	jb .Lless_8bytes
	/*
	 * Move data from 8 bytes to 15 bytes.
	 */
	movq 0*8(%rsi), %r8
	movq -1*8(%rsi, %rdx), %r9
	movq %r8, 0*8(%rdi)
	movq %r9, -1*8(%rdi, %rdx)
	retq
	.p2align 4
.Lless_8bytes:
	cmpl $4, %edx
	jb .Lless_3bytes
	/*
	 * Move data from 4 bytes to 7 bytes.
	 */
	movl (%rsi), %ecx
	movl -4(%rsi, %rdx), %r8d
	movl %ecx, (%rdi)
	movl %r8d, -4(%rdi, %rdx)
	retq
	.p2align 4
.Lless_3bytes:
	subl $1, %edx
	jb .Lend
	/*
	 * Move data from 1 byte to 3 bytes.
	 */
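	/*
	 * After the subtraction above %rdx holds n - 1, and the movzbl
	 * below does not touch the flags, so the jz still tests that
	 * result. Roughly, in C (a sketch):
	 *
	 *	d[0] = s[0];
	 *	if (n > 1) {
	 *		d[1]     = s[1];
	 *		d[n - 1] = s[n - 1];
	 *	}
	 */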
	movzbl (%rsi), %ecx
	jz .Lstore_1byte
	movzbq 1(%rsi), %r8
	movzbq (%rsi, %rdx), %r9
	movb %r8b, 1(%rdi)
	movb %r9b, (%rdi, %rdx)

.Lstore_1byte:
	movb %cl, (%rdi)
.Lend:
	retq
	CFI_ENDPROC
ENDPROC(memcpy)
ENDPROC(__memcpy)

/*
 * Some CPUs are adding the enhanced REP MOVSB/STOSB (ERMS) feature.
 * If the feature is supported, memcpy_c_e() is the first choice.
 * If enhanced rep movsb copy is not available, use the fast string copy
 * memcpy_c() when possible: it is faster and its code is simpler than
 * the original memcpy().
 * Otherwise, the original memcpy() is used.
 * In the .altinstructions section, the ERMS feature is placed after the
 * REP_GOOD feature to implement the right patch order.
 *
 * Replace only the beginning: memcpy is used while the alternatives are
 * applied, so it would be silly for it to overwrite itself with NOPs -
 * a reboot would be the only outcome...
 */
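
/*
 * Each altinstruction_entry below lists, in order: the instruction to
 * patch (memcpy), the replacement code, the CPU feature bit that selects
 * it, and the original/replacement lengths. Here both lengths are given
 * as the size of the replacement, so only the start of memcpy is
 * rewritten; see <asm/alternative-asm.h> for the macro's definition.
 */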
	.section .altinstructions, "a"
	altinstruction_entry memcpy,.Lmemcpy_c,X86_FEATURE_REP_GOOD,\
			     .Lmemcpy_e-.Lmemcpy_c,.Lmemcpy_e-.Lmemcpy_c
	altinstruction_entry memcpy,.Lmemcpy_c_e,X86_FEATURE_ERMS, \
			     .Lmemcpy_e_e-.Lmemcpy_c_e,.Lmemcpy_e_e-.Lmemcpy_c_e
	.previous