// md5block_amd64p32.s

// Original source:
// http://www.zorinaq.com/papers/md5-amd64.html
// http://www.zorinaq.com/papers/md5-amd64.tar.bz2
//
// Translated from Perl generating GNU assembly into
// #defines generating 6a assembly by the Go Authors.
//
// Restrictions to make code safe for Native Client:
// replace BP with R11, reloaded before use at return.
// replace R15 with R11.

#include "textflag.h"

// MD5 optimized for AMD64.
//
// Author: Marc Bevand <bevand_m (at) epita.fr>
// Licence: I hereby disclaim the copyright on this code and place it
// in the public domain.
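
// The Go prototype this implements (declared alongside this file in the
// md5 package) is:
//
//	func block(dig *digest, p []byte)
//
// dig holds the four 32-bit chaining values a, b, c, d; p is hashed in
// 64-byte blocks. On amd64p32 pointers are 32 bits wide, hence the MOVL
// loads of dig and p from the frame below.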
TEXT ·block(SB),NOSPLIT,$0-32
	MOVL	dig+0(FP), R11
	MOVL	p+4(FP), SI
	MOVL	p_len+8(FP), DX
	SHRQ	$6, DX
	SHLQ	$6, DX		// DX = len(p) &^ 63: whole 64-byte blocks only

	LEAQ	(SI)(DX*1), DI	// DI = one past the last full block
	MOVL	(0*4)(R11), AX	// load chaining values a, b, c, d
	MOVL	(1*4)(R11), BX
	MOVL	(2*4)(R11), CX
	MOVL	(3*4)(R11), DX

	CMPQ	SI, DI
	JEQ	end		// no full block to process
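
// Each loop iteration consumes one 64-byte block. The incoming state is
// saved in R12-R14 and R11 so it can be added back in at the end of the
// block (the feed-forward step of the compression function).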
loop:
	MOVL	AX, R12
	MOVL	BX, R13
	MOVL	CX, R14
	MOVL	DX, R11

	MOVL	(0*4)(SI), R8	// preload x[0], round 1's first message word
	MOVL	DX, R9
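
// ROUND1 is one step of MD5 round 1, using the boolean function
// F(b,c,d) = (b AND c) OR (NOT b AND d), computed in the equivalent form
// d XOR (b AND (c XOR d)). Roughly, in Go terms (a reading aid, not part
// of the build):
//
//	a += (d ^ (b & (c ^ d))) + x + K	// x is the word already in R8
//	a = a<<s | a>>(32-s)			// ROLL $shift
//	a += b
//
// and the message word x[index] for the *next* step is prefetched into R8.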
#define ROUND1(a, b, c, d, index, const, shift) \
	XORL	c, R9; \
	LEAL	const(a)(R8*1), a; \
	ANDL	b, R9; \
	XORL	d, R9; \
	MOVL	(index*4)(SI), R8; \
	ADDL	R9, a; \
	ROLL	$shift, a; \
	MOVL	c, R9; \
	ADDL	b, a
	ROUND1(AX,BX,CX,DX, 1,0xd76aa478, 7);
	ROUND1(DX,AX,BX,CX, 2,0xe8c7b756,12);
	ROUND1(CX,DX,AX,BX, 3,0x242070db,17);
	ROUND1(BX,CX,DX,AX, 4,0xc1bdceee,22);
	ROUND1(AX,BX,CX,DX, 5,0xf57c0faf, 7);
	ROUND1(DX,AX,BX,CX, 6,0x4787c62a,12);
	ROUND1(CX,DX,AX,BX, 7,0xa8304613,17);
	ROUND1(BX,CX,DX,AX, 8,0xfd469501,22);
	ROUND1(AX,BX,CX,DX, 9,0x698098d8, 7);
	ROUND1(DX,AX,BX,CX,10,0x8b44f7af,12);
	ROUND1(CX,DX,AX,BX,11,0xffff5bb1,17);
	ROUND1(BX,CX,DX,AX,12,0x895cd7be,22);
	ROUND1(AX,BX,CX,DX,13,0x6b901122, 7);
	ROUND1(DX,AX,BX,CX,14,0xfd987193,12);
	ROUND1(CX,DX,AX,BX,15,0xa679438e,17);
	ROUND1(BX,CX,DX,AX, 0,0x49b40821,22);
	MOVL	(1*4)(SI), R8	// round 2 starts with x[1]
	MOVL	DX, R9
	MOVL	DX, R10
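
// ROUND2 is one step of MD5 round 2, using
// G(b,c,d) = (b AND d) OR (c AND NOT d). R9 and R10 both enter holding d
// (carried in from the previous step); the macro builds NOT d AND c in R9
// and d AND b in R10, then ORs them. As in ROUND1, the current word is
// already in R8 and x[index] is prefetched for the next step.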
#define ROUND2(a, b, c, d, index, const, shift) \
	NOTL	R9; \
	LEAL	const(a)(R8*1), a; \
	ANDL	b, R10; \
	ANDL	c, R9; \
	MOVL	(index*4)(SI), R8; \
	ORL	R9, R10; \
	MOVL	c, R9; \
	ADDL	R10, a; \
	MOVL	c, R10; \
	ROLL	$shift, a; \
	ADDL	b, a
	ROUND2(AX,BX,CX,DX, 6,0xf61e2562, 5);
	ROUND2(DX,AX,BX,CX,11,0xc040b340, 9);
	ROUND2(CX,DX,AX,BX, 0,0x265e5a51,14);
	ROUND2(BX,CX,DX,AX, 5,0xe9b6c7aa,20);
	ROUND2(AX,BX,CX,DX,10,0xd62f105d, 5);
	ROUND2(DX,AX,BX,CX,15, 0x2441453, 9);
	ROUND2(CX,DX,AX,BX, 4,0xd8a1e681,14);
	ROUND2(BX,CX,DX,AX, 9,0xe7d3fbc8,20);
	ROUND2(AX,BX,CX,DX,14,0x21e1cde6, 5);
	ROUND2(DX,AX,BX,CX, 3,0xc33707d6, 9);
	ROUND2(CX,DX,AX,BX, 8,0xf4d50d87,14);
	ROUND2(BX,CX,DX,AX,13,0x455a14ed,20);
	ROUND2(AX,BX,CX,DX, 2,0xa9e3e905, 5);
	ROUND2(DX,AX,BX,CX, 7,0xfcefa3f8, 9);
	ROUND2(CX,DX,AX,BX,12,0x676f02d9,14);
	ROUND2(BX,CX,DX,AX, 0,0x8d2a4c8a,20);
	MOVL	(5*4)(SI), R8	// round 3 starts with x[5]
	MOVL	CX, R9
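
// ROUND3 is one step of MD5 round 3, using H(b,c,d) = b XOR c XOR d.
// R9 enters holding c, so two XORs complete H; it leaves holding b,
// which is the next step's c.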
#define ROUND3(a, b, c, d, index, const, shift) \
	LEAL	const(a)(R8*1), a; \
	MOVL	(index*4)(SI), R8; \
	XORL	d, R9; \
	XORL	b, R9; \
	ADDL	R9, a; \
	ROLL	$shift, a; \
	MOVL	b, R9; \
	ADDL	b, a
	ROUND3(AX,BX,CX,DX, 8,0xfffa3942, 4);
	ROUND3(DX,AX,BX,CX,11,0x8771f681,11);
	ROUND3(CX,DX,AX,BX,14,0x6d9d6122,16);
	ROUND3(BX,CX,DX,AX, 1,0xfde5380c,23);
	ROUND3(AX,BX,CX,DX, 4,0xa4beea44, 4);
	ROUND3(DX,AX,BX,CX, 7,0x4bdecfa9,11);
	ROUND3(CX,DX,AX,BX,10,0xf6bb4b60,16);
	ROUND3(BX,CX,DX,AX,13,0xbebfbc70,23);
	ROUND3(AX,BX,CX,DX, 0,0x289b7ec6, 4);
	ROUND3(DX,AX,BX,CX, 3,0xeaa127fa,11);
	ROUND3(CX,DX,AX,BX, 6,0xd4ef3085,16);
	ROUND3(BX,CX,DX,AX, 9, 0x4881d05,23);
	ROUND3(AX,BX,CX,DX,12,0xd9d4d039, 4);
	ROUND3(DX,AX,BX,CX,15,0xe6db99e5,11);
	ROUND3(CX,DX,AX,BX, 2,0x1fa27cf8,16);
	ROUND3(BX,CX,DX,AX, 0,0xc4ac5665,23);
	MOVL	(0*4)(SI), R8	// round 4 starts with x[0]
	MOVL	$0xffffffff, R9
	XORL	DX, R9		// R9 = NOT d
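
// ROUND4 is one step of MD5 round 4, using I(b,c,d) = c XOR (b OR NOT d).
// R9 enters holding NOT d; before the step finishes it is rebuilt as
// NOT c, which is the next step's NOT d.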
#define ROUND4(a, b, c, d, index, const, shift) \
	LEAL	const(a)(R8*1), a; \
	ORL	b, R9; \
	XORL	c, R9; \
	ADDL	R9, a; \
	MOVL	(index*4)(SI), R8; \
	MOVL	$0xffffffff, R9; \
	ROLL	$shift, a; \
	XORL	c, R9; \
	ADDL	b, a
	ROUND4(AX,BX,CX,DX, 7,0xf4292244, 6);
	ROUND4(DX,AX,BX,CX,14,0x432aff97,10);
	ROUND4(CX,DX,AX,BX, 5,0xab9423a7,15);
	ROUND4(BX,CX,DX,AX,12,0xfc93a039,21);
	ROUND4(AX,BX,CX,DX, 3,0x655b59c3, 6);
	ROUND4(DX,AX,BX,CX,10,0x8f0ccc92,10);
	ROUND4(CX,DX,AX,BX, 1,0xffeff47d,15);
	ROUND4(BX,CX,DX,AX, 8,0x85845dd1,21);
	ROUND4(AX,BX,CX,DX,15,0x6fa87e4f, 6);
	ROUND4(DX,AX,BX,CX, 6,0xfe2ce6e0,10);
	ROUND4(CX,DX,AX,BX,13,0xa3014314,15);
	ROUND4(BX,CX,DX,AX, 4,0x4e0811a1,21);
	ROUND4(AX,BX,CX,DX,11,0xf7537e82, 6);
	ROUND4(DX,AX,BX,CX, 2,0xbd3af235,10);
	ROUND4(CX,DX,AX,BX, 9,0x2ad7d2bb,15);
	ROUND4(BX,CX,DX,AX, 0,0xeb86d391,21);
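
// Add the block's starting state back in (the feed-forward), then advance
// SI to the next 64-byte block.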
	ADDL	R12, AX
	ADDL	R13, BX
	ADDL	R14, CX
	ADDL	R11, DX

	ADDQ	$64, SI
	CMPQ	SI, DI
	JB	loop
end:
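	// R11 was used as scratch above (it stands in for BP and R15 under
	// the Native Client restrictions), so reload dig before storing the
	// updated state.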
	MOVL	dig+0(FP), R11
	MOVL	AX, (0*4)(R11)
	MOVL	BX, (1*4)(R11)
	MOVL	CX, (2*4)(R11)
	MOVL	DX, (3*4)(R11)
	RET