// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

#include "textflag.h"

// SHA1 block routine. See sha1block.go for Go equivalent.
//
// There are 80 rounds of 4 types:
// - rounds 0-15 are type 1 and load data (ROUND1 macro).
// - rounds 16-19 are type 1 and do not load data (ROUND1x macro).
// - rounds 20-39 are type 2 and do not load data (ROUND2 macro).
// - rounds 40-59 are type 3 and do not load data (ROUND3 macro).
// - rounds 60-79 are type 4 and do not load data (ROUND4 macro).
//
// Each round loads or shuffles the data, then computes a per-round
// function of b, c, d, and then mixes the result into and rotates the
// five registers a, b, c, d, e holding the intermediate results.
//
// The register rotation is implemented by rotating the arguments to
// the round macros instead of by explicit move instructions.
//
// amd64p32 version.
// To ensure safety for Native Client, avoids use of BP and R15
// as well as two-register addressing modes.
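//
// For orientation, a rough Go-style sketch of one type-1 round as the
// macros below perform it (the names w, rol32, beUint32 and the exact
// step split are illustrative, not lifted from sha1block.go):
//
//	w[i] = beUint32(p[i*4:])                     // LOAD
//	f := d ^ (b & (c ^ d))                       // FUNC1
//	t := rol32(a, 5) + f + e + w[i] + 0x5A827999 // MIX
//	a, b, c, d, e = t, a, rol32(b, 30), c, d     // rotate registers
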
#define LOAD(index) \
	MOVL	(index*4)(SI), R10; \
	BSWAPL	R10; \
	MOVL	R10, (index*4)(SP)

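// SHUFFLE expands the message schedule in place, treating the 16 words
// on the stack as a ring buffer: (index)&0xf equals (index-16)&0xf, so
// this is the standard SHA-1 expansion
// w[i] = rol(w[i-3] ^ w[i-8] ^ w[i-14] ^ w[i-16], 1).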
#define SHUFFLE(index) \
	MOVL	(((index)&0xf)*4)(SP), R10; \
	XORL	(((index-3)&0xf)*4)(SP), R10; \
	XORL	(((index-8)&0xf)*4)(SP), R10; \
	XORL	(((index-14)&0xf)*4)(SP), R10; \
	ROLL	$1, R10; \
	MOVL	R10, (((index)&0xf)*4)(SP)

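// FUNC1 is the rounds 0-19 "choose" function Ch(b, c, d) =
// (b AND c) OR (NOT b AND d), computed here as d XOR (b AND (c XOR d)).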
#define FUNC1(a, b, c, d, e) \
	MOVL	d, R9; \
	XORL	c, R9; \
	ANDL	b, R9; \
	XORL	d, R9

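// FUNC2 is the "parity" function b XOR c XOR d, used for rounds 20-39
// and reused as FUNC4 for rounds 60-79.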
#define FUNC2(a, b, c, d, e) \
	MOVL	b, R9; \
	XORL	c, R9; \
	XORL	d, R9

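// FUNC3 is the rounds 40-59 "majority" function Maj(b, c, d), computed
// here as (b AND c) OR ((b OR c) AND d).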
#define FUNC3(a, b, c, d, e) \
	MOVL	b, R8; \
	ORL	c, R8; \
	ANDL	d, R8; \
	MOVL	b, R9; \
	ANDL	c, R9; \
	ORL	R8, R9

#define FUNC4 FUNC2

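// MIX folds the round function result (R9) and the data word (R10) into
// e and rotates b: e += rol(a, 5) + R9 + w + const; b = rol(b, 30).
// The two-register LEAL is presumably fine under the NaCl rules above
// because it only computes an address and never touches memory.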
#define MIX(a, b, c, d, e, const) \
	ROLL	$30, b; \
	ADDL	R9, e; \
	MOVL	a, R8; \
	ROLL	$5, R8; \
	LEAL	const(e)(R10*1), e; \
	ADDL	R8, e

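// Each ROUND macro chains a data step (LOAD or SHUFFLE), the round
// function, and MIX with the standard SHA-1 constant for its group of
// 20 rounds.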
#define ROUND1(a, b, c, d, e, index) \
	LOAD(index); \
	FUNC1(a, b, c, d, e); \
	MIX(a, b, c, d, e, 0x5A827999)

#define ROUND1x(a, b, c, d, e, index) \
	SHUFFLE(index); \
	FUNC1(a, b, c, d, e); \
	MIX(a, b, c, d, e, 0x5A827999)

#define ROUND2(a, b, c, d, e, index) \
	SHUFFLE(index); \
	FUNC2(a, b, c, d, e); \
	MIX(a, b, c, d, e, 0x6ED9EBA1)

#define ROUND3(a, b, c, d, e, index) \
	SHUFFLE(index); \
	FUNC3(a, b, c, d, e); \
	MIX(a, b, c, d, e, 0x8F1BBCDC)

#define ROUND4(a, b, c, d, e, index) \
	SHUFFLE(index); \
	FUNC4(a, b, c, d, e); \
	MIX(a, b, c, d, e, 0xCA62C1D6)

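// Declared on the Go side as
//
//	func block(dig *digest, p []byte)
//
// On amd64p32 pointers are 4 bytes, so dig, p_base and p_len sit at
// offsets 0, 4 and 8 from FP; the 64-byte frame holds the 16-word
// message schedule. p_len is rounded down to a whole number of 64-byte
// blocks, and DI marks the end of the last full block.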
TEXT ·block(SB),NOSPLIT,$64-32
	MOVL	dig+0(FP), R14
	MOVL	p_base+4(FP), SI
	MOVL	p_len+8(FP), DX
	SHRQ	$6, DX
	SHLQ	$6, DX

	LEAQ	(SI)(DX*1), DI
	MOVL	(0*4)(R14), AX
	MOVL	(1*4)(R14), BX
	MOVL	(2*4)(R14), CX
	MOVL	(3*4)(R14), DX
	MOVL	(4*4)(R14), R13

	CMPQ	SI, DI
	JEQ	end

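// Each iteration of loop consumes one 64-byte block. The five state
// words live in AX, BX, CX, DX and R13; R13 is spelled BP below only
// to keep the textual diff from sha1block_amd64.s small, since NaCl
// forbids use of the real BP.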
loop:
#define BP R13 /* keep diff from sha1block_amd64.s small */
	ROUND1(AX, BX, CX, DX, BP, 0)
	ROUND1(BP, AX, BX, CX, DX, 1)
	ROUND1(DX, BP, AX, BX, CX, 2)
	ROUND1(CX, DX, BP, AX, BX, 3)
	ROUND1(BX, CX, DX, BP, AX, 4)
	ROUND1(AX, BX, CX, DX, BP, 5)
	ROUND1(BP, AX, BX, CX, DX, 6)
	ROUND1(DX, BP, AX, BX, CX, 7)
	ROUND1(CX, DX, BP, AX, BX, 8)
	ROUND1(BX, CX, DX, BP, AX, 9)
	ROUND1(AX, BX, CX, DX, BP, 10)
	ROUND1(BP, AX, BX, CX, DX, 11)
	ROUND1(DX, BP, AX, BX, CX, 12)
	ROUND1(CX, DX, BP, AX, BX, 13)
	ROUND1(BX, CX, DX, BP, AX, 14)
	ROUND1(AX, BX, CX, DX, BP, 15)

	ROUND1x(BP, AX, BX, CX, DX, 16)
	ROUND1x(DX, BP, AX, BX, CX, 17)
	ROUND1x(CX, DX, BP, AX, BX, 18)
	ROUND1x(BX, CX, DX, BP, AX, 19)

	ROUND2(AX, BX, CX, DX, BP, 20)
	ROUND2(BP, AX, BX, CX, DX, 21)
	ROUND2(DX, BP, AX, BX, CX, 22)
	ROUND2(CX, DX, BP, AX, BX, 23)
	ROUND2(BX, CX, DX, BP, AX, 24)
	ROUND2(AX, BX, CX, DX, BP, 25)
	ROUND2(BP, AX, BX, CX, DX, 26)
	ROUND2(DX, BP, AX, BX, CX, 27)
	ROUND2(CX, DX, BP, AX, BX, 28)
	ROUND2(BX, CX, DX, BP, AX, 29)
	ROUND2(AX, BX, CX, DX, BP, 30)
	ROUND2(BP, AX, BX, CX, DX, 31)
	ROUND2(DX, BP, AX, BX, CX, 32)
	ROUND2(CX, DX, BP, AX, BX, 33)
	ROUND2(BX, CX, DX, BP, AX, 34)
	ROUND2(AX, BX, CX, DX, BP, 35)
	ROUND2(BP, AX, BX, CX, DX, 36)
	ROUND2(DX, BP, AX, BX, CX, 37)
	ROUND2(CX, DX, BP, AX, BX, 38)
	ROUND2(BX, CX, DX, BP, AX, 39)

	ROUND3(AX, BX, CX, DX, BP, 40)
	ROUND3(BP, AX, BX, CX, DX, 41)
	ROUND3(DX, BP, AX, BX, CX, 42)
	ROUND3(CX, DX, BP, AX, BX, 43)
	ROUND3(BX, CX, DX, BP, AX, 44)
	ROUND3(AX, BX, CX, DX, BP, 45)
	ROUND3(BP, AX, BX, CX, DX, 46)
	ROUND3(DX, BP, AX, BX, CX, 47)
	ROUND3(CX, DX, BP, AX, BX, 48)
	ROUND3(BX, CX, DX, BP, AX, 49)
	ROUND3(AX, BX, CX, DX, BP, 50)
	ROUND3(BP, AX, BX, CX, DX, 51)
	ROUND3(DX, BP, AX, BX, CX, 52)
	ROUND3(CX, DX, BP, AX, BX, 53)
	ROUND3(BX, CX, DX, BP, AX, 54)
	ROUND3(AX, BX, CX, DX, BP, 55)
	ROUND3(BP, AX, BX, CX, DX, 56)
	ROUND3(DX, BP, AX, BX, CX, 57)
	ROUND3(CX, DX, BP, AX, BX, 58)
	ROUND3(BX, CX, DX, BP, AX, 59)

	ROUND4(AX, BX, CX, DX, BP, 60)
	ROUND4(BP, AX, BX, CX, DX, 61)
	ROUND4(DX, BP, AX, BX, CX, 62)
	ROUND4(CX, DX, BP, AX, BX, 63)
	ROUND4(BX, CX, DX, BP, AX, 64)
	ROUND4(AX, BX, CX, DX, BP, 65)
	ROUND4(BP, AX, BX, CX, DX, 66)
	ROUND4(DX, BP, AX, BX, CX, 67)
	ROUND4(CX, DX, BP, AX, BX, 68)
	ROUND4(BX, CX, DX, BP, AX, 69)
	ROUND4(AX, BX, CX, DX, BP, 70)
	ROUND4(BP, AX, BX, CX, DX, 71)
	ROUND4(DX, BP, AX, BX, CX, 72)
	ROUND4(CX, DX, BP, AX, BX, 73)
	ROUND4(BX, CX, DX, BP, AX, 74)
	ROUND4(AX, BX, CX, DX, BP, 75)
	ROUND4(BP, AX, BX, CX, DX, 76)
	ROUND4(DX, BP, AX, BX, CX, 77)
	ROUND4(CX, DX, BP, AX, BX, 78)
	ROUND4(BX, CX, DX, BP, AX, 79)
#undef BP

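	// Feed this block's result back into the digest: h[i] += {a, b, c, d, e}.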
	ADDL	(0*4)(R14), AX
	ADDL	(1*4)(R14), BX
	ADDL	(2*4)(R14), CX
	ADDL	(3*4)(R14), DX
	ADDL	(4*4)(R14), R13

	MOVL	AX, (0*4)(R14)
	MOVL	BX, (1*4)(R14)
	MOVL	CX, (2*4)(R14)
	MOVL	DX, (3*4)(R14)
	MOVL	R13, (4*4)(R14)

	ADDQ	$64, SI
	CMPQ	SI, DI
	JB	loop

end:
	RET