/* twofish-i586-asm_32.S — Twofish block cipher, i586 assembly implementation */
/***************************************************************************
 *   Copyright (C) 2006 by Joachim Fritschi, <jfritschi@freenet.de>        *
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify  *
 *   it under the terms of the GNU General Public License as published by  *
 *   the Free Software Foundation; either version 2 of the License, or     *
 *   (at your option) any later version.                                   *
 *                                                                         *
 *   This program is distributed in the hope that it will be useful,       *
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         *
 *   GNU General Public License for more details.                          *
 *                                                                         *
 *   You should have received a copy of the GNU General Public License     *
 *   along with this program; if not, write to the                         *
 *   Free Software Foundation, Inc.,                                       *
 *   59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.             *
 ***************************************************************************/
  19. .file "twofish-i586-asm.S"
  20. .text
  21. #include <asm/asm-offsets.h>
  22. /* return address at 0 */
  23. #define in_blk 12 /* input byte array address parameter*/
  24. #define out_blk 8 /* output byte array address parameter*/
  25. #define tfm 4 /* Twofish context structure */
  26. #define a_offset 0
  27. #define b_offset 4
  28. #define c_offset 8
  29. #define d_offset 12
  30. /* Structure of the crypto context struct*/
  31. #define s0 0 /* S0 Array 256 Words each */
  32. #define s1 1024 /* S1 Array */
  33. #define s2 2048 /* S2 Array */
  34. #define s3 3072 /* S3 Array */
  35. #define w 4096 /* 8 whitening keys (word) */
  36. #define k 4128 /* key 1-32 ( word ) */
  37. /* define a few register aliases to allow macro substitution */
  38. #define R0D %eax
  39. #define R0B %al
  40. #define R0H %ah
  41. #define R1D %ebx
  42. #define R1B %bl
  43. #define R1H %bh
  44. #define R2D %ecx
  45. #define R2B %cl
  46. #define R2H %ch
  47. #define R3D %edx
  48. #define R3B %dl
  49. #define R3H %dh
  50. /* performs input whitening */
  51. #define input_whitening(src,context,offset)\
  52. xor w+offset(context), src;
  53. /* performs input whitening */
  54. #define output_whitening(src,context,offset)\
  55. xor w+16+offset(context), src;
  56. /*
  57. * a input register containing a (rotated 16)
  58. * b input register containing b
  59. * c input register containing c
  60. * d input register containing d (already rol $1)
  61. * operations on a and b are interleaved to increase performance
  62. */
  63. #define encrypt_round(a,b,c,d,round)\
  64. push d ## D;\
  65. movzx b ## B, %edi;\
  66. mov s1(%ebp,%edi,4),d ## D;\
  67. movzx a ## B, %edi;\
  68. mov s2(%ebp,%edi,4),%esi;\
  69. movzx b ## H, %edi;\
  70. ror $16, b ## D;\
  71. xor s2(%ebp,%edi,4),d ## D;\
  72. movzx a ## H, %edi;\
  73. ror $16, a ## D;\
  74. xor s3(%ebp,%edi,4),%esi;\
  75. movzx b ## B, %edi;\
  76. xor s3(%ebp,%edi,4),d ## D;\
  77. movzx a ## B, %edi;\
  78. xor (%ebp,%edi,4), %esi;\
  79. movzx b ## H, %edi;\
  80. ror $15, b ## D;\
  81. xor (%ebp,%edi,4), d ## D;\
  82. movzx a ## H, %edi;\
  83. xor s1(%ebp,%edi,4),%esi;\
  84. pop %edi;\
  85. add d ## D, %esi;\
  86. add %esi, d ## D;\
  87. add k+round(%ebp), %esi;\
  88. xor %esi, c ## D;\
  89. rol $15, c ## D;\
  90. add k+4+round(%ebp),d ## D;\
  91. xor %edi, d ## D;
  92. /*
  93. * a input register containing a (rotated 16)
  94. * b input register containing b
  95. * c input register containing c
  96. * d input register containing d (already rol $1)
  97. * operations on a and b are interleaved to increase performance
  98. * last round has different rotations for the output preparation
  99. */
  100. #define encrypt_last_round(a,b,c,d,round)\
  101. push d ## D;\
  102. movzx b ## B, %edi;\
  103. mov s1(%ebp,%edi,4),d ## D;\
  104. movzx a ## B, %edi;\
  105. mov s2(%ebp,%edi,4),%esi;\
  106. movzx b ## H, %edi;\
  107. ror $16, b ## D;\
  108. xor s2(%ebp,%edi,4),d ## D;\
  109. movzx a ## H, %edi;\
  110. ror $16, a ## D;\
  111. xor s3(%ebp,%edi,4),%esi;\
  112. movzx b ## B, %edi;\
  113. xor s3(%ebp,%edi,4),d ## D;\
  114. movzx a ## B, %edi;\
  115. xor (%ebp,%edi,4), %esi;\
  116. movzx b ## H, %edi;\
  117. ror $16, b ## D;\
  118. xor (%ebp,%edi,4), d ## D;\
  119. movzx a ## H, %edi;\
  120. xor s1(%ebp,%edi,4),%esi;\
  121. pop %edi;\
  122. add d ## D, %esi;\
  123. add %esi, d ## D;\
  124. add k+round(%ebp), %esi;\
  125. xor %esi, c ## D;\
  126. ror $1, c ## D;\
  127. add k+4+round(%ebp),d ## D;\
  128. xor %edi, d ## D;
  129. /*
  130. * a input register containing a
  131. * b input register containing b (rotated 16)
  132. * c input register containing c
  133. * d input register containing d (already rol $1)
  134. * operations on a and b are interleaved to increase performance
  135. */
  136. #define decrypt_round(a,b,c,d,round)\
  137. push c ## D;\
  138. movzx a ## B, %edi;\
  139. mov (%ebp,%edi,4), c ## D;\
  140. movzx b ## B, %edi;\
  141. mov s3(%ebp,%edi,4),%esi;\
  142. movzx a ## H, %edi;\
  143. ror $16, a ## D;\
  144. xor s1(%ebp,%edi,4),c ## D;\
  145. movzx b ## H, %edi;\
  146. ror $16, b ## D;\
  147. xor (%ebp,%edi,4), %esi;\
  148. movzx a ## B, %edi;\
  149. xor s2(%ebp,%edi,4),c ## D;\
  150. movzx b ## B, %edi;\
  151. xor s1(%ebp,%edi,4),%esi;\
  152. movzx a ## H, %edi;\
  153. ror $15, a ## D;\
  154. xor s3(%ebp,%edi,4),c ## D;\
  155. movzx b ## H, %edi;\
  156. xor s2(%ebp,%edi,4),%esi;\
  157. pop %edi;\
  158. add %esi, c ## D;\
  159. add c ## D, %esi;\
  160. add k+round(%ebp), c ## D;\
  161. xor %edi, c ## D;\
  162. add k+4+round(%ebp),%esi;\
  163. xor %esi, d ## D;\
  164. rol $15, d ## D;
  165. /*
  166. * a input register containing a
  167. * b input register containing b (rotated 16)
  168. * c input register containing c
  169. * d input register containing d (already rol $1)
  170. * operations on a and b are interleaved to increase performance
  171. * last round has different rotations for the output preparation
  172. */
  173. #define decrypt_last_round(a,b,c,d,round)\
  174. push c ## D;\
  175. movzx a ## B, %edi;\
  176. mov (%ebp,%edi,4), c ## D;\
  177. movzx b ## B, %edi;\
  178. mov s3(%ebp,%edi,4),%esi;\
  179. movzx a ## H, %edi;\
  180. ror $16, a ## D;\
  181. xor s1(%ebp,%edi,4),c ## D;\
  182. movzx b ## H, %edi;\
  183. ror $16, b ## D;\
  184. xor (%ebp,%edi,4), %esi;\
  185. movzx a ## B, %edi;\
  186. xor s2(%ebp,%edi,4),c ## D;\
  187. movzx b ## B, %edi;\
  188. xor s1(%ebp,%edi,4),%esi;\
  189. movzx a ## H, %edi;\
  190. ror $16, a ## D;\
  191. xor s3(%ebp,%edi,4),c ## D;\
  192. movzx b ## H, %edi;\
  193. xor s2(%ebp,%edi,4),%esi;\
  194. pop %edi;\
  195. add %esi, c ## D;\
  196. add c ## D, %esi;\
  197. add k+round(%ebp), c ## D;\
  198. xor %edi, c ## D;\
  199. add k+4+round(%ebp),%esi;\
  200. xor %esi, d ## D;\
  201. ror $1, d ## D;
  202. .align 4
  203. .global twofish_enc_blk
  204. .global twofish_dec_blk
  205. twofish_enc_blk:
  206. push %ebp /* save registers according to calling convention*/
  207. push %ebx
  208. push %esi
  209. push %edi
  210. mov tfm + 16(%esp), %ebp /* abuse the base pointer: set new base bointer to the crypto tfm */
  211. add $crypto_tfm_ctx_offset, %ebp /* ctx address */
  212. mov in_blk+16(%esp),%edi /* input address in edi */
  213. mov (%edi), %eax
  214. mov b_offset(%edi), %ebx
  215. mov c_offset(%edi), %ecx
  216. mov d_offset(%edi), %edx
  217. input_whitening(%eax,%ebp,a_offset)
  218. ror $16, %eax
  219. input_whitening(%ebx,%ebp,b_offset)
  220. input_whitening(%ecx,%ebp,c_offset)
  221. input_whitening(%edx,%ebp,d_offset)
  222. rol $1, %edx
  223. encrypt_round(R0,R1,R2,R3,0);
  224. encrypt_round(R2,R3,R0,R1,8);
  225. encrypt_round(R0,R1,R2,R3,2*8);
  226. encrypt_round(R2,R3,R0,R1,3*8);
  227. encrypt_round(R0,R1,R2,R3,4*8);
  228. encrypt_round(R2,R3,R0,R1,5*8);
  229. encrypt_round(R0,R1,R2,R3,6*8);
  230. encrypt_round(R2,R3,R0,R1,7*8);
  231. encrypt_round(R0,R1,R2,R3,8*8);
  232. encrypt_round(R2,R3,R0,R1,9*8);
  233. encrypt_round(R0,R1,R2,R3,10*8);
  234. encrypt_round(R2,R3,R0,R1,11*8);
  235. encrypt_round(R0,R1,R2,R3,12*8);
  236. encrypt_round(R2,R3,R0,R1,13*8);
  237. encrypt_round(R0,R1,R2,R3,14*8);
  238. encrypt_last_round(R2,R3,R0,R1,15*8);
  239. output_whitening(%eax,%ebp,c_offset)
  240. output_whitening(%ebx,%ebp,d_offset)
  241. output_whitening(%ecx,%ebp,a_offset)
  242. output_whitening(%edx,%ebp,b_offset)
  243. mov out_blk+16(%esp),%edi;
  244. mov %eax, c_offset(%edi)
  245. mov %ebx, d_offset(%edi)
  246. mov %ecx, (%edi)
  247. mov %edx, b_offset(%edi)
  248. pop %edi
  249. pop %esi
  250. pop %ebx
  251. pop %ebp
  252. mov $1, %eax
  253. ret
  254. twofish_dec_blk:
  255. push %ebp /* save registers according to calling convention*/
  256. push %ebx
  257. push %esi
  258. push %edi
  259. mov tfm + 16(%esp), %ebp /* abuse the base pointer: set new base bointer to the crypto tfm */
  260. add $crypto_tfm_ctx_offset, %ebp /* ctx address */
  261. mov in_blk+16(%esp),%edi /* input address in edi */
  262. mov (%edi), %eax
  263. mov b_offset(%edi), %ebx
  264. mov c_offset(%edi), %ecx
  265. mov d_offset(%edi), %edx
  266. output_whitening(%eax,%ebp,a_offset)
  267. output_whitening(%ebx,%ebp,b_offset)
  268. ror $16, %ebx
  269. output_whitening(%ecx,%ebp,c_offset)
  270. output_whitening(%edx,%ebp,d_offset)
  271. rol $1, %ecx
  272. decrypt_round(R0,R1,R2,R3,15*8);
  273. decrypt_round(R2,R3,R0,R1,14*8);
  274. decrypt_round(R0,R1,R2,R3,13*8);
  275. decrypt_round(R2,R3,R0,R1,12*8);
  276. decrypt_round(R0,R1,R2,R3,11*8);
  277. decrypt_round(R2,R3,R0,R1,10*8);
  278. decrypt_round(R0,R1,R2,R3,9*8);
  279. decrypt_round(R2,R3,R0,R1,8*8);
  280. decrypt_round(R0,R1,R2,R3,7*8);
  281. decrypt_round(R2,R3,R0,R1,6*8);
  282. decrypt_round(R0,R1,R2,R3,5*8);
  283. decrypt_round(R2,R3,R0,R1,4*8);
  284. decrypt_round(R0,R1,R2,R3,3*8);
  285. decrypt_round(R2,R3,R0,R1,2*8);
  286. decrypt_round(R0,R1,R2,R3,1*8);
  287. decrypt_last_round(R2,R3,R0,R1,0);
  288. input_whitening(%eax,%ebp,c_offset)
  289. input_whitening(%ebx,%ebp,d_offset)
  290. input_whitening(%ecx,%ebp,a_offset)
  291. input_whitening(%edx,%ebp,b_offset)
  292. mov out_blk+16(%esp),%edi;
  293. mov %eax, c_offset(%edi)
  294. mov %ebx, d_offset(%edi)
  295. mov %ecx, (%edi)
  296. mov %edx, b_offset(%edi)
  297. pop %edi
  298. pop %esi
  299. pop %ebx
  300. pop %ebp
  301. mov $1, %eax
  302. ret