/*
 * Cast6 Cipher 8-way parallel algorithm (AVX/x86_64)
 *
 * Copyright (C) 2012 Johannes Goetzfried
 *     <Johannes.Goetzfried@informatik.stud.uni-erlangen.de>
 *
 * Copyright © 2012-2013 Jussi Kivilinna <jussi.kivilinna@iki.fi>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
 * USA
 *
 */

#include <linux/linkage.h>
#include <asm/frame.h>
#include "glue_helper-asm-avx.S"

.file "cast6-avx-x86_64-asm_64.S"

.extern cast_s1
.extern cast_s2
.extern cast_s3
.extern cast_s4

/* structure of crypto context */
#define km 0
#define kr (12*4*4)

/* s-boxes */
#define s1 cast_s1
#define s2 cast_s2
#define s3 cast_s3
#define s4 cast_s4

/**********************************************************************
  8-way AVX cast6
 **********************************************************************/
#define CTX %rdi

#define RA1 %xmm0
#define RB1 %xmm1
#define RC1 %xmm2
#define RD1 %xmm3

#define RA2 %xmm4
#define RB2 %xmm5
#define RC2 %xmm6
#define RD2 %xmm7

#define RX %xmm8

#define RKM %xmm9
#define RKR %xmm10
#define RKRF %xmm11
#define RKRR %xmm12
#define R32 %xmm13
#define R1ST %xmm14

#define RTMP %xmm15

#define RID1 %rbp
#define RID1d %ebp
#define RID2 %rsi
#define RID2d %esi

#define RGI1 %rdx
#define RGI1bl %dl
#define RGI1bh %dh
#define RGI2 %rcx
#define RGI2bl %cl
#define RGI2bh %ch

#define RGI3 %rax
#define RGI3bl %al
#define RGI3bh %ah
#define RGI4 %rbx
#define RGI4bl %bl
#define RGI4bh %bh

#define RFS1 %r8
#define RFS1d %r8d
#define RFS2 %r9
#define RFS2d %r9d
#define RFS3 %r10
#define RFS3d %r10d
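
/*
 * lookup_32bit() runs one 32-bit value held in a general-purpose
 * register through the four CAST s-boxes, one byte per lookup, and
 * accumulates the result in dst with the round-type combine ops
 * op1/op2/op3.  interleave_op (shr_next or dummy) shifts the source
 * register down so the upper 32-bit half is ready for the next call.
 */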
#define lookup_32bit(src, dst, op1, op2, op3, interleave_op, il_reg) \
        movzbl src ## bh, RID1d; \
        movzbl src ## bl, RID2d; \
        shrq $16, src; \
        movl s1(, RID1, 4), dst ## d; \
        op1 s2(, RID2, 4), dst ## d; \
        movzbl src ## bh, RID1d; \
        movzbl src ## bl, RID2d; \
        interleave_op(il_reg); \
        op2 s3(, RID1, 4), dst ## d; \
        op3 s4(, RID2, 4), dst ## d;

#define dummy(d) /* do nothing */

#define shr_next(reg) \
        shrq $16, reg;
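
/*
 * F_head(): combine the data words with the masking key (op0 is vpaddd,
 * vpxor or vpsubd depending on round type), rotate left by the 5-bit
 * rotation key (RKRF/RKRR hold the left/right shift counts, since AVX
 * has no variable rotate), then move the two 64-bit halves of the
 * result into general-purpose registers for the s-box lookups.
 */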
#define F_head(a, x, gi1, gi2, op0) \
        op0 a, RKM, x; \
        vpslld RKRF, x, RTMP; \
        vpsrld RKRR, x, x; \
        vpor RTMP, x, x; \
        \
        vmovq x, gi1; \
        vpextrq $1, x, gi2;
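
/*
 * F_tail(): perform the s-box lookups for all four 32-bit lanes (two
 * lanes per general-purpose register) and reassemble the four results
 * into x.
 */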
#define F_tail(a, x, gi1, gi2, op1, op2, op3) \
        lookup_32bit(##gi1, RFS1, op1, op2, op3, shr_next, ##gi1); \
        lookup_32bit(##gi2, RFS3, op1, op2, op3, shr_next, ##gi2); \
        \
        lookup_32bit(##gi1, RFS2, op1, op2, op3, dummy, none); \
        shlq $32, RFS2; \
        orq RFS1, RFS2; \
        lookup_32bit(##gi2, RFS1, op1, op2, op3, dummy, none); \
        shlq $32, RFS1; \
        orq RFS1, RFS3; \
        \
        vmovq RFS2, x; \
        vpinsrq $1, RFS3, x, x;
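
/*
 * F_2(): evaluate one round function on two xmm registers (eight blocks
 * in total) and xor the result into the paired data words a1/a2.
 */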
#define F_2(a1, b1, a2, b2, op0, op1, op2, op3) \
        F_head(b1, RX, RGI1, RGI2, op0); \
        F_head(b2, RX, RGI3, RGI4, op0); \
        \
        F_tail(b1, RX, RGI1, RGI2, op1, op2, op3); \
        F_tail(b2, RTMP, RGI3, RGI4, op1, op2, op3); \
        \
        vpxor a1, RX, a1; \
        vpxor a2, RTMP, a2;
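
/*
 * The three CAST-256 round function types: type 1 adds the masking key
 * and combines the s-box outputs with xor/sub/add, type 2 xors the key
 * and combines with sub/add/xor, type 3 subtracts the key and combines
 * with add/xor/sub.
 */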
#define F1_2(a1, b1, a2, b2) \
        F_2(a1, b1, a2, b2, vpaddd, xorl, subl, addl)
#define F2_2(a1, b1, a2, b2) \
        F_2(a1, b1, a2, b2, vpxor, subl, addl, xorl)
#define F3_2(a1, b1, a2, b2) \
        F_2(a1, b1, a2, b2, vpsubd, addl, xorl, subl)

#define qop(in, out, f) \
        F ## f ## _2(out ## 1, in ## 1, out ## 2, in ## 2);
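
/*
 * get_round_keys(): broadcast the 32-bit masking key km[nn] into RKM,
 * convert the 5-bit rotation key in the low byte of RKR into the
 * left/right shift counts RKRF/RKRR, and shift RKR down one byte so the
 * next round's rotation key is in place.
 */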
#define get_round_keys(nn) \
        vbroadcastss (km+(4*(nn)))(CTX), RKM; \
        vpand R1ST, RKR, RKRF; \
        vpsubq RKRF, R32, RKRR; \
        vpsrldq $1, RKR, RKR;
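
/*
 * Q(n) is forward quad-round n of CAST-256 (rounds 4n..4n+3); QBAR(n)
 * is the inverse quad-round.  Encryption runs Q for quad-rounds 0-5 and
 * QBAR for 6-11; decryption walks the same schedule in reverse.
 */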
#define Q(n) \
        get_round_keys(4*n+0); \
        qop(RD, RC, 1); \
        \
        get_round_keys(4*n+1); \
        qop(RC, RB, 2); \
        \
        get_round_keys(4*n+2); \
        qop(RB, RA, 3); \
        \
        get_round_keys(4*n+3); \
        qop(RA, RD, 1);

#define QBAR(n) \
        get_round_keys(4*n+3); \
        qop(RA, RD, 1); \
        \
        get_round_keys(4*n+2); \
        qop(RB, RA, 3); \
        \
        get_round_keys(4*n+1); \
        qop(RC, RB, 2); \
        \
        get_round_keys(4*n+0); \
        qop(RD, RC, 1);
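
/*
 * preload_rkr(): load the 16 rotation-key bytes used by quad-rounds
 * 4n..4n+3 and, through do_mask/mask, optionally reorder them with
 * vpshufb to match the Q/QBAR consumption order of the following four
 * quad-rounds.
 */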
#define shuffle(mask) \
        vpshufb mask, RKR, RKR;

#define preload_rkr(n, do_mask, mask) \
        vbroadcastss .L16_mask, RKR; \
        /* add 16-bit rotation to key rotations (mod 32) */ \
        vpxor (kr+n*16)(CTX), RKR, RKR; \
        do_mask(mask);
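
/*
 * transpose_4x4() transposes a 4x4 matrix of 32-bit words; combined
 * with the byte swap in inpack_blocks()/outunpack_blocks() it converts
 * between four consecutive 16-byte blocks in memory and the word-sliced
 * A/B/C/D layout the round macros operate on.
 */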
#define transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \
        vpunpckldq x1, x0, t0; \
        vpunpckhdq x1, x0, t2; \
        vpunpckldq x3, x2, t1; \
        vpunpckhdq x3, x2, x3; \
        \
        vpunpcklqdq t1, t0, x0; \
        vpunpckhqdq t1, t0, x1; \
        vpunpcklqdq x3, t2, x2; \
        vpunpckhqdq x3, t2, x3;

#define inpack_blocks(x0, x1, x2, x3, t0, t1, t2, rmask) \
        vpshufb rmask, x0, x0; \
        vpshufb rmask, x1, x1; \
        vpshufb rmask, x2, x2; \
        vpshufb rmask, x3, x3; \
        \
        transpose_4x4(x0, x1, x2, x3, t0, t1, t2)

#define outunpack_blocks(x0, x1, x2, x3, t0, t1, t2, rmask) \
        transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \
        \
        vpshufb rmask, x0, x0; \
        vpshufb rmask, x1, x1; \
        vpshufb rmask, x2, x2; \
        vpshufb rmask, x3, x3;

.data

.align 16
.Lxts_gf128mul_and_shl1_mask:
        .byte 0x87, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0
.Lbswap_mask:
        .byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
.Lbswap128_mask:
        .byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
.Lrkr_enc_Q_Q_QBAR_QBAR:
        .byte 0, 1, 2, 3, 4, 5, 6, 7, 11, 10, 9, 8, 15, 14, 13, 12
.Lrkr_enc_QBAR_QBAR_QBAR_QBAR:
        .byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
.Lrkr_dec_Q_Q_Q_Q:
        .byte 12, 13, 14, 15, 8, 9, 10, 11, 4, 5, 6, 7, 0, 1, 2, 3
.Lrkr_dec_Q_Q_QBAR_QBAR:
        .byte 12, 13, 14, 15, 8, 9, 10, 11, 7, 6, 5, 4, 3, 2, 1, 0
.Lrkr_dec_QBAR_QBAR_QBAR_QBAR:
        .byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
.L16_mask:
        .byte 16, 16, 16, 16
.L32_mask:
        .byte 32, 0, 0, 0
.Lfirst_mask:
        .byte 0x1f, 0, 0, 0

.text
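
/*
 * __cast6_enc_blk8/__cast6_dec_blk8 save %rbp and %rbx below because
 * the s-box lookups use them as scratch registers (RID1 and RGI4) and
 * both are callee-saved in the x86_64 ABI.
 */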
.align 8
__cast6_enc_blk8:
        /* input:
         * %rdi: ctx, CTX
         * RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: blocks
         * output:
         * RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: encrypted blocks
         */

        pushq %rbp;
        pushq %rbx;

        vmovdqa .Lbswap_mask, RKM;
        vmovd .Lfirst_mask, R1ST;
        vmovd .L32_mask, R32;

        inpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
        inpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);

        preload_rkr(0, dummy, none);
        Q(0);
        Q(1);
        Q(2);
        Q(3);
        preload_rkr(1, shuffle, .Lrkr_enc_Q_Q_QBAR_QBAR);
        Q(4);
        Q(5);
        QBAR(6);
        QBAR(7);
        preload_rkr(2, shuffle, .Lrkr_enc_QBAR_QBAR_QBAR_QBAR);
        QBAR(8);
        QBAR(9);
        QBAR(10);
        QBAR(11);

        popq %rbx;
        popq %rbp;

        vmovdqa .Lbswap_mask, RKM;

        outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
        outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);

        ret;
ENDPROC(__cast6_enc_blk8)

.align 8
__cast6_dec_blk8:
        /* input:
         * %rdi: ctx, CTX
         * RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: encrypted blocks
         * output:
         * RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: decrypted blocks
         */

        pushq %rbp;
        pushq %rbx;

        vmovdqa .Lbswap_mask, RKM;
        vmovd .Lfirst_mask, R1ST;
        vmovd .L32_mask, R32;

        inpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
        inpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);

        preload_rkr(2, shuffle, .Lrkr_dec_Q_Q_Q_Q);
        Q(11);
        Q(10);
        Q(9);
        Q(8);
        preload_rkr(1, shuffle, .Lrkr_dec_Q_Q_QBAR_QBAR);
        Q(7);
        Q(6);
        QBAR(5);
        QBAR(4);
        preload_rkr(0, shuffle, .Lrkr_dec_QBAR_QBAR_QBAR_QBAR);
        QBAR(3);
        QBAR(2);
        QBAR(1);
        QBAR(0);

        popq %rbx;
        popq %rbp;

        vmovdqa .Lbswap_mask, RKM;

        outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
        outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);

        ret;
ENDPROC(__cast6_dec_blk8)

ENTRY(cast6_ecb_enc_8way)
        /* input:
         * %rdi: ctx, CTX
         * %rsi: dst
         * %rdx: src
         */
        FRAME_BEGIN

        movq %rsi, %r11;

        load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

        call __cast6_enc_blk8;

        store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

        FRAME_END
        ret;
ENDPROC(cast6_ecb_enc_8way)

ENTRY(cast6_ecb_dec_8way)
        /* input:
         * %rdi: ctx, CTX
         * %rsi: dst
         * %rdx: src
         */
        FRAME_BEGIN

        movq %rsi, %r11;

        load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

        call __cast6_dec_blk8;

        store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

        FRAME_END
        ret;
ENDPROC(cast6_ecb_dec_8way)

ENTRY(cast6_cbc_dec_8way)
        /* input:
         * %rdi: ctx, CTX
         * %rsi: dst
         * %rdx: src
         */
        FRAME_BEGIN

        pushq %r12;

        movq %rsi, %r11;
        movq %rdx, %r12;

        load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

        call __cast6_dec_blk8;

        store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

        popq %r12;

        FRAME_END
        ret;
ENDPROC(cast6_cbc_dec_8way)

ENTRY(cast6_ctr_8way)
        /* input:
         * %rdi: ctx, CTX
         * %rsi: dst
         * %rdx: src
         * %rcx: iv (little endian, 128bit)
         */
        FRAME_BEGIN

        pushq %r12;

        movq %rsi, %r11;
        movq %rdx, %r12;

        load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
                      RD2, RX, RKR, RKM);

        call __cast6_enc_blk8;

        store_ctr_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

        popq %r12;

        FRAME_END
        ret;
ENDPROC(cast6_ctr_8way)

ENTRY(cast6_xts_enc_8way)
        /* input:
         * %rdi: ctx, CTX
         * %rsi: dst
         * %rdx: src
         * %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸))
         */
        FRAME_BEGIN

        movq %rsi, %r11;

        /* regs <= src, dst <= IVs, regs <= regs xor IVs */
        load_xts_8way(%rcx, %rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2,
                      RX, RKR, RKM, .Lxts_gf128mul_and_shl1_mask);

        call __cast6_enc_blk8;

        /* dst <= regs xor IVs(in dst) */
        store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

        FRAME_END
        ret;
ENDPROC(cast6_xts_enc_8way)

ENTRY(cast6_xts_dec_8way)
        /* input:
         * %rdi: ctx, CTX
         * %rsi: dst
         * %rdx: src
         * %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸))
         */
        FRAME_BEGIN

        movq %rsi, %r11;

        /* regs <= src, dst <= IVs, regs <= regs xor IVs */
        load_xts_8way(%rcx, %rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2,
                      RX, RKR, RKM, .Lxts_gf128mul_and_shl1_mask);

        call __cast6_dec_blk8;

        /* dst <= regs xor IVs(in dst) */
        store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

        FRAME_END
        ret;
ENDPROC(cast6_xts_dec_8way)