/*
 * aes-ce-ccm-core.S - AES-CCM transform for ARMv8 with Crypto Extensions
 *
 * Copyright (C) 2013 - 2014 Linaro Ltd <ard.biesheuvel@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/linkage.h>
#include <asm/assembler.h>

	.text
	.arch	armv8-a+crypto

	/*
	 * void ce_aes_ccm_auth_data(u8 mac[], u8 const in[], u32 abytes,
	 *			     u32 *macp, u8 const rk[], u32 rounds);
	 */
ENTRY(ce_aes_ccm_auth_data)
	ldr	w8, [x3]			/* leftover from prev round? */
	ld1	{v0.16b}, [x0]			/* load mac */
	cbz	w8, 1f
	sub	w8, w8, #16
	eor	v1.16b, v1.16b, v1.16b
0:	ldrb	w7, [x1], #1			/* get 1 byte of input */
	subs	w2, w2, #1
	add	w8, w8, #1
	ins	v1.b[0], w7
	ext	v1.16b, v1.16b, v1.16b, #1	/* rotate in the input bytes */
	beq	8f				/* out of input? */
	cbnz	w8, 0b
	eor	v0.16b, v0.16b, v1.16b
1:	ld1	{v3.16b}, [x4]			/* load first round key */
	prfm	pldl1strm, [x1]
	cmp	w5, #12				/* which key size? */
	add	x6, x4, #16
	sub	w7, w5, #2			/* modified # of rounds */
	bmi	2f
	bne	5f
	mov	v5.16b, v3.16b
	b	4f
2:	mov	v4.16b, v3.16b
	ld1	{v5.16b}, [x6], #16		/* load 2nd round key */
3:	aese	v0.16b, v4.16b
	aesmc	v0.16b, v0.16b
4:	ld1	{v3.16b}, [x6], #16		/* load next round key */
	aese	v0.16b, v5.16b
	aesmc	v0.16b, v0.16b
5:	ld1	{v4.16b}, [x6], #16		/* load next round key */
	subs	w7, w7, #3
	aese	v0.16b, v3.16b
	aesmc	v0.16b, v0.16b
	ld1	{v5.16b}, [x6], #16		/* load next round key */
	bpl	3b
	aese	v0.16b, v4.16b
	subs	w2, w2, #16			/* last data? */
	eor	v0.16b, v0.16b, v5.16b		/* final round */
	bmi	6f
	ld1	{v1.16b}, [x1], #16		/* load next input block */
	eor	v0.16b, v0.16b, v1.16b		/* xor with mac */
	bne	1b
6:	st1	{v0.16b}, [x0]			/* store mac */
	beq	10f
	adds	w2, w2, #16
	beq	10f
	mov	w8, w2
7:	ldrb	w7, [x1], #1
	umov	w6, v0.b[0]
	eor	w6, w6, w7
	strb	w6, [x0], #1
	subs	w2, w2, #1
	beq	10f
	ext	v0.16b, v0.16b, v0.16b, #1	/* rotate out the mac bytes */
	b	7b
8:	mov	w7, w8
	add	w8, w8, #16
9:	ext	v1.16b, v1.16b, v1.16b, #1
	adds	w7, w7, #1
	bne	9b
	eor	v0.16b, v0.16b, v1.16b
	st1	{v0.16b}, [x0]
10:	str	w8, [x3]
	ret
ENDPROC(ce_aes_ccm_auth_data)
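
	/*
	 * Usage sketch (not part of this file): *macp counts how many
	 * bytes of the current 16-byte block have already been folded
	 * into the mac, so associated data may arrive in several pieces.
	 * In the hypothetical C caller below, only ce_aes_ccm_auth_data,
	 * kernel_neon_begin and kernel_neon_end are real symbols; all
	 * other names are illustrative, and rk[]/rounds are assumed to
	 * come from the usual AES key expansion:
	 *
	 *	u8 mac[16];	// running CBC-MAC state, seeded by caller
	 *	u32 macp = 0;	// no partial block absorbed yet
	 *
	 *	kernel_neon_begin();
	 *	ce_aes_ccm_auth_data(mac, hdr, hdr_len, &macp, rk, rounds);
	 *	ce_aes_ccm_auth_data(mac, aad, aad_len, &macp, rk, rounds);
	 *	kernel_neon_end();
	 */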

	/*
	 * void ce_aes_ccm_final(u8 mac[], u8 const ctr[], u8 const rk[],
	 *			 u32 rounds);
	 */
ENTRY(ce_aes_ccm_final)
	ld1	{v3.16b}, [x2], #16		/* load first round key */
	ld1	{v0.16b}, [x0]			/* load mac */
	cmp	w3, #12				/* which key size? */
	sub	w3, w3, #2			/* modified # of rounds */
	ld1	{v1.16b}, [x1]			/* load 1st ctriv */
	bmi	0f
	bne	3f
	mov	v5.16b, v3.16b
	b	2f
0:	mov	v4.16b, v3.16b
1:	ld1	{v5.16b}, [x2], #16		/* load next round key */
	aese	v0.16b, v4.16b
	aesmc	v0.16b, v0.16b
	aese	v1.16b, v4.16b
	aesmc	v1.16b, v1.16b
2:	ld1	{v3.16b}, [x2], #16		/* load next round key */
	aese	v0.16b, v5.16b
	aesmc	v0.16b, v0.16b
	aese	v1.16b, v5.16b
	aesmc	v1.16b, v1.16b
3:	ld1	{v4.16b}, [x2], #16		/* load next round key */
	subs	w3, w3, #3
	aese	v0.16b, v3.16b
	aesmc	v0.16b, v0.16b
	aese	v1.16b, v3.16b
	aesmc	v1.16b, v1.16b
	bpl	1b
	aese	v0.16b, v4.16b
	aese	v1.16b, v4.16b
	/* final round key cancels out */
	eor	v0.16b, v0.16b, v1.16b		/* en-/decrypt the mac */
	st1	{v0.16b}, [x0]			/* store result */
	ret
ENDPROC(ce_aes_ccm_final)
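
	/*
	 * Why the last round key is never loaded above: v0 (the mac) and
	 * v1 (the counter block) would both receive the same final
	 * AddRoundKey, so it cancels in the concluding eor. As a C model
	 * (a sketch only; aes_encrypt() stands for a full AES block
	 * encryption and is not defined in this file):
	 *
	 *	u8 a[16], b[16];
	 *
	 *	aes_encrypt(a, mac, rk, rounds);	// a = E_k(mac)
	 *	aes_encrypt(b, ctr, rk, rounds);	// b = E_k(ctr)
	 *	for (int i = 0; i < 16; i++)
	 *		mac[i] = a[i] ^ b[i];		// rk[last] drops out
	 */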

	.macro	aes_ccm_do_crypt,enc
	ldr	x8, [x6, #8]			/* load lower ctr */
	ld1	{v0.16b}, [x5]			/* load mac */
CPU_LE(	rev	x8, x8			)	/* keep swabbed ctr in reg */
0:	/* outer loop */
	ld1	{v1.8b}, [x6]			/* load upper ctr */
	prfm	pldl1strm, [x1]
	add	x8, x8, #1
	rev	x9, x8
	cmp	w4, #12				/* which key size? */
	sub	w7, w4, #2			/* get modified # of rounds */
	ins	v1.d[1], x9			/* no carry in lower ctr */
	ld1	{v3.16b}, [x3]			/* load first round key */
	add	x10, x3, #16
	bmi	1f
	bne	4f
	mov	v5.16b, v3.16b
	b	3f
1:	mov	v4.16b, v3.16b
	ld1	{v5.16b}, [x10], #16		/* load 2nd round key */
2:	/* inner loop: 3 rounds, 2x interleaved */
	aese	v0.16b, v4.16b
	aesmc	v0.16b, v0.16b
	aese	v1.16b, v4.16b
	aesmc	v1.16b, v1.16b
3:	ld1	{v3.16b}, [x10], #16		/* load next round key */
	aese	v0.16b, v5.16b
	aesmc	v0.16b, v0.16b
	aese	v1.16b, v5.16b
	aesmc	v1.16b, v1.16b
4:	ld1	{v4.16b}, [x10], #16		/* load next round key */
	subs	w7, w7, #3
	aese	v0.16b, v3.16b
	aesmc	v0.16b, v0.16b
	aese	v1.16b, v3.16b
	aesmc	v1.16b, v1.16b
	ld1	{v5.16b}, [x10], #16		/* load next round key */
	bpl	2b
	aese	v0.16b, v4.16b
	aese	v1.16b, v4.16b
	subs	w2, w2, #16
	bmi	6f				/* partial block? */
	ld1	{v2.16b}, [x1], #16		/* load next input block */
	.if	\enc == 1
	eor	v2.16b, v2.16b, v5.16b		/* final round enc+mac */
	eor	v1.16b, v1.16b, v2.16b		/* xor with crypted ctr */
	.else
	eor	v2.16b, v2.16b, v1.16b		/* xor with crypted ctr */
	eor	v1.16b, v2.16b, v5.16b		/* final round enc */
	.endif
	eor	v0.16b, v0.16b, v2.16b		/* xor mac with pt ^ rk[last] */
	st1	{v1.16b}, [x0], #16		/* write output block */
	bne	0b
CPU_LE(	rev	x8, x8			)
	st1	{v0.16b}, [x5]			/* store mac */
	str	x8, [x6, #8]			/* store lsb end of ctr (BE) */
5:	ret
6:	eor	v0.16b, v0.16b, v5.16b		/* final round mac */
	eor	v1.16b, v1.16b, v5.16b		/* final round enc */
	st1	{v0.16b}, [x5]			/* store mac */
	add	w2, w2, #16			/* process partial tail block */
7:	ldrb	w9, [x1], #1			/* get 1 byte of input */
	umov	w6, v1.b[0]			/* get top crypted ctr byte */
	umov	w7, v0.b[0]			/* get top mac byte */
	.if	\enc == 1
	eor	w7, w7, w9
	eor	w9, w9, w6
	.else
	eor	w9, w9, w6
	eor	w7, w7, w9
	.endif
	strb	w9, [x0], #1			/* store out byte */
	strb	w7, [x5], #1			/* store mac byte */
	subs	w2, w2, #1
	beq	5b
	ext	v0.16b, v0.16b, v0.16b, #1	/* shift out mac byte */
	ext	v1.16b, v1.16b, v1.16b, #1	/* shift out ctr byte */
	b	7b
	.endm
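
	/*
	 * Per-block C model of the macro above (a sketch for explanation
	 * only; ks[] stands for the encrypted counter E_k(ctr) and enc
	 * for the \enc argument). The .if/.else ordering exists because
	 * the mac must absorb the *plaintext* in both directions, and
	 * because the final AddRoundKey of both the data path and the
	 * mac path is folded into the same xors:
	 *
	 *	for (int i = 0; i < 16; i++) {
	 *		out[i] = in[i] ^ ks[i];			// CTR layer
	 *		mac[i] ^= enc ? in[i] : out[i];		// mac takes pt
	 *	}
	 *
	 * The mac block itself is pushed through the AES rounds together
	 * with the next counter block on the following loop iteration;
	 * its very last encryption is deferred to ce_aes_ccm_final.
	 */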

	/*
	 * void ce_aes_ccm_encrypt(u8 out[], u8 const in[], u32 cbytes,
	 *			   u8 const rk[], u32 rounds, u8 mac[],
	 *			   u8 ctr[]);
	 * void ce_aes_ccm_decrypt(u8 out[], u8 const in[], u32 cbytes,
	 *			   u8 const rk[], u32 rounds, u8 mac[],
	 *			   u8 ctr[]);
	 */
ENTRY(ce_aes_ccm_encrypt)
	aes_ccm_do_crypt	1
ENDPROC(ce_aes_ccm_encrypt)

ENTRY(ce_aes_ccm_decrypt)
	aes_ccm_do_crypt	0
ENDPROC(ce_aes_ccm_decrypt)
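
	/*
	 * End-to-end usage sketch (illustrative only; everything except
	 * the four ce_aes_ccm_* entry points defined above is an
	 * assumption). Building the B0 block into mac[], deriving the
	 * counter blocks ctr/ctr0 per RFC 3610 and expanding the key
	 * into rk[] are left to the caller:
	 *
	 *	u32 macp = 0;
	 *
	 *	kernel_neon_begin();
	 *	ce_aes_ccm_auth_data(mac, aad, aad_len, &macp, rk, rounds);
	 *	ce_aes_ccm_encrypt(dst, src, src_len, rk, rounds, mac, ctr);
	 *	ce_aes_ccm_final(mac, ctr0, rk, rounds);
	 *	kernel_neon_end();
	 *
	 *	// the CCM tag is the leading bytes of mac[]
	 */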