sha256-neon.c

/*
 * Hardware-accelerated implementation of SHA-256 using Arm NEON.
 */

#include "ssh.h"
#include "sha256.h"

#if USE_ARM64_NEON_H
#include <arm64_neon.h>
#else
#include <arm_neon.h>
#endif
static bool sha256_neon_available(void)
{
    /*
     * For Arm, we delegate to a per-platform detection function (see
     * explanation in aes-neon.c).
     */
    return platform_sha256_neon_available();
}
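/*
 * The SHA-256 state is kept as two 128-bit vectors: one holding the
 * state words a,b,c,d and the other e,f,g,h, which is the layout the
 * Arm SHA-256 instructions operate on.
 */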
typedef struct sha256_neon_core sha256_neon_core;
struct sha256_neon_core {
    uint32x4_t abcd, efgh;
};
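/*
 * Load 16 bytes of message and byte-swap each 32-bit lane, converting
 * the big-endian wire format into host-order schedule words.
 */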
static inline uint32x4_t sha256_neon_load_input(const uint8_t *p)
{
    return vreinterpretq_u32_u8(vrev32q_u8(vld1q_u8(p)));
}
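/*
 * Derive the next four message-schedule words from the previous
 * sixteen (passed as four vectors, oldest first): SHA256SU0 folds in
 * the sigma_0 contribution and SHA256SU1 completes the update with
 * sigma_1.
 */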
static inline uint32x4_t sha256_neon_schedule_update(
    uint32x4_t m4, uint32x4_t m3, uint32x4_t m2, uint32x4_t m1)
{
    return vsha256su1q_u32(vsha256su0q_u32(m4, m3), m2, m1);
}
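/*
 * Perform four rounds of the compression function. The schedule words
 * are combined with the round constants first; SHA256H then advances
 * the abcd half of the state and SHA256H2 the efgh half.
 */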
static inline sha256_neon_core sha256_neon_round4(
    sha256_neon_core old, uint32x4_t sched, unsigned round)
{
    sha256_neon_core new;

    uint32x4_t round_input = vaddq_u32(
        sched, vld1q_u32(sha256_round_constants + round));
    new.abcd = vsha256hq_u32 (old.abcd, old.efgh, round_input);
    new.efgh = vsha256h2q_u32(old.efgh, old.abcd, round_input);
    return new;
}
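/*
 * Process one 64-byte block. The first 16 rounds consume the message
 * words directly; every subsequent group of four rounds first derives
 * fresh schedule words from the previous sixteen.
 */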
static inline void sha256_neon_block(sha256_neon_core *core, const uint8_t *p)
{
    uint32x4_t s0, s1, s2, s3;
    sha256_neon_core cr = *core;

    s0 = sha256_neon_load_input(p);
    cr = sha256_neon_round4(cr, s0, 0);
    s1 = sha256_neon_load_input(p+16);
    cr = sha256_neon_round4(cr, s1, 4);
    s2 = sha256_neon_load_input(p+32);
    cr = sha256_neon_round4(cr, s2, 8);
    s3 = sha256_neon_load_input(p+48);
    cr = sha256_neon_round4(cr, s3, 12);
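    /* Rounds 16-63: rotate through s0..s3, updating the schedule. */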
    s0 = sha256_neon_schedule_update(s0, s1, s2, s3);
    cr = sha256_neon_round4(cr, s0, 16);
    s1 = sha256_neon_schedule_update(s1, s2, s3, s0);
    cr = sha256_neon_round4(cr, s1, 20);
    s2 = sha256_neon_schedule_update(s2, s3, s0, s1);
    cr = sha256_neon_round4(cr, s2, 24);
    s3 = sha256_neon_schedule_update(s3, s0, s1, s2);
    cr = sha256_neon_round4(cr, s3, 28);
    s0 = sha256_neon_schedule_update(s0, s1, s2, s3);
    cr = sha256_neon_round4(cr, s0, 32);
    s1 = sha256_neon_schedule_update(s1, s2, s3, s0);
    cr = sha256_neon_round4(cr, s1, 36);
    s2 = sha256_neon_schedule_update(s2, s3, s0, s1);
    cr = sha256_neon_round4(cr, s2, 40);
    s3 = sha256_neon_schedule_update(s3, s0, s1, s2);
    cr = sha256_neon_round4(cr, s3, 44);
    s0 = sha256_neon_schedule_update(s0, s1, s2, s3);
    cr = sha256_neon_round4(cr, s0, 48);
    s1 = sha256_neon_schedule_update(s1, s2, s3, s0);
    cr = sha256_neon_round4(cr, s1, 52);
    s2 = sha256_neon_schedule_update(s2, s3, s0, s1);
    cr = sha256_neon_round4(cr, s2, 56);
    s3 = sha256_neon_schedule_update(s3, s0, s1, s2);
    cr = sha256_neon_round4(cr, s3, 60);
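    /* Feed-forward: add this block's result into the chaining state. */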
    core->abcd = vaddq_u32(core->abcd, cr.abcd);
    core->efgh = vaddq_u32(core->efgh, cr.efgh);
}
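/*
 * Wrapper tying the core state into PuTTY's generic ssh_hash and
 * BinarySink frameworks, with blk buffering partial 64-byte blocks.
 */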
typedef struct sha256_neon {
    sha256_neon_core core;
    sha256_block blk;
    BinarySink_IMPLEMENTATION;
    ssh_hash hash;
} sha256_neon;

static void sha256_neon_write(BinarySink *bs, const void *vp, size_t len);
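/*
 * Constructor: returns NULL if the NEON crypto extensions are not
 * available, so that callers can fall back to another implementation.
 */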
static ssh_hash *sha256_neon_new(const ssh_hashalg *alg)
{
    const struct sha256_extra *extra = (const struct sha256_extra *)alg->extra;
    if (!check_availability(extra))
        return NULL;

    sha256_neon *s = snew(sha256_neon);
    s->hash.vt = alg;
    BinarySink_INIT(s, sha256_neon_write);
    BinarySink_DELEGATE_INIT(&s->hash, s);
    return &s->hash;
}
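/*
 * Reset to the standard SHA-256 initial state H0..H7, loaded as the
 * abcd and efgh vector halves.
 */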
static void sha256_neon_reset(ssh_hash *hash)
{
    sha256_neon *s = container_of(hash, sha256_neon, hash);

    s->core.abcd = vld1q_u32(sha256_initial_state);
    s->core.efgh = vld1q_u32(sha256_initial_state + 4);
    sha256_block_setup(&s->blk);
}
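/*
 * Clone a hash mid-computation. After the plain structure copy, the
 * BinarySink internal pointers still refer to the original object and
 * must be re-established to point at the copy.
 */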
static void sha256_neon_copyfrom(ssh_hash *hcopy, ssh_hash *horig)
{
    sha256_neon *copy = container_of(hcopy, sha256_neon, hash);
    sha256_neon *orig = container_of(horig, sha256_neon, hash);

    *copy = *orig; /* structure copy */

    BinarySink_COPIED(copy);
    BinarySink_DELEGATE_INIT(&copy->hash, copy);
}
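/* Wipe the state with smemclr before freeing, so that no partial hash
 * data is left behind in memory. */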
static void sha256_neon_free(ssh_hash *hash)
{
    sha256_neon *s = container_of(hash, sha256_neon, hash);
    smemclr(s, sizeof(*s));
    sfree(s);
}
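/*
 * Accept arbitrary amounts of input: sha256_block_write buffers it and
 * returns true each time a complete 64-byte block is ready to be
 * compressed.
 */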
static void sha256_neon_write(BinarySink *bs, const void *vp, size_t len)
{
    sha256_neon *s = BinarySink_DOWNCAST(bs, sha256_neon);

    while (len > 0)
        if (sha256_block_write(&s->blk, &vp, &len))
            sha256_neon_block(&s->core, s->blk.block);
}
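/*
 * Finalise: append the SHA-256 padding and length, then store the
 * state words back in big-endian order to form the 32-byte digest.
 */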
static void sha256_neon_digest(ssh_hash *hash, uint8_t *digest)
{
    sha256_neon *s = container_of(hash, sha256_neon, hash);

    sha256_block_pad(&s->blk, BinarySink_UPCAST(s));
    vst1q_u8(digest,      vrev32q_u8(vreinterpretq_u8_u32(s->core.abcd)));
    vst1q_u8(digest + 16, vrev32q_u8(vreinterpretq_u8_u32(s->core.efgh)));
}
SHA256_VTABLE(neon, "NEON accelerated");
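/*
 * A minimal usage sketch (not part of the original file), assuming the
 * usual ssh_hash_new / put_data / ssh_hash_final helpers from ssh.h
 * and the ssh_sha256_neon vtable defined by the macro above:
 *
 *     uint8_t digest[32];
 *     ssh_hash *h = ssh_hash_new(&ssh_sha256_neon);
 *     if (h) {                          // NULL if NEON crypto is absent
 *         put_data(h, data, len);
 *         ssh_hash_final(h, digest);    // finalises and frees the hash
 *     }
 */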