constant_time.c 8.6 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249
  1. /**
  2. * Constant-time functions
  3. *
  4. * Copyright The Mbed TLS Contributors
  5. * SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
  6. */
  7. /*
  8. * The following functions are implemented without using comparison operators, as those
  9. * might be translated to branches by some compilers on some platforms.
  10. */
  11. #include <stdint.h>
  12. #include <limits.h>
  13. #include "common.h"
  14. #include "constant_time_internal.h"
  15. #include "mbedtls/constant_time.h"
  16. #include "mbedtls/error.h"
  17. #include "mbedtls/platform_util.h"
  18. #include <string.h>
#if !defined(MBEDTLS_CT_ASM)
/*
 * Define an object with the value zero, such that the compiler cannot prove that it
 * has the value zero (because it is volatile, it "may be modified in ways unknown to
 * the implementation").
 *
 * This acts as an optimisation barrier on platforms where no inline assembly
 * is available: XOR-ing or adding this "unknown zero" into a value prevents
 * the compiler from constant-folding or branching on that value.
 */
volatile mbedtls_ct_uint_t mbedtls_ct_zero = 0;
#endif
/*
 * Define MBEDTLS_EFFICIENT_UNALIGNED_VOLATILE_ACCESS where assembly is present to
 * perform fast unaligned access to volatile data.
 *
 * This is needed because mbedtls_get_unaligned_uintXX etc don't support volatile
 * memory accesses.
 *
 * Some of these definitions could be moved into alignment.h but for now they are
 * only used here.
 */
#if defined(MBEDTLS_EFFICIENT_UNALIGNED_ACCESS) && \
    ((defined(MBEDTLS_CT_ARM_ASM) && (UINTPTR_MAX == 0xfffffffful)) || \
    defined(MBEDTLS_CT_AARCH64_ASM))
/* We check pointer sizes to avoid issues with them not matching register size requirements */
#define MBEDTLS_EFFICIENT_UNALIGNED_VOLATILE_ACCESS

/* Load 32 bits from a (possibly unaligned) volatile buffer with a single
 * load instruction, expressed in assembly so the compiler can neither
 * elide the access nor split it.
 *
 * \param p  Pointer to at least 4 readable bytes; need not be aligned.
 * \return   The 32-bit word at \p p.
 */
static inline uint32_t mbedtls_get_unaligned_volatile_uint32(volatile const unsigned char *p)
{
    /* This is UB, even where it's safe:
     *    return *((volatile uint32_t*)p);
     * so instead the same thing is expressed in assembly below.
     */
    uint32_t r;
#if defined(MBEDTLS_CT_ARM_ASM)
    asm volatile ("ldr %0, [%1]" : "=r" (r) : "r" (p) :);
#elif defined(MBEDTLS_CT_AARCH64_ASM)
    asm volatile ("ldr %w0, [%1]" : "=r" (r) : MBEDTLS_ASM_AARCH64_PTR_CONSTRAINT(p) :);
#else
#error "No assembly defined for mbedtls_get_unaligned_volatile_uint32"
#endif
    return r;
}
#endif /* defined(MBEDTLS_EFFICIENT_UNALIGNED_ACCESS) &&
          (defined(MBEDTLS_CT_ARM_ASM) || defined(MBEDTLS_CT_AARCH64_ASM)) */
/** Compare two buffers in constant time.
 *
 * Both buffers are always read in full; execution time depends only on
 * \p n, never on the buffers' contents.
 *
 * \param a  First buffer.
 * \param b  Second buffer.
 * \param n  Number of bytes to compare.
 *
 * \return   Zero if the buffers are equal, non-zero otherwise.
 *           (Only equality is meaningful; no ordering is implied.)
 */
int mbedtls_ct_memcmp(const void *a,
                      const void *b,
                      size_t n)
{
    size_t i = 0;
    /*
     * `A` and `B` are cast to volatile to ensure that the compiler
     * generates code that always fully reads both buffers.
     * Otherwise it could generate a test to exit early if `diff` has all
     * bits set early in the loop.
     */
    volatile const unsigned char *A = (volatile const unsigned char *) a;
    volatile const unsigned char *B = (volatile const unsigned char *) b;
    uint32_t diff = 0;

#if defined(MBEDTLS_EFFICIENT_UNALIGNED_VOLATILE_ACCESS)
    /* Fast path: accumulate differences one 32-bit word at a time. */
    for (; (i + 4) <= n; i += 4) {
        uint32_t x = mbedtls_get_unaligned_volatile_uint32(A + i);
        uint32_t y = mbedtls_get_unaligned_volatile_uint32(B + i);
        diff |= x ^ y;
    }
#endif

    /* Byte-at-a-time tail (or the whole buffer when no fast path). */
    for (; i < n; i++) {
        /* Read volatile data in order before computing diff.
         * This avoids IAR compiler warning:
         * 'the order of volatile accesses is undefined ..' */
        unsigned char x = A[i], y = B[i];
        diff |= x ^ y;
    }

#if (INT_MAX < INT32_MAX)
    /* We don't support int smaller than 32-bits, but if someone tried to build
     * with this configuration, there is a risk that, for differing data, the
     * only bits set in diff are in the top 16-bits, and would be lost by a
     * simple cast from uint32 to int.
     * This would have significant security implications, so protect against it. */
#error "mbedtls_ct_memcmp() requires minimum 32-bit ints"
#else
    /* The bit-twiddling ensures that when we cast uint32_t to int, we are casting
     * a value that is in the range 0..INT_MAX - a value larger than this would
     * result in implementation defined behaviour.
     *
     * This ensures that the value returned by the function is non-zero iff
     * diff is non-zero.
     */
    return (int) ((diff & 0xffff) | (diff >> 16));
#endif
}
#if defined(MBEDTLS_NIST_KW_C)
/** Compare two buffers in constant time, ignoring a prefix and a suffix.
 *
 * All \p n bytes of both buffers are read regardless of the skip ranges,
 * so timing depends only on \p n (the skip bounds themselves are treated
 * as public — presumably non-secret lengths; NOTE(review): confirm with
 * callers).
 *
 * \param a          First buffer.
 * \param b          Second buffer.
 * \param n          Total length of both buffers in bytes.
 * \param skip_head  Number of leading bytes excluded from the comparison.
 * \param skip_tail  Number of trailing bytes excluded from the comparison.
 *
 * \return           Zero iff bytes [skip_head, n - skip_tail) are equal.
 */
int mbedtls_ct_memcmp_partial(const void *a,
                              const void *b,
                              size_t n,
                              size_t skip_head,
                              size_t skip_tail)
{
    unsigned int diff = 0;

    volatile const unsigned char *A = (volatile const unsigned char *) a;
    volatile const unsigned char *B = (volatile const unsigned char *) b;

    size_t valid_end = n - skip_tail;

    for (size_t i = 0; i < n; i++) {
        unsigned char x = A[i], y = B[i];
        unsigned int d = x ^ y;
        /* Accumulate the XOR only when i lies in [skip_head, valid_end),
         * selected via a constant-time mask rather than a branch. */
        mbedtls_ct_condition_t valid = mbedtls_ct_bool_and(mbedtls_ct_uint_ge(i, skip_head),
                                                           mbedtls_ct_uint_lt(i, valid_end));
        diff |= mbedtls_ct_uint_if_else_0(valid, d);
    }

    /* Since we go byte-by-byte, the only bits set will be in the bottom 8 bits, so the
     * cast from uint to int is safe. */
    return (int) diff;
}
#endif
#if defined(MBEDTLS_PKCS1_V15) && defined(MBEDTLS_RSA_C) && !defined(MBEDTLS_RSA_ALT)
/** Shift a buffer left by \p offset bytes in constant time, zero-filling
 * the vacated tail.
 *
 * The amount \p offset is kept secret: the code always performs \p total
 * outer passes over the whole buffer (deliberately O(total^2)), using
 * constant-time selection instead of branching on \p offset.
 *
 * \param start   Buffer to shift in place.
 * \param total   Buffer length in bytes.
 * \param offset  Number of bytes to shift left by (secret; <= total —
 *                NOTE(review): presumed precondition, confirm with callers).
 */
void mbedtls_ct_memmove_left(void *start, size_t total, size_t offset)
{
    volatile unsigned char *buf = start;
    for (size_t i = 0; i < total; i++) {
        /* The first `total - offset` passes are a no-op. The last
         * `offset` passes shift the data one byte to the left and
         * zero out the last byte. */
        mbedtls_ct_condition_t no_op = mbedtls_ct_uint_gt(total - offset, i);
        for (size_t n = 0; n < total - 1; n++) {
            unsigned char current = buf[n];
            unsigned char next = buf[n+1];
            /* Keep `current` on no-op passes, take `next` on shifting passes. */
            buf[n] = mbedtls_ct_uint_if(no_op, current, next);
        }
        /* On shifting passes, zero the last byte. */
        buf[total-1] = mbedtls_ct_uint_if_else_0(no_op, buf[total-1]);
    }
}
#endif /* MBEDTLS_PKCS1_V15 && MBEDTLS_RSA_C && ! MBEDTLS_RSA_ALT */
/** Conditional memcpy without branches: dest = condition ? src1 : src2.
 *
 * Timing depends only on \p len, not on \p condition or the data.
 *
 * \param condition  All-ones to copy from \p src1, all-zeros to copy
 *                   from \p src2.
 * \param dest       Destination buffer.
 * \param src1       Source used when \p condition is true.
 * \param src2       Source used when \p condition is false; may be NULL,
 *                   in which case \p dest is used (making the false case
 *                   a no-op).
 * \param len        Number of bytes to copy.
 */
void mbedtls_ct_memcpy_if(mbedtls_ct_condition_t condition,
                          unsigned char *dest,
                          const unsigned char *src1,
                          const unsigned char *src2,
                          size_t len)
{
    /* `mask` selects src1 bytes, `not_mask` selects src2 bytes. The
     * complement goes through mbedtls_ct_compiler_opaque() so the compiler
     * cannot observe that mask and not_mask are complementary and
     * reintroduce a branch. */
#if defined(MBEDTLS_CT_SIZE_64)
    const uint64_t mask     = (uint64_t) condition;
    const uint64_t not_mask = (uint64_t) ~mbedtls_ct_compiler_opaque(condition);
#else
    const uint32_t mask     = (uint32_t) condition;
    const uint32_t not_mask = (uint32_t) ~mbedtls_ct_compiler_opaque(condition);
#endif

    /* If src2 is NULL, setup src2 so that we read from the destination address.
     *
     * This means that if src2 == NULL && condition is false, the result will be a
     * no-op because we read from dest and write the same data back into dest.
     */
    if (src2 == NULL) {
        src2 = dest;
    }

    /* dest[i] = c1 == c2 ? src[i] : dest[i] */
    size_t i = 0;
#if defined(MBEDTLS_EFFICIENT_UNALIGNED_ACCESS)
#if defined(MBEDTLS_CT_SIZE_64)
    /* Fast path: select 8 bytes at a time via masking. */
    for (; (i + 8) <= len; i += 8) {
        uint64_t a = mbedtls_get_unaligned_uint64(src1 + i) & mask;
        uint64_t b = mbedtls_get_unaligned_uint64(src2 + i) & not_mask;
        mbedtls_put_unaligned_uint64(dest + i, a | b);
    }
#else
    /* Fast path: select 4 bytes at a time via masking. */
    for (; (i + 4) <= len; i += 4) {
        uint32_t a = mbedtls_get_unaligned_uint32(src1 + i) & mask;
        uint32_t b = mbedtls_get_unaligned_uint32(src2 + i) & not_mask;
        mbedtls_put_unaligned_uint32(dest + i, a | b);
    }
#endif /* defined(MBEDTLS_CT_SIZE_64) */
#endif /* MBEDTLS_EFFICIENT_UNALIGNED_ACCESS */

    /* Byte-at-a-time tail (or whole buffer when no fast path). */
    for (; i < len; i++) {
        dest[i] = (src1[i] & mask) | (src2[i] & not_mask);
    }
}
  189. void mbedtls_ct_memcpy_offset(unsigned char *dest,
  190. const unsigned char *src,
  191. size_t offset,
  192. size_t offset_min,
  193. size_t offset_max,
  194. size_t len)
  195. {
  196. size_t offsetval;
  197. for (offsetval = offset_min; offsetval <= offset_max; offsetval++) {
  198. mbedtls_ct_memcpy_if(mbedtls_ct_uint_eq(offsetval, offset), dest, src + offsetval, NULL,
  199. len);
  200. }
  201. }
#if defined(MBEDTLS_PKCS1_V15) && defined(MBEDTLS_RSA_C) && !defined(MBEDTLS_RSA_ALT)
/** Zeroize a buffer if (and only if) \p condition is true, in constant
 * time: the buffer is fully traversed and masked either way, so timing
 * does not depend on \p condition.
 *
 * \param condition  All-ones to zeroize, all-zeros to leave \p buf intact.
 * \param buf        Buffer to conditionally zeroize.
 * \param len        Buffer length in bytes.
 */
void mbedtls_ct_zeroize_if(mbedtls_ct_condition_t condition, void *buf, size_t len)
{
    /* mask is all-zeros when condition is true (AND clears the data),
     * all-ones when false (AND is the identity). */
    uint32_t mask = (uint32_t) ~condition;
    uint8_t *p = (uint8_t *) buf;
    size_t i = 0;
#if defined(MBEDTLS_EFFICIENT_UNALIGNED_ACCESS)
    /* Fast path: mask 4 bytes at a time. */
    for (; (i + 4) <= len; i += 4) {
        mbedtls_put_unaligned_uint32((void *) (p + i),
                                     mbedtls_get_unaligned_uint32((void *) (p + i)) & mask);
    }
#endif
    /* Byte-at-a-time tail (or whole buffer when no fast path). */
    for (; i < len; i++) {
        p[i] = p[i] & mask;
    }
}
#endif /* defined(MBEDTLS_PKCS1_V15) && defined(MBEDTLS_RSA_C) && !defined(MBEDTLS_RSA_ALT) */