// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2019 Google LLC
 */

/*
 * Refer to Documentation/block/inline-encryption.rst for detailed explanation.
 */

#define pr_fmt(fmt) "blk-crypto: " fmt

#include <linux/blk-crypto.h>
#include <linux/blkdev.h>
#include <linux/keyslot-manager.h>
#include <linux/random.h>
#include <linux/siphash.h>

#include "blk-crypto-internal.h"

const struct blk_crypto_mode blk_crypto_modes[] = {
	[BLK_ENCRYPTION_MODE_AES_256_XTS] = {
		.cipher_str = "xts(aes)",
		.keysize = 64,
		.ivsize = 16,
	},
	[BLK_ENCRYPTION_MODE_AES_128_CBC_ESSIV] = {
		.cipher_str = "essiv(cbc(aes),sha256)",
		.keysize = 16,
		.ivsize = 16,
	},
	[BLK_ENCRYPTION_MODE_ADIANTUM] = {
		.cipher_str = "adiantum(xchacha12,aes)",
		.keysize = 32,
		.ivsize = 32,
	},
};
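/*
 * Note on the table above: .keysize is the raw key size in bytes, so
 * AES-256-XTS needs 64 bytes (two 256-bit AES keys), and .cipher_str names the
 * crypto API algorithm that the blk-crypto fallback can use when the device
 * has no inline encryption hardware.  Illustrative sketch (not the fallback's
 * exact code; mode_num and raw_key are placeholder names) of how such a name
 * is typically turned into a transform:
 *
 *	struct crypto_skcipher *tfm;
 *	int err;
 *
 *	tfm = crypto_alloc_skcipher(blk_crypto_modes[mode_num].cipher_str, 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	err = crypto_skcipher_setkey(tfm, raw_key,
 *				     blk_crypto_modes[mode_num].keysize);
 */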
/* Check that all I/O segments are data unit aligned */
static int bio_crypt_check_alignment(struct bio *bio)
{
	const unsigned int data_unit_size =
				bio->bi_crypt_context->bc_key->data_unit_size;
	struct bvec_iter iter;
	struct bio_vec bv;

	bio_for_each_segment(bv, bio, iter) {
		if (!IS_ALIGNED(bv.bv_len | bv.bv_offset, data_unit_size))
			return -EIO;
	}

	return 0;
}
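/*
 * Worked example for the check above: since data_unit_size is a power of two,
 * OR-ing bv_len and bv_offset and testing the result checks both values with a
 * single IS_ALIGNED() call.  With a 4096-byte data unit, a segment with
 * bv_len = 8192 and bv_offset = 0 passes (8192 | 0 is a multiple of 4096),
 * while a segment with bv_len = 512 fails, because a partial data unit cannot
 * be en/decrypted as a whole unit.
 */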
/**
 * blk_crypto_submit_bio - handle submitting bio for inline encryption
 *
 * @bio_ptr: pointer to original bio pointer
 *
 * If the bio doesn't have inline encryption enabled or the submitter already
 * specified a keyslot for the target device, do nothing. Else, a raw key must
 * have been provided, so acquire a device keyslot for it if supported. Else,
 * use the crypto API fallback.
 *
 * When the crypto API fallback is used for encryption, blk-crypto may choose
 * to split the bio into two: the first one that will continue to be processed
 * and the second one that will be resubmitted via generic_make_request.
 * A bounce bio will be allocated to encrypt the contents of the aforementioned
 * "first one", and *bio_ptr will be updated to this bounce bio.
 *
 * Return: 0 if bio submission should continue; nonzero if bio_endio() was
 *	   already called so bio submission should abort.
 */
int blk_crypto_submit_bio(struct bio **bio_ptr)
{
	struct bio *bio = *bio_ptr;
	struct request_queue *q;
	struct bio_crypt_ctx *bc = bio->bi_crypt_context;
	int err;

	if (!bc || !bio_has_data(bio))
		return 0;

	/*
	 * When a read bio is marked for fallback decryption, its bi_iter is
	 * saved so that when we decrypt the bio later, we know what part of it
	 * was marked for fallback decryption (when the bio is passed down
	 * after blk_crypto_submit_bio(), it may be split or advanced, so we
	 * cannot rely on the bi_iter while decrypting in blk_crypto_endio()).
	 */
	if (bio_crypt_fallback_crypted(bc))
		return 0;

	err = bio_crypt_check_alignment(bio);
	if (err) {
		bio->bi_status = BLK_STS_IOERR;
		goto out;
	}

	q = bio->bi_disk->queue;

	if (bc->bc_ksm) {
		/* Key already programmed into device? */
		if (q->ksm == bc->bc_ksm)
			return 0;

		/* Nope, release the existing keyslot. */
		bio_crypt_ctx_release_keyslot(bc);
	}

	/* Get device keyslot if supported */
	if (keyslot_manager_crypto_mode_supported(q->ksm,
					bc->bc_key->crypto_mode,
					blk_crypto_key_dun_bytes(bc->bc_key),
					bc->bc_key->data_unit_size,
					bc->bc_key->is_hw_wrapped)) {
		err = bio_crypt_ctx_acquire_keyslot(bc, q->ksm);
		if (!err)
			return 0;

		pr_warn_once("Failed to acquire keyslot for %s (err=%d). Falling back to crypto API.\n",
			     bio->bi_disk->disk_name, err);
	}

	/* Fallback to crypto API */
	err = blk_crypto_fallback_submit_bio(bio_ptr);
	if (err)
		goto out;

	return 0;
out:
	bio_endio(*bio_ptr);
	return err;
}
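/*
 * Based on the return convention documented above, a call site in the block
 * layer's submission path is expected to look roughly like the sketch below
 * (illustrative, not copied from blk-core.c):
 *
 *	if (blk_crypto_submit_bio(&bio))
 *		return;	// bio_endio() has already been called
 *	// ...continue submitting bio, which may now be the bounce bio...
 */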
/**
 * blk_crypto_endio - clean up bio w.r.t. inline encryption during bio_endio
 *
 * @bio: the bio to clean up
 *
 * If blk_crypto_submit_bio decided to fall back to the crypto API for this
 * bio, we queue the bio for decryption into a workqueue and return false,
 * and call bio_endio(bio) at a later time (after the bio has been decrypted).
 *
 * If the bio is not to be decrypted by the crypto API, this function releases
 * the reference to the keyslot that blk_crypto_submit_bio got.
 *
 * Return: true if bio_endio should continue; false otherwise (bio_endio will
 *	   be called again when bio has been decrypted).
 */
bool blk_crypto_endio(struct bio *bio)
{
	struct bio_crypt_ctx *bc = bio->bi_crypt_context;

	if (!bc)
		return true;

	if (bio_crypt_fallback_crypted(bc)) {
		/*
		 * The only bios whose crypto is handled by the blk-crypto
		 * fallback when they reach here are those with
		 * bio_data_dir(bio) == READ, since WRITE bios that are
		 * encrypted by the crypto API fallback are handled by
		 * blk_crypto_encrypt_endio().
		 */
		return !blk_crypto_queue_decrypt_bio(bio);
	}

	if (bc->bc_keyslot >= 0)
		bio_crypt_ctx_release_keyslot(bc);

	return true;
}
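/*
 * As with submission, the completion path is expected to gate on the return
 * value.  Illustrative sketch of a caller on the bio_endio() path (the exact
 * hook lives in the block layer core):
 *
 *	if (!blk_crypto_endio(bio))
 *		return;	// endio deferred until fallback decryption finishes
 *	// ...continue with the normal bio_endio() processing...
 */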
/**
 * blk_crypto_init_key() - Prepare a key for use with blk-crypto
 * @blk_key: Pointer to the blk_crypto_key to initialize.
 * @raw_key: Pointer to the raw key.
 * @raw_key_size: Size of raw key. Must be at least the required size for the
 *		  chosen @crypto_mode; see blk_crypto_modes[]. (It's allowed to
 *		  be longer than the mode's actual key size, in order to support
 *		  inline encryption hardware that accepts wrapped keys;
 *		  @is_hw_wrapped has to be set for such keys.)
 * @is_hw_wrapped: Denotes @raw_key is wrapped.
 * @crypto_mode: identifier for the encryption algorithm to use
 * @dun_bytes: number of bytes that will be used to specify the DUN when this
 *	       key is used
 * @data_unit_size: the data unit size to use for en/decryption
 *
 * Return: 0 on success, or -errno on failure. When done using the key, it
 *	   must be freed with blk_crypto_free_key().
 */
int blk_crypto_init_key(struct blk_crypto_key *blk_key,
			const u8 *raw_key, unsigned int raw_key_size,
			bool is_hw_wrapped,
			enum blk_crypto_mode_num crypto_mode,
			unsigned int dun_bytes,
			unsigned int data_unit_size)
{
	const struct blk_crypto_mode *mode;
	static siphash_key_t hash_key;
	u32 hash;

	memset(blk_key, 0, sizeof(*blk_key));

	if (crypto_mode >= ARRAY_SIZE(blk_crypto_modes))
		return -EINVAL;

	BUILD_BUG_ON(BLK_CRYPTO_MAX_WRAPPED_KEY_SIZE < BLK_CRYPTO_MAX_KEY_SIZE);
	mode = &blk_crypto_modes[crypto_mode];
	if (is_hw_wrapped) {
		if (raw_key_size < mode->keysize ||
		    raw_key_size > BLK_CRYPTO_MAX_WRAPPED_KEY_SIZE)
			return -EINVAL;
	} else {
		if (raw_key_size != mode->keysize)
			return -EINVAL;
	}

	if (dun_bytes <= 0 || dun_bytes > BLK_CRYPTO_MAX_IV_SIZE)
		return -EINVAL;

	if (!is_power_of_2(data_unit_size))
		return -EINVAL;

	blk_key->crypto_mode = crypto_mode;
	blk_key->data_unit_size = data_unit_size;
	blk_key->data_unit_size_bits = ilog2(data_unit_size);
	blk_key->size = raw_key_size;
	blk_key->is_hw_wrapped = is_hw_wrapped;
	memcpy(blk_key->raw, raw_key, raw_key_size);

	/*
	 * The keyslot manager uses the SipHash of the key to implement O(1)
	 * key lookups while avoiding leaking information about the keys. It's
	 * precomputed here so that it only needs to be computed once per key.
	 */
	get_random_once(&hash_key, sizeof(hash_key));
	hash = (u32)siphash(raw_key, raw_key_size, &hash_key);
	blk_crypto_key_set_hash_and_dun_bytes(blk_key, hash, dun_bytes);

	return 0;
}
EXPORT_SYMBOL_GPL(blk_crypto_init_key);
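/*
 * Illustrative caller sketch for blk_crypto_init_key() (hypothetical values;
 * fscrypt-style users pick their own mode, DUN size, and data unit size):
 *
 *	struct blk_crypto_key key;
 *	u8 raw[64];	// 64-byte raw key for AES-256-XTS per blk_crypto_modes[]
 *	int err;
 *
 *	err = blk_crypto_init_key(&key, raw, sizeof(raw),
 *				  false,	// not a hardware-wrapped key
 *				  BLK_ENCRYPTION_MODE_AES_256_XTS,
 *				  8,		// bytes of DUN used per I/O
 *				  4096);	// data unit size
 *	if (err)
 *		return err;
 */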
/**
 * blk_crypto_start_using_mode() - Start using blk-crypto on a device
 * @crypto_mode: the crypto mode that will be used
 * @dun_bytes: number of bytes that will be used to specify the DUN
 * @data_unit_size: the data unit size that will be used
 * @is_hw_wrapped_key: whether the key will be hardware-wrapped
 * @q: the request queue for the device
 *
 * Upper layers must call this function to ensure that either the hardware
 * supports the needed crypto settings, or the crypto API fallback has
 * transforms for the needed mode allocated and ready to go.
 *
 * Return: 0 on success; -ENOPKG if the hardware doesn't support the crypto
 *	   settings and blk-crypto-fallback is either disabled or the needed
 *	   algorithm is disabled in the crypto API; or another -errno code.
 */
int blk_crypto_start_using_mode(enum blk_crypto_mode_num crypto_mode,
				unsigned int dun_bytes,
				unsigned int data_unit_size,
				bool is_hw_wrapped_key,
				struct request_queue *q)
{
	if (keyslot_manager_crypto_mode_supported(q->ksm, crypto_mode,
						  dun_bytes, data_unit_size,
						  is_hw_wrapped_key))
		return 0;

	if (is_hw_wrapped_key) {
		pr_warn_once("hardware doesn't support wrapped keys\n");
		return -EOPNOTSUPP;
	}

	return blk_crypto_fallback_start_using_mode(crypto_mode);
}
EXPORT_SYMBOL_GPL(blk_crypto_start_using_mode);
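/*
 * Illustrative caller sketch (hypothetical values, matching the key set up in
 * the blk_crypto_init_key() example above): an upper layer would typically do
 * this once when it starts using a given mode on a device, before submitting
 * any encrypted bios:
 *
 *	err = blk_crypto_start_using_mode(BLK_ENCRYPTION_MODE_AES_256_XTS,
 *					  8, 4096, false,
 *					  bdev_get_queue(bdev));
 *	if (err)
 *		return err;
 */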
/**
 * blk_crypto_evict_key() - Evict a key from any inline encryption hardware
 *			    it may have been programmed into
 * @q: The request queue whose keyslot manager this key might have been
 *     programmed into
 * @key: The key to evict
 *
 * Upper layers (filesystems) should call this function to ensure that a key
 * is evicted from hardware that it might have been programmed into. This
 * will call keyslot_manager_evict_key on the queue's keyslot manager, if one
 * exists and supports the crypto algorithm with the specified data unit size.
 * Otherwise, it will evict the key from the blk-crypto-fallback's ksm.
 *
 * Return: 0 on success, or -errno on error.
 */
int blk_crypto_evict_key(struct request_queue *q,
			 const struct blk_crypto_key *key)
{
	if (q->ksm &&
	    keyslot_manager_crypto_mode_supported(q->ksm, key->crypto_mode,
					blk_crypto_key_dun_bytes(key),
					key->data_unit_size,
					key->is_hw_wrapped))
		return keyslot_manager_evict_key(q->ksm, key);

	return blk_crypto_fallback_evict_key(key);
}
EXPORT_SYMBOL_GPL(blk_crypto_evict_key);
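/*
 * Illustrative caller sketch (hypothetical names): when a filesystem stops
 * using a key, e.g. because the key is being removed, it is expected to evict
 * it so that no copy lingers in hardware keyslots or in the fallback's keyslot
 * manager:
 *
 *	err = blk_crypto_evict_key(bdev_get_queue(bdev), &key);
 *	if (err)
 *		pr_warn("failed to evict blk-crypto key (err=%d)\n", err);
 *	memzero_explicit(&key, sizeof(key));	// also wipe the in-memory copy
 */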
inline void blk_crypto_flock(struct keyslot_manager *ksm, unsigned int flags)
{
	ksm_flock(ksm, flags);
}
EXPORT_SYMBOL_GPL(blk_crypto_flock);