blk-crypto-fallback.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2019 Google LLC
 */

/*
 * Refer to Documentation/block/inline-encryption.rst for detailed explanation.
 */

#define pr_fmt(fmt) "blk-crypto-fallback: " fmt

#include <crypto/skcipher.h>
#include <linux/blk-cgroup.h>
#include <linux/blk-crypto.h>
#include <linux/crypto.h>
#include <linux/keyslot-manager.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/random.h>

#include "blk-crypto-internal.h"

static unsigned int num_prealloc_bounce_pg = 32;
module_param(num_prealloc_bounce_pg, uint, 0);
MODULE_PARM_DESC(num_prealloc_bounce_pg,
                 "Number of preallocated bounce pages for the blk-crypto crypto API fallback");

static unsigned int blk_crypto_num_keyslots = 100;
module_param_named(num_keyslots, blk_crypto_num_keyslots, uint, 0);
MODULE_PARM_DESC(num_keyslots,
                 "Number of keyslots for the blk-crypto crypto API fallback");

static unsigned int num_prealloc_fallback_crypt_ctxs = 128;
module_param(num_prealloc_fallback_crypt_ctxs, uint, 0);
MODULE_PARM_DESC(num_prealloc_fallback_crypt_ctxs,
                 "Number of preallocated bio fallback crypto contexts for blk-crypto to use during crypto API fallback");

struct bio_fallback_crypt_ctx {
        struct bio_crypt_ctx crypt_ctx;
        /*
         * Copy of the bvec_iter when this bio was submitted.
         * We only want to en/decrypt the part of the bio as described by the
         * bvec_iter upon submission, because the bio might be split before
         * being resubmitted.
         */
        struct bvec_iter crypt_iter;
        u64 fallback_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
};

/* The following few vars are only used during the crypto API fallback */
static struct kmem_cache *bio_fallback_crypt_ctx_cache;
static mempool_t *bio_fallback_crypt_ctx_pool;

/*
 * Allocating a crypto tfm during I/O can deadlock, so we have to preallocate
 * all of a mode's tfms when that mode starts being used. Since each mode may
 * need all the keyslots at some point, each mode needs its own tfm for each
 * keyslot; thus, a keyslot may contain tfms for multiple modes. However, to
 * match the behavior of real inline encryption hardware (which only supports a
 * single encryption context per keyslot), we only allow one tfm per keyslot to
 * be used at a time - the rest of the unused tfms have their keys cleared.
 */
static DEFINE_MUTEX(tfms_init_lock);
static bool tfms_inited[BLK_ENCRYPTION_MODE_MAX];

struct blk_crypto_decrypt_work {
        struct work_struct work;
        struct bio *bio;
};

static struct blk_crypto_keyslot {
        struct crypto_skcipher *tfm;
        enum blk_crypto_mode_num crypto_mode;
        struct crypto_skcipher *tfms[BLK_ENCRYPTION_MODE_MAX];
} *blk_crypto_keyslots;

/* The following few vars are only used during the crypto API fallback */
static struct keyslot_manager *blk_crypto_ksm;
static struct workqueue_struct *blk_crypto_wq;
static mempool_t *blk_crypto_bounce_page_pool;
static struct kmem_cache *blk_crypto_decrypt_work_cache;

bool bio_crypt_fallback_crypted(const struct bio_crypt_ctx *bc)
{
        return bc && bc->bc_ksm == blk_crypto_ksm;
}

/*
 * This is the key we set when evicting a keyslot. This *should* be the all 0's
 * key, but AES-XTS rejects that key, so we use some random bytes instead.
 */
static u8 blank_key[BLK_CRYPTO_MAX_KEY_SIZE];

static void blk_crypto_evict_keyslot(unsigned int slot)
{
        struct blk_crypto_keyslot *slotp = &blk_crypto_keyslots[slot];
        enum blk_crypto_mode_num crypto_mode = slotp->crypto_mode;
        int err;

        WARN_ON(slotp->crypto_mode == BLK_ENCRYPTION_MODE_INVALID);

        /* Clear the key in the skcipher */
        err = crypto_skcipher_setkey(slotp->tfms[crypto_mode], blank_key,
                                     blk_crypto_modes[crypto_mode].keysize);
        WARN_ON(err);
        slotp->crypto_mode = BLK_ENCRYPTION_MODE_INVALID;
}
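
/*
 * Program @key into keyslot @slot: if a key of a different mode currently
 * occupies the slot, evict it first, then set @key on the slot's preallocated
 * tfm for the key's crypto mode.
 */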
static int blk_crypto_keyslot_program(struct keyslot_manager *ksm,
                                      const struct blk_crypto_key *key,
                                      unsigned int slot)
{
        struct blk_crypto_keyslot *slotp = &blk_crypto_keyslots[slot];
        const enum blk_crypto_mode_num crypto_mode = key->crypto_mode;
        int err;

        if (crypto_mode != slotp->crypto_mode &&
            slotp->crypto_mode != BLK_ENCRYPTION_MODE_INVALID) {
                blk_crypto_evict_keyslot(slot);
        }

        if (!slotp->tfms[crypto_mode])
                return -ENOMEM;
        slotp->crypto_mode = crypto_mode;
        err = crypto_skcipher_setkey(slotp->tfms[crypto_mode], key->raw,
                                     key->size);
        if (err) {
                blk_crypto_evict_keyslot(slot);
                return err;
        }
        return 0;
}

static int blk_crypto_keyslot_evict(struct keyslot_manager *ksm,
                                    const struct blk_crypto_key *key,
                                    unsigned int slot)
{
        blk_crypto_evict_keyslot(slot);
        return 0;
}

/*
 * The crypto API fallback KSM ops - only used for a bio when it specifies a
 * blk_crypto_mode for which we failed to get a keyslot in the device's inline
 * encryption hardware (which probably means the device doesn't have inline
 * encryption hardware that supports that crypto mode).
 */
static const struct keyslot_mgmt_ll_ops blk_crypto_ksm_ll_ops = {
        .keyslot_program        = blk_crypto_keyslot_program,
        .keyslot_evict          = blk_crypto_keyslot_evict,
};
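
/*
 * Completion handler for the encrypted bounce bio: return the bounce pages to
 * the mempool, propagate the I/O status to the original bio, and complete the
 * original bio.
 */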
static void blk_crypto_encrypt_endio(struct bio *enc_bio)
{
        struct bio *src_bio = enc_bio->bi_private;
        int i;

        for (i = 0; i < enc_bio->bi_vcnt; i++)
                mempool_free(enc_bio->bi_io_vec[i].bv_page,
                             blk_crypto_bounce_page_pool);

        src_bio->bi_status = enc_bio->bi_status;

        bio_put(enc_bio);
        bio_endio(src_bio);
}
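
/*
 * Make a bounce clone of @bio_src: the bvecs are copied but initially still
 * point at the source pages; blk_crypto_encrypt_bio() replaces them with
 * bounce pages as it encrypts each segment.
 */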
static struct bio *blk_crypto_clone_bio(struct bio *bio_src)
{
        struct bvec_iter iter;
        struct bio_vec bv;
        struct bio *bio;

        bio = bio_alloc_bioset(GFP_NOIO, bio_segments(bio_src), NULL);
        if (!bio)
                return NULL;
        bio->bi_disk            = bio_src->bi_disk;
        bio->bi_opf             = bio_src->bi_opf;
        bio->bi_ioprio          = bio_src->bi_ioprio;
        bio->bi_write_hint      = bio_src->bi_write_hint;
        bio->bi_iter.bi_sector  = bio_src->bi_iter.bi_sector;
        bio->bi_iter.bi_size    = bio_src->bi_iter.bi_size;

        bio_for_each_segment(bv, bio_src, iter)
                bio->bi_io_vec[bio->bi_vcnt++] = bv;

        if (bio_integrity(bio_src) &&
            bio_integrity_clone(bio, bio_src, GFP_NOIO) < 0) {
                bio_put(bio);
                return NULL;
        }

        bio_clone_blkcg_association(bio, bio_src);
        bio_clone_skip_dm_default_key(bio, bio_src);

        return bio;
}
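
/*
 * Allocate an skcipher_request on the tfm that is currently programmed into
 * the bio's keyslot, and arrange for requests to be waited on synchronously
 * through @wait.
 */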
static int blk_crypto_alloc_cipher_req(struct bio *src_bio,
                                       struct skcipher_request **ciph_req_ret,
                                       struct crypto_wait *wait)
{
        struct skcipher_request *ciph_req;
        const struct blk_crypto_keyslot *slotp;

        slotp = &blk_crypto_keyslots[src_bio->bi_crypt_context->bc_keyslot];
        ciph_req = skcipher_request_alloc(slotp->tfms[slotp->crypto_mode],
                                          GFP_NOIO);
        if (!ciph_req) {
                src_bio->bi_status = BLK_STS_RESOURCE;
                return -ENOMEM;
        }

        skcipher_request_set_callback(ciph_req,
                                      CRYPTO_TFM_REQ_MAY_BACKLOG |
                                      CRYPTO_TFM_REQ_MAY_SLEEP,
                                      crypto_req_done, wait);
        *ciph_req_ret = ciph_req;
        return 0;
}
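
/*
 * The bounce bio can hold at most BIO_MAX_PAGES bvecs.  If *bio_ptr covers
 * more segments than that, split off a prefix that fits, chain and resubmit
 * the remainder, and point *bio_ptr at the prefix.
 */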
static int blk_crypto_split_bio_if_needed(struct bio **bio_ptr)
{
        struct bio *bio = *bio_ptr;
        unsigned int i = 0;
        unsigned int num_sectors = 0;
        struct bio_vec bv;
        struct bvec_iter iter;

        bio_for_each_segment(bv, bio, iter) {
                num_sectors += bv.bv_len >> SECTOR_SHIFT;
                if (++i == BIO_MAX_PAGES)
                        break;
        }
        if (num_sectors < bio_sectors(bio)) {
                struct bio *split_bio;

                split_bio = bio_split(bio, num_sectors, GFP_NOIO, NULL);
                if (!split_bio) {
                        bio->bi_status = BLK_STS_RESOURCE;
                        return -ENOMEM;
                }
                bio_chain(split_bio, bio);
                generic_make_request(bio);
                *bio_ptr = split_bio;
        }
        return 0;
}

union blk_crypto_iv {
        __le64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
        u8 bytes[BLK_CRYPTO_MAX_IV_SIZE];
};
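
/*
 * The IV passed to the skcipher is simply the data unit number (DUN) stored
 * little-endian: dun[0] fills the low-order IV bytes, and high DUN words that
 * are zero yield zero IV bytes.  E.g. a single-word DUN of 5 produces the IV
 * bytes 05 00 00 ... 00.
 */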
static void blk_crypto_dun_to_iv(const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
                                 union blk_crypto_iv *iv)
{
        int i;

        for (i = 0; i < BLK_CRYPTO_DUN_ARRAY_SIZE; i++)
                iv->dun[i] = cpu_to_le64(dun[i]);
}

/*
 * The crypto API fallback's encryption routine.
 * Allocate a bounce bio for encryption, encrypt the input bio using the crypto
 * API, and replace *bio_ptr with the bounce bio. May split the input bio if
 * it's too large.
 */
static int blk_crypto_encrypt_bio(struct bio **bio_ptr)
{
        struct bio *src_bio;
        struct skcipher_request *ciph_req = NULL;
        DECLARE_CRYPTO_WAIT(wait);
        u64 curr_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
        union blk_crypto_iv iv;
        struct scatterlist src, dst;
        struct bio *enc_bio;
        unsigned int i, j;
        int data_unit_size;
        struct bio_crypt_ctx *bc;
        int err = 0;

        /* Split the bio if it's too big for single page bvec */
        err = blk_crypto_split_bio_if_needed(bio_ptr);
        if (err)
                return err;
        src_bio = *bio_ptr;

        bc = src_bio->bi_crypt_context;
        data_unit_size = bc->bc_key->data_unit_size;

        /* Allocate bounce bio for encryption */
        enc_bio = blk_crypto_clone_bio(src_bio);
        if (!enc_bio) {
                src_bio->bi_status = BLK_STS_RESOURCE;
                return -ENOMEM;
        }

        /*
         * Use the crypto API fallback keyslot manager to get a crypto_skcipher
         * for the algorithm and key specified for this bio.
         */
        err = bio_crypt_ctx_acquire_keyslot(bc, blk_crypto_ksm);
        if (err) {
                src_bio->bi_status = BLK_STS_IOERR;
                goto out_put_enc_bio;
        }

        /* and then allocate an skcipher_request for it */
        err = blk_crypto_alloc_cipher_req(src_bio, &ciph_req, &wait);
        if (err)
                goto out_release_keyslot;

        memcpy(curr_dun, bc->bc_dun, sizeof(curr_dun));
        sg_init_table(&src, 1);
        sg_init_table(&dst, 1);

        skcipher_request_set_crypt(ciph_req, &src, &dst, data_unit_size,
                                   iv.bytes);
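
        /*
         * The request is bound to iv.bytes and to &src/&dst once here; the
         * loops below only rewrite the IV (via blk_crypto_dun_to_iv()) and
         * the scatterlist page/offset before each crypto_skcipher_encrypt().
         */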
        /* Encrypt each page in the bounce bio */
        for (i = 0; i < enc_bio->bi_vcnt; i++) {
                struct bio_vec *enc_bvec = &enc_bio->bi_io_vec[i];
                struct page *plaintext_page = enc_bvec->bv_page;
                struct page *ciphertext_page =
                        mempool_alloc(blk_crypto_bounce_page_pool, GFP_NOIO);

                enc_bvec->bv_page = ciphertext_page;

                if (!ciphertext_page) {
                        src_bio->bi_status = BLK_STS_RESOURCE;
                        err = -ENOMEM;
                        goto out_free_bounce_pages;
                }

                sg_set_page(&src, plaintext_page, data_unit_size,
                            enc_bvec->bv_offset);
                sg_set_page(&dst, ciphertext_page, data_unit_size,
                            enc_bvec->bv_offset);

                /* Encrypt each data unit in this page */
                for (j = 0; j < enc_bvec->bv_len; j += data_unit_size) {
                        blk_crypto_dun_to_iv(curr_dun, &iv);
                        err = crypto_wait_req(crypto_skcipher_encrypt(ciph_req),
                                              &wait);
                        if (err) {
                                i++;
                                src_bio->bi_status = BLK_STS_RESOURCE;
                                goto out_free_bounce_pages;
                        }
                        bio_crypt_dun_increment(curr_dun, 1);
                        src.offset += data_unit_size;
                        dst.offset += data_unit_size;
                }
        }

        enc_bio->bi_private = src_bio;
        enc_bio->bi_end_io = blk_crypto_encrypt_endio;
        *bio_ptr = enc_bio;

        enc_bio = NULL;
        err = 0;
        goto out_free_ciph_req;

out_free_bounce_pages:
        while (i > 0)
                mempool_free(enc_bio->bi_io_vec[--i].bv_page,
                             blk_crypto_bounce_page_pool);
out_free_ciph_req:
        skcipher_request_free(ciph_req);
out_release_keyslot:
        bio_crypt_ctx_release_keyslot(bc);
out_put_enc_bio:
        if (enc_bio)
                bio_put(enc_bio);

        return err;
}
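
/*
 * Return the bio's bio_fallback_crypt_ctx (which embeds its bio_crypt_ctx)
 * to the mempool and clear the bio's crypt context pointer.
 */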
static void blk_crypto_free_fallback_crypt_ctx(struct bio *bio)
{
        mempool_free(container_of(bio->bi_crypt_context,
                                  struct bio_fallback_crypt_ctx,
                                  crypt_ctx),
                     bio_fallback_crypt_ctx_pool);
        bio->bi_crypt_context = NULL;
}

/*
 * The crypto API fallback's main decryption routine.
 * Decrypts the input bio in place.
 */
static void blk_crypto_decrypt_bio(struct work_struct *work)
{
        struct blk_crypto_decrypt_work *decrypt_work =
                container_of(work, struct blk_crypto_decrypt_work, work);
        struct bio *bio = decrypt_work->bio;
        struct skcipher_request *ciph_req = NULL;
        DECLARE_CRYPTO_WAIT(wait);
        struct bio_vec bv;
        struct bvec_iter iter;
        u64 curr_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
        union blk_crypto_iv iv;
        struct scatterlist sg;
        struct bio_crypt_ctx *bc = bio->bi_crypt_context;
        struct bio_fallback_crypt_ctx *f_ctx =
                container_of(bc, struct bio_fallback_crypt_ctx, crypt_ctx);
        const int data_unit_size = bc->bc_key->data_unit_size;
        unsigned int i;
        int err;

        /*
         * Use the crypto API fallback keyslot manager to get a crypto_skcipher
         * for the algorithm and key specified for this bio.
         */
        if (bio_crypt_ctx_acquire_keyslot(bc, blk_crypto_ksm)) {
                bio->bi_status = BLK_STS_RESOURCE;
                goto out_no_keyslot;
        }

        /* and then allocate an skcipher_request for it */
        err = blk_crypto_alloc_cipher_req(bio, &ciph_req, &wait);
        if (err)
                goto out;

        memcpy(curr_dun, f_ctx->fallback_dun, sizeof(curr_dun));
        sg_init_table(&sg, 1);
        skcipher_request_set_crypt(ciph_req, &sg, &sg, data_unit_size,
                                   iv.bytes);

        /* Decrypt each segment in the bio */
        __bio_for_each_segment(bv, bio, iter, f_ctx->crypt_iter) {
                struct page *page = bv.bv_page;

                sg_set_page(&sg, page, data_unit_size, bv.bv_offset);

                /* Decrypt each data unit in the segment */
                for (i = 0; i < bv.bv_len; i += data_unit_size) {
                        blk_crypto_dun_to_iv(curr_dun, &iv);
                        if (crypto_wait_req(crypto_skcipher_decrypt(ciph_req),
                                            &wait)) {
                                bio->bi_status = BLK_STS_IOERR;
                                goto out;
                        }
                        bio_crypt_dun_increment(curr_dun, 1);
                        sg.offset += data_unit_size;
                }
        }

out:
        skcipher_request_free(ciph_req);
        bio_crypt_ctx_release_keyslot(bc);
out_no_keyslot:
        kmem_cache_free(blk_crypto_decrypt_work_cache, decrypt_work);
        blk_crypto_free_fallback_crypt_ctx(bio);
        bio_endio(bio);
}

/*
 * Queue bio for decryption.
 * Returns true iff bio was queued for decryption.
 */
bool blk_crypto_queue_decrypt_bio(struct bio *bio)
{
        struct blk_crypto_decrypt_work *decrypt_work;

        /* If there was an IO error, don't queue for decrypt. */
        if (bio->bi_status)
                goto out;

        decrypt_work = kmem_cache_zalloc(blk_crypto_decrypt_work_cache,
                                         GFP_ATOMIC);
        if (!decrypt_work) {
                bio->bi_status = BLK_STS_RESOURCE;
                goto out;
        }

        INIT_WORK(&decrypt_work->work, blk_crypto_decrypt_bio);
        decrypt_work->bio = bio;
        queue_work(blk_crypto_wq, &decrypt_work->work);

        return true;
out:
        blk_crypto_free_fallback_crypt_ctx(bio);
        return false;
}

/*
 * Prepare blk-crypto-fallback for the specified crypto mode.
 * Returns -ENOPKG if the needed crypto API support is missing.
 */
int blk_crypto_fallback_start_using_mode(enum blk_crypto_mode_num mode_num)
{
        const char *cipher_str = blk_crypto_modes[mode_num].cipher_str;
        struct blk_crypto_keyslot *slotp;
        unsigned int i;
        int err = 0;

        /*
         * Fast path
         * Ensure that updates to blk_crypto_keyslots[i].tfms[mode_num]
         * for each i are visible before we try to access them.
         */
        if (likely(smp_load_acquire(&tfms_inited[mode_num])))
                return 0;

        mutex_lock(&tfms_init_lock);
        if (likely(tfms_inited[mode_num]))
                goto out;

        for (i = 0; i < blk_crypto_num_keyslots; i++) {
                slotp = &blk_crypto_keyslots[i];
                slotp->tfms[mode_num] = crypto_alloc_skcipher(cipher_str, 0, 0);
                if (IS_ERR(slotp->tfms[mode_num])) {
                        err = PTR_ERR(slotp->tfms[mode_num]);
                        if (err == -ENOENT) {
                                pr_warn_once("Missing crypto API support for \"%s\"\n",
                                             cipher_str);
                                err = -ENOPKG;
                        }
                        slotp->tfms[mode_num] = NULL;
                        goto out_free_tfms;
                }

                crypto_skcipher_set_flags(slotp->tfms[mode_num],
                                          CRYPTO_TFM_REQ_WEAK_KEY);
        }

        /*
         * Ensure that updates to blk_crypto_keyslots[i].tfms[mode_num]
         * for each i are visible before we set tfms_inited[mode_num].
         */
        smp_store_release(&tfms_inited[mode_num], true);
        goto out;

out_free_tfms:
        for (i = 0; i < blk_crypto_num_keyslots; i++) {
                slotp = &blk_crypto_keyslots[i];
                crypto_free_skcipher(slotp->tfms[mode_num]);
                slotp->tfms[mode_num] = NULL;
        }
out:
        mutex_unlock(&tfms_init_lock);
        return err;
}
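
/*
 * Evict @key from the fallback's keyslot manager, so that the keyslot it
 * occupies (if any) has its key cleared.
 */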
int blk_crypto_fallback_evict_key(const struct blk_crypto_key *key)
{
        return keyslot_manager_evict_key(blk_crypto_ksm, key);
}
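
/*
 * Handle a bio with the crypto API fallback.  Write bios are encrypted into a
 * bounce bio right away; for read bios, the bio_crypt_ctx is replaced by one
 * embedded in a bio_fallback_crypt_ctx, which records the DUN and bvec_iter
 * needed to decrypt the data at completion time.
 */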
int blk_crypto_fallback_submit_bio(struct bio **bio_ptr)
{
        struct bio *bio = *bio_ptr;
        struct bio_crypt_ctx *bc = bio->bi_crypt_context;
        struct bio_fallback_crypt_ctx *f_ctx;

        if (bc->bc_key->is_hw_wrapped) {
                pr_warn_once("HW wrapped key cannot be used with fallback.\n");
                bio->bi_status = BLK_STS_NOTSUPP;
                return -EOPNOTSUPP;
        }

        if (!tfms_inited[bc->bc_key->crypto_mode]) {
                bio->bi_status = BLK_STS_IOERR;
                return -EIO;
        }

        if (bio_data_dir(bio) == WRITE)
                return blk_crypto_encrypt_bio(bio_ptr);

        /*
         * Mark bio as fallback crypted and replace the bio_crypt_ctx with
         * another one contained in a bio_fallback_crypt_ctx, so that the
         * fallback has space to store the info it needs for decryption.
         */
        bc->bc_ksm = blk_crypto_ksm;
        f_ctx = mempool_alloc(bio_fallback_crypt_ctx_pool, GFP_NOIO);
        f_ctx->crypt_ctx = *bc;
        memcpy(f_ctx->fallback_dun, bc->bc_dun, sizeof(f_ctx->fallback_dun));
        f_ctx->crypt_iter = bio->bi_iter;

        bio_crypt_free_ctx(bio);
        bio->bi_crypt_context = &f_ctx->crypt_ctx;

        return 0;
}
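
/*
 * One-time setup: generate the random "blank" key used on eviction, create
 * the fallback keyslot manager and decryption workqueue, and allocate the
 * keyslot array plus the bounce-page, decrypt-work, and crypt-context pools.
 */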
int __init blk_crypto_fallback_init(void)
{
        int i;
        unsigned int crypto_mode_supported[BLK_ENCRYPTION_MODE_MAX];

        prandom_bytes(blank_key, BLK_CRYPTO_MAX_KEY_SIZE);

        /* All blk-crypto modes have a crypto API fallback. */
        for (i = 0; i < BLK_ENCRYPTION_MODE_MAX; i++)
                crypto_mode_supported[i] = 0xFFFFFFFF;
        crypto_mode_supported[BLK_ENCRYPTION_MODE_INVALID] = 0;

        blk_crypto_ksm = keyslot_manager_create(
                                NULL, blk_crypto_num_keyslots,
                                &blk_crypto_ksm_ll_ops,
                                BLK_CRYPTO_FEATURE_STANDARD_KEYS,
                                crypto_mode_supported, NULL);
        if (!blk_crypto_ksm)
                return -ENOMEM;

        blk_crypto_wq = alloc_workqueue("blk_crypto_wq",
                                        WQ_UNBOUND | WQ_HIGHPRI |
                                        WQ_MEM_RECLAIM, num_online_cpus());
        if (!blk_crypto_wq)
                return -ENOMEM;

        blk_crypto_keyslots = kcalloc(blk_crypto_num_keyslots,
                                      sizeof(blk_crypto_keyslots[0]),
                                      GFP_KERNEL);
        if (!blk_crypto_keyslots)
                return -ENOMEM;

        blk_crypto_bounce_page_pool =
                mempool_create_page_pool(num_prealloc_bounce_pg, 0);
        if (!blk_crypto_bounce_page_pool)
                return -ENOMEM;

        blk_crypto_decrypt_work_cache = KMEM_CACHE(blk_crypto_decrypt_work,
                                                   SLAB_RECLAIM_ACCOUNT);
        if (!blk_crypto_decrypt_work_cache)
                return -ENOMEM;

        bio_fallback_crypt_ctx_cache = KMEM_CACHE(bio_fallback_crypt_ctx, 0);
        if (!bio_fallback_crypt_ctx_cache)
                return -ENOMEM;

        bio_fallback_crypt_ctx_pool =
                mempool_create_slab_pool(num_prealloc_fallback_crypt_ctxs,
                                         bio_fallback_crypt_ctx_cache);
        if (!bio_fallback_crypt_ctx_pool)
                return -ENOMEM;

        return 0;
}