dm-req-crypt.c

/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/completion.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/workqueue.h>
#include <linux/backing-dev.h>
#include <linux/atomic.h>
#include <linux/scatterlist.h>
#include <linux/device-mapper.h>
#include <linux/printk.h>
#include <linux/pft.h>
#include <crypto/scatterwalk.h>
#include <asm/page.h>
#include <asm/unaligned.h>
#include <crypto/hash.h>
#include <crypto/md5.h>
#include <crypto/algapi.h>
#include <mach/qcrypto.h>

#define DM_MSG_PREFIX "req-crypt"

#define MAX_SG_LIST		1024
#define REQ_DM_512_KB		(512*1024)
#define MAX_ENCRYPTION_BUFFERS	1
#define MIN_IOS			16
#define MIN_POOL_PAGES		32
#define KEY_SIZE_XTS		64
#define AES_XTS_IV_LEN		16

#define DM_REQ_CRYPT_ERROR			-1
#define DM_REQ_CRYPT_ERROR_AFTER_PAGE_MALLOC	-2
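
/*
 * Completion context handed to the async cipher callback; the worker
 * thread waits on "completion" and picks up the outcome in "err".
 */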
struct req_crypt_result {
	struct completion completion;
	int err;
};

#define FDE_KEY_ID	0
#define PFE_KEY_ID	1

static struct dm_dev *dev;
static struct kmem_cache *_req_crypt_io_pool;
static sector_t start_sector_orig;
static struct workqueue_struct *req_crypt_queue;
static mempool_t *req_io_pool;
static mempool_t *req_page_pool;
static bool is_fde_enabled;
static struct crypto_ablkcipher *tfm;
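
/*
 * Per-request state carried from map() to the worker and to end_io():
 * the cloned request, its error status, a pending count, and whether the
 * data needs to be encrypted/decrypted and with which key.
 */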
struct req_dm_crypt_io {
	struct work_struct work;
	struct request *cloned_request;
	int error;
	atomic_t pending;
	struct timespec start_time;
	bool should_encrypt;
	bool should_decrypt;
	u32 key_id;
};

static void req_crypt_cipher_complete
		(struct crypto_async_request *req, int err);
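
/*
 * Decide whether a write (or a read, in the decrypt twin below) needs the
 * crypto path: use the per-file encryption (PFE) key when pft reports the
 * bio as encrypted, otherwise fall back to the full-disk encryption (FDE)
 * key when FDE is enabled.
 */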
static bool req_crypt_should_encrypt(struct req_dm_crypt_io *req)
{
	int ret;
	bool should_encrypt = false;
	struct bio *bio = NULL;
	u32 key_id = 0;
	bool is_encrypted = false;
	bool is_inplace = false;

	if (!req || !req->cloned_request || !req->cloned_request->bio)
		return false;

	bio = req->cloned_request->bio;

	ret = pft_get_key_index(bio, &key_id, &is_encrypted, &is_inplace);
	/* req->key_id = key_id; @todo support more than 1 pfe key */
	if ((ret == 0) && (is_encrypted || is_inplace)) {
		should_encrypt = true;
		req->key_id = PFE_KEY_ID;
	} else if (is_fde_enabled) {
		should_encrypt = true;
		req->key_id = FDE_KEY_ID;
	}

	return should_encrypt;
}
static bool req_crypt_should_decrypt(struct req_dm_crypt_io *req)
{
	int ret;
	bool should_decrypt = false;
	struct bio *bio = NULL;
	u32 key_id = 0;
	bool is_encrypted = false;
	bool is_inplace = false;

	if (!req || !req->cloned_request || !req->cloned_request->bio)
		return false;

	bio = req->cloned_request->bio;

	ret = pft_get_key_index(bio, &key_id, &is_encrypted, &is_inplace);
	/* req->key_id = key_id; @todo support more than 1 pfe key */
	if ((ret == 0) && (is_encrypted && !is_inplace)) {
		should_decrypt = true;
		req->key_id = PFE_KEY_ID;
	} else if (is_fde_enabled) {
		should_decrypt = true;
		req->key_id = FDE_KEY_ID;
	}

	return should_decrypt;
}
static void req_crypt_inc_pending(struct req_dm_crypt_io *io)
{
	atomic_inc(&io->pending);
}
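
/*
 * Drop the pending count for a write: on success, dispatch the encrypted
 * clone to the underlying queue; on error, kill the unmapped request and
 * return the io to its mempool.
 */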
static void req_crypt_dec_pending_encrypt(struct req_dm_crypt_io *io)
{
	int error = 0;
	struct request *clone = NULL;

	if (io) {
		error = io->error;
		if (io->cloned_request) {
			clone = io->cloned_request;
		} else {
			DMERR("%s io->cloned_request is NULL\n",
								__func__);
			/*
			 * If the clone is NULL we cannot do anything;
			 * this should never happen.
			 */
			BUG();
		}
	} else {
		DMERR("%s io is NULL\n", __func__);
		/*
		 * If io is NULL we cannot do anything;
		 * this should never happen.
		 */
		BUG();
	}

	atomic_dec(&io->pending);

	if (error < 0) {
		dm_kill_unmapped_request(clone, error);
		mempool_free(io, req_io_pool);
	} else
		dm_dispatch_request(clone);
}
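
/*
 * Drop the pending count for a read: decryption (if any) has already been
 * done, so complete the original request through dm and free the io.
 */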
static void req_crypt_dec_pending_decrypt(struct req_dm_crypt_io *io)
{
	int error = 0;
	struct request *clone = NULL;

	if (io) {
		error = io->error;
		if (io->cloned_request) {
			clone = io->cloned_request;
		} else {
			DMERR("%s io->cloned_request is NULL\n",
								__func__);
			/*
			 * If the clone is NULL we cannot do anything;
			 * this should never happen.
			 */
			BUG();
		}
	} else {
		DMERR("%s io is NULL\n",
							__func__);
		/*
		 * If io is NULL we cannot do anything;
		 * this should never happen.
		 */
		BUG();
	}

	/* Should never get here if io or the clone is NULL */
	dm_end_request(clone, error);
	atomic_dec(&io->pending);
	mempool_free(io, req_io_pool);
}
/*
 * Worker-queue callback that performs decryption for reads and then uses
 * the dm functions to complete the bios and the request.
 */
static void req_cryptd_crypt_read_convert(struct req_dm_crypt_io *io)
{
	struct request *clone = NULL;
	int error = 0;
	int total_sg_len = 0, rc = 0, total_bytes_in_req = 0;
	struct ablkcipher_request *req = NULL;
	struct req_crypt_result result;
	struct scatterlist *req_sg_read = NULL;
	int err = 0;
	u8 IV[AES_XTS_IV_LEN];

	if (io) {
		error = io->error;
		if (io->cloned_request) {
			clone = io->cloned_request;
		} else {
			DMERR("%s io->cloned_request is NULL\n",
								__func__);
			error = DM_REQ_CRYPT_ERROR;
			goto submit_request;
		}
	} else {
		DMERR("%s io is NULL\n",
							__func__);
		error = DM_REQ_CRYPT_ERROR;
		goto submit_request;
	}

	req_crypt_inc_pending(io);

	if (error != 0) {
		err = error;
		goto submit_request;
	}

	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		DMERR("%s ablkcipher request allocation failed\n", __func__);
		err = DM_REQ_CRYPT_ERROR;
		goto ablkcipher_req_alloc_failure;
	}

	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					req_crypt_cipher_complete, &result);
	init_completion(&result.completion);
	err = qcrypto_cipher_set_device(req, io->key_id);
	if (err != 0) {
		DMERR("%s qcrypto_cipher_set_device failed with err %d\n",
				__func__, err);
		error = DM_REQ_CRYPT_ERROR;
		goto ablkcipher_req_alloc_failure;
	}
	qcrypto_cipher_set_flag(req,
		QCRYPTO_CTX_USE_PIPE_KEY | QCRYPTO_CTX_XTS_DU_SIZE_512B);
	crypto_ablkcipher_clear_flags(tfm, ~0);
	crypto_ablkcipher_setkey(tfm, NULL, KEY_SIZE_XTS);

	req_sg_read = kzalloc(sizeof(struct scatterlist) *
			MAX_SG_LIST, GFP_KERNEL);
	if (!req_sg_read) {
		DMERR("%s req_sg_read allocation failed\n",
						__func__);
		err = DM_REQ_CRYPT_ERROR;
		goto ablkcipher_req_alloc_failure;
	}

	total_sg_len = blk_rq_map_sg(clone->q, clone, req_sg_read);
	if ((total_sg_len <= 0) || (total_sg_len > MAX_SG_LIST)) {
		DMERR("%s Request Error %d", __func__, total_sg_len);
		err = DM_REQ_CRYPT_ERROR;
		goto ablkcipher_req_alloc_failure;
	}

	total_bytes_in_req = clone->__data_len;
	if (total_bytes_in_req > REQ_DM_512_KB) {
		DMERR("%s total_bytes_in_req > 512 KB: %d",
				__func__, total_bytes_in_req);
		err = DM_REQ_CRYPT_ERROR;
		goto ablkcipher_req_alloc_failure;
	}
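
	/*
	 * Build the XTS IV from the request's start sector, zero-padded to
	 * AES_XTS_IV_LEN; the driver was told above (via
	 * QCRYPTO_CTX_XTS_DU_SIZE_512B) to treat each 512-byte data unit
	 * separately.
	 */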
	memset(IV, 0, AES_XTS_IV_LEN);
	memcpy(IV, &clone->__sector, sizeof(sector_t));

	ablkcipher_request_set_crypt(req, req_sg_read, req_sg_read,
			total_bytes_in_req, (void *) IV);

	rc = crypto_ablkcipher_decrypt(req);

	switch (rc) {
	case 0:
		break;
	case -EBUSY:
		/*
		 * Fall through: make this a synchronous request by waiting
		 * for completion, just as for -EINPROGRESS.
		 */
	case -EINPROGRESS:
		wait_for_completion_io(&result.completion);
		if (result.err) {
			DMERR("%s error = %d decrypting the request\n",
				 __func__, result.err);
			err = DM_REQ_CRYPT_ERROR;
		}
		break;
	default:
		err = DM_REQ_CRYPT_ERROR;
		break;
	}

ablkcipher_req_alloc_failure:
	if (req)
		ablkcipher_request_free(req);

	kfree(req_sg_read);

submit_request:
	if (io)
		io->error = err;

	req_crypt_dec_pending_decrypt(io);
}
/*
 * Worker-queue callback for reads that need no decryption; it uses the dm
 * functions to complete the bios and the request.
 */
static void req_cryptd_crypt_read_plain(struct req_dm_crypt_io *io)
{
	struct request *clone = NULL;
	int error = 0;

	if (!io || !io->cloned_request) {
		DMERR("%s io is invalid\n", __func__);
		BUG(); /* should not happen */
	}

	clone = io->cloned_request;
	dm_end_request(clone, error);
	mempool_free(io, req_io_pool);
}

/*
 * Worker-queue callback that performs encryption for writes and then
 * submits the request via the elevator.
 */
static void req_cryptd_crypt_write_convert(struct req_dm_crypt_io *io)
{
	struct request *clone = NULL;
	struct bio *bio_src = NULL;
	unsigned int total_sg_len_req_in = 0, total_sg_len_req_out = 0,
		total_bytes_in_req = 0, error = DM_MAPIO_REMAPPED, rc = 0;
	struct req_iterator iter = {0, NULL};
	struct req_iterator iter1 = {0, NULL};
	struct ablkcipher_request *req = NULL;
	struct req_crypt_result result;
	struct bio_vec *bvec = NULL;
	struct scatterlist *req_sg_in = NULL;
	struct scatterlist *req_sg_out = NULL;
	int copy_bio_sector_to_req = 0;
	gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
	struct page *page = NULL;
	u8 IV[AES_XTS_IV_LEN];
	int remaining_size = 0;
	int err = 0;

	if (io) {
		if (io->cloned_request) {
			clone = io->cloned_request;
		} else {
			DMERR("%s io->cloned_request is NULL\n",
								__func__);
			error = DM_REQ_CRYPT_ERROR;
			goto submit_request;
		}
	} else {
		DMERR("%s io is NULL\n",
							__func__);
		error = DM_REQ_CRYPT_ERROR;
		goto submit_request;
	}

	req_crypt_inc_pending(io);

	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		DMERR("%s ablkcipher request allocation failed\n",
					__func__);
		error = DM_REQ_CRYPT_ERROR;
		goto ablkcipher_req_alloc_failure;
	}

	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				req_crypt_cipher_complete, &result);
	init_completion(&result.completion);
	err = qcrypto_cipher_set_device(req, io->key_id);
	if (err != 0) {
		DMERR("%s qcrypto_cipher_set_device failed with err %d\n",
				__func__, err);
		error = DM_REQ_CRYPT_ERROR;
		goto ablkcipher_req_alloc_failure;
	}
	qcrypto_cipher_set_flag(req,
		QCRYPTO_CTX_USE_PIPE_KEY | QCRYPTO_CTX_XTS_DU_SIZE_512B);
	crypto_ablkcipher_clear_flags(tfm, ~0);
	crypto_ablkcipher_setkey(tfm, NULL, KEY_SIZE_XTS);

	req_sg_in = kzalloc(sizeof(struct scatterlist) * MAX_SG_LIST,
			GFP_KERNEL);
	if (!req_sg_in) {
		DMERR("%s req_sg_in allocation failed\n",
					__func__);
		error = DM_REQ_CRYPT_ERROR;
		goto ablkcipher_req_alloc_failure;
	}

	req_sg_out = kzalloc(sizeof(struct scatterlist) * MAX_SG_LIST,
			GFP_KERNEL);
	if (!req_sg_out) {
		DMERR("%s req_sg_out allocation failed\n",
					__func__);
		error = DM_REQ_CRYPT_ERROR;
		goto ablkcipher_req_alloc_failure;
	}

	total_sg_len_req_in = blk_rq_map_sg(clone->q, clone, req_sg_in);
	if ((total_sg_len_req_in <= 0) ||
			(total_sg_len_req_in > MAX_SG_LIST)) {
		DMERR("%s Request Error %d", __func__, total_sg_len_req_in);
		error = DM_REQ_CRYPT_ERROR;
		goto ablkcipher_req_alloc_failure;
	}

	total_bytes_in_req = clone->__data_len;
	if (total_bytes_in_req > REQ_DM_512_KB) {
		DMERR("%s total_bytes_in_req > 512 KB: %d",
				__func__, total_bytes_in_req);
		error = DM_REQ_CRYPT_ERROR;
		goto ablkcipher_req_alloc_failure;
	}
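
	/*
	 * The source scatterlist was mapped above; now point every bvec of
	 * the clone at freshly allocated mempool pages so the ciphertext is
	 * written out-of-place and the caller's plaintext pages are left
	 * untouched. Consecutive bvecs share a page until it fills up, so
	 * only bvecs with bv_offset == 0 own a page (the error path below
	 * and req_crypt_endio() free exactly those).
	 */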
	rq_for_each_segment(bvec, clone, iter) {
		if (bvec->bv_len > remaining_size) {
			page = NULL;
			while (page == NULL) {
				page = mempool_alloc(req_page_pool, gfp_mask);
				if (!page) {
					DMERR("%s Crypt page alloc failed",
							__func__);
					congestion_wait(BLK_RW_ASYNC, HZ/100);
				}
			}

			bvec->bv_page = page;
			bvec->bv_offset = 0;
			remaining_size = PAGE_SIZE - bvec->bv_len;
			if (remaining_size < 0)
				BUG();
		} else {
			bvec->bv_page = page;
			bvec->bv_offset = PAGE_SIZE - remaining_size;
			remaining_size = remaining_size - bvec->bv_len;
		}
	}

	total_sg_len_req_out = blk_rq_map_sg(clone->q, clone, req_sg_out);
	if ((total_sg_len_req_out <= 0) ||
			(total_sg_len_req_out > MAX_SG_LIST)) {
		DMERR("%s Request Error %d", __func__, total_sg_len_req_out);
		error = DM_REQ_CRYPT_ERROR_AFTER_PAGE_MALLOC;
		goto ablkcipher_req_alloc_failure;
	}

	memset(IV, 0, AES_XTS_IV_LEN);
	memcpy(IV, &clone->__sector, sizeof(sector_t));

	ablkcipher_request_set_crypt(req, req_sg_in, req_sg_out,
			total_bytes_in_req, (void *) IV);

	rc = crypto_ablkcipher_encrypt(req);

	switch (rc) {
	case 0:
		break;
	case -EBUSY:
		/*
		 * Fall through: make this a synchronous request by waiting
		 * for completion, just as for -EINPROGRESS.
		 */
	case -EINPROGRESS:
		wait_for_completion_interruptible(&result.completion);
		if (result.err) {
			DMERR("%s error = %d encrypting the request\n",
				 __func__, result.err);
			error = DM_REQ_CRYPT_ERROR_AFTER_PAGE_MALLOC;
			goto ablkcipher_req_alloc_failure;
		}
		break;
	default:
		error = DM_REQ_CRYPT_ERROR_AFTER_PAGE_MALLOC;
		goto ablkcipher_req_alloc_failure;
	}

	__rq_for_each_bio(bio_src, clone) {
		if (copy_bio_sector_to_req == 0) {
			clone->buffer = bio_data(bio_src);
			copy_bio_sector_to_req++;
		}
		blk_queue_bounce(clone->q, &bio_src);
	}

ablkcipher_req_alloc_failure:
	if (req)
		ablkcipher_request_free(req);

	if (error == DM_REQ_CRYPT_ERROR_AFTER_PAGE_MALLOC) {
		bvec = NULL;
		rq_for_each_segment(bvec, clone, iter1) {
			if (bvec->bv_offset == 0) {
				mempool_free(bvec->bv_page, req_page_pool);
				bvec->bv_page = NULL;
			} else
				bvec->bv_page = NULL;
		}
	}

	kfree(req_sg_in);
	kfree(req_sg_out);

submit_request:
	if (io)
		io->error = error;

	req_crypt_dec_pending_encrypt(io);
}
/*
 * Worker-queue callback for writes that need no encryption; it submits the
 * request via the elevator.
 */
static void req_cryptd_crypt_write_plain(struct req_dm_crypt_io *io)
{
	struct request *clone = NULL;

	if (!io || !io->cloned_request) {
		DMERR("%s io is invalid\n", __func__);
		BUG(); /* should not happen */
	}

	clone = io->cloned_request;
	io->error = 0;
	dm_dispatch_request(clone);
}

/* Workqueue callback: route the io to the convert or plain path. */
static void req_cryptd_crypt(struct work_struct *work)
{
	struct req_dm_crypt_io *io =
			container_of(work, struct req_dm_crypt_io, work);

	if (rq_data_dir(io->cloned_request) == WRITE) {
		if (io->should_encrypt)
			req_cryptd_crypt_write_convert(io);
		else
			req_cryptd_crypt_write_plain(io);
	} else if (rq_data_dir(io->cloned_request) == READ) {
		if (io->should_decrypt)
			req_cryptd_crypt_read_convert(io);
		else
			req_cryptd_crypt_read_plain(io);
	} else {
		DMERR("%s received non-read/write request for clone %p\n",
				__func__, io->cloned_request);
	}
}
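
/*
 * Queue the io on the req_cryptd workqueue so the crypto work runs in
 * process context rather than in the map/endio paths.
 */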
static void req_cryptd_queue_crypt(struct req_dm_crypt_io *io)
{
	INIT_WORK(&io->work, req_cryptd_crypt);
	queue_work(req_crypt_queue, &io->work);
}
/*
 * Cipher completion callback, triggered by the Linux crypto API once the
 * operation is done. It signals the waiting thread that the crypto
 * operation is complete.
 */
static void req_crypt_cipher_complete(struct crypto_async_request *req, int err)
{
	struct req_crypt_result *res = req->data;

	if (err == -EINPROGRESS)
		return;

	res->err = err;
	complete(&res->completion);
}
/*
 * If bio->bi_bdev is a partition, remap the location.
 */
static inline void req_crypt_blk_partition_remap(struct bio *bio)
{
	struct block_device *bdev = bio->bi_bdev;

	if (bio_sectors(bio) && bdev != bdev->bd_contains) {
		struct hd_struct *p = bdev->bd_part;
		/*
		 * Check for integer overflow; should never happen.
		 */
		if (p->start_sect > (UINT_MAX - bio->bi_sector))
			BUG();

		bio->bi_sector += p->start_sect;
		bio->bi_bdev = bdev->bd_contains;
	}
}
/*
 * The endio function is called from ksoftirqd context (atomic).
 * For write operations, the new pages created from the mempool are freed
 * and returned. For read operations that require decryption, since this is
 * called in an atomic context, the request is sent to a worker queue which
 * completes the decryption and frees the request once done.
 */
static int req_crypt_endio(struct dm_target *ti, struct request *clone,
			    int error, union map_info *map_context)
{
	int err = 0;
	struct req_iterator iter1;
	struct bio_vec *bvec = NULL;
	struct req_dm_crypt_io *req_io = map_context->ptr;

	/* For write requests, free the bounce pages and the io, then return. */
	if (rq_data_dir(clone) == WRITE) {
		rq_for_each_segment(bvec, clone, iter1) {
			if (req_io->should_encrypt && bvec->bv_offset == 0) {
				mempool_free(bvec->bv_page, req_page_pool);
				bvec->bv_page = NULL;
			} else
				bvec->bv_page = NULL;
		}
		mempool_free(req_io, req_io_pool);
		goto submit_request;
	} else if (rq_data_dir(clone) == READ) {
		req_io->error = error;
		req_cryptd_queue_crypt(req_io);
		err = DM_ENDIO_INCOMPLETE;
		goto submit_request;
	}

submit_request:
	return err;
}
/*
 * This function is called with interrupts disabled.
 * It remaps the clone onto the underlying device. For a write request it
 * hands the clone to the worker queue, which encrypts the data and submits
 * the request directly via the elevator. For a read request no
 * pre-processing is required; the clone is returned to dm once the mapping
 * is done.
 */
static int req_crypt_map(struct dm_target *ti, struct request *clone,
			  union map_info *map_context)
{
	struct req_dm_crypt_io *req_io = NULL;
	int error = DM_REQ_CRYPT_ERROR, copy_bio_sector_to_req = 0;
	struct bio *bio_src = NULL;

	if ((rq_data_dir(clone) != READ) &&
			(rq_data_dir(clone) != WRITE)) {
		error = DM_REQ_CRYPT_ERROR;
		DMERR("%s Unknown request\n", __func__);
		goto submit_request;
	}

	req_io = mempool_alloc(req_io_pool, GFP_NOWAIT);
	if (!req_io) {
		DMERR("%s req_io allocation failed\n", __func__);
		error = DM_REQ_CRYPT_ERROR;
		goto submit_request;
	}

	/* Save the clone in the req_io, the callback to the worker
	 * queue will get the req_io
	 */
	req_io->cloned_request = clone;
	map_context->ptr = req_io;
	atomic_set(&req_io->pending, 0);

	if (rq_data_dir(clone) == WRITE)
		req_io->should_encrypt = req_crypt_should_encrypt(req_io);
	if (rq_data_dir(clone) == READ)
		req_io->should_decrypt = req_crypt_should_decrypt(req_io);

	/* Get the queue of the underlying original device */
	clone->q = bdev_get_queue(dev->bdev);
	clone->rq_disk = dev->bdev->bd_disk;

	__rq_for_each_bio(bio_src, clone) {
		bio_src->bi_bdev = dev->bdev;
		/* Currently the way req-dm works is that once the underlying
		 * device driver completes the request by calling into the
		 * block layer, the block layer completes the bios (clones)
		 * and then the cloned request. This is undesirable for
		 * req-dm-crypt, hence the BIO_DONTFREE flag: it ensures that
		 * the block layer does not complete the cloned bios before
		 * completing the request. When the crypt endio is called,
		 * post-processing is done and then the dm layer completes
		 * the bios (clones) and frees them.
		 */
		bio_src->bi_flags |= 1 << BIO_DONTFREE;

		/*
		 * If this device has partitions, remap block n
		 * of partition p to block n+start(p) of the disk.
		 */
		req_crypt_blk_partition_remap(bio_src);
		if (copy_bio_sector_to_req == 0) {
			clone->__sector = bio_src->bi_sector;
			clone->buffer = bio_data(bio_src);
			copy_bio_sector_to_req++;
		}
		blk_queue_bounce(clone->q, &bio_src);
	}

	if (rq_data_dir(clone) == READ) {
		error = DM_MAPIO_REMAPPED;
		goto submit_request;
	} else if (rq_data_dir(clone) == WRITE) {
		req_cryptd_queue_crypt(req_io);
		error = DM_MAPIO_SUBMITTED;
		goto submit_request;
	}

submit_request:
	return error;
}
static void req_crypt_dtr(struct dm_target *ti)
{
	DMDEBUG("dm-req-crypt Destructor.\n");

	if (req_crypt_queue) {
		destroy_workqueue(req_crypt_queue);
		req_crypt_queue = NULL;
	}
	if (req_io_pool) {
		mempool_destroy(req_io_pool);
		req_io_pool = NULL;
	}
	if (req_page_pool) {
		mempool_destroy(req_page_pool);
		req_page_pool = NULL;
	}
	if (tfm) {
		crypto_free_ablkcipher(tfm);
		tfm = NULL;
	}
}

/*
 * Construct an encryption mapping:
 * <cipher> <key> <iv_offset> <dev_path> <start> [fde_enabled]
 */
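/*
 * Illustrative table line only; the device path, sector count and start
 * sector are assumptions, and the cipher/key/iv_offset fields are passed
 * as placeholders because the actual key is a hardware pipe key set up
 * out of band:
 *
 *   echo "0 8388608 req-crypt qcom-xts(aes) 0 0 /dev/block/mmcblk0p20 0 fde_enabled" | \
 *       dmsetup create encdev
 */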
static int req_crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	unsigned long long tmpll;
	char dummy;
	int err = DM_REQ_CRYPT_ERROR;

	DMDEBUG("dm-req-crypt Constructor.\n");

	if (argc < 5) {
		DMERR(" %s Not enough args\n", __func__);
		err = DM_REQ_CRYPT_ERROR;
		goto ctr_exit;
	}

	if (argv[3]) {
		if (dm_get_device(ti, argv[3],
				dm_table_get_mode(ti->table), &dev)) {
			DMERR(" %s Device Lookup failed\n", __func__);
			err = DM_REQ_CRYPT_ERROR;
			goto ctr_exit;
		}
	} else {
		DMERR(" %s Arg[3] invalid\n", __func__);
		err = DM_REQ_CRYPT_ERROR;
		goto ctr_exit;
	}

	if (argv[4]) {
		if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1) {
			DMERR("%s Invalid device sector\n", __func__);
			err = DM_REQ_CRYPT_ERROR;
			goto ctr_exit;
		}
	} else {
		DMERR(" %s Arg[4] invalid\n", __func__);
		err = DM_REQ_CRYPT_ERROR;
		goto ctr_exit;
	}
	start_sector_orig = tmpll;

	/* Allow backward compatibility: the FDE flag is optional. */
	if (argc >= 6) {
		if (argv[5]) {
			if (!strcmp(argv[5], "fde_enabled"))
				is_fde_enabled = true;
			else
				is_fde_enabled = false;
		} else {
			DMERR(" %s Arg[5] invalid\n", __func__);
			err = DM_REQ_CRYPT_ERROR;
			goto ctr_exit;
		}
	} else {
		DMERR(" %s Arg[5] missing, set FDE enabled.\n", __func__);
		is_fde_enabled = true; /* backward compatible */
	}
	DMDEBUG("%s is_fde_enabled=%d\n", __func__, is_fde_enabled);

	req_crypt_queue = alloc_workqueue("req_cryptd",
					WQ_NON_REENTRANT |
					WQ_HIGHPRI |
					WQ_CPU_INTENSIVE |
					WQ_MEM_RECLAIM,
					1);
	if (!req_crypt_queue) {
		DMERR("%s req_crypt_queue not allocated\n", __func__);
		err = DM_REQ_CRYPT_ERROR;
		goto ctr_exit;
	}

	/* Allocate the crypto ablkcipher and keep the handle */
	tfm = crypto_alloc_ablkcipher("qcom-xts(aes)", 0, 0);
	if (IS_ERR(tfm)) {
		DMERR("%s ablkcipher tfm allocation failed\n",
						__func__);
		err = DM_REQ_CRYPT_ERROR;
		tfm = NULL;
		goto ctr_exit;
	}

	req_io_pool = mempool_create_slab_pool(MIN_IOS, _req_crypt_io_pool);
	if (!req_io_pool) {
		DMERR("%s req_io_pool not allocated\n", __func__);
		err = DM_REQ_CRYPT_ERROR;
		goto ctr_exit;
	}

	req_page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
	if (!req_page_pool) {
		DMERR("%s req_page_pool not allocated\n", __func__);
		err = DM_REQ_CRYPT_ERROR;
		goto ctr_exit;
	}
	err = 0;
ctr_exit:
	if (err != 0) {
		if (req_crypt_queue) {
			destroy_workqueue(req_crypt_queue);
			req_crypt_queue = NULL;
		}
		if (req_io_pool) {
			mempool_destroy(req_io_pool);
			req_io_pool = NULL;
		}
		if (req_page_pool) {
			mempool_destroy(req_page_pool);
			req_page_pool = NULL;
		}
		if (tfm) {
			crypto_free_ablkcipher(tfm);
			tfm = NULL;
		}
	}
	return err;
}
static int req_crypt_iterate_devices(struct dm_target *ti,
				 iterate_devices_callout_fn fn, void *data)
{
	return fn(ti, dev, start_sector_orig, ti->len, data);
}
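
/*
 * Request-based dm target: the map_rq/rq_end_io hooks are used instead of
 * the bio-based map/end_io hooks, so whole requests are handed to the
 * crypto path.
 */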
static struct target_type req_crypt_target = {
	.name			= "req-crypt",
	.version		= {1, 0, 0},
	.module			= THIS_MODULE,
	.ctr			= req_crypt_ctr,
	.dtr			= req_crypt_dtr,
	.map_rq			= req_crypt_map,
	.rq_end_io		= req_crypt_endio,
	.iterate_devices	= req_crypt_iterate_devices,
};
static int __init req_dm_crypt_init(void)
{
	int r;

	_req_crypt_io_pool = KMEM_CACHE(req_dm_crypt_io, 0);
	if (!_req_crypt_io_pool)
		return -ENOMEM;

	r = dm_register_target(&req_crypt_target);
	if (r < 0) {
		DMERR("register failed %d", r);
		kmem_cache_destroy(_req_crypt_io_pool);
		return r;
	}

	DMINFO("dm-req-crypt successfully initialized.\n");

	return r;
}
static void __exit req_dm_crypt_exit(void)
{
	dm_unregister_target(&req_crypt_target);
	kmem_cache_destroy(_req_crypt_io_pool);
}

module_init(req_dm_crypt_init);
module_exit(req_dm_crypt_exit);

MODULE_DESCRIPTION(DM_NAME " target for request based transparent encryption / decryption");
MODULE_LICENSE("GPL v2");