ctr_drbg.c 23 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939
  1. /*
  2. * Copyright (c) 2014, The Linux Foundation. All rights reserved.
  3. *
  4. * This program is free software; you can redistribute it and/or modify
  5. * it under the terms of the GNU General Public License version 2 and
  6. * only version 2 as published by the Free Software Foundation.
  7. *
  8. * This program is distributed in the hope that it will be useful,
  9. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  10. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  11. * GNU General Public License for more details.
  12. */
  13. #include <linux/kernel.h>
  14. #include <linux/module.h>
  15. #include <linux/init.h>
  16. #include <linux/device.h>
  17. #include <linux/platform_device.h>
  18. #include <linux/hw_random.h>
  19. #include <linux/clk.h>
  20. #include <linux/slab.h>
  21. #include <linux/io.h>
  22. #include <linux/err.h>
  23. #include <linux/types.h>
  24. #include <mach/msm_bus.h>
  25. #include <linux/qrng.h>
  26. #include <linux/fs.h>
  27. #include <linux/cdev.h>
  28. #include <linux/errno.h>
  29. #include <linux/crypto.h>
  30. #include <linux/scatterlist.h>
  31. #include <linux/dma-mapping.h>
  32. #include <linux/gfp.h>
  33. #include <linux/string.h>
  34. #include <linux/platform_data/qcom_crypto_device.h>
  35. #include "ctr_drbg.h"
  36. #include "fips_drbg.h"
  37. #define E_FAILURE 0Xffff
  38. #define E_SUCCESS 0
  39. #define AES128_KEY_SIZE (16)
  40. #define AES128_BLOCK_SIZE (16)
  41. #define AES_TEXT_LENGTH (64)
  42. #define MAX_TEXT_LENGTH (2048)
/*
 * Fixed initial key K = 0x00,0x01,...,0x0f for the SP 800-90A
 * Block_Cipher_df BCC step (see block_cipher_df()/df_bcc_func()).
 * The 16-char string literal exactly fills the 16-byte array (the
 * implicit NUL is dropped), which is legal C.
 */
uint8_t df_initial_k[16] = "\x0\x1\x2\x3\x4\x5\x6\x7\x8\x9\xa\xb\xc\xd\xe\xf";
  44. static void _crypto_cipher_test_complete(struct crypto_async_request *req,
  45. int err)
  46. {
  47. struct msm_ctr_tcrypt_result_s *res = NULL;
  48. if (!req)
  49. return;
  50. res = req->data;
  51. if (!res)
  52. return;
  53. if (err == -EINPROGRESS)
  54. return;
  55. res->err = err;
  56. complete(&res->completion);
  57. }
  58. static int ctr_aes_init(struct ctr_drbg_ctx_s *ctx)
  59. {
  60. int status = 0;
  61. ctx->aes_ctx.tfm = crypto_alloc_ablkcipher("qcom-ecb(aes)", 0, 0);
  62. if (IS_ERR(ctx->aes_ctx.tfm) || (NULL == ctx->aes_ctx.tfm)) {
  63. pr_info("%s: qcom-ecb(aes) failed", __func__);
  64. ctx->aes_ctx.tfm = crypto_alloc_ablkcipher("ecb(aes)", 0, 0);
  65. pr_info("ctx->aes_ctx.tfm = %p\n", ctx->aes_ctx.tfm);
  66. if (IS_ERR(ctx->aes_ctx.tfm) || (NULL == ctx->aes_ctx.tfm)) {
  67. pr_err("%s: qcom-ecb(aes) failed\n", __func__);
  68. status = -E_FAILURE;
  69. goto out;
  70. }
  71. }
  72. ctx->aes_ctx.req = ablkcipher_request_alloc(ctx->aes_ctx.tfm,
  73. GFP_KERNEL);
  74. if (IS_ERR(ctx->aes_ctx.req) || (NULL == ctx->aes_ctx.req)) {
  75. pr_info("%s: Failed to allocate request.\n", __func__);
  76. status = -E_FAILURE;
  77. goto clr_tfm;
  78. }
  79. ablkcipher_request_set_callback(ctx->aes_ctx.req,
  80. CRYPTO_TFM_REQ_MAY_BACKLOG,
  81. _crypto_cipher_test_complete,
  82. &ctx->aes_ctx.result);
  83. memset(&ctx->aes_ctx.input, 0, sizeof(struct msm_ctr_buffer_s));
  84. memset(&ctx->aes_ctx.output, 0, sizeof(struct msm_ctr_buffer_s));
  85. /* Allocate memory. */
  86. ctx->aes_ctx.input.virt_addr = kmalloc(AES128_BLOCK_SIZE,
  87. GFP_KERNEL | __GFP_DMA);
  88. if (NULL == ctx->aes_ctx.input.virt_addr) {
  89. pr_debug("%s: Failed to input memory.\n", __func__);
  90. status = -E_FAILURE;
  91. goto clr_req;
  92. }
  93. ctx->aes_ctx.output.virt_addr = kmalloc(AES128_BLOCK_SIZE,
  94. GFP_KERNEL | __GFP_DMA);
  95. if (NULL == ctx->aes_ctx.output.virt_addr) {
  96. pr_debug("%s: Failed to output memory.\n", __func__);
  97. status = -E_FAILURE;
  98. goto clr_input;
  99. }
  100. /*--------------------------------------------------------------------
  101. Set DF AES mode
  102. ----------------------------------------------------------------------*/
  103. ctx->df_aes_ctx.tfm = crypto_alloc_ablkcipher("qcom-ecb(aes)", 0, 0);
  104. if ((NULL == ctx->df_aes_ctx.tfm) || IS_ERR(ctx->df_aes_ctx.tfm)) {
  105. pr_info("%s: qcom-ecb(aes) failed", __func__);
  106. ctx->df_aes_ctx.tfm = crypto_alloc_ablkcipher("ecb(aes)", 0, 0);
  107. if (IS_ERR(ctx->df_aes_ctx.tfm) ||
  108. (NULL == ctx->df_aes_ctx.tfm)) {
  109. pr_err("%s: ecb(aes) failed", __func__);
  110. status = -E_FAILURE;
  111. goto clr_output;
  112. }
  113. }
  114. ctx->df_aes_ctx.req = ablkcipher_request_alloc(ctx->df_aes_ctx.tfm,
  115. GFP_KERNEL);
  116. if (IS_ERR(ctx->df_aes_ctx.req) || (NULL == ctx->df_aes_ctx.req)) {
  117. pr_debug(": Failed to allocate request.\n");
  118. status = -E_FAILURE;
  119. goto clr_df_tfm;
  120. }
  121. ablkcipher_request_set_callback(ctx->df_aes_ctx.req,
  122. CRYPTO_TFM_REQ_MAY_BACKLOG,
  123. _crypto_cipher_test_complete,
  124. &ctx->df_aes_ctx.result);
  125. memset(&ctx->df_aes_ctx.input, 0, sizeof(struct msm_ctr_buffer_s));
  126. memset(&ctx->df_aes_ctx.output, 0, sizeof(struct msm_ctr_buffer_s));
  127. ctx->df_aes_ctx.input.virt_addr = kmalloc(AES128_BLOCK_SIZE,
  128. GFP_KERNEL | __GFP_DMA);
  129. if (NULL == ctx->df_aes_ctx.input.virt_addr) {
  130. pr_debug(": Failed to input memory.\n");
  131. status = -E_FAILURE;
  132. goto clr_df_req;
  133. }
  134. ctx->df_aes_ctx.output.virt_addr = kmalloc(AES128_BLOCK_SIZE,
  135. GFP_KERNEL | __GFP_DMA);
  136. if (NULL == ctx->df_aes_ctx.output.virt_addr) {
  137. pr_debug(": Failed to output memory.\n");
  138. status = -E_FAILURE;
  139. goto clr_df_input;
  140. }
  141. goto out;
  142. clr_df_input:
  143. if (ctx->df_aes_ctx.input.virt_addr) {
  144. kzfree(ctx->df_aes_ctx.input.virt_addr);
  145. ctx->df_aes_ctx.input.virt_addr = NULL;
  146. }
  147. clr_df_req:
  148. if (ctx->df_aes_ctx.req) {
  149. ablkcipher_request_free(ctx->df_aes_ctx.req);
  150. ctx->df_aes_ctx.req = NULL;
  151. }
  152. clr_df_tfm:
  153. if (ctx->df_aes_ctx.tfm) {
  154. crypto_free_ablkcipher(ctx->df_aes_ctx.tfm);
  155. ctx->df_aes_ctx.tfm = NULL;
  156. }
  157. clr_output:
  158. if (ctx->aes_ctx.output.virt_addr) {
  159. kzfree(ctx->aes_ctx.output.virt_addr);
  160. ctx->aes_ctx.output.virt_addr = NULL;
  161. }
  162. clr_input:
  163. if (ctx->aes_ctx.input.virt_addr) {
  164. kzfree(ctx->aes_ctx.input.virt_addr);
  165. ctx->aes_ctx.input.virt_addr = NULL;
  166. }
  167. clr_req:
  168. if (ctx->aes_ctx.req) {
  169. ablkcipher_request_free(ctx->aes_ctx.req);
  170. ctx->aes_ctx.req = NULL;
  171. }
  172. clr_tfm:
  173. if (ctx->aes_ctx.tfm) {
  174. crypto_free_ablkcipher(ctx->aes_ctx.tfm);
  175. ctx->aes_ctx.tfm = NULL;
  176. }
  177. out:
  178. return status;
  179. }
  180. /*
  181. * Increments the V field in *ctx
  182. */
  183. static void increment_V(struct ctr_drbg_ctx_s *ctx)
  184. {
  185. unsigned sum = 1;
  186. int i;
  187. uint8_t *p = &ctx->seed.key_V.V[0];
  188. /*
  189. * To make known answer tests work, this has to be done big_endian.
  190. * So we just do it by bytes.
  191. * since we are using AES-128, the key size is 16 bytes.
  192. */
  193. for (i = 15; sum != 0 && i >= 0; --i) {
  194. sum += p[i];
  195. p[i] = (sum & 0xff);
  196. sum >>= 8;
  197. }
  198. return;
  199. }
  200. /*
  201. * The NIST update function. It updates the key and V to new values
  202. * (to prevent backtracking) and optionally stirs in data. data may
  203. * be null, otherwise *data is from 0 to 256 bits long.
  204. * keysched is an optional keyschedule to use as an optimization. It
  205. * must be consistent with the key in *ctx. No changes are made to
  206. * *ctx until it is assured that there will be no failures. Note that
  207. * data_len is in bytes. (That may not be offical NIST
  208. * recommendation, but I do it anyway; they say "or equivalent" and
  209. * this is equivalent enough.)
  210. */
  211. static enum ctr_drbg_status_t
  212. update(struct ctr_drbg_ctx_s *ctx, const uint8_t *data, size_t data_len)
  213. {
  214. uint8_t temp[32];
  215. unsigned int i;
  216. int rc;
  217. struct scatterlist sg_in, sg_out;
  218. for (i = 0; i < 2; ++i) {
  219. increment_V(ctx);
  220. init_completion(&ctx->aes_ctx.result.completion);
  221. /*
  222. * Note: personalize these called routines for
  223. * specific testing.
  224. */
  225. memcpy(ctx->aes_ctx.input.virt_addr,
  226. ctx->seed.key_V.V,
  227. CTR_DRBG_BLOCK_LEN_BYTES);
  228. crypto_ablkcipher_clear_flags(ctx->aes_ctx.tfm, ~0);
  229. /* Encrypt some clear text! */
  230. sg_init_one(&sg_in,
  231. ctx->aes_ctx.input.virt_addr,
  232. AES128_BLOCK_SIZE);
  233. sg_init_one(&sg_out,
  234. ctx->aes_ctx.output.virt_addr,
  235. AES128_BLOCK_SIZE);
  236. ablkcipher_request_set_crypt(ctx->aes_ctx.req,
  237. &sg_in,
  238. &sg_out,
  239. CTR_DRBG_BLOCK_LEN_BYTES,
  240. NULL);
  241. rc = crypto_ablkcipher_encrypt(ctx->aes_ctx.req);
  242. switch (rc) {
  243. case 0:
  244. break;
  245. case -EINPROGRESS:
  246. case -EBUSY:
  247. rc = wait_for_completion_interruptible(
  248. &ctx->aes_ctx.result.completion);
  249. if (!rc && !ctx->aes_ctx.result.err) {
  250. INIT_COMPLETION(ctx->aes_ctx.result.completion);
  251. break;
  252. }
  253. /* fall through */
  254. default:
  255. pr_debug("crypto_ablkcipher_encrypt returned");
  256. pr_debug(" with %d result %d on iteration\n",
  257. rc,
  258. ctx->aes_ctx.result.err);
  259. break;
  260. }
  261. init_completion(&ctx->aes_ctx.result.completion);
  262. memcpy(temp + AES128_BLOCK_SIZE * i,
  263. ctx->aes_ctx.output.virt_addr,
  264. AES128_BLOCK_SIZE);
  265. }
  266. if (data_len > 0)
  267. pr_debug("in upadte, data_len = %zu\n", data_len);
  268. for (i = 0; i < data_len; ++i)
  269. ctx->seed.as_bytes[i] = temp[i] ^ data[i];
  270. /* now copy the rest of temp to key and V */
  271. if (32 > data_len) {
  272. memcpy(ctx->seed.as_bytes + data_len,
  273. temp + data_len,
  274. 32 - data_len);
  275. }
  276. memset(temp, 0, 32);
  277. return CTR_DRBG_SUCCESS;
  278. }
  279. /*
  280. * Reseeds the CTR_DRBG instance with entropy. entropy_len_bits must
  281. * be exactly 256.
  282. */
  283. enum ctr_drbg_status_t ctr_drbg_reseed(struct ctr_drbg_ctx_s *ctx,
  284. const void *entropy,
  285. size_t entropy_len_bits)
  286. {
  287. enum ctr_drbg_status_t update_rv;
  288. uint8_t seed_material[32];
  289. int rc;
  290. if (ctx == NULL || entropy == NULL)
  291. return CTR_DRBG_INVALID_ARG;
  292. update_rv = block_cipher_df(ctx,
  293. (uint8_t *)entropy,
  294. (entropy_len_bits / 8),
  295. seed_material,
  296. 32
  297. );
  298. if (CTR_DRBG_SUCCESS != update_rv) {
  299. memset(seed_material, 0, 32);
  300. return CTR_DRBG_GENERAL_ERROR;
  301. }
  302. rc = crypto_ablkcipher_setkey(ctx->aes_ctx.tfm,
  303. ctx->seed.key_V.key,
  304. AES128_KEY_SIZE
  305. );
  306. if (rc) {
  307. memset(seed_material, 0, 32);
  308. pr_debug("set-key in Instantiate failed, returns with %d", rc);
  309. return CTR_DRBG_GENERAL_ERROR;
  310. }
  311. pr_debug("ctr_drbg_reseed, to call update\n");
  312. update_rv = update(ctx, (const uint8_t *)seed_material, 32);
  313. pr_debug("ctr_drbg_reseed, after called update\n");
  314. if (update_rv != CTR_DRBG_SUCCESS) {
  315. memset(seed_material, 0, 32);
  316. return update_rv;
  317. }
  318. ctx->reseed_counter = 1; /* think 0 but SP 800-90 says 1 */
  319. memset(seed_material, 0, 32);
  320. return CTR_DRBG_SUCCESS;
  321. }
  322. /*
  323. * The NIST instantiate function. entropy_len_bits must be exactly
  324. * 256. After reseed_interval generate requests, generated requests
  325. * will fail until the CTR_DRBG instance is reseeded. As per NIST SP
  326. * 800-90, an error is returned if reseed_interval > 2^48.
  327. */
  328. enum ctr_drbg_status_t
  329. ctr_drbg_instantiate(struct ctr_drbg_ctx_s *ctx,
  330. const uint8_t *entropy,
  331. size_t entropy_len_bits,
  332. const uint8_t *nonce,
  333. size_t nonce_len_bits,
  334. unsigned long long reseed_interval)
  335. {
  336. enum ctr_drbg_status_t update_rv;
  337. uint8_t seed_material[32];
  338. uint8_t df_input[32];
  339. int rc;
  340. if (ctx == NULL || entropy == NULL || nonce == NULL)
  341. return CTR_DRBG_INVALID_ARG;
  342. if (((nonce_len_bits / 8) + (entropy_len_bits / 8)) > 32) {
  343. pr_info("\nentropy_len_bits + nonce_len_bits is too long!");
  344. pr_info("\nnonce len: %zu, entropy: %zu\n",
  345. nonce_len_bits, entropy_len_bits);
  346. return CTR_DRBG_INVALID_ARG + 1;
  347. }
  348. if (reseed_interval > (1ULL << 48))
  349. return CTR_DRBG_INVALID_ARG + 2;
  350. ctr_aes_init(ctx);
  351. memset(ctx->seed.as_bytes, 0, sizeof(ctx->seed.as_bytes));
  352. memcpy(df_input, (uint8_t *)entropy, entropy_len_bits / 8);
  353. memcpy(df_input + (entropy_len_bits / 8), nonce, nonce_len_bits / 8);
  354. update_rv = block_cipher_df(ctx, df_input, 24, seed_material, 32);
  355. memset(df_input, 0, 32);
  356. if (CTR_DRBG_SUCCESS != update_rv) {
  357. pr_debug("block_cipher_df failed, returns %d", update_rv);
  358. memset(seed_material, 0, 32);
  359. return CTR_DRBG_GENERAL_ERROR;
  360. }
  361. rc = crypto_ablkcipher_setkey(ctx->aes_ctx.tfm,
  362. ctx->seed.key_V.key,
  363. AES128_KEY_SIZE);
  364. if (rc) {
  365. pr_debug("crypto_ablkcipher_setkey API failed: %d", rc);
  366. memset(seed_material, 0, 32);
  367. return CTR_DRBG_GENERAL_ERROR;
  368. }
  369. update_rv = update(ctx, (const uint8_t *)seed_material, 32);
  370. if (update_rv != CTR_DRBG_SUCCESS) {
  371. memset(seed_material, 0, 32);
  372. return update_rv;
  373. }
  374. ctx->reseed_counter = 1; /* think 0 but SP 800-90 says 1 */
  375. ctx->reseed_interval = reseed_interval;
  376. memset(seed_material, 0, 32);
  377. pr_debug(" return from ctr_drbg_instantiate\n");
  378. return CTR_DRBG_SUCCESS;
  379. }
/*
 * Generate random bits. len_bits is specified in bits, as required by
 * NIST SP800-90. It fails with CTR_DRBG_NEEDS_RESEED if the number
 * of generates since instantiation or the last reseed >= the
 * reseed_interval supplied at instantiation. len_bits must be a
 * multiple of 8. len_bits must not exceed 2^19, as per NIST SP
 * 800-90. Optionally stirs in additional_input which is
 * additional_input_len_bits long, and is silently rounded up to a
 * multiple of 8. CTR_DRBG_INVALID_ARG is returned if any pointer arg
 * is null and the corresponding length is non-zero or if
 * additional_input_len_bits > 256.
 */
enum ctr_drbg_status_t
ctr_drbg_generate_w_data(struct ctr_drbg_ctx_s *ctx,
		void *additional_input,
		size_t additional_input_len_bits,
		void *buffer,
		size_t len_bits)
{
	/* Number of 128-bit AES blocks needed to cover len_bits. */
	size_t total_blocks = (len_bits + 127) / 128;
	enum ctr_drbg_status_t update_rv;
	int rv = 0;
	size_t i;
	int rc;
	struct scatterlist sg_in, sg_out;

	/* Argument validation per the SP 800-90 limits above. */
	if (ctx == NULL)
		return CTR_DRBG_INVALID_ARG;
	if (buffer == NULL && len_bits > 0)
		return CTR_DRBG_INVALID_ARG;
	if (len_bits % 8 != 0)
		return CTR_DRBG_INVALID_ARG;
	if (len_bits > (1<<19))
		return CTR_DRBG_INVALID_ARG;
	if ((additional_input == NULL && additional_input_len_bits > 0) ||
		additional_input_len_bits > CTR_DRBG_SEED_LEN_BITS)
		return CTR_DRBG_INVALID_ARG;
	if (ctx->reseed_counter > ctx->reseed_interval)
		return CTR_DRBG_NEEDS_RESEED;

	/* Load the current DRBG key into the AES transform. */
	rc = crypto_ablkcipher_setkey(ctx->aes_ctx.tfm,
				ctx->seed.key_V.key,
				AES128_KEY_SIZE);
	if (rc) {
		pr_debug("crypto_ablkcipher_setkey API failed: %d", rc);
		return CTR_DRBG_GENERAL_ERROR;
	}
	/* NOTE(review): rv is still 0 here, so this check is dead code. */
	if (rv < 0)
		return CTR_DRBG_GENERAL_ERROR;

	/*
	 * Continuous-test priming: generate one block solely to seed
	 * prev_drn, so every subsequent output block can be compared
	 * against its predecessor (duplicate => error below).
	 */
	if (!ctx->continuous_test_started) {
		increment_V(ctx);
		init_completion(&ctx->aes_ctx.result.completion);
		crypto_ablkcipher_clear_flags(ctx->aes_ctx.tfm, ~0);
		memcpy(ctx->aes_ctx.input.virt_addr, ctx->seed.key_V.V, 16);
		sg_init_one(&sg_in, ctx->aes_ctx.input.virt_addr, 16);
		sg_init_one(&sg_out, ctx->aes_ctx.output.virt_addr, 16);
		ablkcipher_request_set_crypt(ctx->aes_ctx.req, &sg_in, &sg_out,
					CTR_DRBG_BLOCK_LEN_BYTES, NULL);
		rc = crypto_ablkcipher_encrypt(ctx->aes_ctx.req);
		switch (rc) {
		case 0:
			break;
		case -EINPROGRESS:
		case -EBUSY:
			/* Async submission: wait for the callback. */
			rc = wait_for_completion_interruptible(
				&ctx->aes_ctx.result.completion);
			if (!rc && !ctx->aes_ctx.result.err) {
				INIT_COMPLETION(ctx->aes_ctx.result.completion);
				break;
			}
			/* fall through */
		default:
			/* NOTE(review): the error is logged but not
			 * propagated; execution continues regardless. */
			pr_debug(":crypto_ablkcipher_encrypt returned with %d result %d on iteration\n",
				rc,
				ctx->aes_ctx.result.err);
			break;
		}
		init_completion(&ctx->aes_ctx.result.completion);
		memcpy(ctx->prev_drn, ctx->aes_ctx.output.virt_addr, 16);
		ctx->continuous_test_started = 1;
	}
	/* Generate the output */
	for (i = 0; i < total_blocks; ++i) {
		/* Increment the counter */
		increment_V(ctx);
		if (((len_bits % 128) != 0) && (i == (total_blocks - 1))) {
			/* last block and it's a fragment */
			init_completion(&ctx->aes_ctx.result.completion);
			/*
			 * Note: personalize these called routines for
			 * specific testing.
			 */
			crypto_ablkcipher_clear_flags(ctx->aes_ctx.tfm, ~0);
			/* Encrypt some clear text! */
			memcpy(ctx->aes_ctx.input.virt_addr,
				ctx->seed.key_V.V,
				16);
			sg_init_one(&sg_in,
				ctx->aes_ctx.input.virt_addr,
				16);
			sg_init_one(&sg_out,
				ctx->aes_ctx.output.virt_addr,
				16);
			ablkcipher_request_set_crypt(ctx->aes_ctx.req,
						&sg_in,
						&sg_out,
						CTR_DRBG_BLOCK_LEN_BYTES,
						NULL);
			rc = crypto_ablkcipher_encrypt(ctx->aes_ctx.req);
			switch (rc) {
			case 0:
				break;
			case -EINPROGRESS:
			case -EBUSY:
				rc = wait_for_completion_interruptible(
					&ctx->aes_ctx.result.completion);
				if (!rc && !ctx->aes_ctx.result.err) {
					INIT_COMPLETION(
					ctx->aes_ctx.result.completion);
					break;
				}
				/* fall through */
			default:
				break;
			}
			init_completion(&ctx->aes_ctx.result.completion);
			/* Continuous test: a block equal to the previous
			 * one indicates a stuck generator. */
			if (!memcmp(ctx->prev_drn,
				ctx->aes_ctx.output.virt_addr,
				16))
				return CTR_DRBG_GENERAL_ERROR;
			else
				memcpy(ctx->prev_drn,
					ctx->aes_ctx.output.virt_addr,
					16);
			rv = 0;
			/* Copy only the requested fragment of the block. */
			memcpy((uint8_t *)buffer + 16*i,
				ctx->aes_ctx.output.virt_addr,
				(len_bits % 128)/8);
		} else {
			/* normal case: encrypt direct to target buffer */
			init_completion(&ctx->aes_ctx.result.completion);
			/*
			 * Note: personalize these called routines for
			 * specific testing.
			 */
			crypto_ablkcipher_clear_flags(ctx->aes_ctx.tfm, ~0);
			/* Encrypt some clear text! */
			memcpy(ctx->aes_ctx.input.virt_addr,
				ctx->seed.key_V.V,
				16);
			sg_init_one(&sg_in,
				ctx->aes_ctx.input.virt_addr,
				16);
			sg_init_one(&sg_out,
				ctx->aes_ctx.output.virt_addr,
				16);
			ablkcipher_request_set_crypt(ctx->aes_ctx.req,
						&sg_in,
						&sg_out,
						CTR_DRBG_BLOCK_LEN_BYTES,
						NULL);
			rc = crypto_ablkcipher_encrypt(ctx->aes_ctx.req);
			switch (rc) {
			case 0:
				break;
			case -EINPROGRESS:
			case -EBUSY:
				rc = wait_for_completion_interruptible(
					&ctx->aes_ctx.result.completion);
				if (!rc && !ctx->aes_ctx.result.err) {
					INIT_COMPLETION(
					ctx->aes_ctx.result.completion);
					break;
				}
				/* fall through */
			default:
				break;
			}
			/* Continuous test, as above. */
			if (!memcmp(ctx->prev_drn,
				ctx->aes_ctx.output.virt_addr,
				16))
				return CTR_DRBG_GENERAL_ERROR;
			else
				memcpy(ctx->prev_drn,
					ctx->aes_ctx.output.virt_addr,
					16);
			memcpy((uint8_t *)buffer + 16*i,
				ctx->aes_ctx.output.virt_addr,
				16);
			rv = 0;
		}
	}
	/* Post-generation update (backtracking resistance), stirring in
	 * any additional input. */
	update_rv = update(ctx,
		additional_input,
		(additional_input_len_bits + 7) / 8); /* round up */
	if (update_rv != CTR_DRBG_SUCCESS)
		return update_rv;
	ctx->reseed_counter += 1;
	return CTR_DRBG_SUCCESS;
}
  578. /*
  579. * Generate random bits, but with no provided data. See notes on
  580. * ctr_drbg_generate_w_data()
  581. */
  582. enum ctr_drbg_status_t
  583. ctr_drbg_generate(struct ctr_drbg_ctx_s *ctx,
  584. void *buffer,
  585. size_t len_bits)
  586. {
  587. return ctr_drbg_generate_w_data(ctx, NULL, 0, buffer, len_bits);
  588. }
  589. void ctr_aes_deinit(struct ctr_drbg_ctx_s *ctx)
  590. {
  591. if (ctx->aes_ctx.req) {
  592. ablkcipher_request_free(ctx->aes_ctx.req);
  593. ctx->aes_ctx.req = NULL;
  594. }
  595. if (ctx->aes_ctx.tfm) {
  596. crypto_free_ablkcipher(ctx->aes_ctx.tfm);
  597. ctx->aes_ctx.tfm = NULL;
  598. }
  599. if (ctx->aes_ctx.input.virt_addr) {
  600. kzfree(ctx->aes_ctx.input.virt_addr);
  601. ctx->aes_ctx.input.virt_addr = NULL;
  602. }
  603. if (ctx->aes_ctx.output.virt_addr) {
  604. kzfree(ctx->aes_ctx.output.virt_addr);
  605. ctx->aes_ctx.output.virt_addr = NULL;
  606. }
  607. if (ctx->df_aes_ctx.req) {
  608. ablkcipher_request_free(ctx->df_aes_ctx.req);
  609. ctx->df_aes_ctx.req = NULL;
  610. }
  611. if (ctx->df_aes_ctx.tfm) {
  612. crypto_free_ablkcipher(ctx->df_aes_ctx.tfm);
  613. ctx->df_aes_ctx.tfm = NULL;
  614. }
  615. if (ctx->df_aes_ctx.input.virt_addr) {
  616. kzfree(ctx->df_aes_ctx.input.virt_addr);
  617. ctx->df_aes_ctx.input.virt_addr = NULL;
  618. }
  619. if (ctx->df_aes_ctx.output.virt_addr) {
  620. kzfree(ctx->df_aes_ctx.output.virt_addr);
  621. ctx->df_aes_ctx.output.virt_addr = NULL;
  622. }
  623. }
  624. /*
  625. * Zeroizes the context structure. In some future implemenation it
  626. * could also free resources. So do call it.
  627. */
  628. void
  629. ctr_drbg_uninstantiate(struct ctr_drbg_ctx_s *ctx)
  630. {
  631. ctr_aes_deinit(ctx);
  632. memset(ctx, 0, sizeof(*ctx));
  633. }
  634. /*
  635. * the derivation functions to handle biased entropy input.
  636. */
  637. enum ctr_drbg_status_t df_bcc_func(struct ctr_drbg_ctx_s *ctx,
  638. uint8_t *key,
  639. uint8_t *input,
  640. uint32_t input_size,
  641. uint8_t *output)
  642. {
  643. enum ctr_drbg_status_t ret_val = CTR_DRBG_SUCCESS;
  644. uint8_t *p;
  645. int rc;
  646. int i;
  647. int n;
  648. struct scatterlist sg_in, sg_out;
  649. if (0 != (input_size % CTR_DRBG_BLOCK_LEN_BYTES))
  650. return CTR_DRBG_INVALID_ARG;
  651. n = input_size / CTR_DRBG_BLOCK_LEN_BYTES;
  652. for (i = 0; i < CTR_DRBG_BLOCK_LEN_BYTES; i++)
  653. ctx->df_aes_ctx.output.virt_addr[i] = 0;
  654. rc = crypto_ablkcipher_setkey(ctx->df_aes_ctx.tfm,
  655. key,
  656. AES128_KEY_SIZE);
  657. if (rc) {
  658. pr_debug("crypto_ablkcipher_setkey API failed: %d\n", rc);
  659. return CTR_DRBG_GENERAL_ERROR;
  660. }
  661. p = input;
  662. while (n > 0) {
  663. for (i = 0; i < CTR_DRBG_BLOCK_LEN_BYTES; i++, p++)
  664. ctx->df_aes_ctx.input.virt_addr[i] =
  665. ctx->df_aes_ctx.output.virt_addr[i] ^ (*p);
  666. init_completion(&ctx->df_aes_ctx.result.completion);
  667. /*
  668. * Note: personalize these called routines for
  669. * specific testing.
  670. */
  671. crypto_ablkcipher_clear_flags(ctx->df_aes_ctx.tfm, ~0);
  672. /* Encrypt some clear text! */
  673. sg_init_one(&sg_in, ctx->df_aes_ctx.input.virt_addr, 16);
  674. sg_init_one(&sg_out, ctx->df_aes_ctx.output.virt_addr, 16);
  675. ablkcipher_request_set_crypt(ctx->df_aes_ctx.req,
  676. &sg_in,
  677. &sg_out,
  678. CTR_DRBG_BLOCK_LEN_BYTES,
  679. NULL);
  680. rc = crypto_ablkcipher_encrypt(ctx->df_aes_ctx.req);
  681. switch (rc) {
  682. case 0:
  683. break;
  684. case -EINPROGRESS:
  685. case -EBUSY:
  686. rc = wait_for_completion_interruptible(
  687. &ctx->df_aes_ctx.result.completion);
  688. if (!rc && !ctx->df_aes_ctx.result.err) {
  689. INIT_COMPLETION(
  690. ctx->df_aes_ctx.result.completion);
  691. break;
  692. }
  693. /* fall through */
  694. default:
  695. break;
  696. }
  697. init_completion(&ctx->df_aes_ctx.result.completion);
  698. n--;
  699. }
  700. for (i = 0; i < CTR_DRBG_BLOCK_LEN_BYTES; i++)
  701. output[i] = ctx->df_aes_ctx.output.virt_addr[i];
  702. return ret_val;
  703. }
  704. /* output_size must <= 512 bits (<= 64) */
  705. enum ctr_drbg_status_t
  706. block_cipher_df(struct ctr_drbg_ctx_s *ctx,
  707. const uint8_t *input,
  708. uint32_t input_size,
  709. uint8_t *output,
  710. uint32_t output_size)
  711. {
  712. enum ctr_drbg_status_t ret_val = CTR_DRBG_SUCCESS;
  713. uint32_t s_len = 0;
  714. uint32_t s_pad_len = 0;
  715. uint8_t temp[32];
  716. uint32_t out_len = 0;
  717. uint8_t siv_string[64];
  718. uint8_t *p_s_string = NULL;
  719. int rc;
  720. struct scatterlist sg_in, sg_out;
  721. if (output_size > 64)
  722. return CTR_DRBG_INVALID_ARG;
  723. s_len = input_size + 9;
  724. s_pad_len = s_len % 16;
  725. if (0 != s_pad_len)
  726. s_len += (16 - s_pad_len);
  727. /* add the length of IV */
  728. s_len += 16;
  729. if (s_len > 64)
  730. pr_debug("error! s_len is too big!!!!!!!!!!!!\n");
  731. memset(siv_string, 0, 64);
  732. p_s_string = siv_string + 16;
  733. p_s_string[3] = input_size;
  734. p_s_string[7] = output_size;
  735. memcpy(p_s_string + 8, input, input_size);
  736. p_s_string[8 + input_size] = 0x80;
  737. if (0 < s_pad_len)
  738. memset(p_s_string + 9 + input_size, '\0', s_pad_len);
  739. ret_val = df_bcc_func(ctx, df_initial_k, siv_string, s_len, temp);
  740. if (CTR_DRBG_SUCCESS != ret_val) {
  741. pr_debug("df_bcc_func failed, returned %d", ret_val);
  742. goto out;
  743. }
  744. siv_string[3] = 0x1;
  745. ret_val = df_bcc_func(ctx, df_initial_k, siv_string, s_len, temp + 16);
  746. if (CTR_DRBG_SUCCESS != ret_val)
  747. goto out;
  748. out_len = 0;
  749. rc = crypto_ablkcipher_setkey(ctx->df_aes_ctx.tfm,
  750. temp,
  751. AES128_KEY_SIZE);
  752. if (rc) {
  753. pr_debug("crypto_ablkcipher_setkey API failed: %d", rc);
  754. goto out;
  755. }
  756. memcpy(ctx->df_aes_ctx.input.virt_addr, temp + 16, 16);
  757. while (out_len < output_size) {
  758. init_completion(&ctx->df_aes_ctx.result.completion);
  759. /*
  760. * Note: personalize these called routines for
  761. * specific testing.
  762. */
  763. crypto_ablkcipher_clear_flags(ctx->df_aes_ctx.tfm, ~0);
  764. /* Encrypt some clear text! */
  765. sg_init_one(&sg_in, ctx->df_aes_ctx.input.virt_addr, 16);
  766. sg_init_one(&sg_out, ctx->df_aes_ctx.output.virt_addr, 16);
  767. ablkcipher_request_set_crypt(ctx->df_aes_ctx.req,
  768. &sg_in,
  769. &sg_out,
  770. CTR_DRBG_BLOCK_LEN_BYTES,
  771. NULL);
  772. rc = crypto_ablkcipher_encrypt(ctx->df_aes_ctx.req);
  773. switch (rc) {
  774. case 0:
  775. break;
  776. case -EINPROGRESS:
  777. case -EBUSY:
  778. rc = wait_for_completion_interruptible(
  779. &ctx->df_aes_ctx.result.completion);
  780. if (!rc && !ctx->df_aes_ctx.result.err) {
  781. INIT_COMPLETION(
  782. ctx->df_aes_ctx.result.completion);
  783. break;
  784. }
  785. /* fall through */
  786. default:
  787. break;
  788. }
  789. init_completion(&ctx->df_aes_ctx.result.completion);
  790. memcpy(output + out_len, ctx->df_aes_ctx.output.virt_addr, 16);
  791. memcpy(ctx->df_aes_ctx.input.virt_addr, output + out_len, 16);
  792. out_len += 16;
  793. }
  794. out:
  795. memset(siv_string, 0, 64);
  796. memset(temp, 0, 32);
  797. return ret_val;
  798. }