/*
 * algif_skcipher: User-space interface for skcipher algorithms
 *
 * This file provides the user-space API for symmetric key ciphers.
 *
 * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
#include <crypto/scatterwalk.h>
#include <crypto/skcipher.h>
#include <crypto/if_alg.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/net.h>
#include <net/sock.h>

struct skcipher_sg_list {
	struct list_head list;
	int cur;
	struct scatterlist sg[0];
};

struct skcipher_tfm {
	struct crypto_skcipher *skcipher;
	bool has_key;
};

struct skcipher_ctx {
	struct list_head tsgl;
	struct af_alg_sgl rsgl;

	void *iv;

	struct af_alg_completion completion;

	atomic_t inflight;
	size_t used;

	unsigned int len;
	bool more;
	bool merge;
	bool enc;

	struct skcipher_request req;
};

struct skcipher_async_rsgl {
	struct af_alg_sgl sgl;
	struct list_head list;
};

struct skcipher_async_req {
	struct kiocb *iocb;
	struct skcipher_async_rsgl first_sgl;
	struct list_head list;
	struct scatterlist *tsg;
	atomic_t *inflight;
	struct skcipher_request req;
};

#define MAX_SGL_ENTS ((4096 - sizeof(struct skcipher_sg_list)) / \
		      sizeof(struct scatterlist) - 1)
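
/*
 * Note: MAX_SGL_ENTS sizes each skcipher_sg_list so that the list header
 * plus (MAX_SGL_ENTS + 1) scatterlist entries fit in one 4096-byte page;
 * the extra entry is reserved for chaining to the next list via sg_chain()
 * in skcipher_alloc_sgl().
 */
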
static void skcipher_free_async_sgls(struct skcipher_async_req *sreq)
{
	struct skcipher_async_rsgl *rsgl, *tmp;
	struct scatterlist *sgl;
	struct scatterlist *sg;
	int i, n;

	list_for_each_entry_safe(rsgl, tmp, &sreq->list, list) {
		af_alg_free_sg(&rsgl->sgl);
		if (rsgl != &sreq->first_sgl)
			kfree(rsgl);
	}
	sgl = sreq->tsg;
	n = sg_nents(sgl);
	for_each_sg(sgl, sg, n, i) {
		struct page *page = sg_page(sg);

		/* some SGs may not have a page mapped */
		if (page && page_ref_count(page))
			put_page(page);
	}

	kfree(sreq->tsg);
}

static void skcipher_async_cb(struct crypto_async_request *req, int err)
{
	struct skcipher_async_req *sreq = req->data;
	struct kiocb *iocb = sreq->iocb;

	atomic_dec(sreq->inflight);
	skcipher_free_async_sgls(sreq);
	kzfree(sreq);
	iocb->ki_complete(iocb, err, err);
}

static inline int skcipher_sndbuf(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;

	return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) -
			  ctx->used, 0);
}

static inline bool skcipher_writable(struct sock *sk)
{
	return PAGE_SIZE <= skcipher_sndbuf(sk);
}

static int skcipher_alloc_sgl(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_sg_list *sgl;
	struct scatterlist *sg = NULL;

	sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
	if (!list_empty(&ctx->tsgl))
		sg = sgl->sg;

	if (!sg || sgl->cur >= MAX_SGL_ENTS) {
		sgl = sock_kmalloc(sk, sizeof(*sgl) +
				       sizeof(sgl->sg[0]) * (MAX_SGL_ENTS + 1),
				   GFP_KERNEL);
		if (!sgl)
			return -ENOMEM;

		sg_init_table(sgl->sg, MAX_SGL_ENTS + 1);
		sgl->cur = 0;

		if (sg) {
			sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg);
			sg_unmark_end(sg + (MAX_SGL_ENTS - 1));
		}

		list_add_tail(&sgl->list, &ctx->tsgl);
	}

	return 0;
}
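
/*
 * Release the first @used bytes of queued data from the tx scatterlists.
 * Pages are dropped only when @put is set; the async receive path passes
 * put == 0 because it keeps its own references and releases them later
 * from skcipher_free_async_sgls().
 */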
static void skcipher_pull_sgl(struct sock *sk, size_t used, int put)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_sg_list *sgl;
	struct scatterlist *sg;
	int i;

	while (!list_empty(&ctx->tsgl)) {
		sgl = list_first_entry(&ctx->tsgl, struct skcipher_sg_list,
				       list);
		sg = sgl->sg;

		for (i = 0; i < sgl->cur; i++) {
			size_t plen = min_t(size_t, used, sg[i].length);

			if (!sg_page(sg + i))
				continue;

			sg[i].length -= plen;
			sg[i].offset += plen;

			used -= plen;
			ctx->used -= plen;

			if (sg[i].length)
				return;
			if (put)
				put_page(sg_page(sg + i));
			sg_assign_page(sg + i, NULL);
		}

		list_del(&sgl->list);
		sock_kfree_s(sk, sgl,
			     sizeof(*sgl) + sizeof(sgl->sg[0]) *
					    (MAX_SGL_ENTS + 1));
	}

	if (!ctx->used)
		ctx->merge = 0;
}

static void skcipher_free_sgl(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;

	skcipher_pull_sgl(sk, ctx->used, 1);
}

static int skcipher_wait_for_wmem(struct sock *sk, unsigned flags)
{
	long timeout;
	DEFINE_WAIT(wait);
	int err = -ERESTARTSYS;

	if (flags & MSG_DONTWAIT)
		return -EAGAIN;

	sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	for (;;) {
		if (signal_pending(current))
			break;
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (sk_wait_event(sk, &timeout, skcipher_writable(sk))) {
			err = 0;
			break;
		}
	}
	finish_wait(sk_sleep(sk), &wait);

	return err;
}

static void skcipher_wmem_wakeup(struct sock *sk)
{
	struct socket_wq *wq;

	if (!skcipher_writable(sk))
		return;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
							   POLLRDNORM |
							   POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
	rcu_read_unlock();
}

static int skcipher_wait_for_data(struct sock *sk, unsigned flags)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	long timeout;
	DEFINE_WAIT(wait);
	int err = -ERESTARTSYS;

	if (flags & MSG_DONTWAIT)
		return -EAGAIN;

	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);

	for (;;) {
		if (signal_pending(current))
			break;
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (sk_wait_event(sk, &timeout, ctx->used)) {
			err = 0;
			break;
		}
	}
	finish_wait(sk_sleep(sk), &wait);

	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);

	return err;
}

static void skcipher_data_wakeup(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct socket_wq *wq;

	if (!ctx->used)
		return;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
							   POLLRDNORM |
							   POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	rcu_read_unlock();
}
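
/*
 * Queue plaintext/ciphertext from user space.  The first sendmsg() of a
 * request may carry control data: ALG_SET_OP selects encryption or
 * decryption, and ALG_SET_IV supplies an IV whose length must equal the
 * cipher's IV size.  MSG_MORE indicates that more data follows before
 * the operation is performed.
 */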
static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t size)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct sock *psk = ask->parent;
	struct alg_sock *pask = alg_sk(psk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_tfm *skc = pask->private;
	struct crypto_skcipher *tfm = skc->skcipher;
	unsigned ivsize = crypto_skcipher_ivsize(tfm);
	struct skcipher_sg_list *sgl;
	struct af_alg_control con = {};
	long copied = 0;
	bool enc = 0;
	bool init = 0;
	int err;
	int i;

	if (msg->msg_controllen) {
		err = af_alg_cmsg_send(msg, &con);
		if (err)
			return err;

		init = 1;
		switch (con.op) {
		case ALG_OP_ENCRYPT:
			enc = 1;
			break;
		case ALG_OP_DECRYPT:
			enc = 0;
			break;
		default:
			return -EINVAL;
		}

		if (con.iv && con.iv->ivlen != ivsize)
			return -EINVAL;
	}

	err = -EINVAL;

	lock_sock(sk);
	if (!ctx->more && ctx->used)
		goto unlock;

	if (init) {
		ctx->enc = enc;
		if (con.iv)
			memcpy(ctx->iv, con.iv->iv, ivsize);
	}

	while (size) {
		struct scatterlist *sg;
		unsigned long len = size;
		size_t plen;

		if (ctx->merge) {
			sgl = list_entry(ctx->tsgl.prev,
					 struct skcipher_sg_list, list);
			sg = sgl->sg + sgl->cur - 1;
			len = min_t(unsigned long, len,
				    PAGE_SIZE - sg->offset - sg->length);

			err = memcpy_from_msg(page_address(sg_page(sg)) +
					      sg->offset + sg->length,
					      msg, len);
			if (err)
				goto unlock;

			sg->length += len;
			ctx->merge = (sg->offset + sg->length) &
				     (PAGE_SIZE - 1);

			ctx->used += len;
			copied += len;
			size -= len;
			continue;
		}

		if (!skcipher_writable(sk)) {
			err = skcipher_wait_for_wmem(sk, msg->msg_flags);
			if (err)
				goto unlock;
		}

		len = min_t(unsigned long, len, skcipher_sndbuf(sk));

		err = skcipher_alloc_sgl(sk);
		if (err)
			goto unlock;

		sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
		sg = sgl->sg;
		if (sgl->cur)
			sg_unmark_end(sg + sgl->cur - 1);
		do {
			i = sgl->cur;
			plen = min_t(size_t, len, PAGE_SIZE);

			sg_assign_page(sg + i, alloc_page(GFP_KERNEL));
			err = -ENOMEM;
			if (!sg_page(sg + i))
				goto unlock;

			err = memcpy_from_msg(page_address(sg_page(sg + i)),
					      msg, plen);
			if (err) {
				__free_page(sg_page(sg + i));
				sg_assign_page(sg + i, NULL);
				goto unlock;
			}

			sg[i].length = plen;
			len -= plen;
			ctx->used += plen;
			copied += plen;
			size -= plen;
			sgl->cur++;
		} while (len && sgl->cur < MAX_SGL_ENTS);

		if (!size)
			sg_mark_end(sg + sgl->cur - 1);

		ctx->merge = plen & (PAGE_SIZE - 1);
	}

	err = 0;

	ctx->more = msg->msg_flags & MSG_MORE;

unlock:
	skcipher_data_wakeup(sk);
	release_sock(sk);

	return copied ?: err;
}

static ssize_t skcipher_sendpage(struct socket *sock, struct page *page,
				 int offset, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_sg_list *sgl;
	int err = -EINVAL;

	if (flags & MSG_SENDPAGE_NOTLAST)
		flags |= MSG_MORE;

	lock_sock(sk);
	if (!ctx->more && ctx->used)
		goto unlock;

	if (!size)
		goto done;

	if (!skcipher_writable(sk)) {
		err = skcipher_wait_for_wmem(sk, flags);
		if (err)
			goto unlock;
	}

	err = skcipher_alloc_sgl(sk);
	if (err)
		goto unlock;

	ctx->merge = 0;
	sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);

	if (sgl->cur)
		sg_unmark_end(sgl->sg + sgl->cur - 1);

	sg_mark_end(sgl->sg + sgl->cur);
	get_page(page);
	sg_set_page(sgl->sg + sgl->cur, page, size, offset);
	sgl->cur++;
	ctx->used += size;

done:
	ctx->more = flags & MSG_MORE;

unlock:
	skcipher_data_wakeup(sk);
	release_sock(sk);

	return err ?: size;
}

static int skcipher_all_sg_nents(struct skcipher_ctx *ctx)
{
	struct skcipher_sg_list *sgl;
	struct scatterlist *sg;
	int nents = 0;

	list_for_each_entry(sgl, &ctx->tsgl, list) {
		sg = sgl->sg;

		while (!sg->length)
			sg++;

		nents += sg_nents(sg);
	}
	return nents;
}
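
/*
 * Asynchronous read path (AIO): ownership of the queued tx scatterlists
 * is transferred to a per-request skcipher_async_req so the cipher can
 * run after this call returns; completion is reported to user space via
 * skcipher_async_cb() and ki_complete().
 */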
static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg,
				  int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct sock *psk = ask->parent;
	struct alg_sock *pask = alg_sk(psk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_tfm *skc = pask->private;
	struct crypto_skcipher *tfm = skc->skcipher;
	struct skcipher_sg_list *sgl;
	struct scatterlist *sg;
	struct skcipher_async_req *sreq;
	struct skcipher_request *req;
	struct skcipher_async_rsgl *last_rsgl = NULL;
	unsigned int txbufs = 0, len = 0, tx_nents;
	unsigned int reqsize = crypto_skcipher_reqsize(tfm);
	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
	int err = -ENOMEM;
	bool mark = false;
	char *iv;

	sreq = kzalloc(sizeof(*sreq) + reqsize + ivsize, GFP_KERNEL);
	if (unlikely(!sreq))
		goto out;

	req = &sreq->req;
	iv = (char *)(req + 1) + reqsize;
	sreq->iocb = msg->msg_iocb;
	INIT_LIST_HEAD(&sreq->list);
	sreq->inflight = &ctx->inflight;

	lock_sock(sk);
	tx_nents = skcipher_all_sg_nents(ctx);
	sreq->tsg = kcalloc(tx_nents, sizeof(*sg), GFP_KERNEL);
	if (unlikely(!sreq->tsg))
		goto unlock;
	sg_init_table(sreq->tsg, tx_nents);
	memcpy(iv, ctx->iv, ivsize);
	skcipher_request_set_tfm(req, tfm);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
				      skcipher_async_cb, sreq);

	while (iov_iter_count(&msg->msg_iter)) {
		struct skcipher_async_rsgl *rsgl;
		int used;

		if (!ctx->used) {
			err = skcipher_wait_for_data(sk, flags);
			if (err)
				goto free;
		}
		sgl = list_first_entry(&ctx->tsgl,
				       struct skcipher_sg_list, list);
		sg = sgl->sg;

		while (!sg->length)
			sg++;

		used = min_t(unsigned long, ctx->used,
			     iov_iter_count(&msg->msg_iter));
		used = min_t(unsigned long, used, sg->length);

		if (txbufs == tx_nents) {
			struct scatterlist *tmp;
			int x;

			/* Ran out of tx slots in async request,
			 * need to expand. */
			tmp = kcalloc(tx_nents * 2, sizeof(*tmp),
				      GFP_KERNEL);
			if (!tmp)
				goto free;

			sg_init_table(tmp, tx_nents * 2);
			for (x = 0; x < tx_nents; x++)
				sg_set_page(&tmp[x], sg_page(&sreq->tsg[x]),
					    sreq->tsg[x].length,
					    sreq->tsg[x].offset);
			kfree(sreq->tsg);
			sreq->tsg = tmp;
			tx_nents *= 2;
			mark = true;
		}
		/* Need to take over the tx sgl from ctx
		 * to the asynch req - these sgls will be freed later */
		sg_set_page(sreq->tsg + txbufs++, sg_page(sg), sg->length,
			    sg->offset);

		if (list_empty(&sreq->list)) {
			rsgl = &sreq->first_sgl;
			list_add_tail(&rsgl->list, &sreq->list);
		} else {
			rsgl = kmalloc(sizeof(*rsgl), GFP_KERNEL);
			if (!rsgl) {
				err = -ENOMEM;
				goto free;
			}
			list_add_tail(&rsgl->list, &sreq->list);
		}

		used = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, used);
		err = used;
		if (used < 0)
			goto free;
		if (last_rsgl)
			af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl);

		last_rsgl = rsgl;
		len += used;
		skcipher_pull_sgl(sk, used, 0);
		iov_iter_advance(&msg->msg_iter, used);
	}

	if (mark)
		sg_mark_end(sreq->tsg + txbufs - 1);

	skcipher_request_set_crypt(req, sreq->tsg, sreq->first_sgl.sgl.sg,
				   len, iv);
	err = ctx->enc ? crypto_skcipher_encrypt(req) :
			 crypto_skcipher_decrypt(req);
	if (err == -EINPROGRESS) {
		atomic_inc(&ctx->inflight);
		err = -EIOCBQUEUED;
		sreq = NULL;
		goto unlock;
	}
free:
	skcipher_free_async_sgls(sreq);
unlock:
	skcipher_wmem_wakeup(sk);
	release_sock(sk);
	kzfree(sreq);
out:
	return err;
}

static int skcipher_recvmsg_sync(struct socket *sock, struct msghdr *msg,
				 int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct sock *psk = ask->parent;
	struct alg_sock *pask = alg_sk(psk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_tfm *skc = pask->private;
	struct crypto_skcipher *tfm = skc->skcipher;
	unsigned bs = crypto_skcipher_blocksize(tfm);
	struct skcipher_sg_list *sgl;
	struct scatterlist *sg;
	int err = -EAGAIN;
	int used;
	long copied = 0;

	lock_sock(sk);
	while (msg_data_left(msg)) {
		if (!ctx->used) {
			err = skcipher_wait_for_data(sk, flags);
			if (err)
				goto unlock;
		}

		used = min_t(unsigned long, ctx->used, msg_data_left(msg));

		used = af_alg_make_sg(&ctx->rsgl, &msg->msg_iter, used);
		err = used;
		if (err < 0)
			goto unlock;

		if (ctx->more || used < ctx->used)
			used -= used % bs;

		err = -EINVAL;
		if (!used)
			goto free;

		sgl = list_first_entry(&ctx->tsgl,
				       struct skcipher_sg_list, list);
		sg = sgl->sg;

		while (!sg->length)
			sg++;

		skcipher_request_set_crypt(&ctx->req, sg, ctx->rsgl.sg, used,
					   ctx->iv);

		err = af_alg_wait_for_completion(
				ctx->enc ?
					crypto_skcipher_encrypt(&ctx->req) :
					crypto_skcipher_decrypt(&ctx->req),
				&ctx->completion);

free:
		af_alg_free_sg(&ctx->rsgl);

		if (err)
			goto unlock;

		copied += used;
		skcipher_pull_sgl(sk, used, 1);
		iov_iter_advance(&msg->msg_iter, used);
	}

	err = 0;

unlock:
	skcipher_wmem_wakeup(sk);
	release_sock(sk);

	return copied ?: err;
}
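
/*
 * Route to the AIO path only for genuinely asynchronous iocbs; plain
 * read()/recv() and synchronous AIO use the blocking implementation.
 */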
static int skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
			    size_t ignored, int flags)
{
	return (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) ?
		skcipher_recvmsg_async(sock, msg, flags) :
		skcipher_recvmsg_sync(sock, msg, flags);
}

static unsigned int skcipher_poll(struct file *file, struct socket *sock,
				  poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	unsigned int mask;

	sock_poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	if (ctx->used)
		mask |= POLLIN | POLLRDNORM;

	if (skcipher_writable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;

	return mask;
}

static struct proto_ops algif_skcipher_ops = {
	.family		=	PF_ALG,

	.connect	=	sock_no_connect,
	.socketpair	=	sock_no_socketpair,
	.getname	=	sock_no_getname,
	.ioctl		=	sock_no_ioctl,
	.listen		=	sock_no_listen,
	.shutdown	=	sock_no_shutdown,
	.getsockopt	=	sock_no_getsockopt,
	.mmap		=	sock_no_mmap,
	.bind		=	sock_no_bind,
	.accept		=	sock_no_accept,
	.setsockopt	=	sock_no_setsockopt,

	.release	=	af_alg_release,
	.sendmsg	=	skcipher_sendmsg,
	.sendpage	=	skcipher_sendpage,
	.recvmsg	=	skcipher_recvmsg,
	.poll		=	skcipher_poll,
};
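
/*
 * The "nokey" entry points below reject I/O until a key has been set on
 * the parent tfm socket.  Once one is present, the child marks itself
 * keyed (ask->refcnt) and pins the parent via pask->refcnt so the keyed
 * tfm cannot go away underneath it.
 */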
static int skcipher_check_key(struct socket *sock)
{
	int err = 0;
	struct sock *psk;
	struct alg_sock *pask;
	struct skcipher_tfm *tfm;
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);

	lock_sock(sk);
	if (ask->refcnt)
		goto unlock_child;

	psk = ask->parent;
	pask = alg_sk(ask->parent);
	tfm = pask->private;

	err = -ENOKEY;
	lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
	if (!tfm->has_key)
		goto unlock;

	if (!pask->refcnt++)
		sock_hold(psk);

	ask->refcnt = 1;
	sock_put(psk);

	err = 0;

unlock:
	release_sock(psk);
unlock_child:
	release_sock(sk);

	return err;
}

static int skcipher_sendmsg_nokey(struct socket *sock, struct msghdr *msg,
				  size_t size)
{
	int err;

	err = skcipher_check_key(sock);
	if (err)
		return err;

	return skcipher_sendmsg(sock, msg, size);
}

static ssize_t skcipher_sendpage_nokey(struct socket *sock, struct page *page,
				       int offset, size_t size, int flags)
{
	int err;

	err = skcipher_check_key(sock);
	if (err)
		return err;

	return skcipher_sendpage(sock, page, offset, size, flags);
}

static int skcipher_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
				  size_t ignored, int flags)
{
	int err;

	err = skcipher_check_key(sock);
	if (err)
		return err;

	return skcipher_recvmsg(sock, msg, ignored, flags);
}

static struct proto_ops algif_skcipher_ops_nokey = {
	.family		=	PF_ALG,

	.connect	=	sock_no_connect,
	.socketpair	=	sock_no_socketpair,
	.getname	=	sock_no_getname,
	.ioctl		=	sock_no_ioctl,
	.listen		=	sock_no_listen,
	.shutdown	=	sock_no_shutdown,
	.getsockopt	=	sock_no_getsockopt,
	.mmap		=	sock_no_mmap,
	.bind		=	sock_no_bind,
	.accept		=	sock_no_accept,
	.setsockopt	=	sock_no_setsockopt,

	.release	=	af_alg_release,
	.sendmsg	=	skcipher_sendmsg_nokey,
	.sendpage	=	skcipher_sendpage_nokey,
	.recvmsg	=	skcipher_recvmsg_nokey,
	.poll		=	skcipher_poll,
};

static void *skcipher_bind(const char *name, u32 type, u32 mask)
{
	struct skcipher_tfm *tfm;
	struct crypto_skcipher *skcipher;

	tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
	if (!tfm)
		return ERR_PTR(-ENOMEM);

	skcipher = crypto_alloc_skcipher(name, type, mask);
	if (IS_ERR(skcipher)) {
		kfree(tfm);
		return ERR_CAST(skcipher);
	}

	tfm->skcipher = skcipher;

	return tfm;
}

static void skcipher_release(void *private)
{
	struct skcipher_tfm *tfm = private;

	crypto_free_skcipher(tfm->skcipher);
	kfree(tfm);
}

static int skcipher_setkey(void *private, const u8 *key, unsigned int keylen)
{
	struct skcipher_tfm *tfm = private;
	int err;

	err = crypto_skcipher_setkey(tfm->skcipher, key, keylen);
	tfm->has_key = !err;

	return err;
}

static void skcipher_wait(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	int ctr = 0;

	while (atomic_read(&ctx->inflight) && ctr++ < 100)
		msleep(100);
}

static void skcipher_sock_destruct(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(&ctx->req);

	if (atomic_read(&ctx->inflight))
		skcipher_wait(sk);

	skcipher_free_sgl(sk);
	sock_kzfree_s(sk, ctx->iv, crypto_skcipher_ivsize(tfm));
	sock_kfree_s(sk, ctx, ctx->len);
	af_alg_release_parent(sk);
}

static int skcipher_accept_parent_nokey(void *private, struct sock *sk)
{
	struct skcipher_ctx *ctx;
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_tfm *tfm = private;
	struct crypto_skcipher *skcipher = tfm->skcipher;
	unsigned int len = sizeof(*ctx) + crypto_skcipher_reqsize(skcipher);

	ctx = sock_kmalloc(sk, len, GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->iv = sock_kmalloc(sk, crypto_skcipher_ivsize(skcipher),
			       GFP_KERNEL);
	if (!ctx->iv) {
		sock_kfree_s(sk, ctx, len);
		return -ENOMEM;
	}

	memset(ctx->iv, 0, crypto_skcipher_ivsize(skcipher));

	INIT_LIST_HEAD(&ctx->tsgl);
	ctx->len = len;
	ctx->used = 0;
	ctx->more = 0;
	ctx->merge = 0;
	ctx->enc = 0;
	atomic_set(&ctx->inflight, 0);
	af_alg_init_completion(&ctx->completion);

	ask->private = ctx;

	skcipher_request_set_tfm(&ctx->req, skcipher);
	skcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_SLEEP |
						 CRYPTO_TFM_REQ_MAY_BACKLOG,
				      af_alg_complete, &ctx->completion);

	sk->sk_destruct = skcipher_sock_destruct;

	return 0;
}

static int skcipher_accept_parent(void *private, struct sock *sk)
{
	struct skcipher_tfm *tfm = private;

	if (!tfm->has_key && crypto_skcipher_has_setkey(tfm->skcipher))
		return -ENOKEY;

	return skcipher_accept_parent_nokey(private, sk);
}

static const struct af_alg_type algif_type_skcipher = {
	.bind		=	skcipher_bind,
	.release	=	skcipher_release,
	.setkey		=	skcipher_setkey,
	.accept		=	skcipher_accept_parent,
	.accept_nokey	=	skcipher_accept_parent_nokey,
	.ops		=	&algif_skcipher_ops,
	.ops_nokey	=	&algif_skcipher_ops_nokey,
	.name		=	"skcipher",
	.owner		=	THIS_MODULE
};

static int __init algif_skcipher_init(void)
{
	return af_alg_register_type(&algif_type_skcipher);
}

static void __exit algif_skcipher_exit(void)
{
	int err = af_alg_unregister_type(&algif_type_skcipher);

	BUG_ON(err);
}

module_init(algif_skcipher_init);
module_exit(algif_skcipher_exit);

MODULE_LICENSE("GPL");
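
/*
 * Example user-space usage (illustrative sketch, not part of this module;
 * kept inside #if 0 so the file still compiles): encrypt one block with
 * cbc(aes) through the AF_ALG skcipher interface served by this module.
 * Error handling is trimmed for brevity; the 16-byte key, IV, and
 * plaintext values are arbitrary placeholders.
 */
#if 0
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "skcipher",	/* handled by this module */
		.salg_name   = "cbc(aes)",
	};
	char key[16] = "0123456789abcdef";	/* placeholder key */
	char iv[16]  = "fedcba9876543210";	/* placeholder IV */
	char pt[16]  = "single AES block";	/* exactly 16 bytes */
	char ct[16];
	char cbuf[CMSG_SPACE(4) +
		  CMSG_SPACE(sizeof(struct af_alg_iv) + 16)] = {};
	struct msghdr msg = {};
	struct cmsghdr *cmsg;
	struct iovec iov = { .iov_base = pt, .iov_len = 16 };
	struct af_alg_iv *aiv;
	int tfmfd, opfd;

	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
	setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key));
	opfd = accept(tfmfd, NULL, 0);		/* child op socket */

	msg.msg_control = cbuf;
	msg.msg_controllen = sizeof(cbuf);
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;

	/* ALG_SET_OP: select encryption (see skcipher_sendmsg()) */
	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type = ALG_SET_OP;
	cmsg->cmsg_len = CMSG_LEN(4);
	*(__u32 *)CMSG_DATA(cmsg) = ALG_OP_ENCRYPT;

	/* ALG_SET_IV: ivlen must equal the cipher's IV size */
	cmsg = CMSG_NXTHDR(&msg, cmsg);
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type = ALG_SET_IV;
	cmsg->cmsg_len = CMSG_LEN(sizeof(*aiv) + 16);
	aiv = (void *)CMSG_DATA(cmsg);
	aiv->ivlen = 16;
	memcpy(aiv->iv, iv, 16);

	sendmsg(opfd, &msg, 0);			/* queue plaintext */
	read(opfd, ct, 16);			/* read back ciphertext */

	close(opfd);
	close(tfmfd);
	return 0;
}
#endif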