omap-crypto.c

/*
 * OMAP Crypto driver common support routines.
 *
 * Copyright (c) 2017 Texas Instruments Incorporated
 * Tero Kristo <t-kristo@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <crypto/scatterwalk.h>

#include "omap-crypto.h"
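
/*
 * Clone the entries of the source scatterlist into new_sg, clamping the
 * overall length to 'total' bytes. Unless OMAP_CRYPTO_FORCE_SINGLE_ENTRY
 * is set, the new list is allocated here and is later released with
 * kfree() by omap_crypto_cleanup(). On success *sg is updated to point
 * at the new list.
 */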
static int omap_crypto_copy_sg_lists(int total, int bs,
				     struct scatterlist **sg,
				     struct scatterlist *new_sg, u16 flags)
{
	int n = sg_nents(*sg);
	struct scatterlist *tmp;

	if (!(flags & OMAP_CRYPTO_FORCE_SINGLE_ENTRY)) {
		new_sg = kmalloc_array(n, sizeof(*sg), GFP_KERNEL);
		if (!new_sg)
			return -ENOMEM;

		sg_init_table(new_sg, n);
	}

	tmp = new_sg;

	while (*sg && total) {
		int len = (*sg)->length;

		if (total < len)
			len = total;

		if (len > 0) {
			total -= len;
			sg_set_page(tmp, sg_page(*sg), len, (*sg)->offset);
			if (total <= 0)
				sg_mark_end(tmp);
			tmp = sg_next(tmp);
		}

		*sg = sg_next(*sg);
	}

	*sg = new_sg;

	return 0;
}
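
/*
 * Bounce the request data into a freshly allocated, block-size aligned
 * contiguous buffer and make *sg a single-entry scatterlist covering it.
 * The data itself is only copied when OMAP_CRYPTO_COPY_DATA is set, and
 * the padding tail is cleared when OMAP_CRYPTO_ZERO_BUF is also set.
 * The pages are freed again by omap_crypto_cleanup().
 */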
static int omap_crypto_copy_sgs(int total, int bs, struct scatterlist **sg,
				struct scatterlist *new_sg, u16 flags)
{
	void *buf;
	int pages;
	int new_len;

	new_len = ALIGN(total, bs);
	pages = get_order(new_len);

	buf = (void *)__get_free_pages(GFP_ATOMIC, pages);
	if (!buf) {
		pr_err("%s: Couldn't allocate pages for unaligned cases.\n",
		       __func__);
		return -ENOMEM;
	}

	if (flags & OMAP_CRYPTO_COPY_DATA) {
		scatterwalk_map_and_copy(buf, *sg, 0, total, 0);
		if (flags & OMAP_CRYPTO_ZERO_BUF)
			memset(buf + total, 0, new_len - total);
	}

	if (!(flags & OMAP_CRYPTO_FORCE_SINGLE_ENTRY))
		sg_init_table(new_sg, 1);

	sg_set_buf(new_sg, buf, new_len);

	*sg = new_sg;

	return 0;
}
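
/*
 * Check whether the scatterlist can be handed to the DMA engine as-is:
 * the total length and every entry length must be multiples of the block
 * size, every entry must start on a word boundary, and the list must add
 * up to exactly 'total' bytes (with a single entry only, if
 * OMAP_CRYPTO_FORCE_SINGLE_ENTRY is set). Returns 0 when the list is
 * usable, otherwise OMAP_CRYPTO_NOT_ALIGNED or
 * OMAP_CRYPTO_BAD_DATA_LENGTH.
 */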
static int omap_crypto_check_sg(struct scatterlist *sg, int total, int bs,
				u16 flags)
{
	int len = 0;
	int num_sg = 0;

	if (!IS_ALIGNED(total, bs))
		return OMAP_CRYPTO_NOT_ALIGNED;

	while (sg) {
		num_sg++;

		if (!IS_ALIGNED(sg->offset, 4))
			return OMAP_CRYPTO_NOT_ALIGNED;
		if (!IS_ALIGNED(sg->length, bs))
			return OMAP_CRYPTO_NOT_ALIGNED;

		len += sg->length;
		sg = sg_next(sg);

		if (len >= total)
			break;
	}

	if ((flags & OMAP_CRYPTO_FORCE_SINGLE_ENTRY) && num_sg > 1)
		return OMAP_CRYPTO_NOT_ALIGNED;

	if (len != total)
		return OMAP_CRYPTO_BAD_DATA_LENGTH;

	return 0;
}
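
/*
 * Make a scatterlist acceptable for the OMAP crypto DMA engines. If the
 * list fails the alignment check (or OMAP_CRYPTO_FORCE_COPY is set), the
 * data is bounced into an aligned buffer; if only the list layout is
 * wrong, the scatterlist itself is cloned. The action taken is recorded
 * in *dd_flags, shifted by flags_shift, so that omap_crypto_cleanup()
 * can undo it once the request has completed.
 */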
int omap_crypto_align_sg(struct scatterlist **sg, int total, int bs,
			 struct scatterlist *new_sg, u16 flags,
			 u8 flags_shift, unsigned long *dd_flags)
{
	int ret;

	*dd_flags &= ~(OMAP_CRYPTO_COPY_MASK << flags_shift);

	if (flags & OMAP_CRYPTO_FORCE_COPY)
		ret = OMAP_CRYPTO_NOT_ALIGNED;
	else
		ret = omap_crypto_check_sg(*sg, total, bs, flags);

	if (ret == OMAP_CRYPTO_NOT_ALIGNED) {
		ret = omap_crypto_copy_sgs(total, bs, sg, new_sg, flags);
		if (ret)
			return ret;
		*dd_flags |= OMAP_CRYPTO_DATA_COPIED << flags_shift;
	} else if (ret == OMAP_CRYPTO_BAD_DATA_LENGTH) {
		ret = omap_crypto_copy_sg_lists(total, bs, sg, new_sg, flags);
		if (ret)
			return ret;
		if (!(flags & OMAP_CRYPTO_FORCE_SINGLE_ENTRY))
			*dd_flags |= OMAP_CRYPTO_SG_COPIED << flags_shift;
	} else if (flags & OMAP_CRYPTO_FORCE_SINGLE_ENTRY) {
		sg_set_buf(new_sg, sg_virt(*sg), (*sg)->length);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(omap_crypto_align_sg);
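
/*
 * Undo the work done by omap_crypto_align_sg(): optionally copy the
 * result back into the original scatterlist and release either the
 * bounce buffer or the cloned scatterlist, depending on which copy flag
 * was recorded for this data stream.
 */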
void omap_crypto_cleanup(struct scatterlist *sg, struct scatterlist *orig,
			 int offset, int len, u8 flags_shift,
			 unsigned long flags)
{
	void *buf;
	int pages;

	flags >>= flags_shift;
	flags &= OMAP_CRYPTO_COPY_MASK;

	if (!flags)
		return;

	buf = sg_virt(sg);
	pages = get_order(len);

	if (orig && (flags & OMAP_CRYPTO_COPY_MASK))
		scatterwalk_map_and_copy(buf, orig, offset, len, 1);

	if (flags & OMAP_CRYPTO_DATA_COPIED)
		free_pages((unsigned long)buf, pages);
	else if (flags & OMAP_CRYPTO_SG_COPIED)
		kfree(sg);
}
EXPORT_SYMBOL_GPL(omap_crypto_cleanup);

MODULE_DESCRIPTION("OMAP crypto support library.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Tero Kristo <t-kristo@ti.com>");
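
/*
 * Hypothetical usage sketch (not part of the original file): roughly how
 * a client driver such as omap-aes might use this library. The device
 * structure fields (dd->in_sg, dd->in_sgl, dd->flags), the shift value
 * MY_IN_DATA_SHIFT and the surrounding request handling are illustrative
 * assumptions, not definitions provided by this file; AES_BLOCK_SIZE
 * comes from <crypto/aes.h>.
 *
 *	static int my_prepare_request(struct my_crypto_dev *dd, int total)
 *	{
 *		u16 flags = OMAP_CRYPTO_COPY_DATA | OMAP_CRYPTO_ZERO_BUF;
 *
 *		return omap_crypto_align_sg(&dd->in_sg, total, AES_BLOCK_SIZE,
 *					    dd->in_sgl, flags,
 *					    MY_IN_DATA_SHIFT, &dd->flags);
 *	}
 *
 *	static void my_finish_request(struct my_crypto_dev *dd, int total)
 *	{
 *		// Input data needs no copy-back, so orig is NULL; an output
 *		// stream would pass its original scatterlist here instead.
 *		omap_crypto_cleanup(dd->in_sg, NULL, 0, total,
 *				    MY_IN_DATA_SHIFT, dd->flags);
 *	}
 */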