chcr_core.c

/**
 * This file is part of the Chelsio T4/T5/T6 Ethernet driver for Linux.
 *
 * Copyright (C) 2011-2016 Chelsio Communications. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written and Maintained by:
 *	Manoj Malviya (manojmalviya@chelsio.com)
 *	Atul Gupta (atul.gupta@chelsio.com)
 *	Jitendra Lulla (jlulla@chelsio.com)
 *	Yeshaswi M R Gowda (yeshaswi@chelsio.com)
 *	Harsh Jain (harsh@chelsio.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/skbuff.h>

#include <crypto/aes.h>
#include <crypto/hash.h>

#include "t4_msg.h"
#include "chcr_core.h"
#include "cxgb4_uld.h"
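
/*
 * Every crypto-capable adapter is kept on uld_ctx_list; ctx_rr is the
 * round-robin cursor used by assign_chcr_device().  Both are protected
 * by dev_mutex.
 */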
static LIST_HEAD(uld_ctx_list);
static DEFINE_MUTEX(dev_mutex);
static atomic_t dev_count;
static struct uld_ctx *ctx_rr;

typedef int (*chcr_handler_func)(struct chcr_dev *dev, unsigned char *input);
static int cpl_fw6_pld_handler(struct chcr_dev *dev, unsigned char *input);
static void *chcr_uld_add(const struct cxgb4_lld_info *lld);
static int chcr_uld_state_change(void *handle, enum cxgb4_state state);

static chcr_handler_func work_handlers[NUM_CPL_CMDS] = {
	[CPL_FW6_PLD] = cpl_fw6_pld_handler,
};

static struct cxgb4_uld_info chcr_uld_info = {
	.name = DRV_MODULE_NAME,
	.nrxq = MAX_ULD_QSETS,
	.ntxq = MAX_ULD_QSETS,
	.rxq_size = 1024,
	.add = chcr_uld_add,
	.state_change = chcr_uld_state_change,
	.rx_handler = chcr_uld_rx_handler,
};

struct uld_ctx *assign_chcr_device(void)
{
	struct uld_ctx *u_ctx = NULL;

	/*
	 * When multiple devices are present in the system, select the
	 * device for crypto operations in a round-robin fashion.  A given
	 * session, however, must keep using the same device to maintain
	 * request-response ordering.
	 */
	mutex_lock(&dev_mutex);
	if (!list_empty(&uld_ctx_list)) {
		u_ctx = ctx_rr;
		if (list_is_last(&ctx_rr->entry, &uld_ctx_list))
			ctx_rr = list_first_entry(&uld_ctx_list,
						  struct uld_ctx,
						  entry);
		else
			ctx_rr = list_next_entry(ctx_rr, entry);
	}
	mutex_unlock(&dev_mutex);
	return u_ctx;
}
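
/*
 * Allocate a chcr_dev for the ULD context, wire the two together and
 * append the context to the global device list.  The first context on
 * the list also becomes the initial round-robin cursor.
 */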
static int chcr_dev_add(struct uld_ctx *u_ctx)
{
	struct chcr_dev *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	spin_lock_init(&dev->lock_chcr_dev);
	u_ctx->dev = dev;
	dev->u_ctx = u_ctx;
	atomic_inc(&dev_count);
	mutex_lock(&dev_mutex);
	list_add_tail(&u_ctx->entry, &uld_ctx_list);
	if (!ctx_rr)
		ctx_rr = u_ctx;
	mutex_unlock(&dev_mutex);
	return 0;
}
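
/*
 * Unlink the ULD context from the device list and free its chcr_dev.
 * The round-robin cursor is advanced past the departing context first.
 * Callers must hold dev_mutex.
 */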
static int chcr_dev_remove(struct uld_ctx *u_ctx)
{
	if (ctx_rr == u_ctx) {
		if (list_is_last(&ctx_rr->entry, &uld_ctx_list))
			ctx_rr = list_first_entry(&uld_ctx_list,
						  struct uld_ctx,
						  entry);
		else
			ctx_rr = list_next_entry(ctx_rr, entry);
	}
	list_del(&u_ctx->entry);
	if (list_empty(&uld_ctx_list))
		ctx_rr = NULL;
	kfree(u_ctx->dev);
	u_ctx->dev = NULL;
	atomic_dec(&dev_count);
	return 0;
}
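
/*
 * Completion handler for CPL_FW6_PLD messages.  The firmware echoes the
 * address of the originating crypto request back in data[1] and reports
 * MAC/pad error status in data[0].
 */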
static int cpl_fw6_pld_handler(struct chcr_dev *dev,
			       unsigned char *input)
{
	struct crypto_async_request *req;
	struct cpl_fw6_pld *fw6_pld;
	u32 ack_err_status = 0;
	int error_status = 0;
	struct adapter *adap = padap(dev);

	fw6_pld = (struct cpl_fw6_pld *)input;
	req = (struct crypto_async_request *)(uintptr_t)be64_to_cpu(
						       fw6_pld->data[1]);

	ack_err_status =
		ntohl(*(__be32 *)((unsigned char *)&fw6_pld->data[0] + 4));
	if (ack_err_status) {
		if (CHK_MAC_ERR_BIT(ack_err_status) ||
		    CHK_PAD_ERR_BIT(ack_err_status))
			error_status = -EBADMSG;
		atomic_inc(&adap->chcr_stats.error);
	}
	/* call completion callback with failure status */
	if (req) {
		error_status = chcr_handle_resp(req, input, error_status);
	} else {
		pr_err("Incorrect request address from the firmware\n");
		return -EFAULT;
	}
	return 0;
}
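
/* Hand a fully built work-request skb to cxgb4 for transmission. */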
int chcr_send_wr(struct sk_buff *skb)
{
	return cxgb4_crypto_send(skb->dev, skb);
}
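
/*
 * ULD "add" callback, invoked by cxgb4 once per adapter.  Adapters
 * whose firmware does not export crypto-lookaside support are refused;
 * otherwise a ULD context is allocated and the lower-layer driver info
 * is cached in it.
 */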
static void *chcr_uld_add(const struct cxgb4_lld_info *lld)
{
	struct uld_ctx *u_ctx;

	if (!(lld->ulp_crypto & ULP_CRYPTO_LOOKASIDE))
		return ERR_PTR(-EOPNOTSUPP);

	/* Create the device and add it in the device list */
	u_ctx = kzalloc(sizeof(*u_ctx), GFP_KERNEL);
	if (!u_ctx)
		return ERR_PTR(-ENOMEM);
	u_ctx->lldi = *lld;
	return u_ctx;
}
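
/*
 * ULD receive handler: dispatch firmware responses to the handler
 * registered for their CPL opcode.  Only CPL_FW6_PLD is expected here.
 */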
int chcr_uld_rx_handler(void *handle, const __be64 *rsp,
			const struct pkt_gl *pgl)
{
	struct uld_ctx *u_ctx = (struct uld_ctx *)handle;
	struct chcr_dev *dev = u_ctx->dev;
	const struct cpl_fw6_pld *rpl = (struct cpl_fw6_pld *)rsp;

	if (rpl->opcode != CPL_FW6_PLD) {
		pr_err("Unsupported opcode %d received\n", rpl->opcode);
		return 0;
	}

	if (!pgl)
		work_handlers[rpl->opcode](dev, (unsigned char *)&rsp[1]);
	else
		work_handlers[rpl->opcode](dev, pgl->va);
	return 0;
}
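
/*
 * React to adapter state changes reported by cxgb4: create the crypto
 * device when the adapter comes up and tear it down on detach.
 * start_crypto() is called when the first device appears and
 * stop_crypto() when the last one goes away.
 */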
static int chcr_uld_state_change(void *handle, enum cxgb4_state state)
{
	struct uld_ctx *u_ctx = handle;
	int ret = 0;

	switch (state) {
	case CXGB4_STATE_UP:
		if (!u_ctx->dev) {
			ret = chcr_dev_add(u_ctx);
			if (ret != 0)
				return ret;
		}
		if (atomic_read(&dev_count) == 1)
			ret = start_crypto();
		break;

	case CXGB4_STATE_DETACH:
		if (u_ctx->dev) {
			mutex_lock(&dev_mutex);
			chcr_dev_remove(u_ctx);
			mutex_unlock(&dev_mutex);
		}
		if (!atomic_read(&dev_count))
			stop_crypto();
		break;

	case CXGB4_STATE_START_RECOVERY:
	case CXGB4_STATE_DOWN:
	default:
		break;
	}
	return ret;
}
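
/*
 * Module init: register with cxgb4 as a crypto ULD.  A registration
 * failure is logged but not treated as fatal, so module load still
 * succeeds.
 */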
static int __init chcr_crypto_init(void)
{
	if (cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info))
		pr_err("ULD register fail: No chcr crypto support in cxgb4\n");
	return 0;
}
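
/*
 * Module exit: stop the crypto service, free every remaining device
 * context and detach from cxgb4.
 */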
static void __exit chcr_crypto_exit(void)
{
	struct uld_ctx *u_ctx, *tmp;

	if (atomic_read(&dev_count))
		stop_crypto();

	/* Remove all devices from the list */
	mutex_lock(&dev_mutex);
	list_for_each_entry_safe(u_ctx, tmp, &uld_ctx_list, entry) {
		if (u_ctx->dev)
			chcr_dev_remove(u_ctx);
		kfree(u_ctx);
	}
	mutex_unlock(&dev_mutex);
	cxgb4_unregister_uld(CXGB4_ULD_CRYPTO);
}

module_init(chcr_crypto_init);
module_exit(chcr_crypto_exit);

MODULE_DESCRIPTION("Crypto Co-processor for Chelsio Terminator cards.");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Chelsio Communications");
MODULE_VERSION(DRV_VERSION);