
/*
 * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/bug.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/spinlock.h>

#include "usnic_log.h"
#include "usnic_vnic.h"
#include "usnic_fwd.h"
#include "usnic_uiom.h"
#include "usnic_debugfs.h"
#include "usnic_ib_qp_grp.h"
#include "usnic_ib_sysfs.h"
#include "usnic_transport.h"

#define DFLT_RQ_IDX	0

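/*
 * A QP group bundles the vNIC resources (receive queues and friends)
 * that back a single usNIC verbs QP, together with the packet-steering
 * flows that deliver traffic into its receive queues.
 */
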
const char *usnic_ib_qp_grp_state_to_string(enum ib_qp_state state)
{
	switch (state) {
	case IB_QPS_RESET:
		return "Rst";
	case IB_QPS_INIT:
		return "Init";
	case IB_QPS_RTR:
		return "RTR";
	case IB_QPS_RTS:
		return "RTS";
	case IB_QPS_SQD:
		return "SQD";
	case IB_QPS_SQE:
		return "SQE";
	case IB_QPS_ERR:
		return "ERR";
	default:
		return "UNKNOWN STATE";
	}
}

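/*
 * Row formatters used by the driver's debug facilities to dump QP
 * groups: one fixed header line, then one line per QP group.
 */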
int usnic_ib_qp_grp_dump_hdr(char *buf, int buf_sz)
{
	return scnprintf(buf, buf_sz, "|QPN\t|State\t|PID\t|VF Idx\t|Fil ID");
}

int usnic_ib_qp_grp_dump_rows(void *obj, char *buf, int buf_sz)
{
	struct usnic_ib_qp_grp *qp_grp = obj;
	struct usnic_ib_qp_grp_flow *default_flow;
	if (obj) {
		default_flow = list_first_entry(&qp_grp->flows_lst,
					struct usnic_ib_qp_grp_flow, link);
		return scnprintf(buf, buf_sz, "|%d\t|%s\t|%d\t|%hu\t|%d",
					qp_grp->ibqp.qp_num,
					usnic_ib_qp_grp_state_to_string(
							qp_grp->state),
					qp_grp->owner_pid,
					usnic_vnic_get_index(qp_grp->vf->vnic),
					default_flow->flow->flow_id);
	} else {
		return scnprintf(buf, buf_sz, "|N/A\t|N/A\t|N/A\t|N/A\t|N/A");
	}
}

static struct usnic_vnic_res_chunk *
get_qp_res_chunk(struct usnic_ib_qp_grp *qp_grp)
{
	lockdep_assert_held(&qp_grp->lock);
	/*
	 * The QP res chunk, used to derive qp indices,
	 * is just the chunk of RQ indices.
	 */
	return usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_RQ);
}

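/*
 * Enable every RQ backing this QP group on the forwarding device.  On
 * failure, roll back the queues that were already enabled.
 */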
static int enable_qp_grp(struct usnic_ib_qp_grp *qp_grp)
{
	int status;
	int i, vnic_idx;
	struct usnic_vnic_res_chunk *res_chunk;
	struct usnic_vnic_res *res;

	lockdep_assert_held(&qp_grp->lock);

	vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);

	res_chunk = get_qp_res_chunk(qp_grp);
	if (IS_ERR_OR_NULL(res_chunk)) {
		usnic_err("Unable to get qp res with err %ld\n",
				PTR_ERR(res_chunk));
		return res_chunk ? PTR_ERR(res_chunk) : -ENOMEM;
	}

	for (i = 0; i < res_chunk->cnt; i++) {
		res = res_chunk->res[i];
		status = usnic_fwd_enable_qp(qp_grp->ufdev, vnic_idx,
						res->vnic_idx);
		if (status) {
			usnic_err("Failed to enable qp %d of %s:%d with err %d\n",
					res->vnic_idx, qp_grp->ufdev->name,
					vnic_idx, status);
			goto out_err;
		}
	}

	return 0;

out_err:
	for (i--; i >= 0; i--) {
		res = res_chunk->res[i];
		usnic_fwd_disable_qp(qp_grp->ufdev, vnic_idx,
					res->vnic_idx);
	}

	return status;
}

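/*
 * Disable every RQ backing this QP group.  Errors are logged but the
 * loop keeps going so every queue gets a disable attempt; the status of
 * the final attempt is returned.
 */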
static int disable_qp_grp(struct usnic_ib_qp_grp *qp_grp)
{
	int i, vnic_idx;
	struct usnic_vnic_res_chunk *res_chunk;
	struct usnic_vnic_res *res;
	int status = 0;

	lockdep_assert_held(&qp_grp->lock);
	vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);

	res_chunk = get_qp_res_chunk(qp_grp);
	if (IS_ERR_OR_NULL(res_chunk)) {
		usnic_err("Unable to get qp res with err %ld\n",
				PTR_ERR(res_chunk));
		return res_chunk ? PTR_ERR(res_chunk) : -ENOMEM;
	}

	for (i = 0; i < res_chunk->cnt; i++) {
		res = res_chunk->res[i];
		status = usnic_fwd_disable_qp(qp_grp->ufdev, vnic_idx,
						res->vnic_idx);
		if (status) {
			usnic_err("Failed to disable rq %d of %s:%d with err %d\n",
					res->vnic_idx,
					qp_grp->ufdev->name,
					vnic_idx, status);
		}
	}

	return status;
}

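/*
 * Build the filter action shared by all flow types: steer matching
 * packets into this QP group's default RQ.
 */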
static int init_filter_action(struct usnic_ib_qp_grp *qp_grp,
				struct usnic_filter_action *uaction)
{
	struct usnic_vnic_res_chunk *res_chunk;

	res_chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_RQ);
	if (IS_ERR_OR_NULL(res_chunk)) {
		usnic_err("Unable to get %s with err %ld\n",
			usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_RQ),
			PTR_ERR(res_chunk));
		return res_chunk ? PTR_ERR(res_chunk) : -ENOMEM;
	}

	uaction->vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);
	uaction->action.type = FILTER_ACTION_RQ_STEERING;
	uaction->action.u.rq_idx = res_chunk->res[DFLT_RQ_IDX]->vnic_idx;

	return 0;
}

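/*
 * Set up a flow for the custom RoCE transport: reserve the transport
 * port, install a usNIC filter for it, and wrap the resulting
 * forwarding flow in a qp_grp_flow handle.
 */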
static struct usnic_ib_qp_grp_flow*
create_roce_custom_flow(struct usnic_ib_qp_grp *qp_grp,
			struct usnic_transport_spec *trans_spec)
{
	uint16_t port_num;
	int err;
	struct filter filter;
	struct usnic_filter_action uaction;
	struct usnic_ib_qp_grp_flow *qp_flow;
	struct usnic_fwd_flow *flow;
	enum usnic_transport_type trans_type;

	trans_type = trans_spec->trans_type;
	port_num = trans_spec->usnic_roce.port_num;

	/* Reserve Port */
	port_num = usnic_transport_rsrv_port(trans_type, port_num);
	if (port_num == 0)
		return ERR_PTR(-EINVAL);

	/* Create Flow */
	usnic_fwd_init_usnic_filter(&filter, port_num);
	err = init_filter_action(qp_grp, &uaction);
	if (err)
		goto out_unreserve_port;

	flow = usnic_fwd_alloc_flow(qp_grp->ufdev, &filter, &uaction);
	if (IS_ERR_OR_NULL(flow)) {
		usnic_err("Unable to alloc flow with err %ld\n",
				PTR_ERR(flow));
		err = flow ? PTR_ERR(flow) : -EFAULT;
		goto out_unreserve_port;
	}

	/* Create Flow Handle */
	qp_flow = kzalloc(sizeof(*qp_flow), GFP_ATOMIC);
	if (!qp_flow) {
		err = -ENOMEM;
		goto out_dealloc_flow;
	}
	qp_flow->flow = flow;
	qp_flow->trans_type = trans_type;
	qp_flow->usnic_roce.port_num = port_num;
	qp_flow->qp_grp = qp_grp;
	return qp_flow;

out_dealloc_flow:
	usnic_fwd_dealloc_flow(flow);
out_unreserve_port:
	usnic_transport_unrsrv_port(trans_type, port_num);
	return ERR_PTR(err);
}

static void release_roce_custom_flow(struct usnic_ib_qp_grp_flow *qp_flow)
{
	usnic_fwd_dealloc_flow(qp_flow->flow);
	usnic_transport_unrsrv_port(qp_flow->trans_type,
					qp_flow->usnic_roce.port_num);
	kfree(qp_flow);
}

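/*
 * Set up a flow for the IPv4/UDP transport: take a reference on the
 * caller-supplied UDP socket, verify that it really is UDP, and install
 * a filter matching the socket's bound address and port.
 */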
static struct usnic_ib_qp_grp_flow*
create_udp_flow(struct usnic_ib_qp_grp *qp_grp,
		struct usnic_transport_spec *trans_spec)
{
	struct socket *sock;
	int sock_fd;
	int err;
	struct filter filter;
	struct usnic_filter_action uaction;
	struct usnic_ib_qp_grp_flow *qp_flow;
	struct usnic_fwd_flow *flow;
	enum usnic_transport_type trans_type;
	uint32_t addr;
	uint16_t port_num;
	int proto;

	trans_type = trans_spec->trans_type;
	sock_fd = trans_spec->udp.sock_fd;

	/* Get and check socket */
	sock = usnic_transport_get_socket(sock_fd);
	if (IS_ERR_OR_NULL(sock))
		return ERR_CAST(sock);

	err = usnic_transport_sock_get_addr(sock, &proto, &addr, &port_num);
	if (err)
		goto out_put_sock;

	if (proto != IPPROTO_UDP) {
		usnic_err("Protocol for fd %d is not UDP\n", sock_fd);
		err = -EPERM;
		goto out_put_sock;
	}

	/* Create flow */
	usnic_fwd_init_udp_filter(&filter, addr, port_num);
	err = init_filter_action(qp_grp, &uaction);
	if (err)
		goto out_put_sock;

	flow = usnic_fwd_alloc_flow(qp_grp->ufdev, &filter, &uaction);
	if (IS_ERR_OR_NULL(flow)) {
		usnic_err("Unable to alloc flow with err %ld\n",
				PTR_ERR(flow));
		err = flow ? PTR_ERR(flow) : -EFAULT;
		goto out_put_sock;
	}

	/* Create qp_flow */
	qp_flow = kzalloc(sizeof(*qp_flow), GFP_ATOMIC);
	if (!qp_flow) {
		err = -ENOMEM;
		goto out_dealloc_flow;
	}
	qp_flow->flow = flow;
	qp_flow->trans_type = trans_type;
	qp_flow->udp.sock = sock;
	qp_flow->qp_grp = qp_grp;
	return qp_flow;

out_dealloc_flow:
	usnic_fwd_dealloc_flow(flow);
out_put_sock:
	usnic_transport_put_socket(sock);
	return ERR_PTR(err);
}

static void release_udp_flow(struct usnic_ib_qp_grp_flow *qp_flow)
{
	usnic_fwd_dealloc_flow(qp_flow->flow);
	usnic_transport_put_socket(qp_flow->udp.sock);
	kfree(qp_flow);
}

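/*
 * Dispatch flow creation by transport type, then queue the new flow on
 * the QP group's flow list and expose it in debugfs.
 */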
static struct usnic_ib_qp_grp_flow*
create_and_add_flow(struct usnic_ib_qp_grp *qp_grp,
			struct usnic_transport_spec *trans_spec)
{
	struct usnic_ib_qp_grp_flow *qp_flow;
	enum usnic_transport_type trans_type;

	trans_type = trans_spec->trans_type;
	switch (trans_type) {
	case USNIC_TRANSPORT_ROCE_CUSTOM:
		qp_flow = create_roce_custom_flow(qp_grp, trans_spec);
		break;
	case USNIC_TRANSPORT_IPV4_UDP:
		qp_flow = create_udp_flow(qp_grp, trans_spec);
		break;
	default:
		usnic_err("Unsupported transport %u\n",
				trans_spec->trans_type);
		return ERR_PTR(-EINVAL);
	}

	if (!IS_ERR_OR_NULL(qp_flow)) {
		list_add_tail(&qp_flow->link, &qp_grp->flows_lst);
		usnic_debugfs_flow_add(qp_flow);
	}

	return qp_flow;
}

static void release_and_remove_flow(struct usnic_ib_qp_grp_flow *qp_flow)
{
	usnic_debugfs_flow_remove(qp_flow);
	list_del(&qp_flow->link);

	switch (qp_flow->trans_type) {
	case USNIC_TRANSPORT_ROCE_CUSTOM:
		release_roce_custom_flow(qp_flow);
		break;
	case USNIC_TRANSPORT_IPV4_UDP:
		release_udp_flow(qp_flow);
		break;
	default:
		WARN(1, "Unsupported transport %u\n",
				qp_flow->trans_type);
		break;
	}
}

static void release_and_remove_all_flows(struct usnic_ib_qp_grp *qp_grp)
{
	struct usnic_ib_qp_grp_flow *qp_flow, *tmp;
	list_for_each_entry_safe(qp_flow, tmp, &qp_grp->flows_lst, link)
		release_and_remove_flow(qp_flow);
}

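/*
 * Drive the QP-group state machine; transitions not listed below fail
 * with -EINVAL.  @data, when non-NULL, is a struct usnic_transport_spec
 * describing a flow to add.
 *
 *   RESET -> RESET           no-op
 *   RESET/INIT -> INIT       add a flow from @data (optional from RESET,
 *                            required from INIT)
 *   INIT -> RTR              enable the RQs
 *   RTR -> RTS               no-op for now
 *   RTR/RTS -> INIT          disable the RQs
 *   INIT -> RESET            release all flows
 *   RTR/RTS/ERR -> RESET     disable the RQs and release all flows
 *   RESET/INIT/RTR/RTS -> ERR  tear down and raise IB_EVENT_QP_FATAL
 */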
int usnic_ib_qp_grp_modify(struct usnic_ib_qp_grp *qp_grp,
				enum ib_qp_state new_state,
				void *data)
{
	int status = 0;
	struct ib_event ib_event;
	enum ib_qp_state old_state;
	struct usnic_transport_spec *trans_spec;
	struct usnic_ib_qp_grp_flow *qp_flow;

	old_state = qp_grp->state;
	trans_spec = (struct usnic_transport_spec *) data;

	spin_lock(&qp_grp->lock);
	switch (new_state) {
	case IB_QPS_RESET:
		switch (old_state) {
		case IB_QPS_RESET:
			/* NO-OP */
			break;
		case IB_QPS_INIT:
			release_and_remove_all_flows(qp_grp);
			status = 0;
			break;
		case IB_QPS_RTR:
		case IB_QPS_RTS:
		case IB_QPS_ERR:
			status = disable_qp_grp(qp_grp);
			release_and_remove_all_flows(qp_grp);
			break;
		default:
			status = -EINVAL;
		}
		break;
	case IB_QPS_INIT:
		switch (old_state) {
		case IB_QPS_RESET:
			if (trans_spec) {
				qp_flow = create_and_add_flow(qp_grp,
								trans_spec);
				if (IS_ERR_OR_NULL(qp_flow)) {
					status = qp_flow ? PTR_ERR(qp_flow) : -EFAULT;
					break;
				}
			} else {
				/*
				 * Optional to specify filters.
				 */
				status = 0;
			}
			break;
		case IB_QPS_INIT:
			if (trans_spec) {
				qp_flow = create_and_add_flow(qp_grp,
								trans_spec);
				if (IS_ERR_OR_NULL(qp_flow)) {
					status = qp_flow ? PTR_ERR(qp_flow) : -EFAULT;
					break;
				}
			} else {
				/*
				 * Doesn't make sense to go into INIT state
				 * from INIT state w/o adding filters.
				 */
				status = -EINVAL;
			}
			break;
		case IB_QPS_RTR:
		case IB_QPS_RTS:
			status = disable_qp_grp(qp_grp);
			break;
		default:
			status = -EINVAL;
		}
		break;
	case IB_QPS_RTR:
		switch (old_state) {
		case IB_QPS_INIT:
			status = enable_qp_grp(qp_grp);
			break;
		default:
			status = -EINVAL;
		}
		break;
	case IB_QPS_RTS:
		switch (old_state) {
		case IB_QPS_RTR:
			/* NO-OP FOR NOW */
			break;
		default:
			status = -EINVAL;
		}
		break;
	case IB_QPS_ERR:
		ib_event.device = &qp_grp->vf->pf->ib_dev;
		ib_event.element.qp = &qp_grp->ibqp;
		ib_event.event = IB_EVENT_QP_FATAL;

		switch (old_state) {
		case IB_QPS_RESET:
			qp_grp->ibqp.event_handler(&ib_event,
					qp_grp->ibqp.qp_context);
			break;
		case IB_QPS_INIT:
			release_and_remove_all_flows(qp_grp);
			qp_grp->ibqp.event_handler(&ib_event,
					qp_grp->ibqp.qp_context);
			break;
		case IB_QPS_RTR:
		case IB_QPS_RTS:
			status = disable_qp_grp(qp_grp);
			release_and_remove_all_flows(qp_grp);
			qp_grp->ibqp.event_handler(&ib_event,
					qp_grp->ibqp.qp_context);
			break;
		default:
			status = -EINVAL;
		}
		break;
	default:
		status = -EINVAL;
	}
	spin_unlock(&qp_grp->lock);

	if (!status) {
		qp_grp->state = new_state;
		usnic_info("Transitioned %u from %s to %s\n",
				qp_grp->grp_id,
				usnic_ib_qp_grp_state_to_string(old_state),
				usnic_ib_qp_grp_state_to_string(new_state));
	} else {
		usnic_err("Failed to transition %u from %s to %s\n",
				qp_grp->grp_id,
				usnic_ib_qp_grp_state_to_string(old_state),
				usnic_ib_qp_grp_state_to_string(new_state));
	}

	return status;
}

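/*
 * Allocate one resource chunk per entry in the EOL-terminated resource
 * spec.  The returned array is itself NULL-terminated so callers can
 * walk it without knowing its length.
 */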
static struct usnic_vnic_res_chunk**
alloc_res_chunk_list(struct usnic_vnic *vnic,
			struct usnic_vnic_res_spec *res_spec, void *owner_obj)
{
	enum usnic_vnic_res_type res_type;
	struct usnic_vnic_res_chunk **res_chunk_list;
	int err, i, res_cnt, res_lst_sz;

	for (res_lst_sz = 0;
		res_spec->resources[res_lst_sz].type != USNIC_VNIC_RES_TYPE_EOL;
		res_lst_sz++) {
		/* Do Nothing */
	}

	res_chunk_list = kzalloc(sizeof(*res_chunk_list)*(res_lst_sz+1),
					GFP_ATOMIC);
	if (!res_chunk_list)
		return ERR_PTR(-ENOMEM);

	for (i = 0; res_spec->resources[i].type != USNIC_VNIC_RES_TYPE_EOL;
		i++) {
		res_type = res_spec->resources[i].type;
		res_cnt = res_spec->resources[i].cnt;

		res_chunk_list[i] = usnic_vnic_get_resources(vnic, res_type,
					res_cnt, owner_obj);
		if (IS_ERR_OR_NULL(res_chunk_list[i])) {
			err = res_chunk_list[i] ?
				PTR_ERR(res_chunk_list[i]) : -ENOMEM;
			usnic_err("Failed to get %s from %s with err %d\n",
				usnic_vnic_res_type_to_str(res_type),
				usnic_vnic_pci_name(vnic),
				err);
			goto out_free_res;
		}
	}

	return res_chunk_list;

out_free_res:
	for (i--; i >= 0; i--)
		usnic_vnic_put_resources(res_chunk_list[i]);
	kfree(res_chunk_list);
	return ERR_PTR(err);
}

static void free_qp_grp_res(struct usnic_vnic_res_chunk **res_chunk_list)
{
	int i;
	for (i = 0; res_chunk_list[i]; i++)
		usnic_vnic_put_resources(res_chunk_list[i]);
	kfree(res_chunk_list);
}

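/*
 * Bind a QP group to a VF.  The first QP group on a VF attaches the
 * VF's PCI device to the PD's IOMMU domain; later groups just bump the
 * reference count.  All QP groups on one VF must share the same PD.
 */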
static int qp_grp_and_vf_bind(struct usnic_ib_vf *vf,
				struct usnic_ib_pd *pd,
				struct usnic_ib_qp_grp *qp_grp)
{
	int err;
	struct pci_dev *pdev;

	lockdep_assert_held(&vf->lock);

	pdev = usnic_vnic_get_pdev(vf->vnic);
	if (vf->qp_grp_ref_cnt == 0) {
		err = usnic_uiom_attach_dev_to_pd(pd->umem_pd, &pdev->dev);
		if (err) {
			usnic_err("Failed to attach %s to domain\n",
					pci_name(pdev));
			return err;
		}
		vf->pd = pd;
	}
	vf->qp_grp_ref_cnt++;

	WARN_ON(vf->pd != pd);
	qp_grp->vf = vf;

	return 0;
}

static void qp_grp_and_vf_unbind(struct usnic_ib_qp_grp *qp_grp)
{
	struct pci_dev *pdev;
	struct usnic_ib_pd *pd;

	lockdep_assert_held(&qp_grp->vf->lock);

	pd = qp_grp->vf->pd;
	pdev = usnic_vnic_get_pdev(qp_grp->vf->vnic);
	if (--qp_grp->vf->qp_grp_ref_cnt == 0) {
		qp_grp->vf->pd = NULL;
		usnic_uiom_detach_dev_from_pd(pd->umem_pd, &pdev->dev);
	}
	qp_grp->vf = NULL;
}

static void log_spec(struct usnic_vnic_res_spec *res_spec)
{
	char buf[512];
	usnic_vnic_spec_dump(buf, sizeof(buf), res_spec);
	usnic_dbg("%s\n", buf);
}

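/*
 * Derive the QP group id (which also becomes the verbs qp_num) from the
 * flow's transport port number.
 */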
static int qp_grp_id_from_flow(struct usnic_ib_qp_grp_flow *qp_flow,
				uint32_t *id)
{
	enum usnic_transport_type trans_type = qp_flow->trans_type;
	int err;
	uint16_t port_num = 0;

	switch (trans_type) {
	case USNIC_TRANSPORT_ROCE_CUSTOM:
		*id = qp_flow->usnic_roce.port_num;
		break;
	case USNIC_TRANSPORT_IPV4_UDP:
		err = usnic_transport_sock_get_addr(qp_flow->udp.sock,
							NULL, NULL,
							&port_num);
		if (err)
			return err;
		/*
		 * Copy port_num to the stack first and then to *id,
		 * so that the short-to-int cast works for little-
		 * and big-endian systems.
		 */
		*id = port_num;
		break;
	default:
		usnic_err("Unsupported transport %u\n", trans_type);
		return -EINVAL;
	}

	return 0;
}

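/*
 * Allocate and wire up a QP group: check the resource spec against the
 * transport's minimum, grab vNIC resources, bind to the VF/PD, install
 * the initial flow, and publish the group under sysfs.  The caller must
 * hold vf->lock.
 *
 * A minimal caller sketch (hypothetical variable names; the real caller
 * lives in usnic_ib_verbs.c):
 *
 *	spin_lock(&vf->lock);
 *	qp_grp = usnic_ib_qp_grp_create(ufdev, vf, pd, &res_spec,
 *					&trans_spec);
 *	spin_unlock(&vf->lock);
 *	if (IS_ERR_OR_NULL(qp_grp))
 *		return qp_grp ? PTR_ERR(qp_grp) : -ENOMEM;
 */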
struct usnic_ib_qp_grp *
usnic_ib_qp_grp_create(struct usnic_fwd_dev *ufdev, struct usnic_ib_vf *vf,
			struct usnic_ib_pd *pd,
			struct usnic_vnic_res_spec *res_spec,
			struct usnic_transport_spec *transport_spec)
{
	struct usnic_ib_qp_grp *qp_grp;
	int err;
	enum usnic_transport_type transport = transport_spec->trans_type;
	struct usnic_ib_qp_grp_flow *qp_flow;

	lockdep_assert_held(&vf->lock);

	err = usnic_vnic_res_spec_satisfied(&min_transport_spec[transport],
						res_spec);
	if (err) {
		usnic_err("Spec does not meet minimum req for transport %d\n",
				transport);
		log_spec(res_spec);
		return ERR_PTR(err);
	}

	qp_grp = kzalloc(sizeof(*qp_grp), GFP_ATOMIC);
	if (!qp_grp) {
		usnic_err("Unable to alloc qp_grp - Out of memory\n");
		return NULL;
	}

	qp_grp->res_chunk_list = alloc_res_chunk_list(vf->vnic, res_spec,
							qp_grp);
	if (IS_ERR_OR_NULL(qp_grp->res_chunk_list)) {
		err = qp_grp->res_chunk_list ?
				PTR_ERR(qp_grp->res_chunk_list) : -ENOMEM;
		usnic_err("Unable to alloc res for %d with err %d\n",
				qp_grp->grp_id, err);
		goto out_free_qp_grp;
	}

	err = qp_grp_and_vf_bind(vf, pd, qp_grp);
	if (err)
		goto out_free_res;

	INIT_LIST_HEAD(&qp_grp->flows_lst);
	spin_lock_init(&qp_grp->lock);
	qp_grp->ufdev = ufdev;
	qp_grp->state = IB_QPS_RESET;
	qp_grp->owner_pid = current->pid;

	qp_flow = create_and_add_flow(qp_grp, transport_spec);
	if (IS_ERR_OR_NULL(qp_flow)) {
		usnic_err("Unable to create and add flow with err %ld\n",
				PTR_ERR(qp_flow));
		err = qp_flow ? PTR_ERR(qp_flow) : -EFAULT;
		goto out_qp_grp_vf_unbind;
	}

	err = qp_grp_id_from_flow(qp_flow, &qp_grp->grp_id);
	if (err)
		goto out_release_flow;
	qp_grp->ibqp.qp_num = qp_grp->grp_id;

	usnic_ib_sysfs_qpn_add(qp_grp);

	return qp_grp;

out_release_flow:
	release_and_remove_flow(qp_flow);
out_qp_grp_vf_unbind:
	qp_grp_and_vf_unbind(qp_grp);
out_free_res:
	free_qp_grp_res(qp_grp->res_chunk_list);
out_free_qp_grp:
	kfree(qp_grp);

	return ERR_PTR(err);
}

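/*
 * Tear down a QP group.  The group must already be back in the RESET
 * state, and the caller must hold the owning VF's lock.
 */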
void usnic_ib_qp_grp_destroy(struct usnic_ib_qp_grp *qp_grp)
{
	WARN_ON(qp_grp->state != IB_QPS_RESET);
	lockdep_assert_held(&qp_grp->vf->lock);

	release_and_remove_all_flows(qp_grp);
	usnic_ib_sysfs_qpn_remove(qp_grp);
	qp_grp_and_vf_unbind(qp_grp);
	free_qp_grp_res(qp_grp->res_chunk_list);
	kfree(qp_grp);
}

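/*
 * Find this QP group's resource chunk of the given type, or
 * ERR_PTR(-EINVAL) if the group owns no resources of that type.
 */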
struct usnic_vnic_res_chunk*
usnic_ib_qp_grp_get_chunk(struct usnic_ib_qp_grp *qp_grp,
				enum usnic_vnic_res_type res_type)
{
	int i;

	for (i = 0; qp_grp->res_chunk_list[i]; i++) {
		if (qp_grp->res_chunk_list[i]->type == res_type)
			return qp_grp->res_chunk_list[i];
	}

	return ERR_PTR(-EINVAL);
}