vt.c 22 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897
  1. /*
  2. * Copyright(c) 2016 Intel Corporation.
  3. *
  4. * This file is provided under a dual BSD/GPLv2 license. When using or
  5. * redistributing this file, you may do so under either license.
  6. *
  7. * GPL LICENSE SUMMARY
  8. *
  9. * This program is free software; you can redistribute it and/or modify
  10. * it under the terms of version 2 of the GNU General Public License as
  11. * published by the Free Software Foundation.
  12. *
  13. * This program is distributed in the hope that it will be useful, but
  14. * WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  16. * General Public License for more details.
  17. *
  18. * BSD LICENSE
  19. *
  20. * Redistribution and use in source and binary forms, with or without
  21. * modification, are permitted provided that the following conditions
  22. * are met:
  23. *
  24. * - Redistributions of source code must retain the above copyright
  25. * notice, this list of conditions and the following disclaimer.
  26. * - Redistributions in binary form must reproduce the above copyright
  27. * notice, this list of conditions and the following disclaimer in
  28. * the documentation and/or other materials provided with the
  29. * distribution.
  30. * - Neither the name of Intel Corporation nor the names of its
  31. * contributors may be used to endorse or promote products derived
  32. * from this software without specific prior written permission.
  33. *
  34. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  35. * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  36. * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  37. * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  38. * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  39. * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  40. * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  41. * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  42. * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  43. * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  44. * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  45. *
  46. */
  47. #include <linux/module.h>
  48. #include <linux/kernel.h>
  49. #include <linux/dma-mapping.h>
  50. #include "vt.h"
  51. #include "trace.h"
  52. #define RVT_UVERBS_ABI_VERSION 2
  53. MODULE_LICENSE("Dual BSD/GPL");
  54. MODULE_DESCRIPTION("RDMA Verbs Transport Library");
  55. static int rvt_init(void)
  56. {
  57. /*
  58. * rdmavt does not need to do anything special when it starts up. All it
  59. * needs to do is sit and wait until a driver attempts registration.
  60. */
  61. return 0;
  62. }
  63. module_init(rvt_init);
  64. static void rvt_cleanup(void)
  65. {
  66. /*
  67. * Nothing to do at exit time either. The module won't be able to be
  68. * removed until all drivers are gone which means all the dev structs
  69. * are gone so there is really nothing to do.
  70. */
  71. }
  72. module_exit(rvt_cleanup);
  73. /**
  74. * rvt_alloc_device - allocate rdi
  75. * @size: how big of a structure to allocate
  76. * @nports: number of ports to allocate array slots for
  77. *
  78. * Use IB core device alloc to allocate space for the rdi which is assumed to be
  79. * inside of the ib_device. Any extra space that drivers require should be
  80. * included in size.
  81. *
  82. * We also allocate a port array based on the number of ports.
  83. *
  84. * Return: pointer to allocated rdi
  85. */
  86. struct rvt_dev_info *rvt_alloc_device(size_t size, int nports)
  87. {
  88. struct rvt_dev_info *rdi = ERR_PTR(-ENOMEM);
  89. rdi = (struct rvt_dev_info *)ib_alloc_device(size);
  90. if (!rdi)
  91. return rdi;
  92. rdi->ports = kcalloc(nports, sizeof(*rdi->ports), GFP_KERNEL);
  93. if (!rdi->ports)
  94. ib_dealloc_device(&rdi->ibdev);
  95. return rdi;
  96. }
  97. EXPORT_SYMBOL(rvt_alloc_device);
/**
 * rvt_dealloc_device - deallocate rdi
 * @rdi: structure to free
 *
 * Free a structure allocated with rvt_alloc_device()
 */
void rvt_dealloc_device(struct rvt_dev_info *rdi)
{
	/* Release the port array first, then the embedding ib_device. */
	kfree(rdi->ports);
	ib_dealloc_device(&rdi->ibdev);
}
EXPORT_SYMBOL(rvt_dealloc_device);
  110. static int rvt_query_device(struct ib_device *ibdev,
  111. struct ib_device_attr *props,
  112. struct ib_udata *uhw)
  113. {
  114. struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
  115. if (uhw->inlen || uhw->outlen)
  116. return -EINVAL;
  117. /*
  118. * Return rvt_dev_info.dparms.props contents
  119. */
  120. *props = rdi->dparms.props;
  121. return 0;
  122. }
  123. static int rvt_modify_device(struct ib_device *device,
  124. int device_modify_mask,
  125. struct ib_device_modify *device_modify)
  126. {
  127. /*
  128. * There is currently no need to supply this based on qib and hfi1.
  129. * Future drivers may need to implement this though.
  130. */
  131. return -EOPNOTSUPP;
  132. }
  133. /**
  134. * rvt_query_port: Passes the query port call to the driver
  135. * @ibdev: Verbs IB dev
  136. * @port_num: port number, 1 based from ib core
  137. * @props: structure to hold returned properties
  138. *
  139. * Return: 0 on success
  140. */
  141. static int rvt_query_port(struct ib_device *ibdev, u8 port_num,
  142. struct ib_port_attr *props)
  143. {
  144. struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
  145. struct rvt_ibport *rvp;
  146. int port_index = ibport_num_to_idx(ibdev, port_num);
  147. if (port_index < 0)
  148. return -EINVAL;
  149. rvp = rdi->ports[port_index];
  150. /* props being zeroed by the caller, avoid zeroing it here */
  151. props->sm_lid = rvp->sm_lid;
  152. props->sm_sl = rvp->sm_sl;
  153. props->port_cap_flags = rvp->port_cap_flags;
  154. props->max_msg_sz = 0x80000000;
  155. props->pkey_tbl_len = rvt_get_npkeys(rdi);
  156. props->bad_pkey_cntr = rvp->pkey_violations;
  157. props->qkey_viol_cntr = rvp->qkey_violations;
  158. props->subnet_timeout = rvp->subnet_timeout;
  159. props->init_type_reply = 0;
  160. /* Populate the remaining ib_port_attr elements */
  161. return rdi->driver_f.query_port_state(rdi, port_num, props);
  162. }
  163. /**
  164. * rvt_modify_port
  165. * @ibdev: Verbs IB dev
  166. * @port_num: Port number, 1 based from ib core
  167. * @port_modify_mask: How to change the port
  168. * @props: Structure to fill in
  169. *
  170. * Return: 0 on success
  171. */
  172. static int rvt_modify_port(struct ib_device *ibdev, u8 port_num,
  173. int port_modify_mask, struct ib_port_modify *props)
  174. {
  175. struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
  176. struct rvt_ibport *rvp;
  177. int ret = 0;
  178. int port_index = ibport_num_to_idx(ibdev, port_num);
  179. if (port_index < 0)
  180. return -EINVAL;
  181. rvp = rdi->ports[port_index];
  182. if (port_modify_mask & IB_PORT_OPA_MASK_CHG) {
  183. rvp->port_cap3_flags |= props->set_port_cap_mask;
  184. rvp->port_cap3_flags &= ~props->clr_port_cap_mask;
  185. } else {
  186. rvp->port_cap_flags |= props->set_port_cap_mask;
  187. rvp->port_cap_flags &= ~props->clr_port_cap_mask;
  188. }
  189. if (props->set_port_cap_mask || props->clr_port_cap_mask)
  190. rdi->driver_f.cap_mask_chg(rdi, port_num);
  191. if (port_modify_mask & IB_PORT_SHUTDOWN)
  192. ret = rdi->driver_f.shut_down_port(rdi, port_num);
  193. if (port_modify_mask & IB_PORT_RESET_QKEY_CNTR)
  194. rvp->qkey_violations = 0;
  195. return ret;
  196. }
  197. /**
  198. * rvt_query_pkey - Return a pkey from the table at a given index
  199. * @ibdev: Verbs IB dev
  200. * @port_num: Port number, 1 based from ib core
  201. * @intex: Index into pkey table
  202. *
  203. * Return: 0 on failure pkey otherwise
  204. */
  205. static int rvt_query_pkey(struct ib_device *ibdev, u8 port_num, u16 index,
  206. u16 *pkey)
  207. {
  208. /*
  209. * Driver will be responsible for keeping rvt_dev_info.pkey_table up to
  210. * date. This function will just return that value. There is no need to
  211. * lock, if a stale value is read and sent to the user so be it there is
  212. * no way to protect against that anyway.
  213. */
  214. struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
  215. int port_index;
  216. port_index = ibport_num_to_idx(ibdev, port_num);
  217. if (port_index < 0)
  218. return -EINVAL;
  219. if (index >= rvt_get_npkeys(rdi))
  220. return -EINVAL;
  221. *pkey = rvt_get_pkey(rdi, port_index, index);
  222. return 0;
  223. }
  224. /**
  225. * rvt_query_gid - Return a gid from the table
  226. * @ibdev: Verbs IB dev
  227. * @port_num: Port number, 1 based from ib core
  228. * @index: = Index in table
  229. * @gid: Gid to return
  230. *
  231. * Return: 0 on success
  232. */
  233. static int rvt_query_gid(struct ib_device *ibdev, u8 port_num,
  234. int guid_index, union ib_gid *gid)
  235. {
  236. struct rvt_dev_info *rdi;
  237. struct rvt_ibport *rvp;
  238. int port_index;
  239. /*
  240. * Driver is responsible for updating the guid table. Which will be used
  241. * to craft the return value. This will work similar to how query_pkey()
  242. * is being done.
  243. */
  244. port_index = ibport_num_to_idx(ibdev, port_num);
  245. if (port_index < 0)
  246. return -EINVAL;
  247. rdi = ib_to_rvt(ibdev);
  248. rvp = rdi->ports[port_index];
  249. gid->global.subnet_prefix = rvp->gid_prefix;
  250. return rdi->driver_f.get_guid_be(rdi, rvp, guid_index,
  251. &gid->global.interface_id);
  252. }
/* rdmavt keeps no per-context state; wrap the core ib_ucontext directly. */
struct rvt_ucontext {
	struct ib_ucontext ibucontext;
};
/* Map a core ib_ucontext back to the rvt_ucontext that contains it. */
static inline struct rvt_ucontext *to_iucontext(struct ib_ucontext
					       *ibucontext)
{
	return container_of(ibucontext, struct rvt_ucontext, ibucontext);
}
  261. /**
  262. * rvt_alloc_ucontext - Allocate a user context
  263. * @ibdev: Vers IB dev
  264. * @data: User data allocated
  265. */
  266. static struct ib_ucontext *rvt_alloc_ucontext(struct ib_device *ibdev,
  267. struct ib_udata *udata)
  268. {
  269. struct rvt_ucontext *context;
  270. context = kmalloc(sizeof(*context), GFP_KERNEL);
  271. if (!context)
  272. return ERR_PTR(-ENOMEM);
  273. return &context->ibucontext;
  274. }
  275. /**
  276. *rvt_dealloc_ucontext - Free a user context
  277. *@context - Free this
  278. */
  279. static int rvt_dealloc_ucontext(struct ib_ucontext *context)
  280. {
  281. kfree(to_iucontext(context));
  282. return 0;
  283. }
  284. static int rvt_get_port_immutable(struct ib_device *ibdev, u8 port_num,
  285. struct ib_port_immutable *immutable)
  286. {
  287. struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
  288. struct ib_port_attr attr;
  289. int err, port_index;
  290. port_index = ibport_num_to_idx(ibdev, port_num);
  291. if (port_index < 0)
  292. return -EINVAL;
  293. immutable->core_cap_flags = rdi->dparms.core_cap_flags;
  294. err = ib_query_port(ibdev, port_num, &attr);
  295. if (err)
  296. return err;
  297. immutable->pkey_tbl_len = attr.pkey_tbl_len;
  298. immutable->gid_tbl_len = attr.gid_tbl_len;
  299. immutable->max_mad_size = rdi->dparms.max_mad_size;
  300. return 0;
  301. }
/*
 * Index values for the verbs rdmavt can supply or validate. At driver
 * registration time check_support() is called once per index (see
 * rvt_register_device()) to either install the rdmavt default handler
 * or verify the driver supplied the necessary callbacks.
 */
enum {
	MISC, /* not a verb: driver helper callbacks rdmavt itself needs */
	QUERY_DEVICE,
	MODIFY_DEVICE,
	QUERY_PORT,
	MODIFY_PORT,
	QUERY_PKEY,
	QUERY_GID,
	ALLOC_UCONTEXT,
	DEALLOC_UCONTEXT,
	GET_PORT_IMMUTABLE,
	CREATE_QP,
	MODIFY_QP,
	DESTROY_QP,
	QUERY_QP,
	POST_SEND,
	POST_RECV,
	POST_SRQ_RECV,
	CREATE_AH,
	DESTROY_AH,
	MODIFY_AH,
	QUERY_AH,
	CREATE_SRQ,
	MODIFY_SRQ,
	DESTROY_SRQ,
	QUERY_SRQ,
	ATTACH_MCAST,
	DETACH_MCAST,
	GET_DMA_MR,
	REG_USER_MR,
	DEREG_MR,
	ALLOC_MR,
	MAP_MR_SG,
	ALLOC_FMR,
	MAP_PHYS_FMR,
	UNMAP_FMR,
	DEALLOC_FMR,
	MMAP,
	CREATE_CQ,
	DESTROY_CQ,
	POLL_CQ,
	REQ_NOTFIY_CQ, /* NOTE(review): misspelling of NOTIFY; internal only, kept as check_support() uses it */
	RESIZE_CQ,
	ALLOC_PD,
	DEALLOC_PD,
	_VERB_IDX_MAX /* Must always be last! */
};
  349. static inline int check_driver_override(struct rvt_dev_info *rdi,
  350. size_t offset, void *func)
  351. {
  352. if (!*(void **)((void *)&rdi->ibdev + offset)) {
  353. *(void **)((void *)&rdi->ibdev + offset) = func;
  354. return 0;
  355. }
  356. return 1;
  357. }
/*
 * check_support - install rdmavt defaults or validate driver verb support
 * @rdi: the rvt device being registered
 * @verb: one of the verb indices from the enum above
 *
 * For each verb: install the rdmavt default handler unless the driver
 * supplied its own; where the rdmavt default in turn relies on driver
 * helper callbacks, verify those were provided.
 *
 * Return: 0 if the verb is adequately supported, negative errno otherwise.
 */
static noinline int check_support(struct rvt_dev_info *rdi, int verb)
{
	switch (verb) {
	case MISC:
		/*
		 * These functions are not part of verbs specifically but are
		 * required for rdmavt to function.
		 */
		if ((!rdi->driver_f.port_callback) ||
		    (!rdi->driver_f.get_card_name) ||
		    (!rdi->driver_f.get_pci_dev))
			return -EINVAL;
		break;

	case QUERY_DEVICE:
		check_driver_override(rdi, offsetof(struct ib_device,
						    query_device),
				      rvt_query_device);
		break;

	case MODIFY_DEVICE:
		/*
		 * rdmavt does not support modify device currently drivers must
		 * provide.
		 */
		if (!check_driver_override(rdi, offsetof(struct ib_device,
							 modify_device),
					   rvt_modify_device))
			return -EOPNOTSUPP;
		break;

	case QUERY_PORT:
		/* rvt_query_port() delegates port state to the driver. */
		if (!check_driver_override(rdi, offsetof(struct ib_device,
							 query_port),
					   rvt_query_port))
			if (!rdi->driver_f.query_port_state)
				return -EINVAL;
		break;

	case MODIFY_PORT:
		if (!check_driver_override(rdi, offsetof(struct ib_device,
							 modify_port),
					   rvt_modify_port))
			if (!rdi->driver_f.cap_mask_chg ||
			    !rdi->driver_f.shut_down_port)
				return -EINVAL;
		break;

	case QUERY_PKEY:
		check_driver_override(rdi, offsetof(struct ib_device,
						    query_pkey),
				      rvt_query_pkey);
		break;

	case QUERY_GID:
		/* rvt_query_gid() needs the driver's guid lookup helper. */
		if (!check_driver_override(rdi, offsetof(struct ib_device,
							 query_gid),
					   rvt_query_gid))
			if (!rdi->driver_f.get_guid_be)
				return -EINVAL;
		break;

	case ALLOC_UCONTEXT:
		check_driver_override(rdi, offsetof(struct ib_device,
						    alloc_ucontext),
				      rvt_alloc_ucontext);
		break;

	case DEALLOC_UCONTEXT:
		check_driver_override(rdi, offsetof(struct ib_device,
						    dealloc_ucontext),
				      rvt_dealloc_ucontext);
		break;

	case GET_PORT_IMMUTABLE:
		check_driver_override(rdi, offsetof(struct ib_device,
						    get_port_immutable),
				      rvt_get_port_immutable);
		break;

	case CREATE_QP:
		/* rdmavt QP handling needs several driver helpers. */
		if (!check_driver_override(rdi, offsetof(struct ib_device,
							 create_qp),
					   rvt_create_qp))
			if (!rdi->driver_f.qp_priv_alloc ||
			    !rdi->driver_f.qp_priv_free ||
			    !rdi->driver_f.notify_qp_reset ||
			    !rdi->driver_f.flush_qp_waiters ||
			    !rdi->driver_f.stop_send_queue ||
			    !rdi->driver_f.quiesce_qp)
				return -EINVAL;
		break;

	case MODIFY_QP:
		if (!check_driver_override(rdi, offsetof(struct ib_device,
							 modify_qp),
					   rvt_modify_qp))
			if (!rdi->driver_f.notify_qp_reset ||
			    !rdi->driver_f.schedule_send ||
			    !rdi->driver_f.get_pmtu_from_attr ||
			    !rdi->driver_f.flush_qp_waiters ||
			    !rdi->driver_f.stop_send_queue ||
			    !rdi->driver_f.quiesce_qp ||
			    !rdi->driver_f.notify_error_qp ||
			    !rdi->driver_f.mtu_from_qp ||
			    !rdi->driver_f.mtu_to_path_mtu)
				return -EINVAL;
		break;

	case DESTROY_QP:
		if (!check_driver_override(rdi, offsetof(struct ib_device,
							 destroy_qp),
					   rvt_destroy_qp))
			if (!rdi->driver_f.qp_priv_free ||
			    !rdi->driver_f.notify_qp_reset ||
			    !rdi->driver_f.flush_qp_waiters ||
			    !rdi->driver_f.stop_send_queue ||
			    !rdi->driver_f.quiesce_qp)
				return -EINVAL;
		break;

	case QUERY_QP:
		check_driver_override(rdi, offsetof(struct ib_device,
						    query_qp),
				      rvt_query_qp);
		break;

	case POST_SEND:
		if (!check_driver_override(rdi, offsetof(struct ib_device,
							 post_send),
					   rvt_post_send))
			if (!rdi->driver_f.schedule_send ||
			    !rdi->driver_f.do_send ||
			    !rdi->post_parms)
				return -EINVAL;
		break;

	case POST_RECV:
		check_driver_override(rdi, offsetof(struct ib_device,
						    post_recv),
				      rvt_post_recv);
		break;

	case POST_SRQ_RECV:
		check_driver_override(rdi, offsetof(struct ib_device,
						    post_srq_recv),
				      rvt_post_srq_recv);
		break;

	case CREATE_AH:
		check_driver_override(rdi, offsetof(struct ib_device,
						    create_ah),
				      rvt_create_ah);
		break;

	case DESTROY_AH:
		check_driver_override(rdi, offsetof(struct ib_device,
						    destroy_ah),
				      rvt_destroy_ah);
		break;

	case MODIFY_AH:
		check_driver_override(rdi, offsetof(struct ib_device,
						    modify_ah),
				      rvt_modify_ah);
		break;

	case QUERY_AH:
		check_driver_override(rdi, offsetof(struct ib_device,
						    query_ah),
				      rvt_query_ah);
		break;

	case CREATE_SRQ:
		check_driver_override(rdi, offsetof(struct ib_device,
						    create_srq),
				      rvt_create_srq);
		break;

	case MODIFY_SRQ:
		check_driver_override(rdi, offsetof(struct ib_device,
						    modify_srq),
				      rvt_modify_srq);
		break;

	case DESTROY_SRQ:
		check_driver_override(rdi, offsetof(struct ib_device,
						    destroy_srq),
				      rvt_destroy_srq);
		break;

	case QUERY_SRQ:
		check_driver_override(rdi, offsetof(struct ib_device,
						    query_srq),
				      rvt_query_srq);
		break;

	case ATTACH_MCAST:
		check_driver_override(rdi, offsetof(struct ib_device,
						    attach_mcast),
				      rvt_attach_mcast);
		break;

	case DETACH_MCAST:
		check_driver_override(rdi, offsetof(struct ib_device,
						    detach_mcast),
				      rvt_detach_mcast);
		break;

	case GET_DMA_MR:
		check_driver_override(rdi, offsetof(struct ib_device,
						    get_dma_mr),
				      rvt_get_dma_mr);
		break;

	case REG_USER_MR:
		check_driver_override(rdi, offsetof(struct ib_device,
						    reg_user_mr),
				      rvt_reg_user_mr);
		break;

	case DEREG_MR:
		check_driver_override(rdi, offsetof(struct ib_device,
						    dereg_mr),
				      rvt_dereg_mr);
		break;

	case ALLOC_FMR:
		check_driver_override(rdi, offsetof(struct ib_device,
						    alloc_fmr),
				      rvt_alloc_fmr);
		break;

	case ALLOC_MR:
		check_driver_override(rdi, offsetof(struct ib_device,
						    alloc_mr),
				      rvt_alloc_mr);
		break;

	case MAP_MR_SG:
		check_driver_override(rdi, offsetof(struct ib_device,
						    map_mr_sg),
				      rvt_map_mr_sg);
		break;

	case MAP_PHYS_FMR:
		check_driver_override(rdi, offsetof(struct ib_device,
						    map_phys_fmr),
				      rvt_map_phys_fmr);
		break;

	case UNMAP_FMR:
		check_driver_override(rdi, offsetof(struct ib_device,
						    unmap_fmr),
				      rvt_unmap_fmr);
		break;

	case DEALLOC_FMR:
		check_driver_override(rdi, offsetof(struct ib_device,
						    dealloc_fmr),
				      rvt_dealloc_fmr);
		break;

	case MMAP:
		check_driver_override(rdi, offsetof(struct ib_device,
						    mmap),
				      rvt_mmap);
		break;

	case CREATE_CQ:
		check_driver_override(rdi, offsetof(struct ib_device,
						    create_cq),
				      rvt_create_cq);
		break;

	case DESTROY_CQ:
		check_driver_override(rdi, offsetof(struct ib_device,
						    destroy_cq),
				      rvt_destroy_cq);
		break;

	case POLL_CQ:
		check_driver_override(rdi, offsetof(struct ib_device,
						    poll_cq),
				      rvt_poll_cq);
		break;

	case REQ_NOTFIY_CQ:
		check_driver_override(rdi, offsetof(struct ib_device,
						    req_notify_cq),
				      rvt_req_notify_cq);
		break;

	case RESIZE_CQ:
		check_driver_override(rdi, offsetof(struct ib_device,
						    resize_cq),
				      rvt_resize_cq);
		break;

	case ALLOC_PD:
		check_driver_override(rdi, offsetof(struct ib_device,
						    alloc_pd),
				      rvt_alloc_pd);
		break;

	case DEALLOC_PD:
		check_driver_override(rdi, offsetof(struct ib_device,
						    dealloc_pd),
				      rvt_dealloc_pd);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
/**
 * rvt_register_device - register a driver
 * @rdi: main dev structure for all of rdmavt operations
 *
 * It is up to drivers to allocate the rdi and fill in the appropriate
 * information.
 *
 * Return: 0 on success otherwise an errno.
 */
int rvt_register_device(struct rvt_dev_info *rdi)
{
	int ret = 0, i;

	if (!rdi)
		return -EINVAL;

	/*
	 * Check to ensure drivers have setup the required helpers for the verbs
	 * they want rdmavt to handle
	 */
	for (i = 0; i < _VERB_IDX_MAX; i++)
		if (check_support(rdi, i)) {
			pr_err("Driver support req not met at %d\n", i);
			return -EINVAL;
		}

	/* Once we get past here we can use rvt_pr macros and tracepoints */
	trace_rvt_dbg(rdi, "Driver attempting registration");
	rvt_mmap_init(rdi);

	/* Queue Pairs */
	ret = rvt_driver_qp_init(rdi);
	if (ret) {
		pr_err("Error in driver QP init.\n");
		return -EINVAL;
	}

	/* Address Handle */
	spin_lock_init(&rdi->n_ahs_lock);
	rdi->n_ahs_allocated = 0;

	/* Shared Receive Queue */
	rvt_driver_srq_init(rdi);

	/* Multicast */
	rvt_driver_mcast_init(rdi);

	/* Mem Region */
	ret = rvt_driver_mr_init(rdi);
	if (ret) {
		pr_err("Error in driver MR init.\n");
		goto bail_no_mr;
	}

	/* Completion queues */
	ret = rvt_driver_cq_init(rdi);
	if (ret) {
		pr_err("Error in driver CQ init.\n");
		goto bail_mr;
	}

	/* DMA Operations: fall back to software DMA ops if driver set none */
	rdi->ibdev.dev.dma_ops = rdi->ibdev.dev.dma_ops ? : &dma_virt_ops;

	/* Protection Domain */
	spin_lock_init(&rdi->n_pds_lock);
	rdi->n_pds_allocated = 0;

	/*
	 * There are some things which could be set by underlying drivers but
	 * really should be up to rdmavt to set. For instance drivers can't know
	 * exactly which functions rdmavt supports, nor do they know the ABI
	 * version, so we do all of this sort of stuff here.
	 */
	rdi->ibdev.uverbs_abi_ver = RVT_UVERBS_ABI_VERSION;
	rdi->ibdev.uverbs_cmd_mask =
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
		(1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
		(1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_CREATE_AH) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_AH) |
		(1ull << IB_USER_VERBS_CMD_QUERY_AH) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_AH) |
		(1ull << IB_USER_VERBS_CMD_REG_MR) |
		(1ull << IB_USER_VERBS_CMD_DEREG_MR) |
		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
		(1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
		(1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
		(1ull << IB_USER_VERBS_CMD_POLL_CQ) |
		(1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
		(1ull << IB_USER_VERBS_CMD_CREATE_QP) |
		(1ull << IB_USER_VERBS_CMD_QUERY_QP) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
		(1ull << IB_USER_VERBS_CMD_POST_SEND) |
		(1ull << IB_USER_VERBS_CMD_POST_RECV) |
		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
		(1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
		(1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
		(1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV);
	rdi->ibdev.node_type = RDMA_NODE_IB_CA;
	rdi->ibdev.num_comp_vectors = 1;

	/* We are now good to announce we exist */
	ret = ib_register_device(&rdi->ibdev, rdi->driver_f.port_callback);
	if (ret) {
		rvt_pr_err(rdi, "Failed to register driver with ib core.\n");
		goto bail_cq;
	}

	rvt_create_mad_agents(rdi);

	rvt_pr_info(rdi, "Registration with rdmavt done.\n");
	return ret;

/* Unwind in reverse order of the setup above. */
bail_cq:
	rvt_cq_exit(rdi);

bail_mr:
	rvt_mr_exit(rdi);

bail_no_mr:
	rvt_qp_exit(rdi);

	return ret;
}
EXPORT_SYMBOL(rvt_register_device);
  744. /**
  745. * rvt_unregister_device - remove a driver
  746. * @rdi: rvt dev struct
  747. */
  748. void rvt_unregister_device(struct rvt_dev_info *rdi)
  749. {
  750. trace_rvt_dbg(rdi, "Driver is unregistering.");
  751. if (!rdi)
  752. return;
  753. rvt_free_mad_agents(rdi);
  754. ib_unregister_device(&rdi->ibdev);
  755. rvt_cq_exit(rdi);
  756. rvt_mr_exit(rdi);
  757. rvt_qp_exit(rdi);
  758. }
  759. EXPORT_SYMBOL(rvt_unregister_device);
  760. /**
  761. * rvt_init_port - init internal data for driver port
  762. * @rdi: rvt dev strut
  763. * @port: rvt port
  764. * @port_index: 0 based index of ports, different from IB core port num
  765. *
  766. * Keep track of a list of ports. No need to have a detach port.
  767. * They persist until the driver goes away.
  768. *
  769. * Return: always 0
  770. */
  771. int rvt_init_port(struct rvt_dev_info *rdi, struct rvt_ibport *port,
  772. int port_index, u16 *pkey_table)
  773. {
  774. rdi->ports[port_index] = port;
  775. rdi->ports[port_index]->pkey_table = pkey_table;
  776. return 0;
  777. }
  778. EXPORT_SYMBOL(rvt_init_port);