#ifndef DEF_RDMA_VT_H
#define DEF_RDMA_VT_H

/*
 * Copyright(c) 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * - Neither the name of Intel Corporation nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

/*
 * Structure that low level drivers will populate in order to register with the
 * rdmavt layer.
 */

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_mad.h>
#include <rdma/rdmavt_mr.h>
#include <rdma/rdmavt_qp.h>

#define RVT_MAX_PKEY_VALUES 16

#define RVT_MAX_TRAP_LEN 100	/* Limit pending trap list */
#define RVT_MAX_TRAP_LISTS 5	/*((IB_NOTICE_TYPE_INFO & 0x0F) + 1)*/
#define RVT_TRAP_TIMEOUT 4096	/* 4.096 usec */

struct trap_list {
	u32 list_len;
	struct list_head list;
};

struct rvt_ibport {
	struct rvt_qp __rcu *qp[2];
	struct ib_mad_agent *send_agent;	/* agent for SMI (traps) */
	struct rb_root mcast_tree;
	spinlock_t lock;		/* protect changes in this struct */

	/* non-zero when timer is set */
	unsigned long mkey_lease_timeout;
	unsigned long trap_timeout;
	__be64 gid_prefix;	/* in network order */
	__be64 mkey;
	u64 tid;
	u32 port_cap_flags;
	u16 port_cap3_flags;
	u32 pma_sample_start;
	u32 pma_sample_interval;
	__be16 pma_counter_select[5];
	u16 pma_tag;
	u16 mkey_lease_period;
	u32 sm_lid;
	u8 sm_sl;
	u8 mkeyprot;
	u8 subnet_timeout;
	u8 vl_high_limit;

	/*
	 * Driver is expected to keep these up to date. These
	 * counters are informational only and not required to be
	 * completely accurate.
	 */
	u64 n_rc_resends;
	u64 n_seq_naks;
	u64 n_rdma_seq;
	u64 n_rnr_naks;
	u64 n_other_naks;
	u64 n_loop_pkts;
	u64 n_pkt_drops;
	u64 n_vl15_dropped;
	u64 n_rc_timeouts;
	u64 n_dmawait;
	u64 n_unaligned;
	u64 n_rc_dupreq;
	u64 n_rc_seqnak;
	u16 pkey_violations;
	u16 qkey_violations;
	u16 mkey_violations;

	/* Hot-path per CPU counters to avoid cacheline trading to update */
	u64 z_rc_acks;
	u64 z_rc_qacks;
	u64 z_rc_delayed_comp;
	u64 __percpu *rc_acks;
	u64 __percpu *rc_qacks;
	u64 __percpu *rc_delayed_comp;

	void *priv;		/* driver private data */

	/*
	 * The pkey table is allocated and maintained by the driver. Drivers
	 * need to have access to this before registering with rdmavt. However
	 * rdmavt will need access to it so drivers need to provide this during
	 * the attach port API call.
	 */
	u16 *pkey_table;

	struct rvt_ah *sm_ah;

	/*
	 * Keep a list of traps that have not been repressed. They will be
	 * resent based on trap_timer.
	 */
	struct trap_list trap_lists[RVT_MAX_TRAP_LISTS];
	struct timer_list trap_timer;
};
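
/*
 * Usage sketch (illustrative, not part of the rdmavt API): the z_* and
 * __percpu counter pairs above exist so the hot path can count without
 * bouncing a shared cacheline. A driver might update them roughly like
 * this, where rvp is a hypothetical struct rvt_ibport pointer:
 *
 *	this_cpu_inc(*rvp->rc_acks);	// per-CPU, no lock needed
 *	rvp->n_rc_resends++;		// informational, may race
 */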

#define RVT_CQN_MAX 16 /* maximum length of cq name */

/*
 * Things that are driver specific, module parameters in hfi1 and qib
 */
struct rvt_driver_params {
	struct ib_device_attr props;

	/*
	 * Anything driver specific that is not covered by props, for
	 * instance special module parameters, goes here.
	 */
	unsigned int lkey_table_size;
	unsigned int qp_table_size;
	int qpn_start;
	int qpn_inc;
	int qpn_res_start;
	int qpn_res_end;
	int nports;
	int npkeys;
	char cq_name[RVT_CQN_MAX];
	int node;
	int psn_mask;
	int psn_shift;
	int psn_modify_mask;
	u32 core_cap_flags;
	u32 max_mad_size;
	u8 qos_shift;
	u8 max_rdma_atomic;
	u8 reserved_operations;
};
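
/*
 * Illustrative sketch (not a prescribed recipe): a driver typically seeds
 * dparms from its module parameters before registering, where rdi is the
 * driver's struct rvt_dev_info (defined later in this file). The field
 * names are real; the values and "mydrv"/unit names are hypothetical:
 *
 *	rdi->dparms.qp_table_size = 256;
 *	rdi->dparms.lkey_table_size = 16;
 *	rdi->dparms.nports = nports;
 *	rdi->dparms.npkeys = RVT_MAX_PKEY_VALUES;
 *	snprintf(rdi->dparms.cq_name, sizeof(rdi->dparms.cq_name),
 *		 "mydrv_cq%d", unit);
 */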

/* Protection domain */
struct rvt_pd {
	struct ib_pd ibpd;
	bool user;
};

/* Address handle */
struct rvt_ah {
	struct ib_ah ibah;
	struct rdma_ah_attr attr;
	atomic_t refcount;
	u8 vl;
	u8 log_pmtu;
};

struct rvt_dev_info;
struct rvt_swqe;

struct rvt_driver_provided {
	/*
	 * Which functions are required depends on which verbs rdmavt is
	 * providing and which verbs the driver is overriding. See
	 * check_support() for details.
	 */

	/* hot path calldowns in a single cacheline */

	/*
	 * Give the driver a notice that there is send work to do. It is up to
	 * the driver to generally push the packets out, this just queues the
	 * work with the driver. There are two variants here. The no_lock
	 * version requires the s_lock not to be held. The other assumes the
	 * s_lock is held.
	 */
	void (*schedule_send)(struct rvt_qp *qp);
	void (*schedule_send_no_lock)(struct rvt_qp *qp);

	/* Driver specific work request checking */
	int (*check_send_wqe)(struct rvt_qp *qp, struct rvt_swqe *wqe);

	/*
	 * Sometimes rdmavt needs to kick the driver's send progress. That is
	 * done by this call back.
	 */
	void (*do_send)(struct rvt_qp *qp);

	/* Passed to ib core registration. Callback to create sysfs files */
	int (*port_callback)(struct ib_device *, u8, struct kobject *);

	/*
	 * Returns a string to represent the device being registered. This is
	 * primarily used for error and debug messages on the console.
	 */
	const char * (*get_card_name)(struct rvt_dev_info *rdi);

	/*
	 * Returns a pointer to the underlying hardware's PCI device. This is
	 * used to display information as to what hardware is being referenced
	 * in an output message.
	 */
	struct pci_dev * (*get_pci_dev)(struct rvt_dev_info *rdi);

	/*
	 * Allocate a private queue pair data structure for driver specific
	 * information which is opaque to rdmavt. Errors are returned via
	 * ERR_PTR(err). The driver is free to return NULL or a valid
	 * pointer.
	 */
	void * (*qp_priv_alloc)(struct rvt_dev_info *rdi, struct rvt_qp *qp);

	/*
	 * Free the driver's private qp structure.
	 */
	void (*qp_priv_free)(struct rvt_dev_info *rdi, struct rvt_qp *qp);

	/*
	 * Inform the driver that the particular qp in question has been reset
	 * so that it can clean up anything it needs to.
	 */
	void (*notify_qp_reset)(struct rvt_qp *qp);

	/*
	 * Get a path mtu from the driver based on qp attributes.
	 */
	int (*get_pmtu_from_attr)(struct rvt_dev_info *rdi, struct rvt_qp *qp,
				  struct ib_qp_attr *attr);

	/*
	 * Notify driver that it needs to flush any outstanding IO requests that
	 * are waiting on a qp.
	 */
	void (*flush_qp_waiters)(struct rvt_qp *qp);

	/*
	 * Notify driver to stop its queue of sending packets. Nothing else
	 * should be posted to the queue pair after this has been called.
	 */
	void (*stop_send_queue)(struct rvt_qp *qp);

	/*
	 * Have the driver drain any in-progress operations.
	 */
	void (*quiesce_qp)(struct rvt_qp *qp);

	/*
	 * Inform the driver that a qp has gone to the error state.
	 */
	void (*notify_error_qp)(struct rvt_qp *qp);

	/*
	 * Get an MTU for a qp.
	 */
	u32 (*mtu_from_qp)(struct rvt_dev_info *rdi, struct rvt_qp *qp,
			   u32 pmtu);

	/*
	 * Convert an mtu to a path mtu.
	 */
	int (*mtu_to_path_mtu)(u32 mtu);

	/*
	 * Get the guid of a port in big endian byte order.
	 */
	int (*get_guid_be)(struct rvt_dev_info *rdi, struct rvt_ibport *rvp,
			   int guid_index, __be64 *guid);

	/*
	 * Query driver for the state of the port.
	 */
	int (*query_port_state)(struct rvt_dev_info *rdi, u8 port_num,
				struct ib_port_attr *props);

	/*
	 * Tell driver to shut down a port.
	 */
	int (*shut_down_port)(struct rvt_dev_info *rdi, u8 port_num);

	/* Tell driver to send a trap for changed port capabilities */
	void (*cap_mask_chg)(struct rvt_dev_info *rdi, u8 port_num);

	/*
	 * The following functions can be safely ignored completely. Any use of
	 * these is checked for NULL before blindly calling. Rdmavt should also
	 * be functional if drivers omit these.
	 */

	/* Called to inform the driver that all qps should now be freed. */
	unsigned (*free_all_qps)(struct rvt_dev_info *rdi);

	/* Driver specific AH validation */
	int (*check_ah)(struct ib_device *, struct rdma_ah_attr *);

	/* Inform the driver a new AH has been created */
	void (*notify_new_ah)(struct ib_device *, struct rdma_ah_attr *,
			      struct rvt_ah *);

	/* Let the driver pick the next queue pair number */
	int (*alloc_qpn)(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
			 enum ib_qp_type type, u8 port_num);

	/* Determine if it is safe or allowed to modify the qp */
	int (*check_modify_qp)(struct rvt_qp *qp, struct ib_qp_attr *attr,
			       int attr_mask, struct ib_udata *udata);

	/* Driver specific QP modification/notification-of */
	void (*modify_qp)(struct rvt_qp *qp, struct ib_qp_attr *attr,
			  int attr_mask, struct ib_udata *udata);

	/* Notify driver a mad agent has been created */
	void (*notify_create_mad_agent)(struct rvt_dev_info *rdi, int port_idx);

	/* Notify driver a mad agent has been removed */
	void (*notify_free_mad_agent)(struct rvt_dev_info *rdi, int port_idx);

	/* Notify driver to restart rc */
	void (*notify_restart_rc)(struct rvt_qp *qp, u32 psn, int wait);
};
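
/*
 * Illustrative sketch (not part of this header): a driver wires up its
 * calldowns in driver_f before calling rvt_register_device(). The mydrv_*
 * callback names are hypothetical; optional calldowns may be left NULL:
 *
 *	rdi->driver_f.schedule_send = mydrv_schedule_send;
 *	rdi->driver_f.schedule_send_no_lock = mydrv_schedule_send_no_lock;
 *	rdi->driver_f.do_send = mydrv_do_send;
 *	rdi->driver_f.qp_priv_alloc = mydrv_qp_priv_alloc;
 *	rdi->driver_f.qp_priv_free = mydrv_qp_priv_free;
 *	rdi->driver_f.notify_qp_reset = mydrv_notify_qp_reset;
 *	rdi->driver_f.check_ah = NULL;
 */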

struct rvt_dev_info {
	struct ib_device ibdev; /* Keep this first. Nothing above here */

	/*
	 * Prior to calling for registration the driver will be responsible for
	 * allocating space for this structure.
	 *
	 * The driver will also be responsible for filling in certain members of
	 * dparms.props. The driver needs to fill in dparms exactly as it would
	 * want values reported to a ULP. This will be returned to the caller
	 * in rdmavt's device. The driver should also therefore refrain from
	 * modifying this directly after registration with rdmavt.
	 */

	/* Driver specific properties */
	struct rvt_driver_params dparms;

	/* post send table */
	const struct rvt_operation_params *post_parms;

	/* Driver specific helper functions */
	struct rvt_driver_provided driver_f;

	struct rvt_mregion __rcu *dma_mr;
	struct rvt_lkey_table lkey_table;

	/* Internal use */
	int n_pds_allocated;
	spinlock_t n_pds_lock;	/* Protect pd allocated count */

	int n_ahs_allocated;
	spinlock_t n_ahs_lock;	/* Protect ah allocated count */

	u32 n_srqs_allocated;
	spinlock_t n_srqs_lock;	/* Protect srqs allocated count */

	int flags;
	struct rvt_ibport **ports;

	/* QP */
	struct rvt_qp_ibdev *qp_dev;
	u32 n_qps_allocated;	/* number of QPs allocated for device */
	u32 n_rc_qps;		/* number of RC QPs allocated for device */
	u32 busy_jiffies;	/* timeout scaling based on RC QP count */
	spinlock_t n_qps_lock;	/* protect qps, rc qps and busy jiffy counts */

	/* memory maps */
	struct list_head pending_mmaps;
	spinlock_t mmap_offset_lock;	/* protect mmap_offset */
	u32 mmap_offset;
	spinlock_t pending_lock;	/* protect pending mmap list */

	/* CQ */
	struct kthread_worker __rcu *worker;	/* per device cq worker */
	u32 n_cqs_allocated;	/* number of CQs allocated for device */
	spinlock_t n_cqs_lock;	/* protect count of in use cqs */

	/* Multicast */
	u32 n_mcast_grps_allocated;	/* number of mcast groups allocated */
	spinlock_t n_mcast_grps_lock;
};
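
/*
 * Illustrative registration sketch (assumptions, not an exact recipe): a
 * driver embeds struct rvt_dev_info at the start of its own device struct
 * (struct mydrv_devdata here is hypothetical), allocates it with
 * rvt_alloc_device(), fills dparms and driver_f, sets up each port with
 * rvt_init_port(), and then registers:
 *
 *	struct rvt_dev_info *rdi;
 *
 *	rdi = rvt_alloc_device(sizeof(struct mydrv_devdata), nports);
 *	if (!rdi)
 *		return -ENOMEM;
 *	// fill rdi->dparms and rdi->driver_f, then for each port:
 *	// rvt_init_port(rdi, &port->rvp, port_index, pkey_table);
 *	ret = rvt_register_device(rdi);
 *	if (ret)
 *		rvt_dealloc_device(rdi);
 */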

static inline struct rvt_pd *ibpd_to_rvtpd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct rvt_pd, ibpd);
}

static inline struct rvt_ah *ibah_to_rvtah(struct ib_ah *ibah)
{
	return container_of(ibah, struct rvt_ah, ibah);
}

static inline struct rvt_dev_info *ib_to_rvt(struct ib_device *ibdev)
{
	return container_of(ibdev, struct rvt_dev_info, ibdev);
}

static inline struct rvt_srq *ibsrq_to_rvtsrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct rvt_srq, ibsrq);
}

static inline struct rvt_qp *ibqp_to_rvtqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct rvt_qp, ibqp);
}
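
/*
 * Usage sketch: these container_of() helpers recover the rdmavt wrapper
 * from the ib_* object a verbs caller hands in. For example, a callback
 * that receives a struct ib_qp *ibqp can get back to the rvt_qp and its
 * device:
 *
 *	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
 *	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
 */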

static inline unsigned rvt_get_npkeys(struct rvt_dev_info *rdi)
{
	/*
	 * All ports have same number of pkeys.
	 */
	return rdi->dparms.npkeys;
}

/*
 * Return the max atomic suitable for determining
 * the size of the ack ring buffer in a QP.
 */
static inline unsigned int rvt_max_atomic(struct rvt_dev_info *rdi)
{
	return rdi->dparms.max_rdma_atomic + 1;
}
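
/*
 * Sizing sketch (illustrative assumption): the "+ 1" gives one slot per
 * outstanding RDMA read/atomic plus a spare, so an ack queue allocation
 * might look roughly like:
 *
 *	qp->s_ack_queue = kcalloc(rvt_max_atomic(rdi),
 *				  sizeof(*qp->s_ack_queue), GFP_KERNEL);
 */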

/*
 * Return the indexed PKEY from the port PKEY table.
 */
static inline u16 rvt_get_pkey(struct rvt_dev_info *rdi,
			       int port_index,
			       unsigned index)
{
	if (index >= rvt_get_npkeys(rdi))
		return 0;
	else
		return rdi->ports[port_index]->pkey_table[index];
}
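
/*
 * Usage sketch: port_index is the zero-based index into rdi->ports
 * (typically the IB port number minus one), and an out-of-range pkey
 * index simply reads back as 0:
 *
 *	u16 pkey = rvt_get_pkey(rdi, port_num - 1, idx);
 */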

/**
 * rvt_lookup_qpn - return the QP with the given QPN
 * @rdi: the rvt device info
 * @rvp: the ibport
 * @qpn: the QP number to look up
 *
 * The caller must hold the rcu_read_lock(), and keep the lock until
 * the returned qp is no longer in use.
 */
/* TODO: Remove this and put in rdmavt/qp.h when no longer needed by drivers */
static inline struct rvt_qp *rvt_lookup_qpn(struct rvt_dev_info *rdi,
					    struct rvt_ibport *rvp,
					    u32 qpn) __must_hold(RCU)
{
	struct rvt_qp *qp = NULL;

	if (unlikely(qpn <= 1)) {
		qp = rcu_dereference(rvp->qp[qpn]);
	} else {
		u32 n = hash_32(qpn, rdi->qp_dev->qp_table_bits);

		for (qp = rcu_dereference(rdi->qp_dev->qp_table[n]); qp;
		     qp = rcu_dereference(qp->next))
			if (qp->ibqp.qp_num == qpn)
				break;
	}
	return qp;
}
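
/*
 * Usage sketch: as the kernel-doc above requires, the lookup is only valid
 * under RCU; callers bracket it with rcu_read_lock()/rcu_read_unlock() and
 * must not use the returned pointer after dropping the lock:
 *
 *	rcu_read_lock();
 *	qp = rvt_lookup_qpn(rdi, rvp, qpn);
 *	if (qp)
 *		// ... use qp ...
 *	rcu_read_unlock();
 */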

/**
 * rvt_mod_retry_timer - mod a retry timer
 * @qp: the QP
 *
 * Modify a potentially already running retry timer.
 */
static inline void rvt_mod_retry_timer(struct rvt_qp *qp)
{
	struct ib_qp *ibqp = &qp->ibqp;
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

	lockdep_assert_held(&qp->s_lock);
	qp->s_flags |= RVT_S_TIMER;
	/* 4.096 usec. * (1 << qp->timeout) */
	mod_timer(&qp->s_timer, jiffies + qp->timeout_jiffies +
		  rdi->busy_jiffies);
}
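
/*
 * Usage sketch: as the lockdep assertion documents, the caller must hold
 * the QP's send lock, e.g. from a driver's retry path:
 *
 *	spin_lock_irqsave(&qp->s_lock, flags);
 *	rvt_mod_retry_timer(qp);
 *	spin_unlock_irqrestore(&qp->s_lock, flags);
 */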

struct rvt_dev_info *rvt_alloc_device(size_t size, int nports);
void rvt_dealloc_device(struct rvt_dev_info *rdi);
int rvt_register_device(struct rvt_dev_info *rvd);
void rvt_unregister_device(struct rvt_dev_info *rvd);
int rvt_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr);
int rvt_init_port(struct rvt_dev_info *rdi, struct rvt_ibport *port,
		  int port_index, u16 *pkey_table);
int rvt_fast_reg_mr(struct rvt_qp *qp, struct ib_mr *ibmr, u32 key,
		    int access);
int rvt_invalidate_rkey(struct rvt_qp *qp, u32 rkey);
int rvt_rkey_ok(struct rvt_qp *qp, struct rvt_sge *sge,
		u32 len, u64 vaddr, u32 rkey, int acc);
int rvt_lkey_ok(struct rvt_lkey_table *rkt, struct rvt_pd *pd,
		struct rvt_sge *isge, struct rvt_sge *last_sge,
		struct ib_sge *sge, int acc);
struct rvt_mcast *rvt_mcast_find(struct rvt_ibport *ibp, union ib_gid *mgid,
				 u16 lid);

#endif /* DEF_RDMA_VT_H */