/*
 *  IBM eServer eHCA Infiniband device driver for Linux on POWER
 *
 *  Struct definition for eHCA internal structures
 *
 *  Authors: Heiko J Schick <schickhj@de.ibm.com>
 *           Christoph Raisch <raisch@de.ibm.com>
 *           Joachim Fenkes <fenkes@de.ibm.com>
 *
 *  Copyright (c) 2005 IBM Corporation
 *
 *  All rights reserved.
 *
 *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
 *  BSD.
 *
 * OpenIB BSD License
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __EHCA_CLASSES_H__
#define __EHCA_CLASSES_H__

struct ehca_module;
struct ehca_qp;
struct ehca_cq;
struct ehca_eq;
struct ehca_mr;
struct ehca_mw;
struct ehca_pd;
struct ehca_av;

#include <linux/wait.h>
#include <linux/mutex.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>

#ifdef CONFIG_PPC64
#include "ehca_classes_pSeries.h"
#endif
#include "ipz_pt_fn.h"
#include "ehca_qes.h"
#include "ehca_irq.h"

#define EHCA_EQE_CACHE_SIZE 20
#define EHCA_MAX_NUM_QUEUES 0xffff

struct ehca_eqe_cache_entry {
	struct ehca_eqe *eqe;
	struct ehca_cq *cq;
};

struct ehca_eq {
	u32 length;
	struct ipz_queue ipz_queue;
	struct ipz_eq_handle ipz_eq_handle;
	struct work_struct work;
	struct h_galpas galpas;
	int is_initialized;
	struct ehca_pfeq pf;
	spinlock_t spinlock;
	struct tasklet_struct interrupt_task;
	u32 ist;
	spinlock_t irq_spinlock;
	struct ehca_eqe_cache_entry eqe_cache[EHCA_EQE_CACHE_SIZE];
};

struct ehca_sma_attr {
	u16 lid, lmc, sm_sl, sm_lid;
	u16 pkey_tbl_len, pkeys[16];
};

struct ehca_sport {
	struct ib_cq *ibcq_aqp1;
	struct ib_qp *ibqp_sqp[2];
	/* lock to serialize modify_qp() calls for sqp in normal
	 * and irq path (when event PORT_ACTIVE is received first time)
	 */
	spinlock_t mod_sqp_lock;
	enum ib_port_state port_state;
	struct ehca_sma_attr saved_attr;
	u32 pma_qp_nr;
};

#define HCA_CAP_MR_PGSIZE_4K  0x80000000
#define HCA_CAP_MR_PGSIZE_64K 0x40000000
#define HCA_CAP_MR_PGSIZE_1M  0x20000000
#define HCA_CAP_MR_PGSIZE_16M 0x10000000

struct ehca_shca {
	struct ib_device ib_device;
	struct platform_device *ofdev;
	u8 num_ports;
	int hw_level;
	struct list_head shca_list;
	struct ipz_adapter_handle ipz_hca_handle;
	struct ehca_sport sport[2];
	struct ehca_eq eq;
	struct ehca_eq neq;
	struct ehca_mr *maxmr;
	struct ehca_pd *pd;
	struct h_galpas galpas;
	struct mutex modify_mutex;
	u64 hca_cap;
	/* MR pgsize: bit 0-3 means 4K, 64K, 1M, 16M respectively */
	u32 hca_cap_mr_pgsize;
	int max_mtu;
	int max_num_qps;
	int max_num_cqs;
	atomic_t num_cqs;
	atomic_t num_qps;
};

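/*
 * Illustrative example (editorial addition, not part of the original source):
 * assuming, consistent with the member comment above, that hca_cap_mr_pgsize
 * carries the HCA_CAP_MR_PGSIZE_* capability bits reported by the firmware, a
 * supported MR page size could be tested roughly like this:
 *
 *	if (shca->hca_cap_mr_pgsize & HCA_CAP_MR_PGSIZE_64K)
 *		...64K hardware pages may be used for this MR...
 */
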
struct ehca_pd {
	struct ib_pd ib_pd;
	struct ipz_pd fw_pd;
	/* small queue mgmt */
	struct mutex lock;
	struct list_head free[2];
	struct list_head full[2];
};

enum ehca_ext_qp_type {
	EQPT_NORMAL    = 0,
	EQPT_LLQP      = 1,
	EQPT_SRQBASE   = 2,
	EQPT_SRQ       = 3,
};

/* struct to cache modify_qp()'s parms for GSI/SMI qp */
struct ehca_mod_qp_parm {
	int mask;
	struct ib_qp_attr attr;
};

#define EHCA_MOD_QP_PARM_MAX 4

#define QMAP_IDX_MASK 0xFFFFULL

/* struct for tracking if cqes have been reported to the application */
struct ehca_qmap_entry {
	u16 app_wr_id;
	u8 reported;
	u8 cqe_req;
};

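/*
 * Illustrative note (assumption about the companion request/completion code,
 * not stated in this header): the low 16 bits of a WQE's work request id are
 * expected to hold the qmap index, while app_wr_id caches the low 16 bits of
 * the application's original wr_id. On completion the caller-visible wr_id can
 * then be rebuilt along the lines of:
 *
 *	wc->wr_id = (cqe->work_request_id & ~QMAP_IDX_MASK) |
 *		    (qmap_entry->app_wr_id & QMAP_IDX_MASK);
 */
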
struct ehca_queue_map {
	struct ehca_qmap_entry *map;
	unsigned int entries;
	unsigned int tail;
	unsigned int left_to_poll;
	unsigned int next_wqe_idx;   /* Idx to first wqe to be flushed */
};

/* function to calculate the next index for the qmap */
static inline unsigned int next_index(unsigned int cur_index, unsigned int limit)
{
	unsigned int temp = cur_index + 1;

	return (temp == limit) ? 0 : temp;
}

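/*
 * Illustrative example (editorial addition): next_index() lets callers walk a
 * queue map as a ring, wrapping back to entry 0 once 'limit' is reached, e.g.:
 *
 *	unsigned int idx = qmap->tail;
 *
 *	while (idx != qmap->next_wqe_idx) {
 *		...inspect qmap->map[idx]...
 *		idx = next_index(idx, qmap->entries);
 *	}
 */
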
struct ehca_qp {
	union {
		struct ib_qp ib_qp;
		struct ib_srq ib_srq;
	};
	u32 qp_type;
	enum ehca_ext_qp_type ext_type;
	enum ib_qp_state state;
	struct ipz_queue ipz_squeue;
	struct ehca_queue_map sq_map;
	struct ipz_queue ipz_rqueue;
	struct ehca_queue_map rq_map;
	struct h_galpas galpas;
	u32 qkey;
	u32 real_qp_num;
	u32 token;
	spinlock_t spinlock_s;
	spinlock_t spinlock_r;
	u32 sq_max_inline_data_size;
	struct ipz_qp_handle ipz_qp_handle;
	struct ehca_pfqp pf;
	struct ib_qp_init_attr init_attr;
	struct ehca_cq *send_cq;
	struct ehca_cq *recv_cq;
	unsigned int sqerr_purgeflag;
	struct hlist_node list_entries;
	/* array to cache modify_qp()'s parms for GSI/SMI qp */
	struct ehca_mod_qp_parm *mod_qp_parm;
	int mod_qp_parm_idx;
	/* mmap counter for resources mapped into user space */
	u32 mm_count_squeue;
	u32 mm_count_rqueue;
	u32 mm_count_galpa;
	/* unsolicited ack circumvention */
	int unsol_ack_circ;
	int mtu_shift;
	u32 message_count;
	u32 packet_count;
	atomic_t nr_events; /* events seen */
	wait_queue_head_t wait_completion;
	int mig_armed;
	struct list_head sq_err_node;
	struct list_head rq_err_node;
};

#define IS_SRQ(qp) ((qp)->ext_type == EQPT_SRQ)
#define HAS_SQ(qp) ((qp)->ext_type != EQPT_SRQ)
#define HAS_RQ(qp) ((qp)->ext_type != EQPT_SRQBASE)

/* must be power of 2 */
#define QP_HASHTAB_LEN 8

struct ehca_cq {
	struct ib_cq ib_cq;
	struct ipz_queue ipz_queue;
	struct h_galpas galpas;
	spinlock_t spinlock;
	u32 cq_number;
	u32 token;
	u32 nr_of_entries;
	struct ipz_cq_handle ipz_cq_handle;
	struct ehca_pfcq pf;
	spinlock_t cb_lock;
	struct hlist_head qp_hashtab[QP_HASHTAB_LEN];
	struct list_head entry;
	u32 nr_callbacks;   /* #events assigned to cpu by scaling code */
	atomic_t nr_events; /* #events seen */
	wait_queue_head_t wait_completion;
	spinlock_t task_lock;
	/* mmap counter for resources mapped into user space */
	u32 mm_count_queue;
	u32 mm_count_galpa;
	struct list_head sqp_err_list;
	struct list_head rqp_err_list;
};

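/*
 * Illustrative note (assumption about the companion .c files, not stated in
 * this header): qp_hashtab maps QP numbers to ehca_qp structures so that CQ
 * and event handling can find the owning QP quickly. Because QP_HASHTAB_LEN is
 * a power of 2, the bucket would typically be picked as:
 *
 *	unsigned int key = qp_num & (QP_HASHTAB_LEN - 1);
 *	hlist_add_head(&qp->list_entries, &cq->qp_hashtab[key]);
 *
 * ehca_cq_assign_qp()/ehca_cq_get_qp(), declared at the end of this file, are
 * assumed to implement this scheme.
 */
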
enum ehca_mr_flag {
	EHCA_MR_FLAG_FMR = 0x80000000,	 /* FMR, created with ehca_alloc_fmr */
	EHCA_MR_FLAG_MAXMR = 0x40000000, /* max-MR */
};

struct ehca_mr {
	union {
		struct ib_mr ib_mr;	/* must always be first in ehca_mr */
		struct ib_fmr ib_fmr;	/* must always be first in ehca_mr */
	} ib;
	struct ib_umem *umem;
	spinlock_t mrlock;

	enum ehca_mr_flag flags;
	u32 num_kpages;		/* number of kernel pages */
	u32 num_hwpages;	/* number of hw pages to form MR */
	u64 hwpage_size;	/* hw page size used for this MR */
	int acl;		/* ACL (stored here for usage in reregister) */
	u64 *start;		/* virtual start address (stored here for */
				/* usage in reregister) */
	u64 size;		/* size (stored here for usage in reregister) */
	u32 fmr_page_size;	/* page size for FMR */
	u32 fmr_max_pages;	/* max pages for FMR */
	u32 fmr_max_maps;	/* max outstanding maps for FMR */
	u32 fmr_map_cnt;	/* map counter for FMR */
	/* fw specific data */
	struct ipz_mrmw_handle ipz_mr_handle;	/* MR handle for h-calls */
	struct h_galpas galpas;
};

struct ehca_mw {
	struct ib_mw ib_mw;	/* gen2 mw, must always be first in ehca_mw */
	spinlock_t mwlock;

	u8 never_bound;		/* indication MW was never bound */
	struct ipz_mrmw_handle ipz_mw_handle;	/* MW handle for h-calls */
	struct h_galpas galpas;
};

enum ehca_mr_pgi_type {
	EHCA_MR_PGI_PHYS = 1, /* type of ehca_reg_phys_mr,
			       * ehca_rereg_phys_mr,
			       * ehca_reg_internal_maxmr */
	EHCA_MR_PGI_USER = 2, /* type of ehca_reg_user_mr */
	EHCA_MR_PGI_FMR  = 3  /* type of ehca_map_phys_fmr */
};

struct ehca_mr_pginfo {
	enum ehca_mr_pgi_type type;
	u64 num_kpages;
	u64 kpage_cnt;
	u64 hwpage_size;	/* hw page size used for this MR */
	u64 num_hwpages;	/* number of hw pages */
	u64 hwpage_cnt;		/* counter for hw pages */
	u64 next_hwpage;	/* next hw page in buffer/chunk/listelem */

	union {
		struct { /* type EHCA_MR_PGI_PHYS section */
			int num_phys_buf;
			struct ib_phys_buf *phys_buf_array;
			u64 next_buf;
		} phy;
		struct { /* type EHCA_MR_PGI_USER section */
			struct ib_umem *region;
			struct ib_umem_chunk *next_chunk;
			u64 next_nmap;
		} usr;
		struct { /* type EHCA_MR_PGI_FMR section */
			u64 fmr_pgsize;
			u64 *page_list;
			u64 next_listelem;
		} fmr;
	} u;
};

/* output parameters for MR/FMR hipz calls */
struct ehca_mr_hipzout_parms {
	struct ipz_mrmw_handle handle;
	u32 lkey;
	u32 rkey;
	u64 len;
	u64 vaddr;
	u32 acl;
};

/* output parameters for MW hipz calls */
struct ehca_mw_hipzout_parms {
	struct ipz_mrmw_handle handle;
	u32 rkey;
};

struct ehca_av {
	struct ib_ah ib_ah;
	struct ehca_ud_av av;
};

struct ehca_ucontext {
	struct ib_ucontext ib_ucontext;
};

int ehca_init_pd_cache(void);
void ehca_cleanup_pd_cache(void);
int ehca_init_cq_cache(void);
void ehca_cleanup_cq_cache(void);
int ehca_init_qp_cache(void);
void ehca_cleanup_qp_cache(void);
int ehca_init_av_cache(void);
void ehca_cleanup_av_cache(void);
int ehca_init_mrmw_cache(void);
void ehca_cleanup_mrmw_cache(void);
int ehca_init_small_qp_cache(void);
void ehca_cleanup_small_qp_cache(void);

extern rwlock_t ehca_qp_idr_lock;
extern rwlock_t ehca_cq_idr_lock;
extern struct idr ehca_qp_idr;
extern struct idr ehca_cq_idr;
extern spinlock_t shca_list_lock;

extern int ehca_static_rate;
extern int ehca_port_act_time;
extern bool ehca_use_hp_mr;
extern bool ehca_scaling_code;
extern int ehca_lock_hcalls;
extern int ehca_nr_ports;
extern int ehca_max_cq;
extern int ehca_max_qp;

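/*
 * Note (assumption, not stated in this header): the response structures below
 * are presumed to form the user-space ABI of this driver; on create_cq/
 * create_qp with a user context they would be copied back through udata (e.g.
 * via ib_copy_to_udata()) so that user space can mmap and drive its queues.
 */
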
struct ipzu_queue_resp {
	u32 qe_size;      /* queue entry size */
	u32 act_nr_of_sg;
	u32 queue_length; /* queue length allocated in bytes */
	u32 pagesize;
	u32 toggle_state;
	u32 offset;       /* save offset within a page for small_qp */
};

struct ehca_create_cq_resp {
	u32 cq_number;
	u32 token;
	struct ipzu_queue_resp ipz_queue;
	u32 fw_handle_ofs;
	u32 dummy;
};

struct ehca_create_qp_resp {
	u32 qp_num;
	u32 token;
	u32 qp_type;
	u32 ext_type;
	u32 qkey;
	/* qp_num assigned by ehca: sqp0/1 may have got different numbers */
	u32 real_qp_num;
	u32 fw_handle_ofs;
	u32 dummy;
	struct ipzu_queue_resp ipz_squeue;
	struct ipzu_queue_resp ipz_rqueue;
};

struct ehca_alloc_cq_parms {
	u32 nr_cqe;
	u32 act_nr_of_entries;
	u32 act_pages;
	struct ipz_eq_handle eq_handle;
};

enum ehca_service_type {
	ST_RC  = 0,
	ST_UC  = 1,
	ST_RD  = 2,
	ST_UD  = 3,
};

enum ehca_ll_comp_flags {
	LLQP_SEND_COMP = 0x20,
	LLQP_RECV_COMP = 0x40,
	LLQP_COMP_MASK = 0x60,
};

struct ehca_alloc_queue_parms {
	/* input parameters */
	int max_wr;
	int max_sge;
	int page_size;
	int is_small;

	/* output parameters */
	u16 act_nr_wqes;
	u8  act_nr_sges;
	u32 queue_size; /* bytes for small queues, pages otherwise */
};

struct ehca_alloc_qp_parms {
	struct ehca_alloc_queue_parms squeue;
	struct ehca_alloc_queue_parms rqueue;

	/* input parameters */
	enum ehca_service_type servicetype;
	int qp_storage;
	int sigtype;
	enum ehca_ext_qp_type ext_type;
	enum ehca_ll_comp_flags ll_comp_flags;
	int ud_av_l_key_ctl;

	u32 token;
	struct ipz_eq_handle eq_handle;
	struct ipz_pd pd;
	struct ipz_cq_handle send_cq_handle, recv_cq_handle;

	u32 srq_qpn, srq_token, srq_limit;

	/* output parameters */
	u32 real_qp_num;
	struct ipz_qp_handle qp_handle;
	struct h_galpas galpas;
};

int ehca_cq_assign_qp(struct ehca_cq *cq, struct ehca_qp *qp);
int ehca_cq_unassign_qp(struct ehca_cq *cq, unsigned int qp_num);
struct ehca_qp *ehca_cq_get_qp(struct ehca_cq *cq, int qp_num);

#endif