
/*
 *  linux/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
 *
 *  eHEA ethernet device driver for IBM eServer System p
 *
 *  (C) Copyright IBM Corp. 2006
 *
 *  Authors:
 *	 Christoph Raisch <raisch@de.ibm.com>
 *	 Jan-Bernd Themann <themann@de.ibm.com>
 *	 Thomas Klein <tklein@de.ibm.com>
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/slab.h>
#include "ehea.h"
#include "ehea_phyp.h"
#include "ehea_qmr.h"

static struct ehea_bmap *ehea_bmap;
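
/*
 * hw_qpageit_get_inc - return the current hardware queue page and advance
 * the iterator by one queue page. Returns NULL once the end of the queue
 * is reached or if the page is not aligned to EHEA_PAGESIZE.
 */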
static void *hw_qpageit_get_inc(struct hw_queue *queue)
{
	void *retvalue = hw_qeit_get(queue);

	queue->current_q_offset += queue->pagesize;
	if (queue->current_q_offset > queue->queue_length) {
		queue->current_q_offset -= queue->pagesize;
		retvalue = NULL;
	} else if (((u64) retvalue) & (EHEA_PAGESIZE-1)) {
		pr_err("not on pageboundary\n");
		retvalue = NULL;
	}
	return retvalue;
}
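
/*
 * hw_queue_ctor - allocate and zero the backing pages for a hardware queue.
 * The queue is built from EHEA-sized sub-pages carved out of whole kernel
 * pages, so the hea pagesize must not exceed PAGE_SIZE.
 */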
static int hw_queue_ctor(struct hw_queue *queue, const u32 nr_of_pages,
			 const u32 pagesize, const u32 qe_size)
{
	int pages_per_kpage = PAGE_SIZE / pagesize;
	int i, k;

	if ((pagesize > PAGE_SIZE) || (!pages_per_kpage)) {
		pr_err("pagesize conflict! kernel pagesize=%d, ehea pagesize=%d\n",
		       (int)PAGE_SIZE, (int)pagesize);
		return -EINVAL;
	}

	queue->queue_length = nr_of_pages * pagesize;
	queue->queue_pages = kmalloc_array(nr_of_pages, sizeof(void *),
					   GFP_KERNEL);
	if (!queue->queue_pages)
		return -ENOMEM;

	/*
	 * allocate pages for queue:
	 * outer loop allocates whole kernel pages (page aligned) and
	 * inner loop divides a kernel page into smaller hea queue pages
	 */
	i = 0;
	while (i < nr_of_pages) {
		u8 *kpage = (u8 *)get_zeroed_page(GFP_KERNEL);
		if (!kpage)
			goto out_nomem;
		for (k = 0; k < pages_per_kpage && i < nr_of_pages; k++) {
			(queue->queue_pages)[i] = (struct ehea_page *)kpage;
			kpage += pagesize;
			i++;
		}
	}

	queue->current_q_offset = 0;
	queue->qe_size = qe_size;
	queue->pagesize = pagesize;
	queue->toggle_state = 1;

	return 0;
out_nomem:
	for (i = 0; i < nr_of_pages; i += pages_per_kpage) {
		if (!(queue->queue_pages)[i])
			break;
		free_page((unsigned long)(queue->queue_pages)[i]);
	}
	return -ENOMEM;
}
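
/*
 * hw_queue_dtor - release the kernel pages backing a hardware queue and
 * free the page pointer array. Only every pages_per_kpage'th entry points
 * to the start of a kernel page, so only those are passed to free_page().
 */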
static void hw_queue_dtor(struct hw_queue *queue)
{
	int pages_per_kpage;
	int i, nr_pages;

	if (!queue || !queue->queue_pages)
		return;

	pages_per_kpage = PAGE_SIZE / queue->pagesize;
	nr_pages = queue->queue_length / queue->pagesize;

	for (i = 0; i < nr_pages; i += pages_per_kpage)
		free_page((unsigned long)(queue->queue_pages)[i]);

	kfree(queue->queue_pages);
}
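
/*
 * ehea_create_cq - allocate a completion queue resource in the hypervisor,
 * build its backing hardware queue and register every queue page with
 * firmware. Returns the new CQ or NULL on failure.
 */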
struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter,
			       int nr_of_cqe, u64 eq_handle, u32 cq_token)
{
	struct ehea_cq *cq;
	struct h_epa epa;
	u64 *cq_handle_ref, hret, rpage;
	u32 act_nr_of_entries, act_pages, counter;
	int ret;
	void *vpage;

	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq)
		goto out_nomem;

	cq->attr.max_nr_of_cqes = nr_of_cqe;
	cq->attr.cq_token = cq_token;
	cq->attr.eq_handle = eq_handle;

	cq->adapter = adapter;

	cq_handle_ref = &cq->fw_handle;
	act_nr_of_entries = 0;
	act_pages = 0;

	hret = ehea_h_alloc_resource_cq(adapter->handle, &cq->attr,
					&cq->fw_handle, &cq->epas);
	if (hret != H_SUCCESS) {
		pr_err("alloc_resource_cq failed\n");
		goto out_freemem;
	}

	ret = hw_queue_ctor(&cq->hw_queue, cq->attr.nr_pages,
			    EHEA_PAGESIZE, sizeof(struct ehea_cqe));
	if (ret)
		goto out_freeres;

	for (counter = 0; counter < cq->attr.nr_pages; counter++) {
		vpage = hw_qpageit_get_inc(&cq->hw_queue);
		if (!vpage) {
			pr_err("hw_qpageit_get_inc failed\n");
			goto out_kill_hwq;
		}

		rpage = __pa(vpage);
		hret = ehea_h_register_rpage(adapter->handle,
					     0, EHEA_CQ_REGISTER_ORIG,
					     cq->fw_handle, rpage, 1);
		if (hret < H_SUCCESS) {
			pr_err("register_rpage_cq failed ehea_cq=%p hret=%llx counter=%i act_pages=%i\n",
			       cq, hret, counter, cq->attr.nr_pages);
			goto out_kill_hwq;
		}

		if (counter == (cq->attr.nr_pages - 1)) {
			vpage = hw_qpageit_get_inc(&cq->hw_queue);

			if ((hret != H_SUCCESS) || (vpage)) {
				pr_err("registration of pages not complete hret=%llx\n",
				       hret);
				goto out_kill_hwq;
			}
		} else {
			if (hret != H_PAGE_REGISTERED) {
				pr_err("CQ: registration of page failed hret=%llx\n",
				       hret);
				goto out_kill_hwq;
			}
		}
	}

	hw_qeit_reset(&cq->hw_queue);
	epa = cq->epas.kernel;
	ehea_reset_cq_ep(cq);
	ehea_reset_cq_n1(cq);

	return cq;

out_kill_hwq:
	hw_queue_dtor(&cq->hw_queue);

out_freeres:
	ehea_h_free_resource(adapter->handle, cq->fw_handle, FORCE_FREE);

out_freemem:
	kfree(cq);

out_nomem:
	return NULL;
}

static u64 ehea_destroy_cq_res(struct ehea_cq *cq, u64 force)
{
	u64 hret;
	u64 adapter_handle = cq->adapter->handle;

	/* deregister all previous registered pages */
	hret = ehea_h_free_resource(adapter_handle, cq->fw_handle, force);
	if (hret != H_SUCCESS)
		return hret;

	hw_queue_dtor(&cq->hw_queue);
	kfree(cq);

	return hret;
}

int ehea_destroy_cq(struct ehea_cq *cq)
{
	u64 hret, aer, aerr;
	if (!cq)
		return 0;

	hcp_epas_dtor(&cq->epas);
	hret = ehea_destroy_cq_res(cq, NORMAL_FREE);
	if (hret == H_R_STATE) {
		ehea_error_data(cq->adapter, cq->fw_handle, &aer, &aerr);
		hret = ehea_destroy_cq_res(cq, FORCE_FREE);
	}

	if (hret != H_SUCCESS) {
		pr_err("destroy CQ failed\n");
		return -EIO;
	}

	return 0;
}
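
/*
 * ehea_create_eq - allocate an event queue resource in the hypervisor and
 * register its backing pages. The EQ spinlock serialises polling against
 * teardown. Returns the new EQ or NULL on failure.
 */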
struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter,
			       const enum ehea_eq_type type,
			       const u32 max_nr_of_eqes, const u8 eqe_gen)
{
	int ret, i;
	u64 hret, rpage;
	void *vpage;
	struct ehea_eq *eq;

	eq = kzalloc(sizeof(*eq), GFP_KERNEL);
	if (!eq)
		return NULL;

	eq->adapter = adapter;
	eq->attr.type = type;
	eq->attr.max_nr_of_eqes = max_nr_of_eqes;
	eq->attr.eqe_gen = eqe_gen;
	spin_lock_init(&eq->spinlock);

	hret = ehea_h_alloc_resource_eq(adapter->handle,
					&eq->attr, &eq->fw_handle);
	if (hret != H_SUCCESS) {
		pr_err("alloc_resource_eq failed\n");
		goto out_freemem;
	}

	ret = hw_queue_ctor(&eq->hw_queue, eq->attr.nr_pages,
			    EHEA_PAGESIZE, sizeof(struct ehea_eqe));
	if (ret) {
		pr_err("can't allocate eq pages\n");
		goto out_freeres;
	}

	for (i = 0; i < eq->attr.nr_pages; i++) {
		vpage = hw_qpageit_get_inc(&eq->hw_queue);
		if (!vpage) {
			pr_err("hw_qpageit_get_inc failed\n");
			hret = H_RESOURCE;
			goto out_kill_hwq;
		}

		rpage = __pa(vpage);

		hret = ehea_h_register_rpage(adapter->handle, 0,
					     EHEA_EQ_REGISTER_ORIG,
					     eq->fw_handle, rpage, 1);

		if (i == (eq->attr.nr_pages - 1)) {
			/* last page */
			vpage = hw_qpageit_get_inc(&eq->hw_queue);
			if ((hret != H_SUCCESS) || (vpage))
				goto out_kill_hwq;
		} else {
			if (hret != H_PAGE_REGISTERED)
				goto out_kill_hwq;
		}
	}

	hw_qeit_reset(&eq->hw_queue);
	return eq;

out_kill_hwq:
	hw_queue_dtor(&eq->hw_queue);

out_freeres:
	ehea_h_free_resource(adapter->handle, eq->fw_handle, FORCE_FREE);

out_freemem:
	kfree(eq);
	return NULL;
}

struct ehea_eqe *ehea_poll_eq(struct ehea_eq *eq)
{
	struct ehea_eqe *eqe;
	unsigned long flags;

	spin_lock_irqsave(&eq->spinlock, flags);
	eqe = hw_eqit_eq_get_inc_valid(&eq->hw_queue);
	spin_unlock_irqrestore(&eq->spinlock, flags);

	return eqe;
}

static u64 ehea_destroy_eq_res(struct ehea_eq *eq, u64 force)
{
	u64 hret;
	unsigned long flags;

	spin_lock_irqsave(&eq->spinlock, flags);
	hret = ehea_h_free_resource(eq->adapter->handle, eq->fw_handle, force);
	spin_unlock_irqrestore(&eq->spinlock, flags);

	if (hret != H_SUCCESS)
		return hret;

	hw_queue_dtor(&eq->hw_queue);
	kfree(eq);

	return hret;
}

int ehea_destroy_eq(struct ehea_eq *eq)
{
	u64 hret, aer, aerr;
	if (!eq)
		return 0;

	hcp_epas_dtor(&eq->epas);

	hret = ehea_destroy_eq_res(eq, NORMAL_FREE);
	if (hret == H_R_STATE) {
		ehea_error_data(eq->adapter, eq->fw_handle, &aer, &aerr);
		hret = ehea_destroy_eq_res(eq, FORCE_FREE);
	}

	if (hret != H_SUCCESS) {
		pr_err("destroy EQ failed\n");
		return -EIO;
	}

	return 0;
}

/* allocates memory for a queue and registers pages in phyp */
static int ehea_qp_alloc_register(struct ehea_qp *qp, struct hw_queue *hw_queue,
				  int nr_pages, int wqe_size, int act_nr_sges,
				  struct ehea_adapter *adapter, int h_call_q_selector)
{
	u64 hret, rpage;
	int ret, cnt;
	void *vpage;

	ret = hw_queue_ctor(hw_queue, nr_pages, EHEA_PAGESIZE, wqe_size);
	if (ret)
		return ret;

	for (cnt = 0; cnt < nr_pages; cnt++) {
		vpage = hw_qpageit_get_inc(hw_queue);
		if (!vpage) {
			pr_err("hw_qpageit_get_inc failed\n");
			goto out_kill_hwq;
		}
		rpage = __pa(vpage);
		hret = ehea_h_register_rpage(adapter->handle,
					     0, h_call_q_selector,
					     qp->fw_handle, rpage, 1);
		if (hret < H_SUCCESS) {
			pr_err("register_rpage_qp failed\n");
			goto out_kill_hwq;
		}
	}
	hw_qeit_reset(hw_queue);
	return 0;

out_kill_hwq:
	hw_queue_dtor(hw_queue);
	return -EIO;
}

static inline u32 map_wqe_size(u8 wqe_enc_size)
{
	return 128 << wqe_enc_size;
}
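
/*
 * ehea_create_qp - allocate a queue pair resource in the hypervisor and set
 * up its send queue and up to three receive queues, registering the pages
 * of each with firmware. Returns the new QP or NULL on failure.
 */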
struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter,
			       u32 pd, struct ehea_qp_init_attr *init_attr)
{
	int ret;
	u64 hret;
	struct ehea_qp *qp;
	u32 wqe_size_in_bytes_sq, wqe_size_in_bytes_rq1;
	u32 wqe_size_in_bytes_rq2, wqe_size_in_bytes_rq3;

	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	if (!qp)
		return NULL;

	qp->adapter = adapter;

	hret = ehea_h_alloc_resource_qp(adapter->handle, init_attr, pd,
					&qp->fw_handle, &qp->epas);
	if (hret != H_SUCCESS) {
		pr_err("ehea_h_alloc_resource_qp failed\n");
		goto out_freemem;
	}

	wqe_size_in_bytes_sq = map_wqe_size(init_attr->act_wqe_size_enc_sq);
	wqe_size_in_bytes_rq1 = map_wqe_size(init_attr->act_wqe_size_enc_rq1);
	wqe_size_in_bytes_rq2 = map_wqe_size(init_attr->act_wqe_size_enc_rq2);
	wqe_size_in_bytes_rq3 = map_wqe_size(init_attr->act_wqe_size_enc_rq3);

	ret = ehea_qp_alloc_register(qp, &qp->hw_squeue, init_attr->nr_sq_pages,
				     wqe_size_in_bytes_sq,
				     init_attr->act_wqe_size_enc_sq, adapter,
				     0);
	if (ret) {
		pr_err("can't register for sq ret=%x\n", ret);
		goto out_freeres;
	}

	ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue1,
				     init_attr->nr_rq1_pages,
				     wqe_size_in_bytes_rq1,
				     init_attr->act_wqe_size_enc_rq1,
				     adapter, 1);
	if (ret) {
		pr_err("can't register for rq1 ret=%x\n", ret);
		goto out_kill_hwsq;
	}

	if (init_attr->rq_count > 1) {
		ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue2,
					     init_attr->nr_rq2_pages,
					     wqe_size_in_bytes_rq2,
					     init_attr->act_wqe_size_enc_rq2,
					     adapter, 2);
		if (ret) {
			pr_err("can't register for rq2 ret=%x\n", ret);
			goto out_kill_hwr1q;
		}
	}

	if (init_attr->rq_count > 2) {
		ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue3,
					     init_attr->nr_rq3_pages,
					     wqe_size_in_bytes_rq3,
					     init_attr->act_wqe_size_enc_rq3,
					     adapter, 3);
		if (ret) {
			pr_err("can't register for rq3 ret=%x\n", ret);
			goto out_kill_hwr2q;
		}
	}

	qp->init_attr = *init_attr;

	return qp;

out_kill_hwr2q:
	hw_queue_dtor(&qp->hw_rqueue2);

out_kill_hwr1q:
	hw_queue_dtor(&qp->hw_rqueue1);

out_kill_hwsq:
	hw_queue_dtor(&qp->hw_squeue);

out_freeres:
	ehea_h_disable_and_get_hea(adapter->handle, qp->fw_handle);
	ehea_h_free_resource(adapter->handle, qp->fw_handle, FORCE_FREE);

out_freemem:
	kfree(qp);
	return NULL;
}

static u64 ehea_destroy_qp_res(struct ehea_qp *qp, u64 force)
{
	u64 hret;
	struct ehea_qp_init_attr *qp_attr = &qp->init_attr;

	ehea_h_disable_and_get_hea(qp->adapter->handle, qp->fw_handle);
	hret = ehea_h_free_resource(qp->adapter->handle, qp->fw_handle, force);
	if (hret != H_SUCCESS)
		return hret;

	hw_queue_dtor(&qp->hw_squeue);
	hw_queue_dtor(&qp->hw_rqueue1);

	if (qp_attr->rq_count > 1)
		hw_queue_dtor(&qp->hw_rqueue2);
	if (qp_attr->rq_count > 2)
		hw_queue_dtor(&qp->hw_rqueue3);
	kfree(qp);

	return hret;
}

int ehea_destroy_qp(struct ehea_qp *qp)
{
	u64 hret, aer, aerr;
	if (!qp)
		return 0;

	hcp_epas_dtor(&qp->epas);

	hret = ehea_destroy_qp_res(qp, NORMAL_FREE);
	if (hret == H_R_STATE) {
		ehea_error_data(qp->adapter, qp->fw_handle, &aer, &aerr);
		hret = ehea_destroy_qp_res(qp, FORCE_FREE);
	}

	if (hret != H_SUCCESS) {
		pr_err("destroy QP failed\n");
		return -EIO;
	}

	return 0;
}
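
/*
 * The busmap translates kernel memory sections into the contiguous bus
 * address space registered with the HEA. It is a three-level table indexed
 * by top, dir and idx, with one entry per EHEA_SECTSIZE memory section.
 */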
static inline int ehea_calc_index(unsigned long i, unsigned long s)
{
	return (i >> s) & EHEA_INDEX_MASK;
}

static inline int ehea_init_top_bmap(struct ehea_top_bmap *ehea_top_bmap,
				     int dir)
{
	if (!ehea_top_bmap->dir[dir]) {
		ehea_top_bmap->dir[dir] =
			kzalloc(sizeof(struct ehea_dir_bmap), GFP_KERNEL);
		if (!ehea_top_bmap->dir[dir])
			return -ENOMEM;
	}
	return 0;
}

static inline int ehea_init_bmap(struct ehea_bmap *ehea_bmap, int top, int dir)
{
	if (!ehea_bmap->top[top]) {
		ehea_bmap->top[top] =
			kzalloc(sizeof(struct ehea_top_bmap), GFP_KERNEL);
		if (!ehea_bmap->top[top])
			return -ENOMEM;
	}
	return ehea_init_top_bmap(ehea_bmap->top[top], dir);
}

static DEFINE_MUTEX(ehea_busmap_mutex);
static unsigned long ehea_mr_len;

#define EHEA_BUSMAP_ADD_SECT 1
#define EHEA_BUSMAP_REM_SECT 0
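
/*
 * ehea_rebuild_busmap - walk the whole busmap and assign each valid section
 * a new, contiguous bus address starting at EHEA_BUSMAP_START. Directory
 * and top levels that no longer contain valid entries are freed.
 */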
static void ehea_rebuild_busmap(void)
{
	u64 vaddr = EHEA_BUSMAP_START;
	int top, dir, idx;

	for (top = 0; top < EHEA_MAP_ENTRIES; top++) {
		struct ehea_top_bmap *ehea_top;
		int valid_dir_entries = 0;

		if (!ehea_bmap->top[top])
			continue;
		ehea_top = ehea_bmap->top[top];
		for (dir = 0; dir < EHEA_MAP_ENTRIES; dir++) {
			struct ehea_dir_bmap *ehea_dir;
			int valid_entries = 0;

			if (!ehea_top->dir[dir])
				continue;
			valid_dir_entries++;
			ehea_dir = ehea_top->dir[dir];
			for (idx = 0; idx < EHEA_MAP_ENTRIES; idx++) {
				if (!ehea_dir->ent[idx])
					continue;
				valid_entries++;
				ehea_dir->ent[idx] = vaddr;
				vaddr += EHEA_SECTSIZE;
			}
			if (!valid_entries) {
				ehea_top->dir[dir] = NULL;
				kfree(ehea_dir);
			}
		}
		if (!valid_dir_entries) {
			ehea_bmap->top[top] = NULL;
			kfree(ehea_top);
		}
	}
}

static int ehea_update_busmap(unsigned long pfn, unsigned long nr_pages, int add)
{
	unsigned long i, start_section, end_section;

	if (!nr_pages)
		return 0;

	if (!ehea_bmap) {
		ehea_bmap = kzalloc(sizeof(struct ehea_bmap), GFP_KERNEL);
		if (!ehea_bmap)
			return -ENOMEM;
	}

	start_section = (pfn * PAGE_SIZE) / EHEA_SECTSIZE;
	end_section = start_section + ((nr_pages * PAGE_SIZE) / EHEA_SECTSIZE);
	/* Mark entries as valid or invalid only; address is assigned later */
	for (i = start_section; i < end_section; i++) {
		u64 flag;
		int top = ehea_calc_index(i, EHEA_TOP_INDEX_SHIFT);
		int dir = ehea_calc_index(i, EHEA_DIR_INDEX_SHIFT);
		int idx = i & EHEA_INDEX_MASK;

		if (add) {
			int ret = ehea_init_bmap(ehea_bmap, top, dir);
			if (ret)
				return ret;
			flag = 1; /* valid */
			ehea_mr_len += EHEA_SECTSIZE;
		} else {
			if (!ehea_bmap->top[top])
				continue;
			if (!ehea_bmap->top[top]->dir[dir])
				continue;
			flag = 0; /* invalid */
			ehea_mr_len -= EHEA_SECTSIZE;
		}

		ehea_bmap->top[top]->dir[dir]->ent[idx] = flag;
	}
	ehea_rebuild_busmap(); /* Assign contiguous addresses for mr */
	return 0;
}

int ehea_add_sect_bmap(unsigned long pfn, unsigned long nr_pages)
{
	int ret;

	mutex_lock(&ehea_busmap_mutex);
	ret = ehea_update_busmap(pfn, nr_pages, EHEA_BUSMAP_ADD_SECT);
	mutex_unlock(&ehea_busmap_mutex);
	return ret;
}

int ehea_rem_sect_bmap(unsigned long pfn, unsigned long nr_pages)
{
	int ret;

	mutex_lock(&ehea_busmap_mutex);
	ret = ehea_update_busmap(pfn, nr_pages, EHEA_BUSMAP_REM_SECT);
	mutex_unlock(&ehea_busmap_mutex);
	return ret;
}
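
/*
 * ehea_is_hugepage - return 1 if the pfn starts a compound page whose
 * order matches the eHEA hugepage size (EHEA_HUGEPAGESHIFT), 0 otherwise.
 */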
static int ehea_is_hugepage(unsigned long pfn)
{
	int page_order;

	if (pfn & EHEA_HUGEPAGE_PFN_MASK)
		return 0;

	page_order = compound_order(pfn_to_page(pfn));
	if (page_order + PAGE_SHIFT != EHEA_HUGEPAGESHIFT)
		return 0;

	return 1;
}

static int ehea_create_busmap_callback(unsigned long initial_pfn,
				       unsigned long total_nr_pages, void *arg)
{
	int ret;
	unsigned long pfn, start_pfn, end_pfn, nr_pages;

	if ((total_nr_pages * PAGE_SIZE) < EHEA_HUGEPAGE_SIZE)
		return ehea_update_busmap(initial_pfn, total_nr_pages,
					  EHEA_BUSMAP_ADD_SECT);

	/* Given chunk is >= 16GB -> check for hugepages */
	start_pfn = initial_pfn;
	end_pfn = initial_pfn + total_nr_pages;
	pfn = start_pfn;

	while (pfn < end_pfn) {
		if (ehea_is_hugepage(pfn)) {
			/* Add mem found in front of the hugepage */
			nr_pages = pfn - start_pfn;
			ret = ehea_update_busmap(start_pfn, nr_pages,
						 EHEA_BUSMAP_ADD_SECT);
			if (ret)
				return ret;

			/* Skip the hugepage */
			pfn += (EHEA_HUGEPAGE_SIZE / PAGE_SIZE);
			start_pfn = pfn;
		} else
			pfn += (EHEA_SECTSIZE / PAGE_SIZE);
	}

	/* Add mem found behind the hugepage(s) */
	nr_pages = pfn - start_pfn;
	return ehea_update_busmap(start_pfn, nr_pages, EHEA_BUSMAP_ADD_SECT);
}

int ehea_create_busmap(void)
{
	int ret;

	mutex_lock(&ehea_busmap_mutex);
	ehea_mr_len = 0;
	ret = walk_system_ram_range(0, 1ULL << MAX_PHYSMEM_BITS, NULL,
				    ehea_create_busmap_callback);
	mutex_unlock(&ehea_busmap_mutex);
	return ret;
}

void ehea_destroy_busmap(void)
{
	int top, dir;

	mutex_lock(&ehea_busmap_mutex);
	if (!ehea_bmap)
		goto out_destroy;

	for (top = 0; top < EHEA_MAP_ENTRIES; top++) {
		if (!ehea_bmap->top[top])
			continue;

		for (dir = 0; dir < EHEA_MAP_ENTRIES; dir++) {
			if (!ehea_bmap->top[top]->dir[dir])
				continue;

			kfree(ehea_bmap->top[top]->dir[dir]);
		}

		kfree(ehea_bmap->top[top]);
	}

	kfree(ehea_bmap);
	ehea_bmap = NULL;
out_destroy:
	mutex_unlock(&ehea_busmap_mutex);
}
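
/*
 * ehea_map_vaddr - translate a kernel virtual address into the bus address
 * registered with the HEA. Returns EHEA_INVAL_ADDR if the section holding
 * the address is not part of the busmap.
 */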
u64 ehea_map_vaddr(void *caddr)
{
	int top, dir, idx;
	unsigned long index, offset;

	if (!ehea_bmap)
		return EHEA_INVAL_ADDR;

	index = __pa(caddr) >> SECTION_SIZE_BITS;
	top = (index >> EHEA_TOP_INDEX_SHIFT) & EHEA_INDEX_MASK;
	if (!ehea_bmap->top[top])
		return EHEA_INVAL_ADDR;

	dir = (index >> EHEA_DIR_INDEX_SHIFT) & EHEA_INDEX_MASK;
	if (!ehea_bmap->top[top]->dir[dir])
		return EHEA_INVAL_ADDR;

	idx = index & EHEA_INDEX_MASK;
	if (!ehea_bmap->top[top]->dir[dir]->ent[idx])
		return EHEA_INVAL_ADDR;

	offset = (unsigned long)caddr & (EHEA_SECTSIZE - 1);
	return ehea_bmap->top[top]->dir[dir]->ent[idx] | offset;
}

static inline void *ehea_calc_sectbase(int top, int dir, int idx)
{
	unsigned long ret = idx;
	ret |= dir << EHEA_DIR_INDEX_SHIFT;
	ret |= top << EHEA_TOP_INDEX_SHIFT;
	return __va(ret << SECTION_SIZE_BITS);
}
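
/*
 * ehea_reg_mr_section - register one memory section with the MR. The pages
 * of the section are handed to the hypervisor in batches of EHEA_MAX_RPAGE
 * physical addresses collected in the page table buffer pt.
 */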
static u64 ehea_reg_mr_section(int top, int dir, int idx, u64 *pt,
			       struct ehea_adapter *adapter,
			       struct ehea_mr *mr)
{
	void *pg;
	u64 j, m, hret;
	unsigned long k = 0;
	u64 pt_abs = __pa(pt);

	void *sectbase = ehea_calc_sectbase(top, dir, idx);

	for (j = 0; j < (EHEA_PAGES_PER_SECTION / EHEA_MAX_RPAGE); j++) {

		for (m = 0; m < EHEA_MAX_RPAGE; m++) {
			pg = sectbase + ((k++) * EHEA_PAGESIZE);
			pt[m] = __pa(pg);
		}
		hret = ehea_h_register_rpage_mr(adapter->handle, mr->handle, 0,
						0, pt_abs, EHEA_MAX_RPAGE);

		if ((hret != H_SUCCESS) &&
		    (hret != H_PAGE_REGISTERED)) {
			ehea_h_free_resource(adapter->handle, mr->handle,
					     FORCE_FREE);
			pr_err("register_rpage_mr failed\n");
			return hret;
		}
	}
	return hret;
}

static u64 ehea_reg_mr_sections(int top, int dir, u64 *pt,
				struct ehea_adapter *adapter,
				struct ehea_mr *mr)
{
	u64 hret = H_SUCCESS;
	int idx;

	for (idx = 0; idx < EHEA_MAP_ENTRIES; idx++) {
		if (!ehea_bmap->top[top]->dir[dir]->ent[idx])
			continue;

		hret = ehea_reg_mr_section(top, dir, idx, pt, adapter, mr);
		if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
			return hret;
	}
	return hret;
}

static u64 ehea_reg_mr_dir_sections(int top, u64 *pt,
				    struct ehea_adapter *adapter,
				    struct ehea_mr *mr)
{
	u64 hret = H_SUCCESS;
	int dir;

	for (dir = 0; dir < EHEA_MAP_ENTRIES; dir++) {
		if (!ehea_bmap->top[top]->dir[dir])
			continue;

		hret = ehea_reg_mr_sections(top, dir, pt, adapter, mr);
		if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
			return hret;
	}
	return hret;
}
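
/*
 * ehea_reg_kernel_mr - allocate a memory region covering the whole busmap
 * (EHEA_BUSMAP_START, ehea_mr_len) and register every valid section with
 * the hypervisor. On any failure the MR resource is freed again.
 */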
int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr)
{
	int ret;
	u64 *pt;
	u64 hret;
	u32 acc_ctrl = EHEA_MR_ACC_CTRL;

	unsigned long top;

	pt = (void *)get_zeroed_page(GFP_KERNEL);
	if (!pt) {
		pr_err("no mem\n");
		ret = -ENOMEM;
		goto out;
	}

	hret = ehea_h_alloc_resource_mr(adapter->handle, EHEA_BUSMAP_START,
					ehea_mr_len, acc_ctrl, adapter->pd,
					&mr->handle, &mr->lkey);

	if (hret != H_SUCCESS) {
		pr_err("alloc_resource_mr failed\n");
		ret = -EIO;
		goto out;
	}

	if (!ehea_bmap) {
		ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE);
		pr_err("no busmap available\n");
		ret = -EIO;
		goto out;
	}

	for (top = 0; top < EHEA_MAP_ENTRIES; top++) {
		if (!ehea_bmap->top[top])
			continue;

		hret = ehea_reg_mr_dir_sections(top, pt, adapter, mr);
		if ((hret != H_PAGE_REGISTERED) && (hret != H_SUCCESS))
			break;
	}

	if (hret != H_SUCCESS) {
		ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE);
		pr_err("registering mr failed\n");
		ret = -EIO;
		goto out;
	}

	mr->vaddr = EHEA_BUSMAP_START;
	mr->adapter = adapter;
	ret = 0;
out:
	free_page((unsigned long)pt);
	return ret;
}

int ehea_rem_mr(struct ehea_mr *mr)
{
	u64 hret;

	if (!mr || !mr->adapter)
		return -EINVAL;

	hret = ehea_h_free_resource(mr->adapter->handle, mr->handle,
				    FORCE_FREE);
	if (hret != H_SUCCESS) {
		pr_err("destroy MR failed\n");
		return -EIO;
	}

	return 0;
}

int ehea_gen_smr(struct ehea_adapter *adapter, struct ehea_mr *old_mr,
		 struct ehea_mr *shared_mr)
{
	u64 hret;

	hret = ehea_h_register_smr(adapter->handle, old_mr->handle,
				   old_mr->vaddr, EHEA_MR_ACC_CTRL,
				   adapter->pd, shared_mr);
	if (hret != H_SUCCESS)
		return -EIO;

	shared_mr->adapter = adapter;

	return 0;
}

static void print_error_data(u64 *data)
{
	int length;
	u64 type = EHEA_BMASK_GET(ERROR_DATA_TYPE, data[2]);
	u64 resource = data[1];

	length = EHEA_BMASK_GET(ERROR_DATA_LENGTH, data[0]);

	if (length > EHEA_PAGESIZE)
		length = EHEA_PAGESIZE;

	if (type == EHEA_AER_RESTYPE_QP)
		pr_err("QP (resource=%llX) state: AER=0x%llX, AERR=0x%llX, port=%llX\n",
		       resource, data[6], data[12], data[22]);
	else if (type == EHEA_AER_RESTYPE_CQ)
		pr_err("CQ (resource=%llX) state: AER=0x%llX\n",
		       resource, data[6]);
	else if (type == EHEA_AER_RESTYPE_EQ)
		pr_err("EQ (resource=%llX) state: AER=0x%llX\n",
		       resource, data[6]);

	ehea_dump(data, length, "error data");
}
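
/*
 * ehea_error_data - fetch the error data block for a resource handle from
 * the hypervisor, log it and return the resource type. *aer and *aerr are
 * only written when the query succeeds.
 */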
u64 ehea_error_data(struct ehea_adapter *adapter, u64 res_handle,
		    u64 *aer, u64 *aerr)
{
	unsigned long ret;
	u64 *rblock;
	u64 type = 0;

	rblock = (void *)get_zeroed_page(GFP_KERNEL);
	if (!rblock) {
		pr_err("Cannot allocate rblock memory\n");
		goto out;
	}

	ret = ehea_h_error_data(adapter->handle, res_handle, rblock);

	if (ret == H_SUCCESS) {
		type = EHEA_BMASK_GET(ERROR_DATA_TYPE, rblock[2]);
		*aer = rblock[6];
		*aerr = rblock[12];
		print_error_data(rblock);
	} else if (ret == H_R_STATE) {
		pr_err("No error data available: %llX\n", res_handle);
	} else
		pr_err("Error data could not be fetched: %llX\n", res_handle);

	free_page((unsigned long)rblock);
out:
	return type;
}