/* resource.c */
  1. /*
  2. * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
  3. *
  4. * This software is available to you under a choice of one of two
  5. * licenses. You may choose to be licensed under the terms of the GNU
  6. * General Public License (GPL) Version 2, available from the file
  7. * COPYING in the main directory of this source tree, or the
  8. * OpenIB.org BSD license below:
  9. *
  10. * Redistribution and use in source and binary forms, with or
  11. * without modification, are permitted provided that the following
  12. * conditions are met:
  13. *
  14. * - Redistributions of source code must retain the above
  15. * copyright notice, this list of conditions and the following
  16. * disclaimer.
  17. *
  18. * - Redistributions in binary form must reproduce the above
  19. * copyright notice, this list of conditions and the following
  20. * disclaimer in the documentation and/or other materials
  21. * provided with the distribution.
  22. *
  23. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30. * SOFTWARE.
  31. */
  32. /* Crude resource management */
  33. #include <linux/kernel.h>
  34. #include <linux/random.h>
  35. #include <linux/slab.h>
  36. #include <linux/kfifo.h>
  37. #include <linux/spinlock.h>
  38. #include <linux/errno.h>
  39. #include <linux/genalloc.h>
  40. #include <linux/ratelimit.h>
  41. #include "iw_cxgb4.h"
  42. #define RANDOM_SIZE 16
/*
 * Populate @fifo with the id range [skip_low, nr - skip_high), either in
 * ascending order or (if @random) in a shuffled order to make recently
 * freed ids less predictable.  Also initializes @fifo_lock.
 *
 * Returns 0 on success or -ENOMEM if the kfifo backing store cannot be
 * allocated.
 */
static int __c4iw_init_resource_fifo(struct kfifo *fifo,
				     spinlock_t *fifo_lock,
				     u32 nr, u32 skip_low,
				     u32 skip_high,
				     int random)
{
	u32 i, j, entry = 0, idx;
	u32 random_bytes;
	u32 rarray[16];

	spin_lock_init(fifo_lock);

	/* One u32 slot per possible id. */
	if (kfifo_alloc(fifo, nr * sizeof(u32), GFP_KERNEL))
		return -ENOMEM;

	/*
	 * Temporarily enqueue a zero placeholder for each reserved
	 * (skipped) id; they are drained again at the bottom, leaving
	 * only usable ids in the fifo.
	 */
	for (i = 0; i < skip_low + skip_high; i++)
		kfifo_in(fifo, (unsigned char *) &entry, sizeof(u32));
	if (random) {
		j = 0;
		random_bytes = random32();
		/* Seed the shuffle window with the first RANDOM_SIZE ids. */
		for (i = 0; i < RANDOM_SIZE; i++)
			rarray[i] = i + skip_low;
		for (i = skip_low + RANDOM_SIZE; i < nr - skip_high; i++) {
			if (j >= RANDOM_SIZE) {
				/* Used up the random word; fetch a new one. */
				j = 0;
				random_bytes = random32();
			}
			/*
			 * Pick a window slot from 4 bits of randomness;
			 * the 0xF mask relies on RANDOM_SIZE == 16.
			 * Emit that slot's id and replace it with id i.
			 */
			idx = (random_bytes >> (j * 2)) & 0xF;
			kfifo_in(fifo,
				 (unsigned char *) &rarray[idx],
				 sizeof(u32));
			rarray[idx] = i;
			j++;
		}
		/* Flush whatever remains in the shuffle window. */
		for (i = 0; i < RANDOM_SIZE; i++)
			kfifo_in(fifo,
				 (unsigned char *) &rarray[i],
				 sizeof(u32));
	} else
		for (i = skip_low; i < nr - skip_high; i++)
			kfifo_in(fifo, (unsigned char *) &i, sizeof(u32));

	/* Drain the placeholder entries inserted above. */
	for (i = 0; i < skip_low + skip_high; i++)
		if (kfifo_out_locked(fifo, (unsigned char *) &entry,
				     sizeof(u32), fifo_lock))
			break;
	return 0;
}
  87. static int c4iw_init_resource_fifo(struct kfifo *fifo, spinlock_t * fifo_lock,
  88. u32 nr, u32 skip_low, u32 skip_high)
  89. {
  90. return __c4iw_init_resource_fifo(fifo, fifo_lock, nr, skip_low,
  91. skip_high, 0);
  92. }
  93. static int c4iw_init_resource_fifo_random(struct kfifo *fifo,
  94. spinlock_t *fifo_lock,
  95. u32 nr, u32 skip_low, u32 skip_high)
  96. {
  97. return __c4iw_init_resource_fifo(fifo, fifo_lock, nr, skip_low,
  98. skip_high, 1);
  99. }
  100. static int c4iw_init_qid_fifo(struct c4iw_rdev *rdev)
  101. {
  102. u32 i;
  103. spin_lock_init(&rdev->resource.qid_fifo_lock);
  104. if (kfifo_alloc(&rdev->resource.qid_fifo, rdev->lldi.vr->qp.size *
  105. sizeof(u32), GFP_KERNEL))
  106. return -ENOMEM;
  107. for (i = rdev->lldi.vr->qp.start;
  108. i < rdev->lldi.vr->qp.start + rdev->lldi.vr->qp.size; i++)
  109. if (!(i & rdev->qpmask))
  110. kfifo_in(&rdev->resource.qid_fifo,
  111. (unsigned char *) &i, sizeof(u32));
  112. return 0;
  113. }
  114. /* nr_* must be power of 2 */
  115. int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid)
  116. {
  117. int err = 0;
  118. err = c4iw_init_resource_fifo_random(&rdev->resource.tpt_fifo,
  119. &rdev->resource.tpt_fifo_lock,
  120. nr_tpt, 1, 0);
  121. if (err)
  122. goto tpt_err;
  123. err = c4iw_init_qid_fifo(rdev);
  124. if (err)
  125. goto qid_err;
  126. err = c4iw_init_resource_fifo(&rdev->resource.pdid_fifo,
  127. &rdev->resource.pdid_fifo_lock,
  128. nr_pdid, 1, 0);
  129. if (err)
  130. goto pdid_err;
  131. return 0;
  132. pdid_err:
  133. kfifo_free(&rdev->resource.qid_fifo);
  134. qid_err:
  135. kfifo_free(&rdev->resource.tpt_fifo);
  136. tpt_err:
  137. return -ENOMEM;
  138. }
  139. /*
  140. * returns 0 if no resource available
  141. */
  142. u32 c4iw_get_resource(struct kfifo *fifo, spinlock_t *lock)
  143. {
  144. u32 entry;
  145. if (kfifo_out_locked(fifo, (unsigned char *) &entry, sizeof(u32), lock))
  146. return entry;
  147. else
  148. return 0;
  149. }
  150. void c4iw_put_resource(struct kfifo *fifo, u32 entry, spinlock_t *lock)
  151. {
  152. PDBG("%s entry 0x%x\n", __func__, entry);
  153. kfifo_in_locked(fifo, (unsigned char *) &entry, sizeof(u32), lock);
  154. }
/*
 * Allocate a cq id for @uctx.  Qids come in blocks spanned by
 * rdev->qpmask that share one db/gts page, so when the per-context
 * cache (uctx->cqids) is empty a fresh block-aligned qid is pulled from
 * the global fifo, its sibling ids are cached on uctx->cqids, and the
 * whole block is mirrored onto uctx->qpids as well.
 *
 * Returns the qid, or 0 if none is available.  kmalloc failures while
 * caching siblings are best-effort: the already-obtained qid is still
 * returned, the uncached siblings are simply lost.
 */
u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
{
	struct c4iw_qid_list *entry;
	u32 qid;
	int i;

	mutex_lock(&uctx->lock);
	if (!list_empty(&uctx->cqids)) {
		/* Fast path: reuse a qid cached for this user context. */
		entry = list_entry(uctx->cqids.next, struct c4iw_qid_list,
				   entry);
		list_del(&entry->entry);
		qid = entry->qid;
		kfree(entry);
	} else {
		qid = c4iw_get_resource(&rdev->resource.qid_fifo,
					&rdev->resource.qid_fifo_lock);
		if (!qid)
			goto out;
		/* Cache the block's remaining ids for later cq allocs. */
		for (i = qid+1; i & rdev->qpmask; i++) {
			entry = kmalloc(sizeof *entry, GFP_KERNEL);
			if (!entry)
				goto out;
			entry->qid = i;
			list_add_tail(&entry->entry, &uctx->cqids);
		}

		/*
		 * now put the same ids on the qp list since they all
		 * map to the same db/gts page.
		 */
		entry = kmalloc(sizeof *entry, GFP_KERNEL);
		if (!entry)
			goto out;
		entry->qid = qid;
		list_add_tail(&entry->entry, &uctx->qpids);
		for (i = qid+1; i & rdev->qpmask; i++) {
			entry = kmalloc(sizeof *entry, GFP_KERNEL);
			if (!entry)
				goto out;
			entry->qid = i;
			list_add_tail(&entry->entry, &uctx->qpids);
		}
	}
out:
	mutex_unlock(&uctx->lock);
	PDBG("%s qid 0x%x\n", __func__, qid);
	return qid;
}
  201. void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
  202. struct c4iw_dev_ucontext *uctx)
  203. {
  204. struct c4iw_qid_list *entry;
  205. entry = kmalloc(sizeof *entry, GFP_KERNEL);
  206. if (!entry)
  207. return;
  208. PDBG("%s qid 0x%x\n", __func__, qid);
  209. entry->qid = qid;
  210. mutex_lock(&uctx->lock);
  211. list_add_tail(&entry->entry, &uctx->cqids);
  212. mutex_unlock(&uctx->lock);
  213. }
  214. u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
  215. {
  216. struct c4iw_qid_list *entry;
  217. u32 qid;
  218. int i;
  219. mutex_lock(&uctx->lock);
  220. if (!list_empty(&uctx->qpids)) {
  221. entry = list_entry(uctx->qpids.next, struct c4iw_qid_list,
  222. entry);
  223. list_del(&entry->entry);
  224. qid = entry->qid;
  225. kfree(entry);
  226. } else {
  227. qid = c4iw_get_resource(&rdev->resource.qid_fifo,
  228. &rdev->resource.qid_fifo_lock);
  229. if (!qid)
  230. goto out;
  231. for (i = qid+1; i & rdev->qpmask; i++) {
  232. entry = kmalloc(sizeof *entry, GFP_KERNEL);
  233. if (!entry)
  234. goto out;
  235. entry->qid = i;
  236. list_add_tail(&entry->entry, &uctx->qpids);
  237. }
  238. /*
  239. * now put the same ids on the cq list since they all
  240. * map to the same db/gts page.
  241. */
  242. entry = kmalloc(sizeof *entry, GFP_KERNEL);
  243. if (!entry)
  244. goto out;
  245. entry->qid = qid;
  246. list_add_tail(&entry->entry, &uctx->cqids);
  247. for (i = qid; i & rdev->qpmask; i++) {
  248. entry = kmalloc(sizeof *entry, GFP_KERNEL);
  249. if (!entry)
  250. goto out;
  251. entry->qid = i;
  252. list_add_tail(&entry->entry, &uctx->cqids);
  253. }
  254. }
  255. out:
  256. mutex_unlock(&uctx->lock);
  257. PDBG("%s qid 0x%x\n", __func__, qid);
  258. return qid;
  259. }
  260. void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid,
  261. struct c4iw_dev_ucontext *uctx)
  262. {
  263. struct c4iw_qid_list *entry;
  264. entry = kmalloc(sizeof *entry, GFP_KERNEL);
  265. if (!entry)
  266. return;
  267. PDBG("%s qid 0x%x\n", __func__, qid);
  268. entry->qid = qid;
  269. mutex_lock(&uctx->lock);
  270. list_add_tail(&entry->entry, &uctx->qpids);
  271. mutex_unlock(&uctx->lock);
  272. }
  273. void c4iw_destroy_resource(struct c4iw_resource *rscp)
  274. {
  275. kfifo_free(&rscp->tpt_fifo);
  276. kfifo_free(&rscp->qid_fifo);
  277. kfifo_free(&rscp->pdid_fifo);
  278. }
  279. /*
  280. * PBL Memory Manager. Uses Linux generic allocator.
  281. */
  282. #define MIN_PBL_SHIFT 8 /* 256B == min PBL size (32 entries) */
  283. u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size)
  284. {
  285. unsigned long addr = gen_pool_alloc(rdev->pbl_pool, size);
  286. PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size);
  287. if (!addr)
  288. printk_ratelimited(KERN_WARNING MOD "%s: Out of PBL memory\n",
  289. pci_name(rdev->lldi.pdev));
  290. return (u32)addr;
  291. }
  292. void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
  293. {
  294. PDBG("%s addr 0x%x size %d\n", __func__, addr, size);
  295. gen_pool_free(rdev->pbl_pool, (unsigned long)addr, size);
  296. }
/*
 * Create the PBL gen_pool and seed it with the adapter's PBL address
 * range.  gen_pool_add() may fail while allocating its own chunk
 * bookkeeping, so on failure the chunk size is halved and retried;
 * below a floor size we give up and run with whatever was added.
 * Returns 0 (even on partial seeding) or -ENOMEM if the pool itself
 * cannot be created.
 */
int c4iw_pblpool_create(struct c4iw_rdev *rdev)
{
	unsigned pbl_start, pbl_chunk, pbl_top;

	/* nid == -1: no NUMA preference for pool metadata */
	rdev->pbl_pool = gen_pool_create(MIN_PBL_SHIFT, -1);
	if (!rdev->pbl_pool)
		return -ENOMEM;

	pbl_start = rdev->lldi.vr->pbl.start;
	pbl_chunk = rdev->lldi.vr->pbl.size;
	pbl_top = pbl_start + pbl_chunk;

	while (pbl_start < pbl_top) {
		/*
		 * NOTE(review): the "+ 1" can exceed the remaining range by
		 * one byte on the last chunk — looks off by one; confirm
		 * against upstream cxgb4 before changing.
		 */
		pbl_chunk = min(pbl_top - pbl_start + 1, pbl_chunk);
		if (gen_pool_add(rdev->pbl_pool, pbl_start, pbl_chunk, -1)) {
			PDBG("%s failed to add PBL chunk (%x/%x)\n",
			     __func__, pbl_start, pbl_chunk);
			if (pbl_chunk <= 1024 << MIN_PBL_SHIFT) {
				/* Too small to keep halving; run degraded. */
				printk(KERN_WARNING MOD
				       "Failed to add all PBL chunks (%x/%x)\n",
				       pbl_start,
				       pbl_top - pbl_start);
				return 0;
			}
			pbl_chunk >>= 1;	/* retry with a smaller chunk */
		} else {
			PDBG("%s added PBL chunk (%x/%x)\n",
			     __func__, pbl_start, pbl_chunk);
			pbl_start += pbl_chunk;
		}
	}
	return 0;
}
  327. void c4iw_pblpool_destroy(struct c4iw_rdev *rdev)
  328. {
  329. gen_pool_destroy(rdev->pbl_pool);
  330. }
  331. /*
  332. * RQT Memory Manager. Uses Linux generic allocator.
  333. */
  334. #define MIN_RQT_SHIFT 10 /* 1KB == min RQT size (16 entries) */
  335. u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size)
  336. {
  337. unsigned long addr = gen_pool_alloc(rdev->rqt_pool, size << 6);
  338. PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size << 6);
  339. if (!addr)
  340. printk_ratelimited(KERN_WARNING MOD "%s: Out of RQT memory\n",
  341. pci_name(rdev->lldi.pdev));
  342. return (u32)addr;
  343. }
  344. void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
  345. {
  346. PDBG("%s addr 0x%x size %d\n", __func__, addr, size << 6);
  347. gen_pool_free(rdev->rqt_pool, (unsigned long)addr, size << 6);
  348. }
/*
 * Create the RQT gen_pool and seed it with the adapter's RQ address
 * range.  Same halve-and-retry seeding strategy as c4iw_pblpool_create:
 * gen_pool_add() failures shrink the chunk until a floor, after which
 * we run with whatever was added.  Returns 0 (even on partial seeding)
 * or -ENOMEM if the pool itself cannot be created.
 */
int c4iw_rqtpool_create(struct c4iw_rdev *rdev)
{
	unsigned rqt_start, rqt_chunk, rqt_top;

	/* nid == -1: no NUMA preference for pool metadata */
	rdev->rqt_pool = gen_pool_create(MIN_RQT_SHIFT, -1);
	if (!rdev->rqt_pool)
		return -ENOMEM;

	rqt_start = rdev->lldi.vr->rq.start;
	rqt_chunk = rdev->lldi.vr->rq.size;
	rqt_top = rqt_start + rqt_chunk;

	while (rqt_start < rqt_top) {
		/* NOTE(review): "+ 1" looks off by one — see pblpool note. */
		rqt_chunk = min(rqt_top - rqt_start + 1, rqt_chunk);
		if (gen_pool_add(rdev->rqt_pool, rqt_start, rqt_chunk, -1)) {
			PDBG("%s failed to add RQT chunk (%x/%x)\n",
			     __func__, rqt_start, rqt_chunk);
			if (rqt_chunk <= 1024 << MIN_RQT_SHIFT) {
				/* Too small to keep halving; run degraded. */
				printk(KERN_WARNING MOD
				       "Failed to add all RQT chunks (%x/%x)\n",
				       rqt_start, rqt_top - rqt_start);
				return 0;
			}
			rqt_chunk >>= 1;	/* retry with a smaller chunk */
		} else {
			PDBG("%s added RQT chunk (%x/%x)\n",
			     __func__, rqt_start, rqt_chunk);
			rqt_start += rqt_chunk;
		}
	}
	return 0;
}
  378. void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev)
  379. {
  380. gen_pool_destroy(rdev->rqt_pool);
  381. }
  382. /*
  383. * On-Chip QP Memory.
  384. */
  385. #define MIN_OCQP_SHIFT 12 /* 4KB == min ocqp size */
  386. u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size)
  387. {
  388. unsigned long addr = gen_pool_alloc(rdev->ocqp_pool, size);
  389. PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size);
  390. return (u32)addr;
  391. }
  392. void c4iw_ocqp_pool_free(struct c4iw_rdev *rdev, u32 addr, int size)
  393. {
  394. PDBG("%s addr 0x%x size %d\n", __func__, addr, size);
  395. gen_pool_free(rdev->ocqp_pool, (unsigned long)addr, size);
  396. }
/*
 * Create the on-chip QP gen_pool and seed it with the adapter's OCQ
 * address range.  Same halve-and-retry seeding strategy as
 * c4iw_pblpool_create: gen_pool_add() failures shrink the chunk until a
 * floor, after which we run with whatever was added.  Returns 0 (even
 * on partial seeding) or -ENOMEM if the pool itself cannot be created.
 */
int c4iw_ocqp_pool_create(struct c4iw_rdev *rdev)
{
	unsigned start, chunk, top;

	/* nid == -1: no NUMA preference for pool metadata */
	rdev->ocqp_pool = gen_pool_create(MIN_OCQP_SHIFT, -1);
	if (!rdev->ocqp_pool)
		return -ENOMEM;

	start = rdev->lldi.vr->ocq.start;
	chunk = rdev->lldi.vr->ocq.size;
	top = start + chunk;

	while (start < top) {
		/* NOTE(review): "+ 1" looks off by one — see pblpool note. */
		chunk = min(top - start + 1, chunk);
		if (gen_pool_add(rdev->ocqp_pool, start, chunk, -1)) {
			PDBG("%s failed to add OCQP chunk (%x/%x)\n",
			     __func__, start, chunk);
			if (chunk <= 1024 << MIN_OCQP_SHIFT) {
				/* Too small to keep halving; run degraded. */
				printk(KERN_WARNING MOD
				       "Failed to add all OCQP chunks (%x/%x)\n",
				       start, top - start);
				return 0;
			}
			chunk >>= 1;	/* retry with a smaller chunk */
		} else {
			PDBG("%s added OCQP chunk (%x/%x)\n",
			     __func__, start, chunk);
			start += chunk;
		}
	}
	return 0;
}
  426. void c4iw_ocqp_pool_destroy(struct c4iw_rdev *rdev)
  427. {
  428. gen_pool_destroy(rdev->ocqp_pool);
  429. }