cxgb4_uld.c

/*
 * cxgb4_uld.c: Chelsio Upper Layer Driver Interface for T4/T5/T6 SGE management
 *
 * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 *  Written by: Atul Gupta (atul.gupta@chelsio.com)
 *  Written by: Hariprasad Shenai (hariprasad@chelsio.com)
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/list.h>
#include <linux/skbuff.h>
#include <linux/pci.h>

#include "cxgb4.h"
#include "cxgb4_uld.h"
#include "t4_regs.h"
#include "t4fw_api.h"
#include "t4_msg.h"

#define for_each_uldrxq(m, i) for (i = 0; i < ((m)->nrxq + (m)->nciq); i++)
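
/*
 * The two helpers below hand out and return MSI-X vector indices from the
 * per-adapter ULD bitmap (adap->msix_bmap_ulds) under its spinlock;
 * get_msix_idx_from_bmap() returns -ENOSPC once every vector is in use.
 */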

static int get_msix_idx_from_bmap(struct adapter *adap)
{
        struct uld_msix_bmap *bmap = &adap->msix_bmap_ulds;
        unsigned long flags;
        unsigned int msix_idx;

        spin_lock_irqsave(&bmap->lock, flags);
        msix_idx = find_first_zero_bit(bmap->msix_bmap, bmap->mapsize);
        if (msix_idx < bmap->mapsize) {
                __set_bit(msix_idx, bmap->msix_bmap);
        } else {
                spin_unlock_irqrestore(&bmap->lock, flags);
                return -ENOSPC;
        }

        spin_unlock_irqrestore(&bmap->lock, flags);
        return msix_idx;
}

static void free_msix_idx_in_bmap(struct adapter *adap, unsigned int msix_idx)
{
        struct uld_msix_bmap *bmap = &adap->msix_bmap_ulds;
        unsigned long flags;

        spin_lock_irqsave(&bmap->lock, flags);
        __clear_bit(msix_idx, bmap->msix_bmap);
        spin_unlock_irqrestore(&bmap->lock, flags);
}

/* Flush the aggregated lro sessions */
static void uldrx_flush_handler(struct sge_rspq *q)
{
        struct adapter *adap = q->adap;

        if (adap->uld[q->uld].lro_flush)
                adap->uld[q->uld].lro_flush(&q->lro_mgr);
}

/**
 *      uldrx_handler - response queue handler for ULD queues
 *      @q: the response queue that received the packet
 *      @rsp: the response queue descriptor holding the offload message
 *      @gl: the gather list of packet fragments
 *
 *      Deliver an ingress offload packet to a ULD.  All processing is done by
 *      the ULD, we just maintain statistics.
 */
static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
                         const struct pkt_gl *gl)
{
        struct adapter *adap = q->adap;
        struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
        int ret;

        /* FW can send CPLs encapsulated in a CPL_FW4_MSG */
        if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
            ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
                rsp += 2;

        if (q->flush_handler)
                ret = adap->uld[q->uld].lro_rx_handler(adap->uld[q->uld].handle,
                                                       rsp, gl, &q->lro_mgr,
                                                       &q->napi);
        else
                ret = adap->uld[q->uld].rx_handler(adap->uld[q->uld].handle,
                                                   rsp, gl);

        if (ret) {
                rxq->stats.nomem++;
                return -1;
        }

        if (!gl)
                rxq->stats.imm++;
        else if (gl == CXGB4_MSG_AN)
                rxq->stats.an++;
        else
                rxq->stats.pkts++;
        return 0;
}

static int alloc_uld_rxqs(struct adapter *adap,
                          struct sge_uld_rxq_info *rxq_info, bool lro)
{
        struct sge *s = &adap->sge;
        unsigned int nq = rxq_info->nrxq + rxq_info->nciq;
        struct sge_ofld_rxq *q = rxq_info->uldrxq;
        unsigned short *ids = rxq_info->rspq_id;
        unsigned int bmap_idx = 0;
        unsigned int per_chan;
        int i, err, msi_idx, que_idx = 0;

        per_chan = rxq_info->nrxq / adap->params.nports;

        if (adap->flags & USING_MSIX)
                msi_idx = 1;
        else
                msi_idx = -((int)s->intrq.abs_id + 1);

        for (i = 0; i < nq; i++, q++) {
                if (i == rxq_info->nrxq) {
                        /* start allocation of concentrator queues */
                        per_chan = rxq_info->nciq / adap->params.nports;
                        que_idx = 0;
                }

                if (msi_idx >= 0) {
                        bmap_idx = get_msix_idx_from_bmap(adap);
                        msi_idx = adap->msix_info_ulds[bmap_idx].idx;
                }
                err = t4_sge_alloc_rxq(adap, &q->rspq, false,
                                       adap->port[que_idx++ / per_chan],
                                       msi_idx,
                                       q->fl.size ? &q->fl : NULL,
                                       uldrx_handler,
                                       lro ? uldrx_flush_handler : NULL,
                                       0);
                if (err)
                        goto freeout;
                if (msi_idx >= 0)
                        rxq_info->msix_tbl[i] = bmap_idx;
                memset(&q->stats, 0, sizeof(q->stats));
                if (ids)
                        ids[i] = q->rspq.abs_id;
        }
        return 0;
freeout:
        q = rxq_info->uldrxq;
        for ( ; i; i--, q++) {
                if (q->rspq.desc)
                        free_rspq_fl(adap, &q->rspq,
                                     q->fl.size ? &q->fl : NULL);
        }
        return err;
}

static int
setup_sge_queues_uld(struct adapter *adap, unsigned int uld_type, bool lro)
{
        struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
        int i, ret = 0;

        if (adap->flags & USING_MSIX) {
                rxq_info->msix_tbl = kcalloc((rxq_info->nrxq + rxq_info->nciq),
                                             sizeof(unsigned short),
                                             GFP_KERNEL);
                if (!rxq_info->msix_tbl)
                        return -ENOMEM;
        }

        ret = !(!alloc_uld_rxqs(adap, rxq_info, lro));

        /* Tell uP to route control queue completions to rdma rspq */
        if (adap->flags & FULL_INIT_DONE &&
            !ret && uld_type == CXGB4_ULD_RDMA) {
                struct sge *s = &adap->sge;
                unsigned int cmplqid;
                u32 param, cmdop;

                cmdop = FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL;
                for_each_port(adap, i) {
                        cmplqid = rxq_info->uldrxq[i].rspq.cntxt_id;
                        param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
                                 FW_PARAMS_PARAM_X_V(cmdop) |
                                 FW_PARAMS_PARAM_YZ_V(s->ctrlq[i].q.cntxt_id));
                        ret = t4_set_params(adap, adap->mbox, adap->pf,
                                            0, 1, &param, &cmplqid);
                }
        }
        return ret;
}

static void t4_free_uld_rxqs(struct adapter *adap, int n,
                             struct sge_ofld_rxq *q)
{
        for ( ; n; n--, q++) {
                if (q->rspq.desc)
                        free_rspq_fl(adap, &q->rspq,
                                     q->fl.size ? &q->fl : NULL);
        }
}

static void free_sge_queues_uld(struct adapter *adap, unsigned int uld_type)
{
        struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];

        if (adap->flags & FULL_INIT_DONE && uld_type == CXGB4_ULD_RDMA) {
                struct sge *s = &adap->sge;
                u32 param, cmdop, cmplqid = 0;
                int i;

                cmdop = FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL;
                for_each_port(adap, i) {
                        param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
                                 FW_PARAMS_PARAM_X_V(cmdop) |
                                 FW_PARAMS_PARAM_YZ_V(s->ctrlq[i].q.cntxt_id));
                        t4_set_params(adap, adap->mbox, adap->pf,
                                      0, 1, &param, &cmplqid);
                }
        }

        if (rxq_info->nciq)
                t4_free_uld_rxqs(adap, rxq_info->nciq,
                                 rxq_info->uldrxq + rxq_info->nrxq);
        t4_free_uld_rxqs(adap, rxq_info->nrxq, rxq_info->uldrxq);
        if (adap->flags & USING_MSIX)
                kfree(rxq_info->msix_tbl);
}
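
/*
 * cfg_queues_uld() below sizes and allocates the per-ULD ingress queue
 * bookkeeping: nrxq/nciq are derived from the ULD's request, the number of
 * online CPUs and the per-ULD queue budget, then aligned to the port count
 * before the rxq array and response-queue IDs are allocated.
 */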

static int cfg_queues_uld(struct adapter *adap, unsigned int uld_type,
                          const struct cxgb4_uld_info *uld_info)
{
        struct sge *s = &adap->sge;
        struct sge_uld_rxq_info *rxq_info;
        int i, nrxq, ciq_size;

        rxq_info = kzalloc(sizeof(*rxq_info), GFP_KERNEL);
        if (!rxq_info)
                return -ENOMEM;

        if (adap->flags & USING_MSIX && uld_info->nrxq > s->nqs_per_uld) {
                i = s->nqs_per_uld;
                rxq_info->nrxq = roundup(i, adap->params.nports);
        } else {
                i = min_t(int, uld_info->nrxq,
                          num_online_cpus());
                rxq_info->nrxq = roundup(i, adap->params.nports);
        }
        if (!uld_info->ciq) {
                rxq_info->nciq = 0;
        } else {
                if (adap->flags & USING_MSIX)
                        rxq_info->nciq = min_t(int, s->nqs_per_uld,
                                               num_online_cpus());
                else
                        rxq_info->nciq = min_t(int, MAX_OFLD_QSETS,
                                               num_online_cpus());
                rxq_info->nciq = ((rxq_info->nciq / adap->params.nports) *
                                  adap->params.nports);
                rxq_info->nciq = max_t(int, rxq_info->nciq,
                                       adap->params.nports);
        }

        nrxq = rxq_info->nrxq + rxq_info->nciq; /* total rxq's */
        rxq_info->uldrxq = kcalloc(nrxq, sizeof(struct sge_ofld_rxq),
                                   GFP_KERNEL);
        if (!rxq_info->uldrxq) {
                kfree(rxq_info);
                return -ENOMEM;
        }

        rxq_info->rspq_id = kcalloc(nrxq, sizeof(unsigned short), GFP_KERNEL);
        if (!rxq_info->rspq_id) {
                kfree(rxq_info->uldrxq);
                kfree(rxq_info);
                return -ENOMEM;
        }

        for (i = 0; i < rxq_info->nrxq; i++) {
                struct sge_ofld_rxq *r = &rxq_info->uldrxq[i];

                init_rspq(adap, &r->rspq, 5, 1, uld_info->rxq_size, 64);
                r->rspq.uld = uld_type;
                r->fl.size = 72;
        }

        ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids;
        if (ciq_size > SGE_MAX_IQ_SIZE) {
                dev_warn(adap->pdev_dev, "CIQ size too small for available IQs\n");
                ciq_size = SGE_MAX_IQ_SIZE;
        }

        for (i = rxq_info->nrxq; i < nrxq; i++) {
                struct sge_ofld_rxq *r = &rxq_info->uldrxq[i];

                init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64);
                r->rspq.uld = uld_type;
        }

        memcpy(rxq_info->name, uld_info->name, IFNAMSIZ);
        adap->sge.uld_rxq_info[uld_type] = rxq_info;

        return 0;
}

static void free_queues_uld(struct adapter *adap, unsigned int uld_type)
{
        struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];

        kfree(rxq_info->rspq_id);
        kfree(rxq_info->uldrxq);
        kfree(rxq_info);
}

static int
request_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
{
        struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
        int err = 0;
        unsigned int idx, bmap_idx;

        for_each_uldrxq(rxq_info, idx) {
                bmap_idx = rxq_info->msix_tbl[idx];
                err = request_irq(adap->msix_info_ulds[bmap_idx].vec,
                                  t4_sge_intr_msix, 0,
                                  adap->msix_info_ulds[bmap_idx].desc,
                                  &rxq_info->uldrxq[idx].rspq);
                if (err)
                        goto unwind;
        }
        return 0;
unwind:
        while (idx-- > 0) {
                bmap_idx = rxq_info->msix_tbl[idx];
                free_msix_idx_in_bmap(adap, bmap_idx);
                free_irq(adap->msix_info_ulds[bmap_idx].vec,
                         &rxq_info->uldrxq[idx].rspq);
        }
        return err;
}

static void
free_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
{
        struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
        unsigned int idx, bmap_idx;

        for_each_uldrxq(rxq_info, idx) {
                bmap_idx = rxq_info->msix_tbl[idx];

                free_msix_idx_in_bmap(adap, bmap_idx);
                free_irq(adap->msix_info_ulds[bmap_idx].vec,
                         &rxq_info->uldrxq[idx].rspq);
        }
}

static void name_msix_vecs_uld(struct adapter *adap, unsigned int uld_type)
{
        struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
        int n = sizeof(adap->msix_info_ulds[0].desc);
        unsigned int idx, bmap_idx;

        for_each_uldrxq(rxq_info, idx) {
                bmap_idx = rxq_info->msix_tbl[idx];

                snprintf(adap->msix_info_ulds[bmap_idx].desc, n, "%s-%s%d",
                         adap->port[0]->name, rxq_info->name, idx);
        }
}

static void enable_rx(struct adapter *adap, struct sge_rspq *q)
{
        if (!q)
                return;

        if (q->handler) {
                cxgb_busy_poll_init_lock(q);
                napi_enable(&q->napi);
        }
        /* 0-increment GTS to start the timer and enable interrupts */
        t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
                     SEINTARM_V(q->intr_params) |
                     INGRESSQID_V(q->cntxt_id));
}

static void quiesce_rx(struct adapter *adap, struct sge_rspq *q)
{
        if (q && q->handler) {
                napi_disable(&q->napi);
                local_bh_disable();
                while (!cxgb_poll_lock_napi(q))
                        mdelay(1);
                local_bh_enable();
        }
}

static void enable_rx_uld(struct adapter *adap, unsigned int uld_type)
{
        struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
        int idx;

        for_each_uldrxq(rxq_info, idx)
                enable_rx(adap, &rxq_info->uldrxq[idx].rspq);
}

static void quiesce_rx_uld(struct adapter *adap, unsigned int uld_type)
{
        struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
        int idx;

        for_each_uldrxq(rxq_info, idx)
                quiesce_rx(adap, &rxq_info->uldrxq[idx].rspq);
}
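
/*
 * The helpers below manage the offload transmit queues shared by the ULDs:
 * setup_sge_txq_uld() allocates (or, for CXGB4_TX_OFLD, reuses) a refcounted
 * sge_uld_txq_info, and release_sge_txq_uld() frees it once the last user
 * drops its reference.
 */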

static void
free_sge_txq_uld(struct adapter *adap, struct sge_uld_txq_info *txq_info)
{
        int nq = txq_info->ntxq;
        int i;

        for (i = 0; i < nq; i++) {
                struct sge_uld_txq *txq = &txq_info->uldtxq[i];

                if (txq && txq->q.desc) {
                        tasklet_kill(&txq->qresume_tsk);
                        t4_ofld_eq_free(adap, adap->mbox, adap->pf, 0,
                                        txq->q.cntxt_id);
                        free_tx_desc(adap, &txq->q, txq->q.in_use, false);
                        kfree(txq->q.sdesc);
                        __skb_queue_purge(&txq->sendq);
                        free_txq(adap, &txq->q);
                }
        }
}

static int
alloc_sge_txq_uld(struct adapter *adap, struct sge_uld_txq_info *txq_info,
                  unsigned int uld_type)
{
        struct sge *s = &adap->sge;
        int nq = txq_info->ntxq;
        int i, j, err;

        j = nq / adap->params.nports;
        for (i = 0; i < nq; i++) {
                struct sge_uld_txq *txq = &txq_info->uldtxq[i];

                txq->q.size = 1024;
                err = t4_sge_alloc_uld_txq(adap, txq, adap->port[i / j],
                                           s->fw_evtq.cntxt_id, uld_type);
                if (err)
                        goto freeout;
        }
        return 0;
freeout:
        free_sge_txq_uld(adap, txq_info);
        return err;
}

static void
release_sge_txq_uld(struct adapter *adap, unsigned int uld_type)
{
        struct sge_uld_txq_info *txq_info = NULL;
        int tx_uld_type = TX_ULD(uld_type);

        txq_info = adap->sge.uld_txq_info[tx_uld_type];

        if (txq_info && atomic_dec_and_test(&txq_info->users)) {
                free_sge_txq_uld(adap, txq_info);
                kfree(txq_info->uldtxq);
                kfree(txq_info);
                adap->sge.uld_txq_info[tx_uld_type] = NULL;
        }
}

static int
setup_sge_txq_uld(struct adapter *adap, unsigned int uld_type,
                  const struct cxgb4_uld_info *uld_info)
{
        struct sge_uld_txq_info *txq_info = NULL;
        int tx_uld_type, i;

        tx_uld_type = TX_ULD(uld_type);
        txq_info = adap->sge.uld_txq_info[tx_uld_type];

        if ((tx_uld_type == CXGB4_TX_OFLD) && txq_info &&
            (atomic_inc_return(&txq_info->users) > 1))
                return 0;

        txq_info = kzalloc(sizeof(*txq_info), GFP_KERNEL);
        if (!txq_info)
                return -ENOMEM;

        i = min_t(int, uld_info->ntxq, num_online_cpus());
        txq_info->ntxq = roundup(i, adap->params.nports);

        txq_info->uldtxq = kcalloc(txq_info->ntxq, sizeof(struct sge_uld_txq),
                                   GFP_KERNEL);
        if (!txq_info->uldtxq) {
                kfree(txq_info);
                return -ENOMEM;
        }

        if (alloc_sge_txq_uld(adap, txq_info, tx_uld_type)) {
                kfree(txq_info->uldtxq);
                kfree(txq_info);
                return -ENOMEM;
        }

        atomic_inc(&txq_info->users);
        adap->sge.uld_txq_info[tx_uld_type] = txq_info;
        return 0;
}

static void uld_queue_init(struct adapter *adap, unsigned int uld_type,
                           struct cxgb4_lld_info *lli)
{
        struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];

        lli->rxq_ids = rxq_info->rspq_id;
        lli->nrxq = rxq_info->nrxq;
        lli->ciq_ids = rxq_info->rspq_id + rxq_info->nrxq;
        lli->nciq = rxq_info->nciq;
}

int t4_uld_mem_alloc(struct adapter *adap)
{
        struct sge *s = &adap->sge;

        adap->uld = kcalloc(CXGB4_ULD_MAX, sizeof(*adap->uld), GFP_KERNEL);
        if (!adap->uld)
                return -ENOMEM;

        s->uld_rxq_info = kzalloc(CXGB4_ULD_MAX *
                                  sizeof(struct sge_uld_rxq_info *),
                                  GFP_KERNEL);
        if (!s->uld_rxq_info)
                goto err_uld;

        s->uld_txq_info = kzalloc(CXGB4_TX_MAX *
                                  sizeof(struct sge_uld_txq_info *),
                                  GFP_KERNEL);
        if (!s->uld_txq_info)
                goto err_uld_rx;
        return 0;

err_uld_rx:
        kfree(s->uld_rxq_info);
err_uld:
        kfree(adap->uld);
        return -ENOMEM;
}

void t4_uld_mem_free(struct adapter *adap)
{
        struct sge *s = &adap->sge;

        kfree(s->uld_txq_info);
        kfree(s->uld_rxq_info);
        kfree(adap->uld);
}

void t4_uld_clean_up(struct adapter *adap)
{
        struct sge_uld_rxq_info *rxq_info;
        unsigned int i;

        if (!adap->uld)
                return;
        for (i = 0; i < CXGB4_ULD_MAX; i++) {
                if (!adap->uld[i].handle)
                        continue;
                rxq_info = adap->sge.uld_rxq_info[i];
                if (adap->flags & FULL_INIT_DONE)
                        quiesce_rx_uld(adap, i);
                if (adap->flags & USING_MSIX)
                        free_msix_queue_irqs_uld(adap, i);
                free_sge_queues_uld(adap, i);
                free_queues_uld(adap, i);
        }
}
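
/*
 * uld_init() below snapshots the adapter resources and parameters into a
 * cxgb4_lld_info, which uld_attach() hands to the ULD's ->add() callback;
 * the returned handle is stored and, if the adapter is fully initialized,
 * the ULD is immediately notified with CXGB4_STATE_UP.
 */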

static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld)
{
        int i;

        lld->pdev = adap->pdev;
        lld->pf = adap->pf;
        lld->l2t = adap->l2t;
        lld->tids = &adap->tids;
        lld->ports = adap->port;
        lld->vr = &adap->vres;
        lld->mtus = adap->params.mtus;
        lld->ntxq = adap->sge.ofldqsets;
        lld->nchan = adap->params.nports;
        lld->nports = adap->params.nports;
        lld->wr_cred = adap->params.ofldq_wr_cred;
        lld->iscsi_iolen = MAXRXDATA_G(t4_read_reg(adap, TP_PARA_REG2_A));
        lld->iscsi_tagmask = t4_read_reg(adap, ULP_RX_ISCSI_TAGMASK_A);
        lld->iscsi_pgsz_order = t4_read_reg(adap, ULP_RX_ISCSI_PSZ_A);
        lld->iscsi_llimit = t4_read_reg(adap, ULP_RX_ISCSI_LLIMIT_A);
        lld->iscsi_ppm = &adap->iscsi_ppm;
        lld->adapter_type = adap->params.chip;
        lld->cclk_ps = 1000000000 / adap->params.vpd.cclk;
        lld->udb_density = 1 << adap->params.sge.eq_qpp;
        lld->ucq_density = 1 << adap->params.sge.iq_qpp;
        lld->filt_mode = adap->params.tp.vlan_pri_map;
        /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
        for (i = 0; i < NCHAN; i++)
                lld->tx_modq[i] = i;
        lld->gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS_A);
        lld->db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL_A);
        lld->fw_vers = adap->params.fw_vers;
        lld->dbfifo_int_thresh = dbfifo_int_thresh;
        lld->sge_ingpadboundary = adap->sge.fl_align;
        lld->sge_egrstatuspagesize = adap->sge.stat_len;
        lld->sge_pktshift = adap->sge.pktshift;
        lld->enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
        lld->max_ordird_qp = adap->params.max_ordird_qp;
        lld->max_ird_adapter = adap->params.max_ird_adapter;
        lld->ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
        lld->nodeid = dev_to_node(adap->pdev_dev);
        lld->fr_nsmr_tpte_wr_support = adap->params.fr_nsmr_tpte_wr_support;
}

static void uld_attach(struct adapter *adap, unsigned int uld)
{
        void *handle;
        struct cxgb4_lld_info lli;

        uld_init(adap, &lli);
        uld_queue_init(adap, uld, &lli);

        handle = adap->uld[uld].add(&lli);
        if (IS_ERR(handle)) {
                dev_warn(adap->pdev_dev,
                         "could not attach to the %s driver, error %ld\n",
                         adap->uld[uld].name, PTR_ERR(handle));
                return;
        }

        adap->uld[uld].handle = handle;
        t4_register_netevent_notifier();

        if (adap->flags & FULL_INIT_DONE)
                adap->uld[uld].state_change(handle, CXGB4_STATE_UP);
}

/**
 *      cxgb4_register_uld - register an upper-layer driver
 *      @type: the ULD type
 *      @p: the ULD methods
 *
 *      Registers an upper-layer driver with this driver and notifies the ULD
 *      about any presently available devices that support its type.  Returns
 *      %-EBUSY if a ULD of the same type is already registered.
 */
int cxgb4_register_uld(enum cxgb4_uld type,
                       const struct cxgb4_uld_info *p)
{
        int ret = 0;
        unsigned int adap_idx = 0;
        struct adapter *adap;

        if (type >= CXGB4_ULD_MAX)
                return -EINVAL;

        mutex_lock(&uld_mutex);
        list_for_each_entry(adap, &adapter_list, list_node) {
                if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
                    (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
                        continue;
                if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
                        continue;
                ret = cfg_queues_uld(adap, type, p);
                if (ret)
                        goto out;
                ret = setup_sge_queues_uld(adap, type, p->lro);
                if (ret)
                        goto free_queues;
                if (adap->flags & USING_MSIX) {
                        name_msix_vecs_uld(adap, type);
                        ret = request_msix_queue_irqs_uld(adap, type);
                        if (ret)
                                goto free_rxq;
                }
                if (adap->flags & FULL_INIT_DONE)
                        enable_rx_uld(adap, type);
                if (adap->uld[type].add) {
                        ret = -EBUSY;
                        goto free_irq;
                }
                ret = setup_sge_txq_uld(adap, type, p);
                if (ret)
                        goto free_irq;
                adap->uld[type] = *p;
                uld_attach(adap, type);
                adap_idx++;
        }
        mutex_unlock(&uld_mutex);
        return 0;

free_irq:
        if (adap->flags & FULL_INIT_DONE)
                quiesce_rx_uld(adap, type);
        if (adap->flags & USING_MSIX)
                free_msix_queue_irqs_uld(adap, type);
free_rxq:
        free_sge_queues_uld(adap, type);
free_queues:
        free_queues_uld(adap, type);
out:
        list_for_each_entry(adap, &adapter_list, list_node) {
                if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
                    (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
                        continue;
                if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
                        continue;
                if (!adap_idx)
                        break;
                adap->uld[type].handle = NULL;
                adap->uld[type].add = NULL;
                release_sge_txq_uld(adap, type);
                if (adap->flags & FULL_INIT_DONE)
                        quiesce_rx_uld(adap, type);
                if (adap->flags & USING_MSIX)
                        free_msix_queue_irqs_uld(adap, type);
                free_sge_queues_uld(adap, type);
                free_queues_uld(adap, type);
                adap_idx--;
        }
        mutex_unlock(&uld_mutex);
        return ret;
}
EXPORT_SYMBOL(cxgb4_register_uld);
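
/*
 * Illustrative usage sketch (hypothetical names, not part of this driver):
 * an upper-layer module such as an RDMA or iSCSI driver typically fills in
 * a cxgb4_uld_info and registers it from its module init path, e.g.
 *
 *      static struct cxgb4_uld_info example_uld_info = {
 *              .name         = "example_uld",
 *              .nrxq         = 8,
 *              .ntxq         = 8,
 *              .rxq_size     = 1024,
 *              .lro          = false,
 *              .add          = example_uld_add,
 *              .rx_handler   = example_uld_rx_handler,
 *              .state_change = example_uld_state_change,
 *      };
 *
 *      cxgb4_register_uld(CXGB4_ULD_RDMA, &example_uld_info);
 *
 * The exact field set is defined by cxgb4_uld.h for the kernel in use;
 * example_uld_* are placeholder callbacks, not symbols from this tree.
 */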

/**
 *      cxgb4_unregister_uld - unregister an upper-layer driver
 *      @type: the ULD type
 *
 *      Unregisters an existing upper-layer driver.
 */
int cxgb4_unregister_uld(enum cxgb4_uld type)
{
        struct adapter *adap;

        if (type >= CXGB4_ULD_MAX)
                return -EINVAL;

        mutex_lock(&uld_mutex);
        list_for_each_entry(adap, &adapter_list, list_node) {
                if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
                    (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
                        continue;
                if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
                        continue;
                adap->uld[type].handle = NULL;
                adap->uld[type].add = NULL;
                release_sge_txq_uld(adap, type);
                if (adap->flags & FULL_INIT_DONE)
                        quiesce_rx_uld(adap, type);
                if (adap->flags & USING_MSIX)
                        free_msix_queue_irqs_uld(adap, type);
                free_sge_queues_uld(adap, type);
                free_queues_uld(adap, type);
        }
        mutex_unlock(&uld_mutex);

        return 0;
}
EXPORT_SYMBOL(cxgb4_unregister_uld);
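
/*
 * Illustrative usage sketch (not part of this driver): the matching teardown
 * from the ULD's module exit path is a single call, e.g.
 *
 *      cxgb4_unregister_uld(CXGB4_ULD_RDMA);
 *
 * which clears the stored handle and releases the rx/tx queues that
 * cxgb4_register_uld() set up on each offload-capable adapter.
 */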