nes_mgt.c

/*
 * Copyright (c) 2006 - 2011 Intel-NE, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/kthread.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <net/tcp.h>

#include "nes.h"
#include "nes_mgt.h"

atomic_t pau_qps_created;
atomic_t pau_qps_destroyed;
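
/**
 * nes_replenish_mgt_rq - post fresh receive buffers to the mgt RQ
 */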
static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
{
	unsigned long flags;
	dma_addr_t bus_address;
	struct sk_buff *skb;
	struct nes_hw_nic_rq_wqe *nic_rqe;
	struct nes_hw_mgt *nesmgt;
	struct nes_device *nesdev;
	struct nes_rskb_cb *cb;
	u32 rx_wqes_posted = 0;

	nesmgt = &mgtvnic->mgt;
	nesdev = mgtvnic->nesvnic->nesdev;
	spin_lock_irqsave(&nesmgt->rq_lock, flags);
	if (nesmgt->replenishing_rq != 0) {
		if (((nesmgt->rq_size - 1) == atomic_read(&mgtvnic->rx_skbs_needed)) &&
		    (atomic_read(&mgtvnic->rx_skb_timer_running) == 0)) {
			atomic_set(&mgtvnic->rx_skb_timer_running, 1);
			spin_unlock_irqrestore(&nesmgt->rq_lock, flags);
			mgtvnic->rq_wqes_timer.expires = jiffies + (HZ / 2); /* 1/2 second */
			add_timer(&mgtvnic->rq_wqes_timer);
		} else {
			spin_unlock_irqrestore(&nesmgt->rq_lock, flags);
		}
		return;
	}
	nesmgt->replenishing_rq = 1;
	spin_unlock_irqrestore(&nesmgt->rq_lock, flags);
	do {
		skb = dev_alloc_skb(mgtvnic->nesvnic->max_frame_size);
		if (skb) {
			skb->dev = mgtvnic->nesvnic->netdev;

			bus_address = pci_map_single(nesdev->pcidev,
						     skb->data, mgtvnic->nesvnic->max_frame_size, PCI_DMA_FROMDEVICE);
			cb = (struct nes_rskb_cb *)&skb->cb[0];
			cb->busaddr = bus_address;
			cb->maplen = mgtvnic->nesvnic->max_frame_size;

			nic_rqe = &nesmgt->rq_vbase[mgtvnic->mgt.rq_head];
			nic_rqe->wqe_words[NES_NIC_RQ_WQE_LENGTH_1_0_IDX] =
				cpu_to_le32(mgtvnic->nesvnic->max_frame_size);
			nic_rqe->wqe_words[NES_NIC_RQ_WQE_LENGTH_3_2_IDX] = 0;
			nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_LOW_IDX] =
				cpu_to_le32((u32)bus_address);
			nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_HIGH_IDX] =
				cpu_to_le32((u32)((u64)bus_address >> 32));
			nesmgt->rx_skb[nesmgt->rq_head] = skb;
			nesmgt->rq_head++;
			nesmgt->rq_head &= nesmgt->rq_size - 1;
			atomic_dec(&mgtvnic->rx_skbs_needed);
			barrier();
			if (++rx_wqes_posted == 255) {
				nes_write32(nesdev->regs + NES_WQE_ALLOC, (rx_wqes_posted << 24) | nesmgt->qp_id);
				rx_wqes_posted = 0;
			}
		} else {
			spin_lock_irqsave(&nesmgt->rq_lock, flags);
			if (((nesmgt->rq_size - 1) == atomic_read(&mgtvnic->rx_skbs_needed)) &&
			    (atomic_read(&mgtvnic->rx_skb_timer_running) == 0)) {
				atomic_set(&mgtvnic->rx_skb_timer_running, 1);
				spin_unlock_irqrestore(&nesmgt->rq_lock, flags);
				mgtvnic->rq_wqes_timer.expires = jiffies + (HZ / 2); /* 1/2 second */
				add_timer(&mgtvnic->rq_wqes_timer);
			} else {
				spin_unlock_irqrestore(&nesmgt->rq_lock, flags);
			}
			break;
		}
	} while (atomic_read(&mgtvnic->rx_skbs_needed));
	barrier();
	if (rx_wqes_posted)
		nes_write32(nesdev->regs + NES_WQE_ALLOC, (rx_wqes_posted << 24) | nesmgt->qp_id);
	nesmgt->replenishing_rq = 0;
}

/**
 * nes_mgt_rq_wqes_timeout
 */
static void nes_mgt_rq_wqes_timeout(unsigned long parm)
{
	struct nes_vnic_mgt *mgtvnic = (struct nes_vnic_mgt *)parm;

	atomic_set(&mgtvnic->rx_skb_timer_running, 0);
	if (atomic_read(&mgtvnic->rx_skbs_needed))
		nes_replenish_mgt_rq(mgtvnic);
}

/**
 * nes_mgt_free_skb - unmap and free skb
 */
static void nes_mgt_free_skb(struct nes_device *nesdev, struct sk_buff *skb, u32 dir)
{
	struct nes_rskb_cb *cb;

	cb = (struct nes_rskb_cb *)&skb->cb[0];
	pci_unmap_single(nesdev->pcidev, cb->busaddr, cb->maplen, dir);
	cb->busaddr = 0;
	dev_kfree_skb_any(skb);
}

/**
 * nes_download_callback - handle download completions
 */
static void nes_download_callback(struct nes_device *nesdev, struct nes_cqp_request *cqp_request)
{
	struct pau_fpdu_info *fpdu_info = cqp_request->cqp_callback_pointer;
	struct nes_qp *nesqp = fpdu_info->nesqp;
	struct sk_buff *skb;
	int i;

	for (i = 0; i < fpdu_info->frag_cnt; i++) {
		skb = fpdu_info->frags[i].skb;
		if (fpdu_info->frags[i].cmplt) {
			nes_mgt_free_skb(nesdev, skb, PCI_DMA_TODEVICE);
			nes_rem_ref_cm_node(nesqp->cm_node);
		}
	}

	if (fpdu_info->hdr_vbase)
		pci_free_consistent(nesdev->pcidev, fpdu_info->hdr_len,
				    fpdu_info->hdr_vbase, fpdu_info->hdr_pbase);
	kfree(fpdu_info);
}

/**
 * nes_get_seq - Get the seq, ack_seq and window from the packet
 */
static u32 nes_get_seq(struct sk_buff *skb, u32 *ack, u16 *wnd, u32 *fin_rcvd, u32 *rst_rcvd)
{
	struct nes_rskb_cb *cb = (struct nes_rskb_cb *)&skb->cb[0];
	struct iphdr *iph = (struct iphdr *)(cb->data_start + ETH_HLEN);
	struct tcphdr *tcph = (struct tcphdr *)(((char *)iph) + (4 * iph->ihl));

	*ack = be32_to_cpu(tcph->ack_seq);
	*wnd = be16_to_cpu(tcph->window);
	*fin_rcvd = tcph->fin;
	*rst_rcvd = tcph->rst;
	return be32_to_cpu(tcph->seq);
}

/**
 * nes_get_next_skb - Get the next skb based on where current skb is in the queue
 */
static struct sk_buff *nes_get_next_skb(struct nes_device *nesdev, struct nes_qp *nesqp,
					struct sk_buff *skb, u32 nextseq, u32 *ack,
					u16 *wnd, u32 *fin_rcvd, u32 *rst_rcvd)
{
	u32 seq;
	bool processacks;
	struct sk_buff *old_skb;

	if (skb) {
		/* Continue processing fpdu */
		if (skb->next == (struct sk_buff *)&nesqp->pau_list)
			goto out;
		skb = skb->next;
		processacks = false;
	} else {
		/* Starting a new one */
		if (skb_queue_empty(&nesqp->pau_list))
			goto out;
		skb = skb_peek(&nesqp->pau_list);
		processacks = true;
	}

	while (1) {
		seq = nes_get_seq(skb, ack, wnd, fin_rcvd, rst_rcvd);
		if (seq == nextseq) {
			if (skb->len || processacks)
				break;
		} else if (after(seq, nextseq)) {
			goto out;
		}

		if (skb->next == (struct sk_buff *)&nesqp->pau_list)
			goto out;

		old_skb = skb;
		skb = skb->next;
		skb_unlink(old_skb, &nesqp->pau_list);
		nes_mgt_free_skb(nesdev, old_skb, PCI_DMA_TODEVICE);
		nes_rem_ref_cm_node(nesqp->cm_node);
	}
	return skb;

out:
	return NULL;
}

/**
 * get_fpdu_info - Find the next complete fpdu and return its fragments.
 */
static int get_fpdu_info(struct nes_device *nesdev, struct nes_qp *nesqp,
			 struct pau_fpdu_info **pau_fpdu_info)
{
	struct sk_buff *skb;
	struct iphdr *iph;
	struct tcphdr *tcph;
	struct nes_rskb_cb *cb;
	struct pau_fpdu_info *fpdu_info = NULL;
	struct pau_fpdu_frag frags[MAX_FPDU_FRAGS];
	unsigned long flags;
	u32 fpdu_len = 0;
	u32 tmp_len;
	int frag_cnt = 0;
	u32 tot_len;
	u32 frag_tot;
	u32 ack;
	u32 fin_rcvd;
	u32 rst_rcvd;
	u16 wnd;
	int i;
	int rc = 0;

	*pau_fpdu_info = NULL;

	spin_lock_irqsave(&nesqp->pau_lock, flags);
	skb = nes_get_next_skb(nesdev, nesqp, NULL, nesqp->pau_rcv_nxt, &ack, &wnd, &fin_rcvd, &rst_rcvd);
	if (!skb) {
		spin_unlock_irqrestore(&nesqp->pau_lock, flags);
		goto out;
	}
	cb = (struct nes_rskb_cb *)&skb->cb[0];
	if (skb->len) {
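		/*
		 * The first two bytes of an aligned FPDU carry the MPA
		 * length; add the MPA framing bytes and round up to the
		 * 4-byte MPA boundary to get the full FPDU length.
		 */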
		fpdu_len = be16_to_cpu(*(__be16 *) skb->data) + MPA_FRAMING;
		fpdu_len = (fpdu_len + 3) & 0xfffffffc;
		tmp_len = fpdu_len;

		/* See if we have all of the fpdu */
		frag_tot = 0;
		memset(&frags, 0, sizeof frags);
		for (i = 0; i < MAX_FPDU_FRAGS; i++) {
			frags[i].physaddr = cb->busaddr;
			frags[i].physaddr += skb->data - cb->data_start;
			frags[i].frag_len = min(tmp_len, skb->len);
			frags[i].skb = skb;
			frags[i].cmplt = (skb->len == frags[i].frag_len);
			frag_tot += frags[i].frag_len;
			frag_cnt++;

			tmp_len -= frags[i].frag_len;
			if (tmp_len == 0)
				break;

			skb = nes_get_next_skb(nesdev, nesqp, skb,
					       nesqp->pau_rcv_nxt + frag_tot, &ack, &wnd, &fin_rcvd, &rst_rcvd);
			if (!skb) {
				spin_unlock_irqrestore(&nesqp->pau_lock, flags);
				goto out;
			} else if (rst_rcvd) {
				/* rst received in the middle of fpdu */
				for (; i >= 0; i--) {
					skb_unlink(frags[i].skb, &nesqp->pau_list);
					nes_mgt_free_skb(nesdev, frags[i].skb, PCI_DMA_TODEVICE);
				}
				cb = (struct nes_rskb_cb *)&skb->cb[0];
				frags[0].physaddr = cb->busaddr;
				frags[0].physaddr += skb->data - cb->data_start;
				frags[0].frag_len = skb->len;
				frags[0].skb = skb;
				frags[0].cmplt = true;
				frag_cnt = 1;
				break;
			}

			cb = (struct nes_rskb_cb *)&skb->cb[0];
		}
	} else {
		/* no data */
		frags[0].physaddr = cb->busaddr;
		frags[0].frag_len = 0;
		frags[0].skb = skb;
		frags[0].cmplt = true;
		frag_cnt = 1;
	}
	spin_unlock_irqrestore(&nesqp->pau_lock, flags);

	/* Found one */
	fpdu_info = kzalloc(sizeof(*fpdu_info), GFP_ATOMIC);
	if (fpdu_info == NULL) {
		nes_debug(NES_DBG_PAU, "Failed to alloc a fpdu_info.\n");
		rc = -ENOMEM;
		goto out;
	}

	fpdu_info->cqp_request = nes_get_cqp_request(nesdev);
	if (fpdu_info->cqp_request == NULL) {
		nes_debug(NES_DBG_PAU, "Failed to get a cqp_request.\n");
		rc = -ENOMEM;
		goto out;
	}

	cb = (struct nes_rskb_cb *)&frags[0].skb->cb[0];
	iph = (struct iphdr *)(cb->data_start + ETH_HLEN);
	tcph = (struct tcphdr *)(((char *)iph) + (4 * iph->ihl));
	fpdu_info->hdr_len = (((unsigned char *)tcph) + 4 * (tcph->doff)) - cb->data_start;
	fpdu_info->data_len = fpdu_len;
	tot_len = fpdu_info->hdr_len + fpdu_len - ETH_HLEN;

	if (frags[0].cmplt) {
		fpdu_info->hdr_pbase = cb->busaddr;
		fpdu_info->hdr_vbase = NULL;
	} else {
		fpdu_info->hdr_vbase = pci_alloc_consistent(nesdev->pcidev,
							    fpdu_info->hdr_len, &fpdu_info->hdr_pbase);
		if (!fpdu_info->hdr_vbase) {
			nes_debug(NES_DBG_PAU, "Unable to allocate memory for pau first frag\n");
			rc = -ENOMEM;
			goto out;
		}

		/* Copy hdrs, adjusting len and seqnum */
		memcpy(fpdu_info->hdr_vbase, cb->data_start, fpdu_info->hdr_len);
		iph = (struct iphdr *)(fpdu_info->hdr_vbase + ETH_HLEN);
		tcph = (struct tcphdr *)(((char *)iph) + (4 * iph->ihl));
	}

	iph->tot_len = cpu_to_be16(tot_len);
	iph->saddr = cpu_to_be32(0x7f000001);

	tcph->seq = cpu_to_be32(nesqp->pau_rcv_nxt);
	tcph->ack_seq = cpu_to_be32(ack);
	tcph->window = cpu_to_be16(wnd);

	nesqp->pau_rcv_nxt += fpdu_len + fin_rcvd;

	memcpy(fpdu_info->frags, frags, sizeof(fpdu_info->frags));
	fpdu_info->frag_cnt = frag_cnt;
	fpdu_info->nesqp = nesqp;
	*pau_fpdu_info = fpdu_info;

	/* Update skb's for next pass */
	for (i = 0; i < frag_cnt; i++) {
		cb = (struct nes_rskb_cb *)&frags[i].skb->cb[0];
		skb_pull(frags[i].skb, frags[i].frag_len);

		if (frags[i].skb->len == 0) {
			/* Pull skb off the list - it will be freed in the callback */
			spin_lock_irqsave(&nesqp->pau_lock, flags);
			skb_unlink(frags[i].skb, &nesqp->pau_list);
			spin_unlock_irqrestore(&nesqp->pau_lock, flags);
		} else {
			/* Last skb still has data so update the seq */
			iph = (struct iphdr *)(cb->data_start + ETH_HLEN);
			tcph = (struct tcphdr *)(((char *)iph) + (4 * iph->ihl));
			tcph->seq = cpu_to_be32(nesqp->pau_rcv_nxt);
		}
	}

out:
	if (rc) {
		if (fpdu_info) {
			if (fpdu_info->cqp_request)
				nes_put_cqp_request(nesdev, fpdu_info->cqp_request);
			kfree(fpdu_info);
		}
	}

	return rc;
}

/**
 * forward_fpdus - send complete fpdus, one at a time
 */
static int forward_fpdus(struct nes_vnic *nesvnic, struct nes_qp *nesqp)
{
	struct nes_device *nesdev = nesvnic->nesdev;
	struct pau_fpdu_info *fpdu_info;
	struct nes_hw_cqp_wqe *cqp_wqe;
	struct nes_cqp_request *cqp_request;
	u64 u64tmp;
	u32 u32tmp;
	int rc;

	while (1) {
		rc = get_fpdu_info(nesdev, nesqp, &fpdu_info);
		if (fpdu_info == NULL)
			return rc;

		cqp_request = fpdu_info->cqp_request;
		cqp_wqe = &cqp_request->cqp_wqe;
		nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
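		/*
		 * Frag 0 of the download WQE points at the rebuilt
		 * Ethernet/IP/TCP header; frags 1-4 carry the FPDU data
		 * fragments collected by get_fpdu_info().
		 */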
		set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_DL_OPCODE_IDX,
				    NES_CQP_DOWNLOAD_SEGMENT |
				    (((u32)nesvnic->logical_port) << NES_CQP_OP_LOGICAL_PORT_SHIFT));

		u32tmp = fpdu_info->hdr_len << 16;
		u32tmp |= fpdu_info->hdr_len + (u32)fpdu_info->data_len;
		set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_DL_LENGTH_0_TOTAL_IDX,
				    u32tmp);

		u32tmp = (fpdu_info->frags[1].frag_len << 16) | fpdu_info->frags[0].frag_len;
		set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_LENGTH_2_1_IDX,
				    u32tmp);

		u32tmp = (fpdu_info->frags[3].frag_len << 16) | fpdu_info->frags[2].frag_len;
		set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_LENGTH_4_3_IDX,
				    u32tmp);

		u64tmp = (u64)fpdu_info->hdr_pbase;
		set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG0_LOW_IDX,
				    lower_32_bits(u64tmp));
		/* Pass the full address to upper_32_bits(); the original
		 * "upper_32_bits(u64tmp >> 32)" shifted twice and always
		 * wrote 0 for the high dword. */
		set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG0_HIGH_IDX,
				    upper_32_bits(u64tmp));

		set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG1_LOW_IDX,
				    lower_32_bits(fpdu_info->frags[0].physaddr));
		set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG1_HIGH_IDX,
				    upper_32_bits(fpdu_info->frags[0].physaddr));

		set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG2_LOW_IDX,
				    lower_32_bits(fpdu_info->frags[1].physaddr));
		set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG2_HIGH_IDX,
				    upper_32_bits(fpdu_info->frags[1].physaddr));

		set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG3_LOW_IDX,
				    lower_32_bits(fpdu_info->frags[2].physaddr));
		set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG3_HIGH_IDX,
				    upper_32_bits(fpdu_info->frags[2].physaddr));

		set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG4_LOW_IDX,
				    lower_32_bits(fpdu_info->frags[3].physaddr));
		set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG4_HIGH_IDX,
				    upper_32_bits(fpdu_info->frags[3].physaddr));

		cqp_request->cqp_callback_pointer = fpdu_info;
		cqp_request->callback = 1;
		cqp_request->cqp_callback = nes_download_callback;

		atomic_set(&cqp_request->refcount, 1);
		nes_post_cqp_request(nesdev, cqp_request);
	}

	return 0;
}
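
/**
 * process_fpdus - forward fpdus until no more work is pending
 */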
static void process_fpdus(struct nes_vnic *nesvnic, struct nes_qp *nesqp)
{
	int again = 1;
	unsigned long flags;

	do {
		/* Ignore rc - if it failed, tcp retries will cause it to try again */
		forward_fpdus(nesvnic, nesqp);

		spin_lock_irqsave(&nesqp->pau_lock, flags);
		if (nesqp->pau_pending) {
			nesqp->pau_pending = 0;
		} else {
			nesqp->pau_busy = 0;
			again = 0;
		}
		spin_unlock_irqrestore(&nesqp->pau_lock, flags);
	} while (again);
}

/**
 * queue_fpdus - Handle fpdu's that hw passed up to sw
 */
static void queue_fpdus(struct sk_buff *skb, struct nes_vnic *nesvnic, struct nes_qp *nesqp)
{
	struct sk_buff *tmpskb;
	struct nes_rskb_cb *cb;
	struct iphdr *iph;
	struct tcphdr *tcph;
	unsigned char *tcph_end;
	u32 rcv_nxt;
	u32 rcv_wnd;
	u32 seqnum;
	u32 len;
	bool process_it = false;
	unsigned long flags;

	/* Move data ptr to after tcp header */
	iph = (struct iphdr *)skb->data;
	tcph = (struct tcphdr *)(((char *)iph) + (4 * iph->ihl));
	seqnum = be32_to_cpu(tcph->seq);
	tcph_end = (((char *)tcph) + (4 * tcph->doff));

	len = be16_to_cpu(iph->tot_len);
	if (skb->len > len)
		skb_trim(skb, len);
	skb_pull(skb, tcph_end - skb->data);

	/* Initialize tracking values */
	cb = (struct nes_rskb_cb *)&skb->cb[0];
	cb->seqnum = seqnum;

	/* Make sure data is in the receive window */
	rcv_nxt = nesqp->pau_rcv_nxt;
	rcv_wnd = le32_to_cpu(nesqp->nesqp_context->rcv_wnd);
	if (!between(seqnum, rcv_nxt, (rcv_nxt + rcv_wnd))) {
		nes_mgt_free_skb(nesvnic->nesdev, skb, PCI_DMA_TODEVICE);
		nes_rem_ref_cm_node(nesqp->cm_node);
		return;
	}

	spin_lock_irqsave(&nesqp->pau_lock, flags);

	if (nesqp->pau_busy)
		nesqp->pau_pending = 1;
	else
		nesqp->pau_busy = 1;

	/* Queue skb by sequence number */
	if (skb_queue_len(&nesqp->pau_list) == 0) {
		skb_queue_head(&nesqp->pau_list, skb);
	} else {
		tmpskb = nesqp->pau_list.next;
		while (tmpskb != (struct sk_buff *)&nesqp->pau_list) {
			cb = (struct nes_rskb_cb *)&tmpskb->cb[0];
			if (before(seqnum, cb->seqnum))
				break;
			tmpskb = tmpskb->next;
		}
		skb_insert(tmpskb, skb, &nesqp->pau_list);
	}
	if (nesqp->pau_state == PAU_READY)
		process_it = true;
	spin_unlock_irqrestore(&nesqp->pau_lock, flags);

	if (process_it)
		process_fpdus(nesvnic, nesqp);

	return;
}

/**
 * mgt_thread - Handle mgt skbs in a safe context
 */
static int mgt_thread(void *context)
{
	struct nes_vnic *nesvnic = context;
	struct sk_buff *skb;
	struct nes_rskb_cb *cb;

	while (!kthread_should_stop()) {
		wait_event_interruptible(nesvnic->mgt_wait_queue,
					 skb_queue_len(&nesvnic->mgt_skb_list) || kthread_should_stop());
		while ((skb_queue_len(&nesvnic->mgt_skb_list)) && !kthread_should_stop()) {
			skb = skb_dequeue(&nesvnic->mgt_skb_list);
			cb = (struct nes_rskb_cb *)&skb->cb[0];
			cb->data_start = skb->data - ETH_HLEN;
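			/* Map from the start of the Ethernet header so the
			 * hardware download can reference the whole frame. */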
			cb->busaddr = pci_map_single(nesvnic->nesdev->pcidev, cb->data_start,
						     nesvnic->max_frame_size, PCI_DMA_TODEVICE);
			queue_fpdus(skb, nesvnic, cb->nesqp);
		}
	}

	/* Closing down so delete any entries on the queue */
	while (skb_queue_len(&nesvnic->mgt_skb_list)) {
		skb = skb_dequeue(&nesvnic->mgt_skb_list);
		cb = (struct nes_rskb_cb *)&skb->cb[0];
		nes_rem_ref_cm_node(cb->nesqp->cm_node);
		dev_kfree_skb_any(skb);
	}
	return 0;
}

/**
 * nes_queue_mgt_skbs - Queue skb so it can be handled in a thread context
 */
void nes_queue_mgt_skbs(struct sk_buff *skb, struct nes_vnic *nesvnic, struct nes_qp *nesqp)
{
	struct nes_rskb_cb *cb;

	cb = (struct nes_rskb_cb *)&skb->cb[0];
	cb->nesqp = nesqp;
	skb_queue_tail(&nesvnic->mgt_skb_list, skb);
	wake_up_interruptible(&nesvnic->mgt_wait_queue);
}
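
/**
 * nes_destroy_pau_qp - free skbs queued on a pau QP that is going away
 */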
void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
{
	struct sk_buff *skb;
	unsigned long flags;

	atomic_inc(&pau_qps_destroyed);

	/* Free packets that have not yet been forwarded */
	/* Lock is acquired by skb_dequeue when removing the skb */
	spin_lock_irqsave(&nesqp->pau_lock, flags);
	while (skb_queue_len(&nesqp->pau_list)) {
		skb = skb_dequeue(&nesqp->pau_list);
		nes_mgt_free_skb(nesdev, skb, PCI_DMA_TODEVICE);
		nes_rem_ref_cm_node(nesqp->cm_node);
	}
	spin_unlock_irqrestore(&nesqp->pau_lock, flags);
}
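
/**
 * nes_chg_qh_handler - CQP callback driving the quad hash change state machine
 */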
static void nes_chg_qh_handler(struct nes_device *nesdev, struct nes_cqp_request *cqp_request)
{
	struct pau_qh_chg *qh_chg = cqp_request->cqp_callback_pointer;
	struct nes_cqp_request *new_request;
	struct nes_hw_cqp_wqe *cqp_wqe;
	struct nes_adapter *nesadapter;
	struct nes_qp *nesqp;
	struct nes_v4_quad nes_quad;
	u32 crc_value;
	u64 u64temp;

	nesadapter = nesdev->nesadapter;
	nesqp = qh_chg->nesqp;

	/* Should we handle the bad completion */
	if (cqp_request->major_code) {
		printk(KERN_ERR PFX "Invalid cqp_request major_code=0x%x\n",
		       cqp_request->major_code);
		WARN_ON(1);
	}
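	/*
	 * Two-step state machine: PAU_DEL_QH deletes the old quad hash,
	 * then PAU_ADD_LB_QH installs the loopback (127.0.0.1) hash.
	 * Once that completes the QP enters PAU_READY and the queued
	 * fpdus are processed.
	 */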
	switch (nesqp->pau_state) {
	case PAU_DEL_QH:
		/* Old hash code deleted, now set the new one */
		nesqp->pau_state = PAU_ADD_LB_QH;
		new_request = nes_get_cqp_request(nesdev);
		if (new_request == NULL) {
			nes_debug(NES_DBG_PAU, "Failed to get a new_request.\n");
			WARN_ON(1);
			return;
		}

		memset(&nes_quad, 0, sizeof(nes_quad));
		nes_quad.DstIpAdrIndex =
			cpu_to_le32((u32)PCI_FUNC(nesdev->pcidev->devfn) << 24);
		nes_quad.SrcIpadr = cpu_to_be32(0x7f000001);
		nes_quad.TcpPorts[0] = swab16(nesqp->nesqp_context->tcpPorts[1]);
		nes_quad.TcpPorts[1] = swab16(nesqp->nesqp_context->tcpPorts[0]);

		/* Produce hash key */
		crc_value = get_crc_value(&nes_quad);
		nesqp->hte_index = cpu_to_be32(crc_value ^ 0xffffffff);
		nes_debug(NES_DBG_PAU, "new HTE Index = 0x%08X, CRC = 0x%08X\n",
			  nesqp->hte_index, nesqp->hte_index & nesadapter->hte_index_mask);

		nesqp->hte_index &= nesadapter->hte_index_mask;
		nesqp->nesqp_context->hte_index = cpu_to_le32(nesqp->hte_index);
		nesqp->nesqp_context->ip0 = cpu_to_le32(0x7f000001);
		nesqp->nesqp_context->rcv_nxt = cpu_to_le32(nesqp->pau_rcv_nxt);

		cqp_wqe = &new_request->cqp_wqe;
		nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
		set_wqe_32bit_value(cqp_wqe->wqe_words,
				    NES_CQP_WQE_OPCODE_IDX, NES_CQP_MANAGE_QUAD_HASH |
				    NES_CQP_QP_TYPE_IWARP | NES_CQP_QP_CONTEXT_VALID | NES_CQP_QP_IWARP_STATE_RTS);
		set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX, nesqp->hwqp.qp_id);
		u64temp = (u64)nesqp->nesqp_context_pbase;
		set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_QP_WQE_CONTEXT_LOW_IDX, u64temp);

		nes_debug(NES_DBG_PAU, "Waiting for CQP completion for adding the quad hash.\n");

		new_request->cqp_callback_pointer = qh_chg;
		new_request->callback = 1;
		new_request->cqp_callback = nes_chg_qh_handler;
		atomic_set(&new_request->refcount, 1);
		nes_post_cqp_request(nesdev, new_request);
		break;

	case PAU_ADD_LB_QH:
		/* Start processing the queued fpdu's */
		nesqp->pau_state = PAU_READY;
		process_fpdus(qh_chg->nesvnic, qh_chg->nesqp);
		kfree(qh_chg);
		break;
	}
}

/**
 * nes_change_quad_hash
 */
static int nes_change_quad_hash(struct nes_device *nesdev,
				struct nes_vnic *nesvnic, struct nes_qp *nesqp)
{
	struct nes_cqp_request *cqp_request = NULL;
	struct pau_qh_chg *qh_chg = NULL;
	u64 u64temp;
	struct nes_hw_cqp_wqe *cqp_wqe;
	int ret = 0;

	cqp_request = nes_get_cqp_request(nesdev);
	if (cqp_request == NULL) {
		nes_debug(NES_DBG_PAU, "Failed to get a cqp_request.\n");
		ret = -ENOMEM;
		goto chg_qh_err;
	}

	qh_chg = kmalloc(sizeof *qh_chg, GFP_ATOMIC);
	if (qh_chg == NULL) {
		nes_debug(NES_DBG_PAU, "Failed to alloc a qh_chg.\n");
		ret = -ENOMEM;
		goto chg_qh_err;
	}
	qh_chg->nesdev = nesdev;
	qh_chg->nesvnic = nesvnic;
	qh_chg->nesqp = nesqp;
	nesqp->pau_state = PAU_DEL_QH;

	cqp_wqe = &cqp_request->cqp_wqe;
	nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
	set_wqe_32bit_value(cqp_wqe->wqe_words,
			    NES_CQP_WQE_OPCODE_IDX, NES_CQP_MANAGE_QUAD_HASH | NES_CQP_QP_DEL_HTE |
			    NES_CQP_QP_TYPE_IWARP | NES_CQP_QP_CONTEXT_VALID | NES_CQP_QP_IWARP_STATE_RTS);
	set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX, nesqp->hwqp.qp_id);
	u64temp = (u64)nesqp->nesqp_context_pbase;
	set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_QP_WQE_CONTEXT_LOW_IDX, u64temp);

	nes_debug(NES_DBG_PAU, "Waiting for CQP completion for deleting the quad hash.\n");

	cqp_request->cqp_callback_pointer = qh_chg;
	cqp_request->callback = 1;
	cqp_request->cqp_callback = nes_chg_qh_handler;
	atomic_set(&cqp_request->refcount, 1);
	nes_post_cqp_request(nesdev, cqp_request);

	return ret;

chg_qh_err:
	kfree(qh_chg);
	if (cqp_request)
		nes_put_cqp_request(nesdev, cqp_request);
	return ret;
}

/**
 * nes_mgt_ce_handler
 * This management code deals with any packed and unaligned (pau) fpdu's
 * that the hardware cannot handle.
 */
static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq)
{
	struct nes_vnic_mgt *mgtvnic = container_of(cq, struct nes_vnic_mgt, mgt_cq);
	struct nes_adapter *nesadapter = nesdev->nesadapter;
	u32 head;
	u32 cq_size;
	u32 cqe_count = 0;
	u32 cqe_misc;
	u32 qp_id = 0;
	u32 skbs_needed;
	unsigned long context;
	struct nes_qp *nesqp;
	struct sk_buff *rx_skb;
	struct nes_rskb_cb *cb;

	head = cq->cq_head;
	cq_size = cq->cq_size;

	while (1) {
		cqe_misc = le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_NIC_CQE_MISC_IDX]);
		if (!(cqe_misc & NES_NIC_CQE_VALID))
			break;

		nesqp = NULL;
		if (cqe_misc & NES_NIC_CQE_ACCQP_VALID) {
			qp_id = le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_NIC_CQE_ACCQP_ID_IDX]);
			qp_id &= 0x001fffff;
			if (qp_id < nesadapter->max_qp) {
				context = (unsigned long)nesadapter->qp_table[qp_id - NES_FIRST_QPN];
				nesqp = (struct nes_qp *)context;
			}
		}

		if (nesqp) {
			if (nesqp->pau_mode == false) {
				nesqp->pau_mode = true; /* First time for this qp */
				nesqp->pau_rcv_nxt = le32_to_cpu(
					cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
				skb_queue_head_init(&nesqp->pau_list);
				spin_lock_init(&nesqp->pau_lock);
				atomic_inc(&pau_qps_created);
				nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
			}

			rx_skb = mgtvnic->mgt.rx_skb[mgtvnic->mgt.rq_tail];
			rx_skb->len = 0;
			skb_put(rx_skb, cqe_misc & 0x0000ffff);
			rx_skb->protocol = eth_type_trans(rx_skb, mgtvnic->nesvnic->netdev);
			cb = (struct nes_rskb_cb *)&rx_skb->cb[0];
			pci_unmap_single(nesdev->pcidev, cb->busaddr, cb->maplen, PCI_DMA_FROMDEVICE);
			cb->busaddr = 0;
			mgtvnic->mgt.rq_tail++;
			mgtvnic->mgt.rq_tail &= mgtvnic->mgt.rq_size - 1;

			nes_add_ref_cm_node(nesqp->cm_node);
			nes_queue_mgt_skbs(rx_skb, mgtvnic->nesvnic, nesqp);
		} else {
			printk(KERN_ERR PFX "Invalid QP %d for packed/unaligned handling\n", qp_id);
		}

		cq->cq_vbase[head].cqe_words[NES_NIC_CQE_MISC_IDX] = 0;
		cqe_count++;
		if (++head >= cq_size)
			head = 0;

		if (cqe_count == 255) {
			/* Replenish mgt CQ */
			nes_write32(nesdev->regs + NES_CQE_ALLOC, cq->cq_number | (cqe_count << 16));
			nesdev->currcq_count += cqe_count;
			cqe_count = 0;
		}

		skbs_needed = atomic_inc_return(&mgtvnic->rx_skbs_needed);
		if (skbs_needed > (mgtvnic->mgt.rq_size >> 1))
			nes_replenish_mgt_rq(mgtvnic);
	}
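	/* Re-arm the CQ for the next event and flush the CQE credits */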
	cq->cq_head = head;
	nes_write32(nesdev->regs + NES_CQE_ALLOC, NES_CQE_ALLOC_NOTIFY_NEXT |
		    cq->cq_number | (cqe_count << 16));
	nes_read32(nesdev->regs + NES_CQE_ALLOC);
	nesdev->currcq_count += cqe_count;
}

/**
 * nes_init_mgt_qp
 */
int nes_init_mgt_qp(struct nes_device *nesdev, struct net_device *netdev, struct nes_vnic *nesvnic)
{
	struct nes_vnic_mgt *mgtvnic;
	u32 counter;
	void *vmem;
	dma_addr_t pmem;
	struct nes_hw_cqp_wqe *cqp_wqe;
	u32 cqp_head;
	unsigned long flags;
	struct nes_hw_nic_qp_context *mgt_context;
	u64 u64temp;
	struct nes_hw_nic_rq_wqe *mgt_rqe;
	struct sk_buff *skb;
	u32 wqe_count;
	struct nes_rskb_cb *cb;
	u32 mgt_mem_size;
	void *mgt_vbase;
	dma_addr_t mgt_pbase;
	int i;
	int ret;

	/* Allocate space for all mgt QPs at once */
	mgtvnic = kzalloc(NES_MGT_QP_COUNT * sizeof(struct nes_vnic_mgt), GFP_KERNEL);
	if (mgtvnic == NULL) {
		nes_debug(NES_DBG_INIT, "Unable to allocate memory for mgt structure\n");
		return -ENOMEM;
	}

	/* Allocate fragment, RQ, and CQ; Reuse CEQ based on the PCI function */
	/* We are not sending from this NIC so sq is not allocated */
	mgt_mem_size = 256 +
		       (NES_MGT_WQ_COUNT * sizeof(struct nes_hw_nic_rq_wqe)) +
		       (NES_MGT_WQ_COUNT * sizeof(struct nes_hw_nic_cqe)) +
		       sizeof(struct nes_hw_nic_qp_context);
	mgt_mem_size = (mgt_mem_size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
	mgt_vbase = pci_alloc_consistent(nesdev->pcidev, NES_MGT_QP_COUNT * mgt_mem_size, &mgt_pbase);
	if (!mgt_vbase) {
		kfree(mgtvnic);
		nes_debug(NES_DBG_INIT, "Unable to allocate memory for mgt host descriptor rings\n");
		return -ENOMEM;
	}

	nesvnic->mgt_mem_size = NES_MGT_QP_COUNT * mgt_mem_size;
	nesvnic->mgt_vbase = mgt_vbase;
	nesvnic->mgt_pbase = mgt_pbase;

	skb_queue_head_init(&nesvnic->mgt_skb_list);
	init_waitqueue_head(&nesvnic->mgt_wait_queue);
	nesvnic->mgt_thread = kthread_run(mgt_thread, nesvnic, "nes_mgt_thread");

	for (i = 0; i < NES_MGT_QP_COUNT; i++) {
		mgtvnic->nesvnic = nesvnic;
		mgtvnic->mgt.qp_id = nesdev->mac_index + NES_MGT_QP_OFFSET + i;
		memset(mgt_vbase, 0, mgt_mem_size);
		nes_debug(NES_DBG_INIT, "Allocated mgt QP structures at %p (phys = %016lX), size = %u.\n",
			  mgt_vbase, (unsigned long)mgt_pbase, mgt_mem_size);
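		/* Align the start of the ring memory on a 256-byte boundary */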
		vmem = (void *)(((unsigned long)mgt_vbase + (256 - 1)) &
				~(unsigned long)(256 - 1));
		pmem = (dma_addr_t)(((unsigned long long)mgt_pbase + (256 - 1)) &
				    ~(unsigned long long)(256 - 1));

		spin_lock_init(&mgtvnic->mgt.rq_lock);

		/* setup the RQ */
		mgtvnic->mgt.rq_vbase = vmem;
		mgtvnic->mgt.rq_pbase = pmem;
		mgtvnic->mgt.rq_head = 0;
		mgtvnic->mgt.rq_tail = 0;
		mgtvnic->mgt.rq_size = NES_MGT_WQ_COUNT;

		/* setup the CQ */
		vmem += (NES_MGT_WQ_COUNT * sizeof(struct nes_hw_nic_rq_wqe));
		pmem += (NES_MGT_WQ_COUNT * sizeof(struct nes_hw_nic_rq_wqe));

		mgtvnic->mgt_cq.cq_number = mgtvnic->mgt.qp_id;
		mgtvnic->mgt_cq.cq_vbase = vmem;
		mgtvnic->mgt_cq.cq_pbase = pmem;
		mgtvnic->mgt_cq.cq_head = 0;
		mgtvnic->mgt_cq.cq_size = NES_MGT_WQ_COUNT;

		mgtvnic->mgt_cq.ce_handler = nes_mgt_ce_handler;

		/* Send CreateCQ request to CQP */
		spin_lock_irqsave(&nesdev->cqp.lock, flags);
		cqp_head = nesdev->cqp.sq_head;

		cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head];
		nes_fill_init_cqp_wqe(cqp_wqe, nesdev);

		cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = cpu_to_le32(
			NES_CQP_CREATE_CQ | NES_CQP_CQ_CEQ_VALID |
			((u32)mgtvnic->mgt_cq.cq_size << 16));
		cqp_wqe->wqe_words[NES_CQP_WQE_ID_IDX] = cpu_to_le32(
			mgtvnic->mgt_cq.cq_number | ((u32)nesdev->ceq_index << 16));
		u64temp = (u64)mgtvnic->mgt_cq.cq_pbase;
		set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_CQ_WQE_PBL_LOW_IDX, u64temp);
		cqp_wqe->wqe_words[NES_CQP_CQ_WQE_CQ_CONTEXT_HIGH_IDX] = 0;
		u64temp = (unsigned long)&mgtvnic->mgt_cq;
		cqp_wqe->wqe_words[NES_CQP_CQ_WQE_CQ_CONTEXT_LOW_IDX] = cpu_to_le32((u32)(u64temp >> 1));
		cqp_wqe->wqe_words[NES_CQP_CQ_WQE_CQ_CONTEXT_HIGH_IDX] =
			cpu_to_le32(((u32)((u64temp) >> 33)) & 0x7FFFFFFF);
		cqp_wqe->wqe_words[NES_CQP_CQ_WQE_DOORBELL_INDEX_HIGH_IDX] = 0;

		if (++cqp_head >= nesdev->cqp.sq_size)
			cqp_head = 0;
		cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head];
		nes_fill_init_cqp_wqe(cqp_wqe, nesdev);

		/* Send CreateQP request to CQP */
		mgt_context = (void *)(&mgtvnic->mgt_cq.cq_vbase[mgtvnic->mgt_cq.cq_size]);
		mgt_context->context_words[NES_NIC_CTX_MISC_IDX] =
			cpu_to_le32((u32)NES_MGT_CTX_SIZE |
				    ((u32)PCI_FUNC(nesdev->pcidev->devfn) << 12));
		nes_debug(NES_DBG_INIT, "RX_WINDOW_BUFFER_PAGE_TABLE_SIZE = 0x%08X, RX_WINDOW_BUFFER_SIZE = 0x%08X\n",
			  nes_read_indexed(nesdev, NES_IDX_RX_WINDOW_BUFFER_PAGE_TABLE_SIZE),
			  nes_read_indexed(nesdev, NES_IDX_RX_WINDOW_BUFFER_SIZE));
		if (nes_read_indexed(nesdev, NES_IDX_RX_WINDOW_BUFFER_SIZE) != 0)
			mgt_context->context_words[NES_NIC_CTX_MISC_IDX] |= cpu_to_le32(NES_NIC_BACK_STORE);
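		/* No SQ is used on this QP, so point the SQ context at the RQ */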
		u64temp = (u64)mgtvnic->mgt.rq_pbase;
		mgt_context->context_words[NES_NIC_CTX_SQ_LOW_IDX] = cpu_to_le32((u32)u64temp);
		mgt_context->context_words[NES_NIC_CTX_SQ_HIGH_IDX] = cpu_to_le32((u32)(u64temp >> 32));
		u64temp = (u64)mgtvnic->mgt.rq_pbase;
		mgt_context->context_words[NES_NIC_CTX_RQ_LOW_IDX] = cpu_to_le32((u32)u64temp);
		mgt_context->context_words[NES_NIC_CTX_RQ_HIGH_IDX] = cpu_to_le32((u32)(u64temp >> 32));

		cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = cpu_to_le32(NES_CQP_CREATE_QP |
									 NES_CQP_QP_TYPE_NIC);
		cqp_wqe->wqe_words[NES_CQP_WQE_ID_IDX] = cpu_to_le32(mgtvnic->mgt.qp_id);
		u64temp = (u64)mgtvnic->mgt_cq.cq_pbase +
			  (mgtvnic->mgt_cq.cq_size * sizeof(struct nes_hw_nic_cqe));
		set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_QP_WQE_CONTEXT_LOW_IDX, u64temp);

		if (++cqp_head >= nesdev->cqp.sq_size)
			cqp_head = 0;
		nesdev->cqp.sq_head = cqp_head;

		barrier();

		/* Ring doorbell (2 WQEs) */
		nes_write32(nesdev->regs + NES_WQE_ALLOC, 0x02800000 | nesdev->cqp.qp_id);

		spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
		nes_debug(NES_DBG_INIT, "Waiting for create MGT QP%u to complete.\n",
			  mgtvnic->mgt.qp_id);

		ret = wait_event_timeout(nesdev->cqp.waitq, (nesdev->cqp.sq_tail == cqp_head),
					 NES_EVENT_TIMEOUT);
		nes_debug(NES_DBG_INIT, "Create MGT QP%u completed, wait_event_timeout ret = %u.\n",
			  mgtvnic->mgt.qp_id, ret);
		if (!ret) {
			nes_debug(NES_DBG_INIT, "MGT QP%u create timeout expired\n", mgtvnic->mgt.qp_id);
			if (i == 0) {
				pci_free_consistent(nesdev->pcidev, nesvnic->mgt_mem_size, nesvnic->mgt_vbase,
						    nesvnic->mgt_pbase);
				kfree(mgtvnic);
			} else {
				nes_destroy_mgt(nesvnic);
			}
			return -EIO;
		}

		/* Populate the RQ */
		for (counter = 0; counter < (NES_MGT_WQ_COUNT - 1); counter++) {
			skb = dev_alloc_skb(nesvnic->max_frame_size);
			if (!skb) {
				nes_debug(NES_DBG_INIT, "%s: out of memory for receive skb\n", netdev->name);
				return -ENOMEM;
			}

			skb->dev = netdev;

			pmem = pci_map_single(nesdev->pcidev, skb->data,
					      nesvnic->max_frame_size, PCI_DMA_FROMDEVICE);
			cb = (struct nes_rskb_cb *)&skb->cb[0];
			cb->busaddr = pmem;
			cb->maplen = nesvnic->max_frame_size;

			mgt_rqe = &mgtvnic->mgt.rq_vbase[counter];
			mgt_rqe->wqe_words[NES_NIC_RQ_WQE_LENGTH_1_0_IDX] = cpu_to_le32((u32)nesvnic->max_frame_size);
			mgt_rqe->wqe_words[NES_NIC_RQ_WQE_LENGTH_3_2_IDX] = 0;
			mgt_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_LOW_IDX] = cpu_to_le32((u32)pmem);
			mgt_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_HIGH_IDX] = cpu_to_le32((u32)((u64)pmem >> 32));
			mgtvnic->mgt.rx_skb[counter] = skb;
		}

		init_timer(&mgtvnic->rq_wqes_timer);
		mgtvnic->rq_wqes_timer.function = nes_mgt_rq_wqes_timeout;
		mgtvnic->rq_wqes_timer.data = (unsigned long)mgtvnic;

		wqe_count = NES_MGT_WQ_COUNT - 1;
		mgtvnic->mgt.rq_head = wqe_count;
		barrier();
		do {
			counter = min(wqe_count, ((u32)255));
			wqe_count -= counter;
			nes_write32(nesdev->regs + NES_WQE_ALLOC, (counter << 24) | mgtvnic->mgt.qp_id);
		} while (wqe_count);

		nes_write32(nesdev->regs + NES_CQE_ALLOC, NES_CQE_ALLOC_NOTIFY_NEXT |
			    mgtvnic->mgt_cq.cq_number);
		nes_read32(nesdev->regs + NES_CQE_ALLOC);

		mgt_vbase += mgt_mem_size;
		mgt_pbase += mgt_mem_size;

		nesvnic->mgtvnic[i] = mgtvnic++;
	}
	return 0;
}
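
/**
 * nes_destroy_mgt - tear down the mgt QPs/CQs and free their resources
 */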
void nes_destroy_mgt(struct nes_vnic *nesvnic)
{
	struct nes_device *nesdev = nesvnic->nesdev;
	struct nes_vnic_mgt *mgtvnic;
	struct nes_vnic_mgt *first_mgtvnic;
	unsigned long flags;
	struct nes_hw_cqp_wqe *cqp_wqe;
	u32 cqp_head;
	struct sk_buff *rx_skb;
	int i;
	int ret;

	kthread_stop(nesvnic->mgt_thread);

	/* Free remaining NIC receive buffers */
	first_mgtvnic = nesvnic->mgtvnic[0];
	for (i = 0; i < NES_MGT_QP_COUNT; i++) {
		mgtvnic = nesvnic->mgtvnic[i];
		if (mgtvnic == NULL)
			continue;

		while (mgtvnic->mgt.rq_head != mgtvnic->mgt.rq_tail) {
			rx_skb = mgtvnic->mgt.rx_skb[mgtvnic->mgt.rq_tail];
			nes_mgt_free_skb(nesdev, rx_skb, PCI_DMA_FROMDEVICE);
			mgtvnic->mgt.rq_tail++;
			mgtvnic->mgt.rq_tail &= (mgtvnic->mgt.rq_size - 1);
		}

		spin_lock_irqsave(&nesdev->cqp.lock, flags);

		/* Destroy NIC QP */
		cqp_head = nesdev->cqp.sq_head;
		cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head];
		nes_fill_init_cqp_wqe(cqp_wqe, nesdev);

		set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX,
				    (NES_CQP_DESTROY_QP | NES_CQP_QP_TYPE_NIC));
		set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX,
				    mgtvnic->mgt.qp_id);

		if (++cqp_head >= nesdev->cqp.sq_size)
			cqp_head = 0;

		cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head];

		/* Destroy NIC CQ */
		nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
		set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX,
				    (NES_CQP_DESTROY_CQ | ((u32)mgtvnic->mgt_cq.cq_size << 16)));
		set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX,
				    (mgtvnic->mgt_cq.cq_number | ((u32)nesdev->ceq_index << 16)));

		if (++cqp_head >= nesdev->cqp.sq_size)
			cqp_head = 0;

		nesdev->cqp.sq_head = cqp_head;
		barrier();

		/* Ring doorbell (2 WQEs) */
		nes_write32(nesdev->regs + NES_WQE_ALLOC, 0x02800000 | nesdev->cqp.qp_id);

		spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
		nes_debug(NES_DBG_SHUTDOWN, "Waiting for CQP, cqp_head=%u, cqp.sq_head=%u,"
			  " cqp.sq_tail=%u, cqp.sq_size=%u\n",
			  cqp_head, nesdev->cqp.sq_head,
			  nesdev->cqp.sq_tail, nesdev->cqp.sq_size);

		ret = wait_event_timeout(nesdev->cqp.waitq, (nesdev->cqp.sq_tail == cqp_head),
					 NES_EVENT_TIMEOUT);

		nes_debug(NES_DBG_SHUTDOWN, "Destroy MGT QP returned, wait_event_timeout ret = %u, cqp_head=%u,"
			  " cqp.sq_head=%u, cqp.sq_tail=%u\n",
			  ret, cqp_head, nesdev->cqp.sq_head, nesdev->cqp.sq_tail);
		if (!ret)
			nes_debug(NES_DBG_SHUTDOWN, "MGT QP%u destroy timeout expired\n",
				  mgtvnic->mgt.qp_id);

		nesvnic->mgtvnic[i] = NULL;
	}

	if (nesvnic->mgt_vbase) {
		pci_free_consistent(nesdev->pcidev, nesvnic->mgt_mem_size, nesvnic->mgt_vbase,
				    nesvnic->mgt_pbase);
		nesvnic->mgt_vbase = NULL;
		nesvnic->mgt_pbase = 0;
	}

	kfree(first_mgtvnic);
}