iwch_cm.c

/*
 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>

#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/route.h>

#include "tcb.h"
#include "cxgb3_offload.h"
#include "iwch.h"
#include "iwch_provider.h"
#include "iwch_cm.h"

static char *states[] = {
	"idle",
	"listen",
	"connecting",
	"mpa_wait_req",
	"mpa_req_sent",
	"mpa_req_rcvd",
	"mpa_rep_sent",
	"fpdu_mode",
	"aborting",
	"closing",
	"moribund",
	"dead",
	NULL,
};

int peer2peer = 0;
module_param(peer2peer, int, 0644);
MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=0)");

static int ep_timeout_secs = 60;
module_param(ep_timeout_secs, int, 0644);
MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "
		 "in seconds (default=60)");

static int mpa_rev = 1;
module_param(mpa_rev, int, 0644);
MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, "
		 "1 is spec compliant. (default=1)");

static int markers_enabled = 0;
module_param(markers_enabled, int, 0644);
MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)");

static int crc_enabled = 1;
module_param(crc_enabled, int, 0644);
MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)");

static int rcv_win = 256 * 1024;
module_param(rcv_win, int, 0644);
MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)");

static int snd_win = 32 * 1024;
module_param(snd_win, int, 0644);
MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=32KB)");

static unsigned int nocong = 0;
module_param(nocong, uint, 0644);
MODULE_PARM_DESC(nocong, "Turn off congestion control (default=0)");

static unsigned int cong_flavor = 1;
module_param(cong_flavor, uint, 0644);
MODULE_PARM_DESC(cong_flavor, "TCP Congestion control flavor (default=1)");

static struct workqueue_struct *workq;
static struct sk_buff_head rxq;

static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
static void ep_timeout(unsigned long arg);
static void connect_reply_upcall(struct iwch_ep *ep, int status);

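/*
 * Each endpoint carries a single timer that bounds CM operations; when it
 * fires, ep_timeout() tears down the connection.  Arming the timer takes a
 * reference on the endpoint so it cannot be freed while the timer is
 * pending; stop_ep_timer() drops that reference.  Restarting an
 * already-pending timer therefore deliberately skips the extra get_ep().
 */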
static void start_ep_timer(struct iwch_ep *ep)
{
	PDBG("%s ep %p\n", __func__, ep);
	if (timer_pending(&ep->timer)) {
		PDBG("%s stopped / restarted timer ep %p\n", __func__, ep);
		del_timer_sync(&ep->timer);
	} else
		get_ep(&ep->com);
	ep->timer.expires = jiffies + ep_timeout_secs * HZ;
	ep->timer.data = (unsigned long)ep;
	ep->timer.function = ep_timeout;
	add_timer(&ep->timer);
}

static void stop_ep_timer(struct iwch_ep *ep)
{
	PDBG("%s ep %p\n", __func__, ep);
	if (!timer_pending(&ep->timer)) {
		WARN(1, "%s timer stopped when it's not running!  ep %p state %u\n",
		     __func__, ep, ep->com.state);
		return;
	}
	del_timer_sync(&ep->timer);
	put_ep(&ep->com);
}

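/*
 * Thin wrappers around l2t_send()/cxgb3_ofld_send() that free the skb and
 * fail with -EIO when the rdev has hit a fatal error, so callers never
 * queue work to a dead adapter.
 */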
static int iwch_l2t_send(struct t3cdev *tdev, struct sk_buff *skb, struct l2t_entry *l2e)
{
	int error = 0;
	struct cxio_rdev *rdev;

	rdev = (struct cxio_rdev *)tdev->ulp;
	if (cxio_fatal_error(rdev)) {
		kfree_skb(skb);
		return -EIO;
	}
	error = l2t_send(tdev, skb, l2e);
	if (error < 0)
		kfree_skb(skb);
	return error < 0 ? error : 0;
}

int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb)
{
	int error = 0;
	struct cxio_rdev *rdev;

	rdev = (struct cxio_rdev *)tdev->ulp;
	if (cxio_fatal_error(rdev)) {
		kfree_skb(skb);
		return -EIO;
	}
	error = cxgb3_ofld_send(tdev, skb);
	if (error < 0)
		kfree_skb(skb);
	return error < 0 ? error : 0;
}

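/*
 * TCB helpers: release_tid() returns a hardware TID to the adapter, and
 * iwch_quiesce_tid()/iwch_resume_tid() toggle the TCB RX_QUIESCE bit via
 * CPL_SET_TCB_FIELD to pause and resume receive processing on a
 * connection.
 */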
static void release_tid(struct t3cdev *tdev, u32 hwtid, struct sk_buff *skb)
{
	struct cpl_tid_release *req;

	skb = get_skb(skb, sizeof *req, GFP_KERNEL);
	if (!skb)
		return;
	req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid));
	skb->priority = CPL_PRIORITY_SETUP;
	iwch_cxgb3_ofld_send(tdev, skb);
	return;
}

int iwch_quiesce_tid(struct iwch_ep *ep)
{
	struct cpl_set_tcb_field *req;
	struct sk_buff *skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);

	if (!skb)
		return -ENOMEM;
	req = (struct cpl_set_tcb_field *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, ep->hwtid));
	req->reply = 0;
	req->cpu_idx = 0;
	req->word = htons(W_TCB_RX_QUIESCE);
	req->mask = cpu_to_be64(1ULL << S_TCB_RX_QUIESCE);
	req->val = cpu_to_be64(1 << S_TCB_RX_QUIESCE);

	skb->priority = CPL_PRIORITY_DATA;
	return iwch_cxgb3_ofld_send(ep->com.tdev, skb);
}

int iwch_resume_tid(struct iwch_ep *ep)
{
	struct cpl_set_tcb_field *req;
	struct sk_buff *skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);

	if (!skb)
		return -ENOMEM;
	req = (struct cpl_set_tcb_field *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, ep->hwtid));
	req->reply = 0;
	req->cpu_idx = 0;
	req->word = htons(W_TCB_RX_QUIESCE);
	req->mask = cpu_to_be64(1ULL << S_TCB_RX_QUIESCE);
	req->val = 0;

	skb->priority = CPL_PRIORITY_DATA;
	return iwch_cxgb3_ofld_send(ep->com.tdev, skb);
}

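/*
 * Derive the effective MSS from the negotiated TCP options: start from the
 * adapter MTU-table entry the peer selected, subtract 40 bytes (presumably
 * the IPv4 + TCP header overhead) and another 12 if timestamps were
 * negotiated, and never let it fall below 128 bytes.
 */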
static void set_emss(struct iwch_ep *ep, u16 opt)
{
	PDBG("%s ep %p opt %u\n", __func__, ep, opt);
	ep->emss = T3C_DATA(ep->com.tdev)->mtus[G_TCPOPT_MSS(opt)] - 40;
	if (G_TCPOPT_TSTAMP(opt))
		ep->emss -= 12;
	if (ep->emss < 128)
		ep->emss = 128;
	PDBG("emss=%d\n", ep->emss);
}

static enum iwch_ep_state state_read(struct iwch_ep_common *epc)
{
	unsigned long flags;
	enum iwch_ep_state state;

	spin_lock_irqsave(&epc->lock, flags);
	state = epc->state;
	spin_unlock_irqrestore(&epc->lock, flags);
	return state;
}

static void __state_set(struct iwch_ep_common *epc, enum iwch_ep_state new)
{
	epc->state = new;
}

static void state_set(struct iwch_ep_common *epc, enum iwch_ep_state new)
{
	unsigned long flags;

	spin_lock_irqsave(&epc->lock, flags);
	PDBG("%s - %s -> %s\n", __func__, states[epc->state], states[new]);
	__state_set(epc, new);
	spin_unlock_irqrestore(&epc->lock, flags);
	return;
}

static void *alloc_ep(int size, gfp_t gfp)
{
	struct iwch_ep_common *epc;

	epc = kzalloc(size, gfp);
	if (epc) {
		kref_init(&epc->kref);
		spin_lock_init(&epc->lock);
		init_waitqueue_head(&epc->waitq);
	}
	PDBG("%s alloc ep %p\n", __func__, epc);
	return epc;
}

void __free_ep(struct kref *kref)
{
	struct iwch_ep *ep;
	ep = container_of(container_of(kref, struct iwch_ep_common, kref),
			  struct iwch_ep, com);
	PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]);
	if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
		cxgb3_remove_tid(ep->com.tdev, (void *)ep, ep->hwtid);
		dst_release(ep->dst);
		l2t_release(ep->com.tdev, ep->l2t);
	}
	kfree(ep);
}

static void release_ep_resources(struct iwch_ep *ep)
{
	PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid);
	set_bit(RELEASE_RESOURCES, &ep->com.flags);
	put_ep(&ep->com);
}

static int status2errno(int status)
{
	switch (status) {
	case CPL_ERR_NONE:
		return 0;
	case CPL_ERR_CONN_RESET:
		return -ECONNRESET;
	case CPL_ERR_ARP_MISS:
		return -EHOSTUNREACH;
	case CPL_ERR_CONN_TIMEDOUT:
		return -ETIMEDOUT;
	case CPL_ERR_TCAM_FULL:
		return -ENOMEM;
	case CPL_ERR_CONN_EXIST:
		return -EADDRINUSE;
	default:
		return -EIO;
	}
}

/*
 * Try and reuse skbs already allocated...
 */
static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
{
	if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) {
		skb_trim(skb, 0);
		skb_get(skb);
	} else {
		skb = alloc_skb(len, gfp);
	}
	return skb;
}

static struct rtable *find_route(struct t3cdev *dev, __be32 local_ip,
				 __be32 peer_ip, __be16 local_port,
				 __be16 peer_port, u8 tos)
{
	struct rtable *rt;
	struct flowi4 fl4;

	rt = ip_route_output_ports(&init_net, &fl4, NULL, peer_ip, local_ip,
				   peer_port, local_port, IPPROTO_TCP,
				   tos, 0);
	if (IS_ERR(rt))
		return NULL;
	return rt;
}

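/*
 * The adapter keeps a sorted table of d->nmtus usable MTUs; walk it and
 * return the index of the largest entry that does not exceed the path MTU.
 */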
static unsigned int find_best_mtu(const struct t3c_data *d, unsigned short mtu)
{
	int i = 0;

	while (i < d->nmtus - 1 && d->mtus[i + 1] <= mtu)
		++i;
	return i;
}

static void arp_failure_discard(struct t3cdev *dev, struct sk_buff *skb)
{
	PDBG("%s t3cdev %p\n", __func__, dev);
	kfree_skb(skb);
}

/*
 * Handle an ARP failure for an active open.
 */
static void act_open_req_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
{
	printk(KERN_ERR MOD "ARP failure during connect\n");
	kfree_skb(skb);
}

/*
 * Handle an ARP failure for a CPL_ABORT_REQ.  Change it into a no RST variant
 * and send it along.
 */
static void abort_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_abort_req *req = cplhdr(skb);

	PDBG("%s t3cdev %p\n", __func__, dev);
	req->cmd = CPL_ABORT_NO_RST;
	iwch_cxgb3_ofld_send(dev, skb);
}

static int send_halfclose(struct iwch_ep *ep, gfp_t gfp)
{
	struct cpl_close_con_req *req;
	struct sk_buff *skb;

	PDBG("%s ep %p\n", __func__, ep);
	skb = get_skb(NULL, sizeof(*req), gfp);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
		return -ENOMEM;
	}
	skb->priority = CPL_PRIORITY_DATA;
	set_arp_failure_handler(skb, arp_failure_discard);
	req = (struct cpl_close_con_req *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_CLOSE_CON));
	req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, ep->hwtid));
	return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
}

static int send_abort(struct iwch_ep *ep, struct sk_buff *skb, gfp_t gfp)
{
	struct cpl_abort_req *req;

	PDBG("%s ep %p\n", __func__, ep);
	skb = get_skb(skb, sizeof(*req), gfp);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
		       __func__);
		return -ENOMEM;
	}
	skb->priority = CPL_PRIORITY_DATA;
	set_arp_failure_handler(skb, abort_arp_failure);
	req = (struct cpl_abort_req *) skb_put(skb, sizeof(*req));
	memset(req, 0, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ));
	req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));
	req->cmd = CPL_ABORT_SEND_RST;
	return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
}

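/*
 * Build and send the CPL_ACT_OPEN_REQ that starts an active connection:
 * pick an MTU index and window scale, encode the TCP settings the hardware
 * should use in opt0/opt2, and hand the request to the L2T entry for this
 * route.  ARP failures are reported through act_open_req_arp_failure().
 */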
static int send_connect(struct iwch_ep *ep)
{
	struct cpl_act_open_req *req;
	struct sk_buff *skb;
	u32 opt0h, opt0l, opt2;
	unsigned int mtu_idx;
	int wscale;

	PDBG("%s ep %p\n", __func__, ep);

	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
		       __func__);
		return -ENOMEM;
	}
	mtu_idx = find_best_mtu(T3C_DATA(ep->com.tdev), dst_mtu(ep->dst));
	wscale = compute_wscale(rcv_win);
	opt0h = V_NAGLE(0) |
	    V_NO_CONG(nocong) |
	    V_KEEP_ALIVE(1) |
	    F_TCAM_BYPASS |
	    V_WND_SCALE(wscale) |
	    V_MSS_IDX(mtu_idx) |
	    V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx);
	opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10);
	opt2 = F_RX_COALESCE_VALID | V_RX_COALESCE(0) | V_FLAVORS_VALID(1) |
	       V_CONG_CONTROL_FLAVOR(cong_flavor);
	skb->priority = CPL_PRIORITY_SETUP;
	set_arp_failure_handler(skb, act_open_req_arp_failure);

	req = (struct cpl_act_open_req *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, ep->atid));
	req->local_port = ep->com.local_addr.sin_port;
	req->peer_port = ep->com.remote_addr.sin_port;
	req->local_ip = ep->com.local_addr.sin_addr.s_addr;
	req->peer_ip = ep->com.remote_addr.sin_addr.s_addr;
	req->opt0h = htonl(opt0h);
	req->opt0l = htonl(opt0l);
	req->params = 0;
	req->opt2 = htonl(opt2);
	return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
}

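/*
 * Send the MPA-start request as streaming TX data: the MPA header plus any
 * private data is laid out in the skb, a TX_DATA work request is pushed in
 * front of it, and the skb is stashed in ep->mpa_skb until the hardware
 * acks the transmit (see tx_ack()).  On success the endpoint moves to
 * MPA_REQ_SENT under the protection of the endpoint timer.
 */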
static void send_mpa_req(struct iwch_ep *ep, struct sk_buff *skb)
{
	int mpalen;
	struct tx_data_wr *req;
	struct mpa_message *mpa;
	int len;

	PDBG("%s ep %p pd_len %d\n", __func__, ep, ep->plen);

	BUG_ON(skb_cloned(skb));

	mpalen = sizeof(*mpa) + ep->plen;
	if (skb->data + mpalen + sizeof(*req) > skb_end_pointer(skb)) {
		kfree_skb(skb);
		skb = alloc_skb(mpalen + sizeof(*req), GFP_KERNEL);
		if (!skb) {
			connect_reply_upcall(ep, -ENOMEM);
			return;
		}
	}
	skb_trim(skb, 0);
	skb_reserve(skb, sizeof(*req));
	skb_put(skb, mpalen);
	skb->priority = CPL_PRIORITY_DATA;
	mpa = (struct mpa_message *) skb->data;
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
	mpa->flags = (crc_enabled ? MPA_CRC : 0) |
		     (markers_enabled ? MPA_MARKERS : 0);
	mpa->private_data_size = htons(ep->plen);
	mpa->revision = mpa_rev;

	if (ep->plen)
		memcpy(mpa->private_data, ep->mpa_pkt + sizeof(*mpa), ep->plen);

	/*
	 * Reference the mpa skb.  This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function tx_ack() will deref it.
	 */
	skb_get(skb);
	set_arp_failure_handler(skb, arp_failure_discard);
	skb_reset_transport_header(skb);
	len = skb->len;
	req = (struct tx_data_wr *) skb_push(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)|F_WR_COMPL);
	req->wr_lo = htonl(V_WR_TID(ep->hwtid));
	req->len = htonl(len);
	req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
			   V_TX_SNDBUF(snd_win>>15));
	req->flags = htonl(F_TX_INIT);
	req->sndseq = htonl(ep->snd_seq);
	BUG_ON(ep->mpa_skb);
	ep->mpa_skb = skb;
	iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
	start_ep_timer(ep);
	state_set(&ep->com, MPA_REQ_SENT);
	return;
}

static int send_mpa_reject(struct iwch_ep *ep, const void *pdata, u8 plen)
{
	int mpalen;
	struct tx_data_wr *req;
	struct mpa_message *mpa;
	struct sk_buff *skb;

	PDBG("%s ep %p plen %d\n", __func__, ep, plen);

	mpalen = sizeof(*mpa) + plen;

	skb = get_skb(NULL, mpalen + sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
		return -ENOMEM;
	}
	skb_reserve(skb, sizeof(*req));
	mpa = (struct mpa_message *) skb_put(skb, mpalen);
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = MPA_REJECT;
	mpa->revision = mpa_rev;
	mpa->private_data_size = htons(plen);
	if (plen)
		memcpy(mpa->private_data, pdata, plen);

	/*
	 * Reference the mpa skb again.  This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function tx_ack() will deref it.
	 */
	skb_get(skb);
	skb->priority = CPL_PRIORITY_DATA;
	set_arp_failure_handler(skb, arp_failure_discard);
	skb_reset_transport_header(skb);
	req = (struct tx_data_wr *) skb_push(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)|F_WR_COMPL);
	req->wr_lo = htonl(V_WR_TID(ep->hwtid));
	req->len = htonl(mpalen);
	req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
			   V_TX_SNDBUF(snd_win>>15));
	req->flags = htonl(F_TX_INIT);
	req->sndseq = htonl(ep->snd_seq);
	BUG_ON(ep->mpa_skb);
	ep->mpa_skb = skb;
	return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
}

static int send_mpa_reply(struct iwch_ep *ep, const void *pdata, u8 plen)
{
	int mpalen;
	struct tx_data_wr *req;
	struct mpa_message *mpa;
	int len;
	struct sk_buff *skb;

	PDBG("%s ep %p plen %d\n", __func__, ep, plen);

	mpalen = sizeof(*mpa) + plen;

	skb = get_skb(NULL, mpalen + sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
		return -ENOMEM;
	}
	skb->priority = CPL_PRIORITY_DATA;
	skb_reserve(skb, sizeof(*req));
	mpa = (struct mpa_message *) skb_put(skb, mpalen);
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
		     (markers_enabled ? MPA_MARKERS : 0);
	mpa->revision = mpa_rev;
	mpa->private_data_size = htons(plen);
	if (plen)
		memcpy(mpa->private_data, pdata, plen);

	/*
	 * Reference the mpa skb.  This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function tx_ack() will deref it.
	 */
	skb_get(skb);
	set_arp_failure_handler(skb, arp_failure_discard);
	skb_reset_transport_header(skb);
	len = skb->len;
	req = (struct tx_data_wr *) skb_push(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)|F_WR_COMPL);
	req->wr_lo = htonl(V_WR_TID(ep->hwtid));
	req->len = htonl(len);
	req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
			   V_TX_SNDBUF(snd_win>>15));
	req->flags = htonl(F_TX_INIT);
	req->sndseq = htonl(ep->snd_seq);
	ep->mpa_skb = skb;
	state_set(&ep->com, MPA_REP_SENT);
	return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
}

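/*
 * CPL_ACT_ESTABLISH: the active-open TCP connection is up.  Record the
 * hardware TID and initial sequence numbers, trade the provisional atid
 * for the hwtid, and kick off MPA negotiation by sending the MPA request.
 */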
static int act_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct cpl_act_establish *req = cplhdr(skb);
	unsigned int tid = GET_TID(req);

	PDBG("%s ep %p tid %d\n", __func__, ep, tid);

	dst_confirm(ep->dst);

	/* setup the hwtid for this connection */
	ep->hwtid = tid;
	cxgb3_insert_tid(ep->com.tdev, &t3c_client, ep, tid);

	ep->snd_seq = ntohl(req->snd_isn);
	ep->rcv_seq = ntohl(req->rcv_isn);

	set_emss(ep, ntohs(req->tcp_opt));

	/* dealloc the atid */
	cxgb3_free_atid(ep->com.tdev, ep->atid);

	/* start MPA negotiation */
	send_mpa_req(ep, skb);

	return 0;
}

static void abort_connection(struct iwch_ep *ep, struct sk_buff *skb, gfp_t gfp)
{
	PDBG("%s ep %p\n", __func__, ep);
	state_set(&ep->com, ABORTING);
	send_abort(ep, skb, gfp);
}

static void close_complete_upcall(struct iwch_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p\n", __func__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	if (ep->com.cm_id) {
		PDBG("close complete delivered ep %p cm_id %p tid %d\n",
		     ep, ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
	}
}

static void peer_close_upcall(struct iwch_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p\n", __func__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_DISCONNECT;
	if (ep->com.cm_id) {
		PDBG("peer close delivered ep %p cm_id %p tid %d\n",
		     ep, ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
	}
}

static void peer_abort_upcall(struct iwch_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p\n", __func__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	event.status = -ECONNRESET;
	if (ep->com.cm_id) {
		PDBG("abort delivered ep %p cm_id %p tid %d\n", ep,
		     ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
	}
}

static void connect_reply_upcall(struct iwch_ep *ep, int status)
{
	struct iw_cm_event event;

	PDBG("%s ep %p status %d\n", __func__, ep, status);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REPLY;
	event.status = status;
	memcpy(&event.local_addr, &ep->com.local_addr,
	       sizeof(ep->com.local_addr));
	memcpy(&event.remote_addr, &ep->com.remote_addr,
	       sizeof(ep->com.remote_addr));

	if ((status == 0) || (status == -ECONNREFUSED)) {
		event.private_data_len = ep->plen;
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
	}
	if (ep->com.cm_id) {
		PDBG("%s ep %p tid %d status %d\n", __func__, ep,
		     ep->hwtid, status);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
	}
	if (status < 0) {
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
	}
}

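/*
 * Deliver IW_CM_EVENT_CONNECT_REQUEST to the listening endpoint's cm_id.
 * The child endpoint takes an extra reference while the ULP decides whether
 * to accept or reject; the parent reference taken in pass_accept_req() is
 * dropped here.
 */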
static void connect_request_upcall(struct iwch_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REQUEST;
	memcpy(&event.local_addr, &ep->com.local_addr,
	       sizeof(ep->com.local_addr));
	memcpy(&event.remote_addr, &ep->com.remote_addr,
	       sizeof(ep->com.remote_addr));
	event.private_data_len = ep->plen;
	event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
	event.provider_data = ep;
	/*
	 * Until ird/ord negotiation via MPAv2 support is added, send max
	 * supported values
	 */
	event.ird = event.ord = 8;
	if (state_read(&ep->parent_ep->com) != DEAD) {
		get_ep(&ep->com);
		ep->parent_ep->com.cm_id->event_handler(
						ep->parent_ep->com.cm_id,
						&event);
	}
	put_ep(&ep->parent_ep->com);
	ep->parent_ep = NULL;
}

static void established_upcall(struct iwch_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p\n", __func__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_ESTABLISHED;
	/*
	 * Until ird/ord negotiation via MPAv2 support is added, send max
	 * supported values
	 */
	event.ird = event.ord = 8;
	if (ep->com.cm_id) {
		PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
	}
}

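/*
 * Return RX credits to the hardware so it can reopen the receive window,
 * forcing an ACK.  The MPA exchange consumes so little data that the
 * driver simply returns whatever it just consumed (see rx_data()).
 */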
static int update_rx_credits(struct iwch_ep *ep, u32 credits)
{
	struct cpl_rx_data_ack *req;
	struct sk_buff *skb;

	PDBG("%s ep %p credits %u\n", __func__, ep, credits);
	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "update_rx_credits - cannot alloc skb!\n");
		return 0;
	}

	req = (struct cpl_rx_data_ack *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RX_DATA_ACK, ep->hwtid));
	req->credit_dack = htonl(V_RX_CREDITS(credits) | V_RX_FORCE_ACK(1));
	skb->priority = CPL_PRIORITY_ACK;
	iwch_cxgb3_ofld_send(ep->com.tdev, skb);
	return credits;
}

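/*
 * Process an incoming chunk of the peer's MPA-start reply.  Data is
 * accumulated in ep->mpa_pkt across calls until the full header (and any
 * private data) has arrived; the header is then validated, the negotiated
 * attributes are recorded, and the QP is moved to RTS before the
 * CONNECT_REPLY upcall.  Any validation failure aborts the connection.
 */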
static void process_mpa_reply(struct iwch_ep *ep, struct sk_buff *skb)
{
	struct mpa_message *mpa;
	u16 plen;
	struct iwch_qp_attributes attrs;
	enum iwch_qp_attr_mask mask;
	int err;

	PDBG("%s ep %p\n", __func__, ep);

	/*
	 * Stop mpa timer.  If it expired, then the state has
	 * changed and we bail since ep_timeout already aborted
	 * the connection.
	 */
	stop_ep_timer(ep);
	if (state_read(&ep->com) != MPA_REQ_SENT)
		return;

	/*
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	 */
	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
		err = -EINVAL;
		goto err;
	}

	/*
	 * Copy the new data into our accumulation buffer.
	 */
	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
				  skb->len);
	ep->mpa_pkt_len += skb->len;

	/*
	 * If we don't even have the mpa message, then bail.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return;
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/* Validate MPA header. */
	if (mpa->revision != mpa_rev) {
		err = -EPROTO;
		goto err;
	}
	if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
		err = -EPROTO;
		goto err;
	}

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA) {
		err = -EPROTO;
		goto err;
	}

	/*
	 * If plen does not account for pkt size
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
		err = -EPROTO;
		goto err;
	}

	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 * We'll continue processing when more data arrives.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return;

	if (mpa->flags & MPA_REJECT) {
		err = -ECONNREFUSED;
		goto err;
	}

	/*
	 * If we get here we have accumulated the entire mpa
	 * start reply message including private data. And
	 * the MPA header is valid.
	 */
	state_set(&ep->com, FPDU_MODE);
	ep->mpa_attr.initiator = 1;
	/* CRC is on if either the peer offered it or it is enabled locally */
	ep->mpa_attr.crc_enabled = ((mpa->flags & MPA_CRC) | crc_enabled) ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa_rev;
	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
	     "xmit_marker_enabled=%d, version=%d\n", __func__,
	     ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);

	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = IWCH_QP_STATE_RTS;

	mask = IWCH_QP_ATTR_NEXT_STATE |
	       IWCH_QP_ATTR_LLP_STREAM_HANDLE | IWCH_QP_ATTR_MPA_ATTR |
	       IWCH_QP_ATTR_MAX_IRD | IWCH_QP_ATTR_MAX_ORD;

	/* bind QP and TID with INIT_WR */
	err = iwch_modify_qp(ep->com.qp->rhp,
			     ep->com.qp, mask, &attrs, 1);
	if (err)
		goto err;

	if (peer2peer && iwch_rqes_posted(ep->com.qp) == 0) {
		iwch_post_zb_read(ep);
	}

	goto out;
err:
	abort_connection(ep, skb, GFP_KERNEL);
out:
	connect_reply_upcall(ep, err);
	return;
}

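/*
 * Passive-side twin of process_mpa_reply(): accumulate and validate the
 * peer's MPA-start request, record the negotiated attributes, and deliver
 * the connection request to the listening ULP.  Errors abort the
 * connection instead of raising a CONNECT_REPLY.
 */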
static void process_mpa_request(struct iwch_ep *ep, struct sk_buff *skb)
{
	struct mpa_message *mpa;
	u16 plen;

	PDBG("%s ep %p\n", __func__, ep);

	/*
	 * Stop mpa timer.  If it expired, then the state has
	 * changed and we bail since ep_timeout already aborted
	 * the connection.
	 */
	stop_ep_timer(ep);
	if (state_read(&ep->com) != MPA_REQ_WAIT)
		return;

	/*
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	 */
	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);

	/*
	 * Copy the new data into our accumulation buffer.
	 */
	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
				  skb->len);
	ep->mpa_pkt_len += skb->len;

	/*
	 * If we don't even have the mpa message, then bail.
	 * We'll continue processing when more data arrives.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return;
	PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/*
	 * Validate MPA Header.
	 */
	if (mpa->revision != mpa_rev) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	/*
	 * If plen does not account for pkt size
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}
	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return;

	/*
	 * If we get here we have accumulated the entire MPA
	 * start request message including private data.
	 */
	ep->mpa_attr.initiator = 0;
	/* CRC is on if either the peer requested it or it is enabled locally */
	ep->mpa_attr.crc_enabled = ((mpa->flags & MPA_CRC) | crc_enabled) ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa_rev;
	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
	     "xmit_marker_enabled=%d, version=%d\n", __func__,
	     ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);

	state_set(&ep->com, MPA_REQ_RCVD);

	/* drive upcall */
	connect_request_upcall(ep);
	return;
}

static int rx_data(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct cpl_rx_data *hdr = cplhdr(skb);
	unsigned int dlen = ntohs(hdr->len);

	PDBG("%s ep %p dlen %u\n", __func__, ep, dlen);

	skb_pull(skb, sizeof(*hdr));
	skb_trim(skb, dlen);

	ep->rcv_seq += dlen;
	BUG_ON(ep->rcv_seq != (ntohl(hdr->seq) + dlen));

	switch (state_read(&ep->com)) {
	case MPA_REQ_SENT:
		process_mpa_reply(ep, skb);
		break;
	case MPA_REQ_WAIT:
		process_mpa_request(ep, skb);
		break;
	case MPA_REP_SENT:
		break;
	default:
		printk(KERN_ERR MOD "%s Unexpected streaming data."
		       " ep %p state %d tid %d\n",
		       __func__, ep, state_read(&ep->com), ep->hwtid);

		/*
		 * The ep will timeout and inform the ULP of the failure.
		 * See ep_timeout().
		 */
		break;
	}

	/* update RX credits */
	update_rx_credits(ep, dlen);

	return CPL_RET_BUF_DONE;
}

/*
 * Upcall from the adapter indicating data has been transmitted.
 * For us it's just the single MPA request or reply.  We can now free
 * the skb holding the mpa message.
 */
static int tx_ack(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct cpl_wr_ack *hdr = cplhdr(skb);
	unsigned int credits = ntohs(hdr->credits);
	unsigned long flags;
	int post_zb = 0;

	PDBG("%s ep %p credits %u\n", __func__, ep, credits);

	if (credits == 0) {
		PDBG("%s 0 credit ack ep %p state %u\n",
		     __func__, ep, state_read(&ep->com));
		return CPL_RET_BUF_DONE;
	}

	spin_lock_irqsave(&ep->com.lock, flags);
	BUG_ON(credits != 1);
	dst_confirm(ep->dst);
	if (!ep->mpa_skb) {
		PDBG("%s rdma_init wr_ack ep %p state %u\n",
		     __func__, ep, ep->com.state);
		if (ep->mpa_attr.initiator) {
			PDBG("%s initiator ep %p state %u\n",
			     __func__, ep, ep->com.state);
			if (peer2peer && ep->com.state == FPDU_MODE)
				post_zb = 1;
		} else {
			PDBG("%s responder ep %p state %u\n",
			     __func__, ep, ep->com.state);
			if (ep->com.state == MPA_REQ_RCVD) {
				ep->com.rpl_done = 1;
				wake_up(&ep->com.waitq);
			}
		}
	} else {
		PDBG("%s lsm ack ep %p state %u freeing skb\n",
		     __func__, ep, ep->com.state);
		kfree_skb(ep->mpa_skb);
		ep->mpa_skb = NULL;
	}
	spin_unlock_irqrestore(&ep->com.lock, flags);
	if (post_zb)
		iwch_post_zb_read(ep);
	return CPL_RET_BUF_DONE;
}

static int abort_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	unsigned long flags;
	int release = 0;

	PDBG("%s ep %p\n", __func__, ep);
	BUG_ON(!ep);

	/*
	 * We get 2 abort replies from the HW.  The first one must
	 * be ignored except for scribbling that we need one more.
	 */
	if (!test_and_set_bit(ABORT_REQ_IN_PROGRESS, &ep->com.flags)) {
		return CPL_RET_BUF_DONE;
	}

	spin_lock_irqsave(&ep->com.lock, flags);
	switch (ep->com.state) {
	case ABORTING:
		close_complete_upcall(ep);
		__state_set(&ep->com, DEAD);
		release = 1;
		break;
	default:
		printk(KERN_ERR "%s ep %p state %d\n",
		       __func__, ep, ep->com.state);
		break;
	}
	spin_unlock_irqrestore(&ep->com.lock, flags);

	if (release)
		release_ep_resources(ep);
	return CPL_RET_BUF_DONE;
}

/*
 * Return whether a failed active open has allocated a TID
 */
static inline int act_open_has_tid(int status)
{
	return status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST &&
	       status != CPL_ERR_ARP_MISS;
}

static int act_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct cpl_act_open_rpl *rpl = cplhdr(skb);

	PDBG("%s ep %p status %u errno %d\n", __func__, ep, rpl->status,
	     status2errno(rpl->status));
	connect_reply_upcall(ep, status2errno(rpl->status));
	state_set(&ep->com, DEAD);
	if (ep->com.tdev->type != T3A && act_open_has_tid(rpl->status))
		release_tid(ep->com.tdev, GET_TID(rpl), NULL);
	cxgb3_free_atid(ep->com.tdev, ep->atid);
	dst_release(ep->dst);
	l2t_release(ep->com.tdev, ep->l2t);
	put_ep(&ep->com);
	return CPL_RET_BUF_DONE;
}

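/*
 * Listener plumbing: listen_start() sends a CPL_PASS_OPEN_REQ for the
 * server TID, and listen_stop() tears it down with CPL_CLOSE_LISTSRV_REQ;
 * the matching *_rpl handlers record the status and wake the thread
 * sleeping in the iw_cm create/destroy-listen path.
 */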
static int listen_start(struct iwch_listen_ep *ep)
{
	struct sk_buff *skb;
	struct cpl_pass_open_req *req;

	PDBG("%s ep %p\n", __func__, ep);
	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "t3c_listen_start failed to alloc skb!\n");
		return -ENOMEM;
	}

	req = (struct cpl_pass_open_req *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, ep->stid));
	req->local_port = ep->com.local_addr.sin_port;
	req->local_ip = ep->com.local_addr.sin_addr.s_addr;
	req->peer_port = 0;
	req->peer_ip = 0;
	req->peer_netmask = 0;
	req->opt0h = htonl(F_DELACK | F_TCAM_BYPASS);
	req->opt0l = htonl(V_RCV_BUFSIZ(rcv_win>>10));
	req->opt1 = htonl(V_CONN_POLICY(CPL_CONN_POLICY_ASK));

	skb->priority = 1;
	return iwch_cxgb3_ofld_send(ep->com.tdev, skb);
}

static int pass_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_listen_ep *ep = ctx;
	struct cpl_pass_open_rpl *rpl = cplhdr(skb);

	PDBG("%s ep %p status %d error %d\n", __func__, ep,
	     rpl->status, status2errno(rpl->status));
	ep->com.rpl_err = status2errno(rpl->status);
	ep->com.rpl_done = 1;
	wake_up(&ep->com.waitq);

	return CPL_RET_BUF_DONE;
}

static int listen_stop(struct iwch_listen_ep *ep)
{
	struct sk_buff *skb;
	struct cpl_close_listserv_req *req;

	PDBG("%s ep %p\n", __func__, ep);
	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
		return -ENOMEM;
	}
	req = (struct cpl_close_listserv_req *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	req->cpu_idx = 0;
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, ep->stid));
	skb->priority = 1;
	return iwch_cxgb3_ofld_send(ep->com.tdev, skb);
}

static int close_listsrv_rpl(struct t3cdev *tdev, struct sk_buff *skb,
			     void *ctx)
{
	struct iwch_listen_ep *ep = ctx;
	struct cpl_close_listserv_rpl *rpl = cplhdr(skb);

	PDBG("%s ep %p\n", __func__, ep);
	ep->com.rpl_err = status2errno(rpl->status);
	ep->com.rpl_done = 1;
	wake_up(&ep->com.waitq);
	return CPL_RET_BUF_DONE;
}

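/*
 * accept_cr() reuses the incoming request skb to send a
 * CPL_PASS_ACCEPT_RPL accepting the half-open connection with the same
 * opt0/opt2 settings an active open would use.  reject_cr() releases the
 * hardware TID, except on T3A parts where it instead answers with an
 * explicit CPL_PASS_OPEN_REJECT reply.
 */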
static void accept_cr(struct iwch_ep *ep, __be32 peer_ip, struct sk_buff *skb)
{
	struct cpl_pass_accept_rpl *rpl;
	unsigned int mtu_idx;
	u32 opt0h, opt0l, opt2;
	int wscale;

	PDBG("%s ep %p\n", __func__, ep);
	BUG_ON(skb_cloned(skb));
	skb_trim(skb, sizeof(*rpl));
	skb_get(skb);
	mtu_idx = find_best_mtu(T3C_DATA(ep->com.tdev), dst_mtu(ep->dst));
	wscale = compute_wscale(rcv_win);
	opt0h = V_NAGLE(0) |
	    V_NO_CONG(nocong) |
	    V_KEEP_ALIVE(1) |
	    F_TCAM_BYPASS |
	    V_WND_SCALE(wscale) |
	    V_MSS_IDX(mtu_idx) |
	    V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx);
	opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10);
	opt2 = F_RX_COALESCE_VALID | V_RX_COALESCE(0) | V_FLAVORS_VALID(1) |
	       V_CONG_CONTROL_FLAVOR(cong_flavor);

	rpl = cplhdr(skb);
	rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL, ep->hwtid));
	rpl->peer_ip = peer_ip;
	rpl->opt0h = htonl(opt0h);
	rpl->opt0l_status = htonl(opt0l | CPL_PASS_OPEN_ACCEPT);
	rpl->opt2 = htonl(opt2);
	rpl->rsvd = rpl->opt2;	/* workaround for HW bug */
	skb->priority = CPL_PRIORITY_SETUP;
	iwch_l2t_send(ep->com.tdev, skb, ep->l2t);

	return;
}

static void reject_cr(struct t3cdev *tdev, u32 hwtid, __be32 peer_ip,
		      struct sk_buff *skb)
{
	PDBG("%s t3cdev %p tid %u peer_ip %x\n", __func__, tdev, hwtid,
	     peer_ip);
	BUG_ON(skb_cloned(skb));
	skb_trim(skb, sizeof(struct cpl_tid_release));
	skb_get(skb);

	if (tdev->type != T3A)
		release_tid(tdev, hwtid, skb);
	else {
		struct cpl_pass_accept_rpl *rpl;

		rpl = cplhdr(skb);
		skb->priority = CPL_PRIORITY_SETUP;
		rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
						      hwtid));
		rpl->peer_ip = peer_ip;
		rpl->opt0h = htonl(F_TCAM_BYPASS);
		rpl->opt0l_status = htonl(CPL_PASS_OPEN_REJECT);
		rpl->opt2 = 0;
		rpl->rsvd = rpl->opt2;
		iwch_cxgb3_ofld_send(tdev, skb);
	}
}

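/*
 * CPL_PASS_ACCEPT_REQ: a SYN matched our listening server.  Resolve the
 * ingress netdev from the destination MAC, find a route and an L2T entry
 * back to the peer, allocate and initialize a child endpoint, and accept
 * the connection; any failure along the way rejects it instead.
 */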
static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *child_ep, *parent_ep = ctx;
	struct cpl_pass_accept_req *req = cplhdr(skb);
	unsigned int hwtid = GET_TID(req);
	struct dst_entry *dst;
	struct l2t_entry *l2t;
	struct rtable *rt;
	struct iff_mac tim;

	PDBG("%s parent ep %p tid %u\n", __func__, parent_ep, hwtid);

	if (state_read(&parent_ep->com) != LISTEN) {
		printk(KERN_ERR "%s - listening ep not in LISTEN\n",
		       __func__);
		goto reject;
	}

	/*
	 * Find the netdev for this connection request.
	 */
	tim.mac_addr = req->dst_mac;
	tim.vlan_tag = ntohs(req->vlan_tag);
	if (tdev->ctl(tdev, GET_IFF_FROM_MAC, &tim) < 0 || !tim.dev) {
		printk(KERN_ERR "%s bad dst mac %pM\n",
		       __func__, req->dst_mac);
		goto reject;
	}

	/* Find output route */
	rt = find_route(tdev,
			req->local_ip,
			req->peer_ip,
			req->local_port,
			req->peer_port, G_PASS_OPEN_TOS(ntohl(req->tos_tid)));
	if (!rt) {
		printk(KERN_ERR MOD "%s - failed to find dst entry!\n",
		       __func__);
		goto reject;
	}
	dst = &rt->dst;
	l2t = t3_l2t_get(tdev, dst, NULL, &req->peer_ip);
	if (!l2t) {
		printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
		       __func__);
		dst_release(dst);
		goto reject;
	}
	child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
	if (!child_ep) {
		printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
		       __func__);
		l2t_release(tdev, l2t);
		dst_release(dst);
		goto reject;
	}
	state_set(&child_ep->com, CONNECTING);
	child_ep->com.tdev = tdev;
	child_ep->com.cm_id = NULL;
	child_ep->com.local_addr.sin_family = AF_INET;
	child_ep->com.local_addr.sin_port = req->local_port;
	child_ep->com.local_addr.sin_addr.s_addr = req->local_ip;
	child_ep->com.remote_addr.sin_family = AF_INET;
	child_ep->com.remote_addr.sin_port = req->peer_port;
	child_ep->com.remote_addr.sin_addr.s_addr = req->peer_ip;
	get_ep(&parent_ep->com);
	child_ep->parent_ep = parent_ep;
	child_ep->tos = G_PASS_OPEN_TOS(ntohl(req->tos_tid));
	child_ep->l2t = l2t;
	child_ep->dst = dst;
	child_ep->hwtid = hwtid;
	init_timer(&child_ep->timer);
	cxgb3_insert_tid(tdev, &t3c_client, child_ep, hwtid);
	accept_cr(child_ep, req->peer_ip, skb);
	goto out;
reject:
	reject_cr(tdev, hwtid, req->peer_ip, skb);
out:
	return CPL_RET_BUF_DONE;
}

static int pass_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct cpl_pass_establish *req = cplhdr(skb);

	PDBG("%s ep %p\n", __func__, ep);
	ep->snd_seq = ntohl(req->snd_isn);
	ep->rcv_seq = ntohl(req->rcv_isn);

	set_emss(ep, ntohs(req->tcp_opt));

	dst_confirm(ep->dst);
	state_set(&ep->com, MPA_REQ_WAIT);
	start_ep_timer(ep);

	return CPL_RET_BUF_DONE;
}

static int peer_close(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct iwch_qp_attributes attrs;
	unsigned long flags;
	int disconnect = 1;
	int release = 0;

	PDBG("%s ep %p\n", __func__, ep);
	dst_confirm(ep->dst);

	spin_lock_irqsave(&ep->com.lock, flags);
	switch (ep->com.state) {
	case MPA_REQ_WAIT:
		__state_set(&ep->com, CLOSING);
		break;
	case MPA_REQ_SENT:
		__state_set(&ep->com, CLOSING);
		connect_reply_upcall(ep, -ECONNRESET);
		break;
	case MPA_REQ_RCVD:

		/*
		 * We're gonna mark this puppy DEAD, but keep
		 * the reference on it until the ULP accepts or
		 * rejects the CR. Also wake up anyone waiting
		 * in rdma connection migration (see iwch_accept_cr()).
		 */
		__state_set(&ep->com, CLOSING);
		ep->com.rpl_done = 1;
		ep->com.rpl_err = -ECONNRESET;
		PDBG("waking up ep %p\n", ep);
		wake_up(&ep->com.waitq);
		break;
	case MPA_REP_SENT:
		__state_set(&ep->com, CLOSING);
		ep->com.rpl_done = 1;
		ep->com.rpl_err = -ECONNRESET;
		PDBG("waking up ep %p\n", ep);
		wake_up(&ep->com.waitq);
		break;
	case FPDU_MODE:
		start_ep_timer(ep);
		__state_set(&ep->com, CLOSING);
		attrs.next_state = IWCH_QP_STATE_CLOSING;
		iwch_modify_qp(ep->com.qp->rhp, ep->com.qp,
			       IWCH_QP_ATTR_NEXT_STATE, &attrs, 1);
		peer_close_upcall(ep);
		break;
	case ABORTING:
		disconnect = 0;
		break;
	case CLOSING:
		__state_set(&ep->com, MORIBUND);
		disconnect = 0;
		break;
	case MORIBUND:
		stop_ep_timer(ep);
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = IWCH_QP_STATE_IDLE;
			iwch_modify_qp(ep->com.qp->rhp, ep->com.qp,
				       IWCH_QP_ATTR_NEXT_STATE, &attrs, 1);
		}
		close_complete_upcall(ep);
		__state_set(&ep->com, DEAD);
		release = 1;
		disconnect = 0;
		break;
	case DEAD:
		disconnect = 0;
		break;
	default:
		BUG_ON(1);
	}
	spin_unlock_irqrestore(&ep->com.lock, flags);
	if (disconnect)
		iwch_ep_disconnect(ep, 0, GFP_KERNEL);
	if (release)
		release_ep_resources(ep);
	return CPL_RET_BUF_DONE;
}

/*
 * Returns whether an ABORT_REQ_RSS message is a negative advice.
 */
static int is_neg_adv_abort(unsigned int status)
{
	return status == CPL_ERR_RTX_NEG_ADVICE ||
	       status == CPL_ERR_PERSIST_NEG_ADVICE;
}

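/*
 * CPL_ABORT_REQ_RSS: the peer (or the hardware) aborted the connection.
 * Negative advice is ignored apart from nudging the L2T entry.  The
 * hardware sends this message twice; only the second instance drives the
 * state machine, moves the QP to ERROR where needed, raises the
 * appropriate upcall, and answers with a no-RST CPL_ABORT_RPL.
 */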
static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct cpl_abort_req_rss *req = cplhdr(skb);
	struct iwch_ep *ep = ctx;
	struct cpl_abort_rpl *rpl;
	struct sk_buff *rpl_skb;
	struct iwch_qp_attributes attrs;
	int ret;
	int release = 0;
	unsigned long flags;

	if (is_neg_adv_abort(req->status)) {
		PDBG("%s neg_adv_abort ep %p tid %d\n", __func__, ep,
		     ep->hwtid);
		t3_l2t_send_event(ep->com.tdev, ep->l2t);
		return CPL_RET_BUF_DONE;
	}

	/*
	 * We get 2 peer aborts from the HW.  The first one must be
	 * ignored except for noting that a second one is on the way.
	 */
	if (!test_and_set_bit(PEER_ABORT_IN_PROGRESS, &ep->com.flags)) {
		return CPL_RET_BUF_DONE;
	}

	spin_lock_irqsave(&ep->com.lock, flags);
	PDBG("%s ep %p state %u\n", __func__, ep, ep->com.state);
	switch (ep->com.state) {
	case CONNECTING:
		break;
	case MPA_REQ_WAIT:
		stop_ep_timer(ep);
		break;
	case MPA_REQ_SENT:
		stop_ep_timer(ep);
		connect_reply_upcall(ep, -ECONNRESET);
		break;
	case MPA_REP_SENT:
		ep->com.rpl_done = 1;
		ep->com.rpl_err = -ECONNRESET;
		PDBG("waking up ep %p\n", ep);
		wake_up(&ep->com.waitq);
		break;
	case MPA_REQ_RCVD:
		/*
		 * The endpoint will be marked DEAD, but keep the
		 * reference on it until the ULP accepts or rejects
		 * the CR.  Also wake up anyone waiting in rdma
		 * connection migration (see iwch_accept_cr()).
		 */
		ep->com.rpl_done = 1;
		ep->com.rpl_err = -ECONNRESET;
		PDBG("waking up ep %p\n", ep);
		wake_up(&ep->com.waitq);
		break;
	case MORIBUND:
	case CLOSING:
		stop_ep_timer(ep);
		/* fall through */
	case FPDU_MODE:
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = IWCH_QP_STATE_ERROR;
			ret = iwch_modify_qp(ep->com.qp->rhp,
				     ep->com.qp, IWCH_QP_ATTR_NEXT_STATE,
				     &attrs, 1);
			if (ret)
				printk(KERN_ERR MOD
				       "%s - qp <- error failed!\n",
				       __func__);
		}
		peer_abort_upcall(ep);
		break;
	case ABORTING:
		break;
	case DEAD:
		PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
		spin_unlock_irqrestore(&ep->com.lock, flags);
		return CPL_RET_BUF_DONE;
	default:
		BUG_ON(1);
		break;
	}
	dst_confirm(ep->dst);
	if (ep->com.state != ABORTING) {
		__state_set(&ep->com, DEAD);
		release = 1;
	}
	spin_unlock_irqrestore(&ep->com.lock, flags);

	rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
	if (!rpl_skb) {
		printk(KERN_ERR MOD "%s - cannot allocate skb!\n",
		       __func__);
		release = 1;
		goto out;
	}
	rpl_skb->priority = CPL_PRIORITY_DATA;
	rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl));
	rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
	rpl->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
	OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid));
	rpl->cmd = CPL_ABORT_NO_RST;
	iwch_cxgb3_ofld_send(ep->com.tdev, rpl_skb);
out:
	if (release)
		release_ep_resources(ep);
	return CPL_RET_BUF_DONE;
}
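
/*
 * CPL_CLOSE_CON_RPL: our half of the close handshake has completed.
 * In CLOSING we move to MORIBUND and wait for the peer's half; in
 * MORIBUND both halves are done, so idle the QP, send the
 * close-complete upcall, and release the endpoint.
 */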
static int close_con_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct iwch_qp_attributes attrs;
	unsigned long flags;
	int release = 0;

	PDBG("%s ep %p\n", __func__, ep);
	BUG_ON(!ep);

	/* The cm_id may be null if we failed to connect */
	spin_lock_irqsave(&ep->com.lock, flags);
	switch (ep->com.state) {
	case CLOSING:
		__state_set(&ep->com, MORIBUND);
		break;
	case MORIBUND:
		stop_ep_timer(ep);
		if ((ep->com.cm_id) && (ep->com.qp)) {
			attrs.next_state = IWCH_QP_STATE_IDLE;
			iwch_modify_qp(ep->com.qp->rhp,
				       ep->com.qp,
				       IWCH_QP_ATTR_NEXT_STATE,
				       &attrs, 1);
		}
		close_complete_upcall(ep);
		__state_set(&ep->com, DEAD);
		release = 1;
		break;
	case ABORTING:
	case DEAD:
		break;
	default:
		BUG_ON(1);
		break;
	}
	spin_unlock_irqrestore(&ep->com.lock, flags);
	if (release)
		release_ep_resources(ep);
	return CPL_RET_BUF_DONE;
}

/*
 * T3A does 3 things when a TERM is received:
 * 1) send up a CPL_RDMA_TERMINATE message with the TERM packet
 * 2) generate an async event on the QP with the TERMINATE opcode
 * 3) post a TERMINATE opcode cqe into the associated CQ.
 *
 * For (1), we save the message in the qp for the consumer to read later.
 * For (2), we move the QP into TERMINATE, post a QP event and disconnect.
 * For (3), we toss the CQE in cxio_poll_cq().
 *
 * terminate() handles case (1)...
 */
static int terminate(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;

	if (state_read(&ep->com) != FPDU_MODE)
		return CPL_RET_BUF_DONE;

	PDBG("%s ep %p\n", __func__, ep);
	skb_pull(skb, sizeof(struct cpl_rdma_terminate));
	PDBG("%s saving %d bytes of term msg\n", __func__, skb->len);
	skb_copy_from_linear_data(skb, ep->com.qp->attr.terminate_buffer,
				  skb->len);
	ep->com.qp->attr.terminate_msg_len = skb->len;
	ep->com.qp->attr.is_terminate_local = 0;
	return CPL_RET_BUF_DONE;
}
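
/*
 * CPL_RDMA_EC_STATUS: a non-zero status means the graceful close
 * failed in the HW, so move the QP to ERROR and abort the connection.
 */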
static int ec_status(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct cpl_rdma_ec_status *rep = cplhdr(skb);
	struct iwch_ep *ep = ctx;

	PDBG("%s ep %p tid %u status %d\n", __func__, ep, ep->hwtid,
	     rep->status);
	if (rep->status) {
		struct iwch_qp_attributes attrs;

		printk(KERN_ERR MOD "%s BAD CLOSE - Aborting tid %u\n",
		       __func__, ep->hwtid);
		stop_ep_timer(ep);
		attrs.next_state = IWCH_QP_STATE_ERROR;
		iwch_modify_qp(ep->com.qp->rhp,
			       ep->com.qp, IWCH_QP_ATTR_NEXT_STATE,
			       &attrs, 1);
		abort_connection(ep, NULL, GFP_KERNEL);
	}
	return CPL_RET_BUF_DONE;
}
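
/*
 * Endpoint timer expiry: an expected message (MPA request/reply or the
 * close handshake) did not arrive in time, so abort the connection
 * from whatever state we are in, then drop an endpoint reference via
 * put_ep().
 */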
static void ep_timeout(unsigned long arg)
{
	struct iwch_ep *ep = (struct iwch_ep *)arg;
	struct iwch_qp_attributes attrs;
	unsigned long flags;
	int abort = 1;

	spin_lock_irqsave(&ep->com.lock, flags);
	PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
	     ep->com.state);
	switch (ep->com.state) {
	case MPA_REQ_SENT:
		__state_set(&ep->com, ABORTING);
		connect_reply_upcall(ep, -ETIMEDOUT);
		break;
	case MPA_REQ_WAIT:
		__state_set(&ep->com, ABORTING);
		break;
	case CLOSING:
	case MORIBUND:
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = IWCH_QP_STATE_ERROR;
			iwch_modify_qp(ep->com.qp->rhp,
				       ep->com.qp, IWCH_QP_ATTR_NEXT_STATE,
				       &attrs, 1);
		}
		__state_set(&ep->com, ABORTING);
		break;
	default:
		WARN(1, "%s unexpected state ep %p state %u\n",
		     __func__, ep, ep->com.state);
		abort = 0;
	}
	spin_unlock_irqrestore(&ep->com.lock, flags);
	if (abort)
		abort_connection(ep, NULL, GFP_ATOMIC);
	put_ep(&ep->com);
}
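
/*
 * ULP rejects an inbound connection request.  With MPA rev 0 there is
 * no way to convey the reject, so just abort; otherwise send an MPA
 * reject carrying the private data, then start an orderly disconnect.
 * Reports 0 to the ULP regardless of the outcome of those sends.
 */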
int iwch_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
{
	int err;
	struct iwch_ep *ep = to_ep(cm_id);

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	if (state_read(&ep->com) == DEAD) {
		put_ep(&ep->com);
		return -ECONNRESET;
	}
	BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
	if (mpa_rev == 0)
		abort_connection(ep, NULL, GFP_KERNEL);
	else {
		err = send_mpa_reject(ep, pdata, pdata_len);
		err = iwch_ep_disconnect(ep, 0, GFP_KERNEL);
	}
	put_ep(&ep->com);
	return 0;
}
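
/*
 * ULP accepts an inbound connection request: validate the requested
 * IRD/ORD against the device limits, bind the QP to the endpoint and
 * move it to RTS, wait for the wr_ack if RQEs were already posted,
 * then send the MPA reply and enter FPDU_MODE.
 */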
int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	int err;
	struct iwch_qp_attributes attrs;
	enum iwch_qp_attr_mask mask;
	struct iwch_ep *ep = to_ep(cm_id);
	struct iwch_dev *h = to_iwch_dev(cm_id->device);
	struct iwch_qp *qp = get_qhp(h, conn_param->qpn);

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	if (state_read(&ep->com) == DEAD) {
		err = -ECONNRESET;
		goto err;
	}

	BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
	BUG_ON(!qp);

	if ((conn_param->ord > qp->rhp->attr.max_rdma_read_qp_depth) ||
	    (conn_param->ird > qp->rhp->attr.max_rdma_reads_per_qp)) {
		abort_connection(ep, NULL, GFP_KERNEL);
		err = -EINVAL;
		goto err;
	}

	cm_id->add_ref(cm_id);
	ep->com.cm_id = cm_id;
	ep->com.qp = qp;
	ep->ird = conn_param->ird;
	ep->ord = conn_param->ord;

	if (peer2peer && ep->ird == 0)
		ep->ird = 1;

	PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);

	/* bind QP to EP and move to RTS */
	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = IWCH_QP_STATE_RTS;

	/* bind QP and TID with INIT_WR */
	mask = IWCH_QP_ATTR_NEXT_STATE |
	       IWCH_QP_ATTR_LLP_STREAM_HANDLE |
	       IWCH_QP_ATTR_MPA_ATTR |
	       IWCH_QP_ATTR_MAX_IRD |
	       IWCH_QP_ATTR_MAX_ORD;

	err = iwch_modify_qp(ep->com.qp->rhp,
			     ep->com.qp, mask, &attrs, 1);
	if (err)
		goto err1;

	/* if needed, wait for wr_ack */
	if (iwch_rqes_posted(qp)) {
		wait_event(ep->com.waitq, ep->com.rpl_done);
		err = ep->com.rpl_err;
		if (err)
			goto err1;
	}

	err = send_mpa_reply(ep, conn_param->private_data,
			     conn_param->private_data_len);
	if (err)
		goto err1;

	state_set(&ep->com, FPDU_MODE);
	established_upcall(ep);
	put_ep(&ep->com);
	return 0;
err1:
	ep->com.cm_id = NULL;
	ep->com.qp = NULL;
	cm_id->rem_ref(cm_id);
err:
	put_ep(&ep->com);
	return err;
}
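
/*
 * Returns true if the remote address belongs to one of our own local
 * interfaces, i.e. the connection would be a loopback, which
 * iwch_connect() rejects with -ENOSYS.
 */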
static int is_loopback_dst(struct iw_cm_id *cm_id)
{
	struct net_device *dev;
	struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;

	dev = ip_dev_find(&init_net, raddr->sin_addr.s_addr);
	if (!dev)
		return 0;
	dev_put(dev);
	return 1;
}
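
/*
 * Initiate an active open: allocate an endpoint and an active TID,
 * resolve a route and L2T entry for the destination, then send the
 * connect request to the RNIC.  Failures unwind in reverse order via
 * the fail labels.
 */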
int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	struct iwch_dev *h = to_iwch_dev(cm_id->device);
	struct iwch_ep *ep;
	struct rtable *rt;
	int err = 0;
	struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
	struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;

	if (cm_id->m_remote_addr.ss_family != PF_INET) {
		err = -ENOSYS;
		goto out;
	}

	if (is_loopback_dst(cm_id)) {
		err = -ENOSYS;
		goto out;
	}

	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
	if (!ep) {
		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
		err = -ENOMEM;
		goto out;
	}
	init_timer(&ep->timer);
	ep->plen = conn_param->private_data_len;
	if (ep->plen)
		memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
		       conn_param->private_data, ep->plen);
	ep->ird = conn_param->ird;
	ep->ord = conn_param->ord;

	if (peer2peer && ep->ord == 0)
		ep->ord = 1;

	ep->com.tdev = h->rdev.t3cdev_p;

	cm_id->add_ref(cm_id);
	ep->com.cm_id = cm_id;
	ep->com.qp = get_qhp(h, conn_param->qpn);
	BUG_ON(!ep->com.qp);
	PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn,
	     ep->com.qp, cm_id);

	/*
	 * Allocate an active TID to initiate a TCP connection.
	 */
	ep->atid = cxgb3_alloc_atid(h->rdev.t3cdev_p, &t3c_client, ep);
	if (ep->atid == -1) {
		printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);
		err = -ENOMEM;
		goto fail2;
	}

	/* find a route */
	rt = find_route(h->rdev.t3cdev_p, laddr->sin_addr.s_addr,
			raddr->sin_addr.s_addr, laddr->sin_port,
			raddr->sin_port, IPTOS_LOWDELAY);
	if (!rt) {
		printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
		err = -EHOSTUNREACH;
		goto fail3;
	}
	ep->dst = &rt->dst;
	ep->l2t = t3_l2t_get(ep->com.tdev, ep->dst, NULL,
			     &raddr->sin_addr.s_addr);
	if (!ep->l2t) {
		printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
		err = -ENOMEM;
		goto fail4;
	}

	state_set(&ep->com, CONNECTING);
	ep->tos = IPTOS_LOWDELAY;
	memcpy(&ep->com.local_addr, &cm_id->m_local_addr,
	       sizeof(ep->com.local_addr));
	memcpy(&ep->com.remote_addr, &cm_id->m_remote_addr,
	       sizeof(ep->com.remote_addr));

	/* send connect request to rnic */
	err = send_connect(ep);
	if (!err)
		goto out;

	l2t_release(h->rdev.t3cdev_p, ep->l2t);
fail4:
	dst_release(ep->dst);
fail3:
	cxgb3_free_atid(ep->com.tdev, ep->atid);
fail2:
	cm_id->rem_ref(cm_id);
	put_ep(&ep->com);
out:
	return err;
}
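
/*
 * Start listening: allocate a listening endpoint and a server TID,
 * issue the listen-start request, and sleep until pass_open_rpl
 * reports the result via rpl_done/rpl_err.
 */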
int iwch_create_listen(struct iw_cm_id *cm_id, int backlog)
{
	int err = 0;
	struct iwch_dev *h = to_iwch_dev(cm_id->device);
	struct iwch_listen_ep *ep;

	might_sleep();

	if (cm_id->m_local_addr.ss_family != PF_INET) {
		err = -ENOSYS;
		goto fail1;
	}

	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
	if (!ep) {
		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
		err = -ENOMEM;
		goto fail1;
	}
	PDBG("%s ep %p\n", __func__, ep);
	ep->com.tdev = h->rdev.t3cdev_p;
	cm_id->add_ref(cm_id);
	ep->com.cm_id = cm_id;
	ep->backlog = backlog;
	memcpy(&ep->com.local_addr, &cm_id->m_local_addr,
	       sizeof(ep->com.local_addr));

	/*
	 * Allocate a server TID.
	 */
	ep->stid = cxgb3_alloc_stid(h->rdev.t3cdev_p, &t3c_client, ep);
	if (ep->stid == -1) {
		printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__);
		err = -ENOMEM;
		goto fail2;
	}

	state_set(&ep->com, LISTEN);
	err = listen_start(ep);
	if (err)
		goto fail3;

	/* wait for pass_open_rpl */
	wait_event(ep->com.waitq, ep->com.rpl_done);
	err = ep->com.rpl_err;
	if (!err) {
		cm_id->provider_data = ep;
		goto out;
	}
fail3:
	cxgb3_free_stid(ep->com.tdev, ep->stid);
fail2:
	cm_id->rem_ref(cm_id);
	put_ep(&ep->com);
fail1:
out:
	return err;
}
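
/*
 * Tear down a listening endpoint: mark it DEAD, request that the HW
 * stop the listener, wait until the reply sets rpl_done, and free the
 * server TID.
 */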
int iwch_destroy_listen(struct iw_cm_id *cm_id)
{
	int err;
	struct iwch_listen_ep *ep = to_listen_ep(cm_id);

	PDBG("%s ep %p\n", __func__, ep);

	might_sleep();
	state_set(&ep->com, DEAD);
	ep->com.rpl_done = 0;
	ep->com.rpl_err = 0;
	err = listen_stop(ep);
	if (err)
		goto done;
	wait_event(ep->com.waitq, ep->com.rpl_done);
	cxgb3_free_stid(ep->com.tdev, ep->stid);
done:
	err = ep->com.rpl_err;
	cm_id->rem_ref(cm_id);
	put_ep(&ep->com);
	return err;
}
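
/*
 * Begin closing a connection.  If @abrupt, abort with a reset;
 * otherwise send a half-close and let the state machine run the rest
 * of the shutdown.  A fatal RNIC error, or a failure to send the
 * close/abort, short-circuits the handshake and releases the endpoint
 * resources immediately.
 */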
int iwch_ep_disconnect(struct iwch_ep *ep, int abrupt, gfp_t gfp)
{
	int ret = 0;
	unsigned long flags;
	int close = 0;
	int fatal = 0;
	struct t3cdev *tdev;
	struct cxio_rdev *rdev;

	spin_lock_irqsave(&ep->com.lock, flags);

	PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep,
	     states[ep->com.state], abrupt);

	tdev = (struct t3cdev *)ep->com.tdev;
	rdev = (struct cxio_rdev *)tdev->ulp;
	if (cxio_fatal_error(rdev)) {
		fatal = 1;
		close_complete_upcall(ep);
		ep->com.state = DEAD;
	}
	switch (ep->com.state) {
	case MPA_REQ_WAIT:
	case MPA_REQ_SENT:
	case MPA_REQ_RCVD:
	case MPA_REP_SENT:
	case FPDU_MODE:
		close = 1;
		if (abrupt)
			ep->com.state = ABORTING;
		else {
			ep->com.state = CLOSING;
			start_ep_timer(ep);
		}
		set_bit(CLOSE_SENT, &ep->com.flags);
		break;
	case CLOSING:
		if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
			close = 1;
			if (abrupt) {
				stop_ep_timer(ep);
				ep->com.state = ABORTING;
			} else
				ep->com.state = MORIBUND;
		}
		break;
	case MORIBUND:
	case ABORTING:
	case DEAD:
		PDBG("%s ignoring disconnect ep %p state %u\n",
		     __func__, ep, ep->com.state);
		break;
	default:
		BUG();
		break;
	}

	spin_unlock_irqrestore(&ep->com.lock, flags);
	if (close) {
		if (abrupt)
			ret = send_abort(ep, NULL, gfp);
		else
			ret = send_halfclose(ep, gfp);
		if (ret)
			fatal = 1;
	}
	if (fatal)
		release_ep_resources(ep);
	return ret;
}
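
/*
 * Called when the routing of an offloaded connection changes: if @old
 * is the dst entry this endpoint is using, swap in the new dst and
 * L2T entries.  Returns 1 if the endpoint was updated.
 */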
int iwch_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
		     struct l2t_entry *l2t)
{
	struct iwch_ep *ep = ctx;

	if (ep->dst != old)
		return 0;

	PDBG("%s ep %p redirect to dst %p l2t %p\n", __func__, ep, new,
	     l2t);
	dst_hold(new);
	l2t_release(ep->com.tdev, ep->l2t);
	ep->l2t = l2t;
	dst_release(old);
	ep->dst = new;
	return 1;
}

/*
 * All the CM events are handled on a work queue to have a safe context.
 * These are the real handlers that are called from the work queue.
 */
static const cxgb3_cpl_handler_func work_handlers[NUM_CPL_CMDS] = {
	[CPL_ACT_ESTABLISH] = act_establish,
	[CPL_ACT_OPEN_RPL] = act_open_rpl,
	[CPL_RX_DATA] = rx_data,
	[CPL_TX_DMA_ACK] = tx_ack,
	[CPL_ABORT_RPL_RSS] = abort_rpl,
	[CPL_ABORT_RPL] = abort_rpl,
	[CPL_PASS_OPEN_RPL] = pass_open_rpl,
	[CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl,
	[CPL_PASS_ACCEPT_REQ] = pass_accept_req,
	[CPL_PASS_ESTABLISH] = pass_establish,
	[CPL_PEER_CLOSE] = peer_close,
	[CPL_ABORT_REQ_RSS] = peer_abort,
	[CPL_CLOSE_CON_RPL] = close_con_rpl,
	[CPL_RDMA_TERMINATE] = terminate,
	[CPL_RDMA_EC_STATUS] = ec_status,
};
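
/*
 * Drain the rx queue, dispatching each deferred CPL message to its
 * work_handlers entry and dropping the ep reference taken in sched().
 */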
static void process_work(struct work_struct *work)
{
	struct sk_buff *skb = NULL;
	void *ep;
	struct t3cdev *tdev;
	int ret;

	while ((skb = skb_dequeue(&rxq))) {
		ep = *((void **) (skb->cb));
		tdev = *((struct t3cdev **) (skb->cb + sizeof(void *)));
		ret = work_handlers[G_OPCODE(ntohl((__force __be32)skb->csum))](tdev, skb, ep);
		if (ret & CPL_RET_BUF_DONE)
			kfree_skb(skb);

		/*
		 * ep was referenced in sched(), and is freed here.
		 */
		put_ep((struct iwch_ep_common *)ep);
	}
}

static DECLARE_WORK(skb_work, process_work);

static int sched(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep_common *epc = ctx;

	get_ep(epc);

	/*
	 * Save ctx and tdev in the skb->cb area.
	 */
	*((void **) skb->cb) = ctx;
	*((struct t3cdev **) (skb->cb + sizeof(void *))) = tdev;

	/*
	 * Queue the skb and schedule the worker thread.
	 */
	skb_queue_tail(&rxq, skb);
	queue_work(workq, &skb_work);
	return 0;
}
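
/*
 * CPL_SET_TCB_RPL is handled inline rather than deferred to the work
 * queue (see t3c_handlers below); it only logs unexpected status
 * values.
 */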
static int set_tcb_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct cpl_set_tcb_rpl *rpl = cplhdr(skb);

	if (rpl->status != CPL_ERR_NONE) {
		printk(KERN_ERR MOD "Unexpected SET_TCB_RPL status %u "
		       "for tid %u\n", rpl->status, GET_TID(rpl));
	}
	return CPL_RET_BUF_DONE;
}

/*
 * All upcalls from the T3 Core go to sched() to schedule the
 * processing on a work queue.
 */
cxgb3_cpl_handler_func t3c_handlers[NUM_CPL_CMDS] = {
	[CPL_ACT_ESTABLISH] = sched,
	[CPL_ACT_OPEN_RPL] = sched,
	[CPL_RX_DATA] = sched,
	[CPL_TX_DMA_ACK] = sched,
	[CPL_ABORT_RPL_RSS] = sched,
	[CPL_ABORT_RPL] = sched,
	[CPL_PASS_OPEN_RPL] = sched,
	[CPL_CLOSE_LISTSRV_RPL] = sched,
	[CPL_PASS_ACCEPT_REQ] = sched,
	[CPL_PASS_ESTABLISH] = sched,
	[CPL_PEER_CLOSE] = sched,
	[CPL_CLOSE_CON_RPL] = sched,
	[CPL_ABORT_REQ_RSS] = sched,
	[CPL_RDMA_TERMINATE] = sched,
	[CPL_RDMA_EC_STATUS] = sched,
	[CPL_SET_TCB_RPL] = set_tcb_rpl,
};
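
/*
 * Module init/exit for the CM: set up the rx queue and the ordered,
 * memory-reclaim-safe work queue that runs the deferred CPL handlers.
 */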
int __init iwch_cm_init(void)
{
	skb_queue_head_init(&rxq);

	workq = alloc_ordered_workqueue("iw_cxgb3", WQ_MEM_RECLAIM);
	if (!workq)
		return -ENOMEM;

	return 0;
}

void __exit iwch_cm_term(void)
{
	flush_workqueue(workq);
	destroy_workqueue(workq);
}