bnx2fc_els.c 25 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930
  1. /*
  2. * bnx2fc_els.c: Broadcom NetXtreme II Linux FCoE offload driver.
  3. * This file contains helper routines that handle ELS requests
  4. * and responses.
  5. *
  6. * Copyright (c) 2008 - 2011 Broadcom Corporation
  7. *
  8. * This program is free software; you can redistribute it and/or modify
  9. * it under the terms of the GNU General Public License as published by
  10. * the Free Software Foundation.
  11. *
  12. * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
  13. */
  14. #include "bnx2fc.h"
  15. static void bnx2fc_logo_resp(struct fc_seq *seq, struct fc_frame *fp,
  16. void *arg);
  17. static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
  18. void *arg);
  19. static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
  20. void *data, u32 data_len,
  21. void (*cb_func)(struct bnx2fc_els_cb_arg *cb_arg),
  22. struct bnx2fc_els_cb_arg *cb_arg, u32 timer_msec);
  23. static void bnx2fc_rrq_compl(struct bnx2fc_els_cb_arg *cb_arg)
  24. {
  25. struct bnx2fc_cmd *orig_io_req;
  26. struct bnx2fc_cmd *rrq_req;
  27. int rc = 0;
  28. BUG_ON(!cb_arg);
  29. rrq_req = cb_arg->io_req;
  30. orig_io_req = cb_arg->aborted_io_req;
  31. BUG_ON(!orig_io_req);
  32. BNX2FC_ELS_DBG("rrq_compl: orig xid = 0x%x, rrq_xid = 0x%x\n",
  33. orig_io_req->xid, rrq_req->xid);
  34. kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
  35. if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &rrq_req->req_flags)) {
  36. /*
  37. * els req is timed out. cleanup the IO with FW and
  38. * drop the completion. Remove from active_cmd_queue.
  39. */
  40. BNX2FC_ELS_DBG("rrq xid - 0x%x timed out, clean it up\n",
  41. rrq_req->xid);
  42. if (rrq_req->on_active_queue) {
  43. list_del_init(&rrq_req->link);
  44. rrq_req->on_active_queue = 0;
  45. rc = bnx2fc_initiate_cleanup(rrq_req);
  46. BUG_ON(rc);
  47. }
  48. }
  49. kfree(cb_arg);
  50. }
/**
 * bnx2fc_send_rrq - send an RRQ (Reinstate Recovery Qualifier) ELS
 * @aborted_io_req: the aborted IO whose OX_ID/RX_ID qualifier is reclaimed
 *
 * Retries on -ENOMEM every 20ms for up to 10 seconds. On success,
 * ownership of the callback argument and the reference on
 * @aborted_io_req pass to bnx2fc_rrq_compl(); on failure both are
 * released here (the kref_put drops the hold the abort path placed on
 * the original IO).
 *
 * Returns 0 on success, -ENOMEM/FAILED on failure.
 */
int bnx2fc_send_rrq(struct bnx2fc_cmd *aborted_io_req)
{
	struct fc_els_rrq rrq;
	struct bnx2fc_rport *tgt = aborted_io_req->tgt;
	struct fc_lport *lport = tgt->rdata->local_port;
	struct bnx2fc_els_cb_arg *cb_arg = NULL;
	u32 sid = tgt->sid;
	u32 r_a_tov = lport->r_a_tov;
	unsigned long start = jiffies;	/* start of the 10s retry window */
	int rc;

	BNX2FC_ELS_DBG("Sending RRQ orig_xid = 0x%x\n",
		       aborted_io_req->xid);
	memset(&rrq, 0, sizeof(rrq));

	cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_NOIO);
	if (!cb_arg) {
		printk(KERN_ERR PFX "Unable to allocate cb_arg for RRQ\n");
		rc = -ENOMEM;
		goto rrq_err;
	}

	cb_arg->aborted_io_req = aborted_io_req;

	/* RRQ names the exchange being reclaimed by S_ID + OX_ID + RX_ID */
	rrq.rrq_cmd = ELS_RRQ;
	hton24(rrq.rrq_s_id, sid);
	rrq.rrq_ox_id = htons(aborted_io_req->xid);
	rrq.rrq_rx_id = htons(aborted_io_req->task->rxwr_txrd.var_ctx.rx_id);

retry_rrq:
	rc = bnx2fc_initiate_els(tgt, ELS_RRQ, &rrq, sizeof(rrq),
				 bnx2fc_rrq_compl, cb_arg,
				 r_a_tov);
	if (rc == -ENOMEM) {
		/* Transient allocation failure: retry until the window ends */
		if (time_after(jiffies, start + (10 * HZ))) {
			BNX2FC_ELS_DBG("rrq Failed\n");
			rc = FAILED;
			goto rrq_err;
		}
		msleep(20);
		goto retry_rrq;
	}
rrq_err:
	if (rc) {
		BNX2FC_ELS_DBG("RRQ failed - release orig io req 0x%x\n",
			aborted_io_req->xid);
		kfree(cb_arg);
		/* tgt_lock protects the refcount/release path */
		spin_lock_bh(&tgt->tgt_lock);
		kref_put(&aborted_io_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
	}
	return rc;
}
  99. static void bnx2fc_l2_els_compl(struct bnx2fc_els_cb_arg *cb_arg)
  100. {
  101. struct bnx2fc_cmd *els_req;
  102. struct bnx2fc_rport *tgt;
  103. struct bnx2fc_mp_req *mp_req;
  104. struct fc_frame_header *fc_hdr;
  105. unsigned char *buf;
  106. void *resp_buf;
  107. u32 resp_len, hdr_len;
  108. u16 l2_oxid;
  109. int frame_len;
  110. int rc = 0;
  111. l2_oxid = cb_arg->l2_oxid;
  112. BNX2FC_ELS_DBG("ELS COMPL - l2_oxid = 0x%x\n", l2_oxid);
  113. els_req = cb_arg->io_req;
  114. if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &els_req->req_flags)) {
  115. /*
  116. * els req is timed out. cleanup the IO with FW and
  117. * drop the completion. libfc will handle the els timeout
  118. */
  119. if (els_req->on_active_queue) {
  120. list_del_init(&els_req->link);
  121. els_req->on_active_queue = 0;
  122. rc = bnx2fc_initiate_cleanup(els_req);
  123. BUG_ON(rc);
  124. }
  125. goto free_arg;
  126. }
  127. tgt = els_req->tgt;
  128. mp_req = &(els_req->mp_req);
  129. fc_hdr = &(mp_req->resp_fc_hdr);
  130. resp_len = mp_req->resp_len;
  131. resp_buf = mp_req->resp_buf;
  132. buf = kzalloc(PAGE_SIZE, GFP_ATOMIC);
  133. if (!buf) {
  134. printk(KERN_ERR PFX "Unable to alloc mp buf\n");
  135. goto free_arg;
  136. }
  137. hdr_len = sizeof(*fc_hdr);
  138. if (hdr_len + resp_len > PAGE_SIZE) {
  139. printk(KERN_ERR PFX "l2_els_compl: resp len is "
  140. "beyond page size\n");
  141. goto free_buf;
  142. }
  143. memcpy(buf, fc_hdr, hdr_len);
  144. memcpy(buf + hdr_len, resp_buf, resp_len);
  145. frame_len = hdr_len + resp_len;
  146. bnx2fc_process_l2_frame_compl(tgt, buf, frame_len, l2_oxid);
  147. free_buf:
  148. kfree(buf);
  149. free_arg:
  150. kfree(cb_arg);
  151. }
  152. int bnx2fc_send_adisc(struct bnx2fc_rport *tgt, struct fc_frame *fp)
  153. {
  154. struct fc_els_adisc *adisc;
  155. struct fc_frame_header *fh;
  156. struct bnx2fc_els_cb_arg *cb_arg;
  157. struct fc_lport *lport = tgt->rdata->local_port;
  158. u32 r_a_tov = lport->r_a_tov;
  159. int rc;
  160. fh = fc_frame_header_get(fp);
  161. cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
  162. if (!cb_arg) {
  163. printk(KERN_ERR PFX "Unable to allocate cb_arg for ADISC\n");
  164. return -ENOMEM;
  165. }
  166. cb_arg->l2_oxid = ntohs(fh->fh_ox_id);
  167. BNX2FC_ELS_DBG("send ADISC: l2_oxid = 0x%x\n", cb_arg->l2_oxid);
  168. adisc = fc_frame_payload_get(fp, sizeof(*adisc));
  169. /* adisc is initialized by libfc */
  170. rc = bnx2fc_initiate_els(tgt, ELS_ADISC, adisc, sizeof(*adisc),
  171. bnx2fc_l2_els_compl, cb_arg, 2 * r_a_tov);
  172. if (rc)
  173. kfree(cb_arg);
  174. return rc;
  175. }
  176. int bnx2fc_send_logo(struct bnx2fc_rport *tgt, struct fc_frame *fp)
  177. {
  178. struct fc_els_logo *logo;
  179. struct fc_frame_header *fh;
  180. struct bnx2fc_els_cb_arg *cb_arg;
  181. struct fc_lport *lport = tgt->rdata->local_port;
  182. u32 r_a_tov = lport->r_a_tov;
  183. int rc;
  184. fh = fc_frame_header_get(fp);
  185. cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
  186. if (!cb_arg) {
  187. printk(KERN_ERR PFX "Unable to allocate cb_arg for LOGO\n");
  188. return -ENOMEM;
  189. }
  190. cb_arg->l2_oxid = ntohs(fh->fh_ox_id);
  191. BNX2FC_ELS_DBG("Send LOGO: l2_oxid = 0x%x\n", cb_arg->l2_oxid);
  192. logo = fc_frame_payload_get(fp, sizeof(*logo));
  193. /* logo is initialized by libfc */
  194. rc = bnx2fc_initiate_els(tgt, ELS_LOGO, logo, sizeof(*logo),
  195. bnx2fc_l2_els_compl, cb_arg, 2 * r_a_tov);
  196. if (rc)
  197. kfree(cb_arg);
  198. return rc;
  199. }
  200. int bnx2fc_send_rls(struct bnx2fc_rport *tgt, struct fc_frame *fp)
  201. {
  202. struct fc_els_rls *rls;
  203. struct fc_frame_header *fh;
  204. struct bnx2fc_els_cb_arg *cb_arg;
  205. struct fc_lport *lport = tgt->rdata->local_port;
  206. u32 r_a_tov = lport->r_a_tov;
  207. int rc;
  208. fh = fc_frame_header_get(fp);
  209. cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
  210. if (!cb_arg) {
  211. printk(KERN_ERR PFX "Unable to allocate cb_arg for LOGO\n");
  212. return -ENOMEM;
  213. }
  214. cb_arg->l2_oxid = ntohs(fh->fh_ox_id);
  215. rls = fc_frame_payload_get(fp, sizeof(*rls));
  216. /* rls is initialized by libfc */
  217. rc = bnx2fc_initiate_els(tgt, ELS_RLS, rls, sizeof(*rls),
  218. bnx2fc_l2_els_compl, cb_arg, 2 * r_a_tov);
  219. if (rc)
  220. kfree(cb_arg);
  221. return rc;
  222. }
/**
 * bnx2fc_srr_compl - completion handler for an SRR ELS request
 * @cb_arg: holds the SRR command (io_req) and the original IO being
 *	    recovered (aborted_io_req)
 *
 * On SRR timeout: abort the SRR itself, then retry the SRR up to
 * SRR_RETRY_COUNT times; if retries are exhausted, abort the original IO.
 * On an LS_RJT response: abort the original IO. On LS_ACC: recovery
 * succeeded, nothing more to do.
 *
 * Always drops the reference on the original IO that bnx2fc_send_srr()
 * took. NOTE(review): unlike the other ELS completion handlers in this
 * file, cb_arg is not freed here — confirm it is released elsewhere.
 * NOTE(review): appears to run with tgt->tgt_lock held (it is dropped
 * around the SRR retransmit) — confirm against the caller in
 * bnx2fc_process_els_compl().
 */
void bnx2fc_srr_compl(struct bnx2fc_els_cb_arg *cb_arg)
{
	struct bnx2fc_mp_req *mp_req;
	struct fc_frame_header *fc_hdr, *fh;
	struct bnx2fc_cmd *srr_req;
	struct bnx2fc_cmd *orig_io_req;
	struct fc_frame *fp;
	unsigned char *buf;
	void *resp_buf;
	u32 resp_len, hdr_len;
	u8 opcode;
	int rc = 0;

	orig_io_req = cb_arg->aborted_io_req;
	srr_req = cb_arg->io_req;
	if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &srr_req->req_flags)) {
		/* SRR timedout */
		BNX2FC_IO_DBG(srr_req, "srr timed out, abort "
			      "orig_io - 0x%x\n",
			      orig_io_req->xid);
		/* Abort the timed-out SRR; fall back to cleanup on failure */
		rc = bnx2fc_initiate_abts(srr_req);
		if (rc != SUCCESS) {
			BNX2FC_IO_DBG(srr_req, "srr_compl: initiate_abts "
				      "failed. issue cleanup\n");
			bnx2fc_initiate_cleanup(srr_req);
		}
		/* Nothing to recover if the original IO already finished
		 * or is already being aborted. */
		if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags) ||
		    test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) {
			BNX2FC_IO_DBG(srr_req, "srr_compl:xid 0x%x flags = %lx",
				      orig_io_req->xid, orig_io_req->req_flags);
			goto srr_compl_done;
		}
		orig_io_req->srr_retry++;
		if (orig_io_req->srr_retry <= SRR_RETRY_COUNT) {
			struct bnx2fc_rport *tgt = orig_io_req->tgt;
			/* Drop tgt_lock: the send path re-acquires it */
			spin_unlock_bh(&tgt->tgt_lock);
			rc = bnx2fc_send_srr(orig_io_req,
					     orig_io_req->srr_offset,
					     orig_io_req->srr_rctl);
			spin_lock_bh(&tgt->tgt_lock);
			if (!rc)
				goto srr_compl_done;
		}

		/* Retries exhausted: give up and abort the original IO */
		rc = bnx2fc_initiate_abts(orig_io_req);
		if (rc != SUCCESS) {
			BNX2FC_IO_DBG(srr_req, "srr_compl: initiate_abts "
				      "failed xid = 0x%x. issue cleanup\n",
				      orig_io_req->xid);
			bnx2fc_initiate_cleanup(orig_io_req);
		}
		goto srr_compl_done;
	}
	/* Original IO completed or got aborted while the SRR was in
	 * flight: drop this response. */
	if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags) ||
	    test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) {
		BNX2FC_IO_DBG(srr_req, "srr_compl:xid - 0x%x flags = %lx",
			      orig_io_req->xid, orig_io_req->req_flags);
		goto srr_compl_done;
	}

	/* Rebuild a contiguous frame (FC header + payload) for parsing */
	mp_req = &(srr_req->mp_req);
	fc_hdr = &(mp_req->resp_fc_hdr);
	resp_len = mp_req->resp_len;
	resp_buf = mp_req->resp_buf;

	hdr_len = sizeof(*fc_hdr);
	buf = kzalloc(PAGE_SIZE, GFP_ATOMIC);
	if (!buf) {
		printk(KERN_ERR PFX "srr buf: mem alloc failure\n");
		goto srr_compl_done;
	}
	memcpy(buf, fc_hdr, hdr_len);
	memcpy(buf + hdr_len, resp_buf, resp_len);

	fp = fc_frame_alloc(NULL, resp_len);
	if (!fp) {
		printk(KERN_ERR PFX "fc_frame_alloc failure\n");
		goto free_buf;
	}

	fh = (struct fc_frame_header *) fc_frame_header_get(fp);
	/* Copy FC Frame header and payload into the frame */
	memcpy(fh, buf, hdr_len + resp_len);

	opcode = fc_frame_payload_op(fp);
	switch (opcode) {
	case ELS_LS_ACC:
		/* Target accepted the retransmission request */
		BNX2FC_IO_DBG(srr_req, "SRR success\n");
		break;
	case ELS_LS_RJT:
		/* Target refused: abort the original IO instead */
		BNX2FC_IO_DBG(srr_req, "SRR rejected\n");
		rc = bnx2fc_initiate_abts(orig_io_req);
		if (rc != SUCCESS) {
			BNX2FC_IO_DBG(srr_req, "srr_compl: initiate_abts "
				      "failed xid = 0x%x. issue cleanup\n",
				      orig_io_req->xid);
			bnx2fc_initiate_cleanup(orig_io_req);
		}
		break;
	default:
		BNX2FC_IO_DBG(srr_req, "srr compl - invalid opcode = %d\n",
			      opcode);
		break;
	}
	fc_frame_free(fp);
free_buf:
	kfree(buf);
srr_compl_done:
	/* Drop the hold bnx2fc_send_srr() placed on the original IO */
	kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
}
/**
 * bnx2fc_rec_compl - completion handler for a REC ELS request
 * @cb_arg: holds the REC command (io_req) and the original IO whose
 *	    exchange state was queried (aborted_io_req)
 *
 * Drives exchange recovery for the original IO based on the REC result:
 *  - REC timeout: abort the REC, retry the REC up to REC_RETRY_COUNT
 *    times, and finally abort the original IO.
 *  - LS_RJT with "no such exchange" (logical/unable + OX_ID/RX_ID
 *    explanation): the command was lost — re-post the same SCSI command
 *    on a fresh exchange, else abort the original IO.
 *  - LS_ACC: decide from e_stat/offset what was lost (FCP_RSP, data or
 *    XFER_RDY) and either issue a firmware sequence cleanup or send an
 *    SRR to request retransmission.
 *
 * Drops the reference on the original IO taken by bnx2fc_send_rec() and
 * frees @cb_arg. NOTE(review): appears to run with tgt->tgt_lock held
 * (the lock is dropped around the resend/repost calls) — confirm against
 * the completion path. Note the local 'bool abort_io' coexists with a
 * label of the same name; C keeps labels in a separate namespace.
 */
void bnx2fc_rec_compl(struct bnx2fc_els_cb_arg *cb_arg)
{
	struct bnx2fc_cmd *orig_io_req, *new_io_req;
	struct bnx2fc_cmd *rec_req;
	struct bnx2fc_mp_req *mp_req;
	struct fc_frame_header *fc_hdr, *fh;
	struct fc_els_ls_rjt *rjt;
	struct fc_els_rec_acc *acc;
	struct bnx2fc_rport *tgt;
	struct fcoe_err_report_entry *err_entry;
	struct scsi_cmnd *sc_cmd;
	enum fc_rctl r_ctl;
	unsigned char *buf;
	void *resp_buf;
	struct fc_frame *fp;
	u8 opcode;
	u32 offset;
	u32 e_stat;
	u32 resp_len, hdr_len;
	int rc = 0;
	bool send_seq_clnp = false;
	bool abort_io = false;

	BNX2FC_MISC_DBG("Entered rec_compl callback\n");
	rec_req = cb_arg->io_req;
	orig_io_req = cb_arg->aborted_io_req;
	BNX2FC_IO_DBG(rec_req, "rec_compl: orig xid = 0x%x", orig_io_req->xid);
	tgt = orig_io_req->tgt;

	/* Handle REC timeout case */
	if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &rec_req->req_flags)) {
		BNX2FC_IO_DBG(rec_req, "timed out, abort "
			      "orig_io - 0x%x\n",
			      orig_io_req->xid);
		/* els req is timed out. send abts for els */
		rc = bnx2fc_initiate_abts(rec_req);
		if (rc != SUCCESS) {
			BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts "
				      "failed. issue cleanup\n");
			bnx2fc_initiate_cleanup(rec_req);
		}
		orig_io_req->rec_retry++;
		/* REC timedout. send ABTS to the orig IO req */
		if (orig_io_req->rec_retry <= REC_RETRY_COUNT) {
			/* Drop tgt_lock: the send path re-acquires it */
			spin_unlock_bh(&tgt->tgt_lock);
			rc = bnx2fc_send_rec(orig_io_req);
			spin_lock_bh(&tgt->tgt_lock);
			if (!rc)
				goto rec_compl_done;
		}
		rc = bnx2fc_initiate_abts(orig_io_req);
		if (rc != SUCCESS) {
			BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts "
				      "failed xid = 0x%x. issue cleanup\n",
				      orig_io_req->xid);
			bnx2fc_initiate_cleanup(orig_io_req);
		}
		goto rec_compl_done;
	}

	/* Nothing to recover if the original IO already completed ... */
	if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags)) {
		BNX2FC_IO_DBG(rec_req, "completed"
			      "orig_io - 0x%x\n",
			      orig_io_req->xid);
		goto rec_compl_done;
	}
	/* ... or is already being aborted */
	if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) {
		BNX2FC_IO_DBG(rec_req, "abts in prog "
			      "orig_io - 0x%x\n",
			      orig_io_req->xid);
		goto rec_compl_done;
	}

	/* Rebuild a contiguous frame (FC header + payload) for parsing */
	mp_req = &(rec_req->mp_req);
	fc_hdr = &(mp_req->resp_fc_hdr);
	resp_len = mp_req->resp_len;
	acc = resp_buf = mp_req->resp_buf;

	hdr_len = sizeof(*fc_hdr);

	buf = kzalloc(PAGE_SIZE, GFP_ATOMIC);
	if (!buf) {
		printk(KERN_ERR PFX "rec buf: mem alloc failure\n");
		goto rec_compl_done;
	}
	memcpy(buf, fc_hdr, hdr_len);
	memcpy(buf + hdr_len, resp_buf, resp_len);

	fp = fc_frame_alloc(NULL, resp_len);
	if (!fp) {
		printk(KERN_ERR PFX "fc_frame_alloc failure\n");
		goto free_buf;
	}

	fh = (struct fc_frame_header *) fc_frame_header_get(fp);
	/* Copy FC Frame header and payload into the frame */
	memcpy(fh, buf, hdr_len + resp_len);

	opcode = fc_frame_payload_op(fp);
	if (opcode == ELS_LS_RJT) {
		BNX2FC_IO_DBG(rec_req, "opcode is RJT\n");
		rjt = fc_frame_payload_get(fp, sizeof(*rjt));
		if ((rjt->er_reason == ELS_RJT_LOGIC ||
		    rjt->er_reason == ELS_RJT_UNAB) &&
		    rjt->er_explan == ELS_EXPL_OXID_RXID) {
			/* Target has no record of the exchange: the
			 * command never arrived. Re-post the same SCSI
			 * command on a fresh exchange. */
			BNX2FC_IO_DBG(rec_req, "handle CMD LOST case\n");
			new_io_req = bnx2fc_cmd_alloc(tgt);
			if (!new_io_req)
				goto abort_io;
			new_io_req->sc_cmd = orig_io_req->sc_cmd;
			/* cleanup orig_io_req that is with the FW */
			set_bit(BNX2FC_FLAG_CMD_LOST,
				&orig_io_req->req_flags);
			bnx2fc_initiate_cleanup(orig_io_req);
			/* Post a new IO req with the same sc_cmd */
			BNX2FC_IO_DBG(rec_req, "Post IO request again\n");
			spin_unlock_bh(&tgt->tgt_lock);
			rc = bnx2fc_post_io_req(tgt, new_io_req);
			spin_lock_bh(&tgt->tgt_lock);
			if (!rc)
				goto free_frame;
			BNX2FC_IO_DBG(rec_req, "REC: io post err\n");
		}
abort_io:
		rc = bnx2fc_initiate_abts(orig_io_req);
		if (rc != SUCCESS) {
			BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts "
				      "failed. issue cleanup\n");
			bnx2fc_initiate_cleanup(orig_io_req);
		}
	} else if (opcode == ELS_LS_ACC) {
		/* REVISIT: Check if the exchange is already aborted */
		offset = ntohl(acc->reca_fc4value);
		e_stat = ntohl(acc->reca_e_stat);
		if (e_stat & ESB_ST_SEQ_INIT) {
			/* Target holds sequence initiative: it will
			 * continue the exchange itself. */
			BNX2FC_IO_DBG(rec_req, "target has the seq init\n");
			goto free_frame;
		}
		BNX2FC_IO_DBG(rec_req, "e_stat = 0x%x, offset = 0x%x\n",
			      e_stat, offset);
		/* Seq initiative is with us */
		err_entry = (struct fcoe_err_report_entry *)
			    &orig_io_req->err_entry;
		sc_cmd = orig_io_req->sc_cmd;
		if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
			/* SCSI WRITE command */
			if (offset == orig_io_req->data_xfer_len) {
				/* All data was delivered: only the
				 * FCP_RSP was lost — request it via SRR. */
				BNX2FC_IO_DBG(rec_req, "WRITE - resp lost\n");
				/* FCP_RSP lost */
				r_ctl = FC_RCTL_DD_CMD_STATUS;
				offset = 0;
			} else {
				/* start transmitting from offset */
				BNX2FC_IO_DBG(rec_req, "XFER_RDY/DATA lost\n");
				send_seq_clnp = true;
				r_ctl = FC_RCTL_DD_DATA_DESC;
				if (bnx2fc_initiate_seq_cleanup(orig_io_req,
								offset, r_ctl))
					abort_io = true;
				/* XFER_RDY */
			}
		} else {
			/* SCSI READ command */
			/* NOTE(review): rx_buf_off is presumably the byte
			 * offset received so far — confirm against the
			 * fcoe_err_report_entry definition. */
			if (err_entry->data.rx_buf_off ==
			    orig_io_req->data_xfer_len) {
				/* FCP_RSP lost */
				BNX2FC_IO_DBG(rec_req, "READ - resp lost\n");
				r_ctl = FC_RCTL_DD_CMD_STATUS;
				offset = 0;
			} else {
				/* request retransmission from this offset */
				send_seq_clnp = true;
				offset = err_entry->data.rx_buf_off;
				BNX2FC_IO_DBG(rec_req, "RD DATA lost\n");
				/* FCP_DATA lost */
				r_ctl = FC_RCTL_DD_SOL_DATA;
				if (bnx2fc_initiate_seq_cleanup(orig_io_req,
								offset, r_ctl))
					abort_io = true;
			}
		}
		if (abort_io) {
			/* Sequence cleanup could not be started: abort */
			rc = bnx2fc_initiate_abts(orig_io_req);
			if (rc != SUCCESS) {
				BNX2FC_IO_DBG(rec_req, "rec_compl:initiate_abts"
					      " failed. issue cleanup\n");
				bnx2fc_initiate_cleanup(orig_io_req);
			}
		} else if (!send_seq_clnp) {
			/* FCP_RSP-lost case: SRR is sent right away;
			 * the seq-cleanup cases send it on cleanup
			 * completion instead. */
			BNX2FC_IO_DBG(rec_req, "Send SRR - FCP_RSP\n");
			spin_unlock_bh(&tgt->tgt_lock);
			rc = bnx2fc_send_srr(orig_io_req, offset, r_ctl);
			spin_lock_bh(&tgt->tgt_lock);

			if (rc) {
				BNX2FC_IO_DBG(rec_req, "Unable to send SRR"
					      " IO will abort\n");
			}
		}
	}
free_frame:
	fc_frame_free(fp);
free_buf:
	kfree(buf);
rec_compl_done:
	/* Drop the hold bnx2fc_send_rec() placed on the original IO */
	kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
	kfree(cb_arg);
}
/**
 * bnx2fc_send_rec - send a REC (Read Exchange Concise) ELS for an IO
 * @orig_io_req: the IO whose exchange state is being queried
 *
 * Takes an extra reference on @orig_io_req for the REC's lifetime;
 * bnx2fc_rec_compl() drops it. On failure the reference is dropped here
 * and @cb_arg is freed.
 *
 * NOTE(review): on the cb_arg allocation-failure path the kref_put() at
 * rec_err runs without the matching kref_get() below having been taken,
 * so it releases a reference the caller holds — confirm callers rely on
 * this ownership transfer on failure.
 *
 * Returns 0 on success, negative errno on failure.
 */
int bnx2fc_send_rec(struct bnx2fc_cmd *orig_io_req)
{
	struct fc_els_rec rec;
	struct bnx2fc_rport *tgt = orig_io_req->tgt;
	struct fc_lport *lport = tgt->rdata->local_port;
	struct bnx2fc_els_cb_arg *cb_arg = NULL;
	u32 sid = tgt->sid;
	u32 r_a_tov = lport->r_a_tov;
	int rc;

	BNX2FC_IO_DBG(orig_io_req, "Sending REC\n");
	memset(&rec, 0, sizeof(rec));

	cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
	if (!cb_arg) {
		printk(KERN_ERR PFX "Unable to allocate cb_arg for REC\n");
		rc = -ENOMEM;
		goto rec_err;
	}
	/* Hold the original IO until the REC completes */
	kref_get(&orig_io_req->refcount);

	cb_arg->aborted_io_req = orig_io_req;

	/* REC identifies the exchange by S_ID + OX_ID + RX_ID */
	rec.rec_cmd = ELS_REC;
	hton24(rec.rec_s_id, sid);
	rec.rec_ox_id = htons(orig_io_req->xid);
	rec.rec_rx_id = htons(orig_io_req->task->rxwr_txrd.var_ctx.rx_id);

	rc = bnx2fc_initiate_els(tgt, ELS_REC, &rec, sizeof(rec),
				 bnx2fc_rec_compl, cb_arg,
				 r_a_tov);
rec_err:
	if (rc) {
		BNX2FC_IO_DBG(orig_io_req, "REC failed - release\n");
		spin_lock_bh(&tgt->tgt_lock);
		kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		kfree(cb_arg);
	}
	return rc;
}
/**
 * bnx2fc_send_srr - send an SRR (Sequence Retransmission Request)
 * @orig_io_req: IO whose sequence should be retransmitted
 * @offset: relative offset to restart the sequence from
 * @r_ctl: R_CTL identifying what was lost (data / XFER_RDY / FCP_RSP)
 *
 * Takes an extra reference on @orig_io_req for the SRR's lifetime;
 * bnx2fc_srr_compl() drops it. The offset/r_ctl pair is cached on the
 * original IO so a timed-out SRR can be retried with the same values.
 * Sets BNX2FC_FLAG_SRR_SENT on success.
 *
 * NOTE(review): as in bnx2fc_send_rec(), the srr_err path does a
 * kref_put() even when kzalloc failed before the kref_get() ran,
 * releasing a caller-held reference — confirm callers expect this.
 *
 * Returns 0 on success, negative errno on failure.
 */
int bnx2fc_send_srr(struct bnx2fc_cmd *orig_io_req, u32 offset, u8 r_ctl)
{
	struct fcp_srr srr;
	struct bnx2fc_rport *tgt = orig_io_req->tgt;
	struct fc_lport *lport = tgt->rdata->local_port;
	struct bnx2fc_els_cb_arg *cb_arg = NULL;
	u32 r_a_tov = lport->r_a_tov;
	int rc;

	BNX2FC_IO_DBG(orig_io_req, "Sending SRR\n");
	memset(&srr, 0, sizeof(srr));

	cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
	if (!cb_arg) {
		printk(KERN_ERR PFX "Unable to allocate cb_arg for SRR\n");
		rc = -ENOMEM;
		goto srr_err;
	}
	/* Hold the original IO until the SRR completes */
	kref_get(&orig_io_req->refcount);

	cb_arg->aborted_io_req = orig_io_req;

	srr.srr_op = ELS_SRR;
	srr.srr_ox_id = htons(orig_io_req->xid);
	srr.srr_rx_id = htons(orig_io_req->task->rxwr_txrd.var_ctx.rx_id);
	srr.srr_rel_off = htonl(offset);
	srr.srr_r_ctl = r_ctl;
	/* Cache for retries from bnx2fc_srr_compl() */
	orig_io_req->srr_offset = offset;
	orig_io_req->srr_rctl = r_ctl;

	rc = bnx2fc_initiate_els(tgt, ELS_SRR, &srr, sizeof(srr),
				 bnx2fc_srr_compl, cb_arg,
				 r_a_tov);
srr_err:
	if (rc) {
		BNX2FC_IO_DBG(orig_io_req, "SRR failed - release\n");
		spin_lock_bh(&tgt->tgt_lock);
		kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		kfree(cb_arg);
	} else
		set_bit(BNX2FC_FLAG_SRR_SENT, &orig_io_req->req_flags);

	return rc;
}
/**
 * bnx2fc_initiate_els - allocate and post an offloaded ELS request
 * @tgt: destination rport
 * @op: ELS opcode; must lie in [ELS_LS_RJT, ELS_AUTH_ELS]
 * @data: ELS payload, copied into the middle-path request buffer
 * @data_len: payload length in bytes
 * @cb_func: completion callback run from bnx2fc_process_els_compl()
 * @cb_arg: argument for @cb_func; its io_req field is set here
 * @timer_msec: ELS timeout in milliseconds; 0 disables the timer
 *
 * Validates rport/link/session state, builds the FC header (SRR is an
 * FC-4 link service carried with FCP type; all others use type ELS),
 * initializes the firmware task context, queues the request on the
 * target's send queue and rings the doorbell.
 *
 * Returns 0 on success or a negative errno. On failure the caller keeps
 * ownership of @cb_arg.
 */
static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
			       void *data, u32 data_len,
			       void (*cb_func)(struct bnx2fc_els_cb_arg *cb_arg),
			       struct bnx2fc_els_cb_arg *cb_arg, u32 timer_msec)
{
	struct fcoe_port *port = tgt->port;
	struct bnx2fc_interface *interface = port->priv;
	struct fc_rport *rport = tgt->rport;
	struct fc_lport *lport = port->lport;
	struct bnx2fc_cmd *els_req;
	struct bnx2fc_mp_req *mp_req;
	struct fc_frame_header *fc_hdr;
	struct fcoe_task_ctx_entry *task;
	struct fcoe_task_ctx_entry *task_page;
	int rc = 0;
	int task_idx, index;
	u32 did, sid;
	u16 xid;

	/* rport, link and offloaded session must all be usable */
	rc = fc_remote_port_chkready(rport);
	if (rc) {
		printk(KERN_ERR PFX "els 0x%x: rport not ready\n", op);
		rc = -EINVAL;
		goto els_err;
	}
	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
		printk(KERN_ERR PFX "els 0x%x: link is not ready\n", op);
		rc = -EINVAL;
		goto els_err;
	}
	if (!(test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) ||
	    (test_bit(BNX2FC_FLAG_EXPL_LOGO, &tgt->flags))) {
		printk(KERN_ERR PFX "els 0x%x: tgt not ready\n", op);
		rc = -EINVAL;
		goto els_err;
	}

	els_req = bnx2fc_elstm_alloc(tgt, BNX2FC_ELS);
	if (!els_req) {
		rc = -ENOMEM;
		goto els_err;
	}

	els_req->sc_cmd = NULL;
	els_req->port = port;
	els_req->tgt = tgt;
	els_req->cb_func = cb_func;
	cb_arg->io_req = els_req;
	els_req->cb_arg = cb_arg;

	mp_req = (struct bnx2fc_mp_req *)&(els_req->mp_req);
	rc = bnx2fc_init_mp_req(els_req);
	if (rc == FAILED) {
		printk(KERN_ERR PFX "ELS MP request init failed\n");
		spin_lock_bh(&tgt->tgt_lock);
		kref_put(&els_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		rc = -ENOMEM;
		goto els_err;
	} else {
		/* rc SUCCESS */
		rc = 0;
	}

	/* Set the data_xfer_len to the size of ELS payload */
	mp_req->req_len = data_len;
	els_req->data_xfer_len = mp_req->req_len;

	/* Fill ELS Payload */
	if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS)) {
		memcpy(mp_req->req_buf, data, data_len);
	} else {
		/* Opcode outside the valid ELS range: undo and bail */
		printk(KERN_ERR PFX "Invalid ELS op 0x%x\n", op);
		els_req->cb_func = NULL;
		els_req->cb_arg = NULL;
		spin_lock_bh(&tgt->tgt_lock);
		kref_put(&els_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		rc = -EINVAL;
	}

	if (rc)
		goto els_err;

	/* Fill FC header */
	fc_hdr = &(mp_req->req_fc_hdr);

	did = tgt->rport->port_id;
	sid = tgt->sid;

	/* SRR travels as an FC-4 link service over the FCP type */
	if (op == ELS_SRR)
		__fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS4_REQ, did, sid,
				 FC_TYPE_FCP, FC_FC_FIRST_SEQ |
				 FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
	else
		__fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS_REQ, did, sid,
				 FC_TYPE_ELS, FC_FC_FIRST_SEQ |
				 FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);

	/* Obtain exchange id */
	xid = els_req->xid;
	task_idx = xid/BNX2FC_TASKS_PER_PAGE;
	index = xid % BNX2FC_TASKS_PER_PAGE;

	/* Initialize task context for this IO request */
	task_page = (struct fcoe_task_ctx_entry *)
			interface->hba->task_ctx[task_idx];
	task = &(task_page[index]);
	bnx2fc_init_mp_task(els_req, task);

	spin_lock_bh(&tgt->tgt_lock);

	/* Re-check under the lock: the session may have dropped meanwhile */
	if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
		printk(KERN_ERR PFX "initiate_els.. session not ready\n");
		els_req->cb_func = NULL;
		els_req->cb_arg = NULL;
		kref_put(&els_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		return -EINVAL;
	}

	if (timer_msec)
		bnx2fc_cmd_timer_set(els_req, timer_msec);

	/* Queue the request and notify the hardware */
	bnx2fc_add_2_sq(tgt, xid);

	els_req->on_active_queue = 1;
	list_add_tail(&els_req->link, &tgt->els_queue);

	/* Ring doorbell */
	bnx2fc_ring_doorbell(tgt);
	spin_unlock_bh(&tgt->tgt_lock);

els_err:
	return rc;
}
/**
 * bnx2fc_process_els_compl - handle firmware completion of an ELS request
 * @els_req: the completed ELS command
 * @task: firmware task context entry carrying the response header/length
 * @num_rq: number of RQ entries (not used in this function)
 *
 * Races with the ELS timeout handler via BNX2FC_FLAG_ELS_DONE: whichever
 * side sets the flag first processes the request; the loser only drops
 * its reference. Copies the response FC header out of the task context,
 * records the payload length, and invokes the registered callback.
 *
 * NOTE(review): appears to be called with tgt->tgt_lock held — the
 * callbacks invoked from here drop/reacquire that lock — confirm against
 * the completion path in bnx2fc_hwi.c.
 */
void bnx2fc_process_els_compl(struct bnx2fc_cmd *els_req,
			      struct fcoe_task_ctx_entry *task, u8 num_rq)
{
	struct bnx2fc_mp_req *mp_req;
	struct fc_frame_header *fc_hdr;
	u64 *hdr;
	u64 *temp_hdr;

	BNX2FC_ELS_DBG("Entered process_els_compl xid = 0x%x"
			"cmd_type = %d\n", els_req->xid, els_req->cmd_type);

	if (test_and_set_bit(BNX2FC_FLAG_ELS_DONE,
			     &els_req->req_flags)) {
		BNX2FC_ELS_DBG("Timer context finished processing this "
			       "els - 0x%x\n", els_req->xid);
		/* This IO doesn't receive cleanup completion */
		kref_put(&els_req->refcount, bnx2fc_cmd_release);
		return;
	}

	/* Cancel the timeout_work, as we received the response */
	if (cancel_delayed_work(&els_req->timeout_work))
		kref_put(&els_req->refcount,
			 bnx2fc_cmd_release); /* drop timer hold */

	if (els_req->on_active_queue) {
		list_del_init(&els_req->link);
		els_req->on_active_queue = 0;
	}

	mp_req = &(els_req->mp_req);
	fc_hdr = &(mp_req->resp_fc_hdr);

	/* Copy the 24-byte FC header out of the task context as three
	 * big-endian 64-bit words. */
	hdr = (u64 *)fc_hdr;
	temp_hdr = (u64 *)
		&task->rxwr_only.union_ctx.comp_info.mp_rsp.fc_hdr;
	hdr[0] = cpu_to_be64(temp_hdr[0]);
	hdr[1] = cpu_to_be64(temp_hdr[1]);
	hdr[2] = cpu_to_be64(temp_hdr[2]);

	mp_req->resp_len =
		task->rxwr_only.union_ctx.comp_info.mp_rsp.mp_payload_len;

	/* Parse ELS response */
	if ((els_req->cb_func) && (els_req->cb_arg)) {
		els_req->cb_func(els_req->cb_arg);
		els_req->cb_arg = NULL;
	}

	kref_put(&els_req->refcount, bnx2fc_cmd_release);
}
/**
 * bnx2fc_flogi_resp - FLOGI/FDISC response hook installed by
 *		       bnx2fc_elsct_send()
 * @seq: the exchange sequence
 * @fp: the response frame, or an ERR_PTR on exchange error
 * @arg: the fcoe_ctlr for this interface
 *
 * When FIP has not yet granted a MAC, validates the frame (must be an
 * ELS; an LS_RJT on a vport terminates it) and lets the FCoE controller
 * consume the FLOGI response to learn the granted MAC. Finally updates
 * the data-plane MAC and forwards the response to libfc's standard
 * FLOGI handler.
 *
 * Frame ownership: every early return first frees @fp; otherwise @fp is
 * passed on to fc_lport_flogi_resp(), which consumes it.
 */
static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
			      void *arg)
{
	struct fcoe_ctlr *fip = arg;
	struct fc_exch *exch = fc_seq_exch(seq);
	struct fc_lport *lport = exch->lp;
	u8 *mac;
	struct fc_frame_header *fh;
	u8 op;

	/* Exchange-level error: hand straight to libfc */
	if (IS_ERR(fp))
		goto done;

	mac = fr_cb(fp)->granted_mac;
	if (is_zero_ether_addr(mac)) {
		/* No granted MAC yet: vet the frame before giving it
		 * to the FCoE controller. */
		fh = fc_frame_header_get(fp);
		if (fh->fh_type != FC_TYPE_ELS) {
			printk(KERN_ERR PFX "bnx2fc_flogi_resp:"
			       "fh_type != FC_TYPE_ELS\n");
			fc_frame_free(fp);
			return;
		}
		op = fc_frame_payload_op(fp);
		if (lport->vport) {
			/* Fabric rejected the vport's FDISC/FLOGI */
			if (op == ELS_LS_RJT) {
				printk(KERN_ERR PFX "bnx2fc_flogi_resp is LS_RJT\n");
				fc_vport_terminate(lport->vport);
				fc_frame_free(fp);
				return;
			}
		}
		/* Let the FCoE controller learn the granted MAC */
		if (fcoe_ctlr_recv_flogi(fip, lport, fp)) {
			fc_frame_free(fp);
			return;
		}
	}
	fip->update_mac(lport, mac);
done:
	fc_lport_flogi_resp(seq, fp, lport);
}
  796. static void bnx2fc_logo_resp(struct fc_seq *seq, struct fc_frame *fp,
  797. void *arg)
  798. {
  799. struct fcoe_ctlr *fip = arg;
  800. struct fc_exch *exch = fc_seq_exch(seq);
  801. struct fc_lport *lport = exch->lp;
  802. static u8 zero_mac[ETH_ALEN] = { 0 };
  803. if (!IS_ERR(fp))
  804. fip->update_mac(lport, zero_mac);
  805. fc_lport_logo_resp(seq, fp, lport);
  806. }
  807. struct fc_seq *bnx2fc_elsct_send(struct fc_lport *lport, u32 did,
  808. struct fc_frame *fp, unsigned int op,
  809. void (*resp)(struct fc_seq *,
  810. struct fc_frame *,
  811. void *),
  812. void *arg, u32 timeout)
  813. {
  814. struct fcoe_port *port = lport_priv(lport);
  815. struct bnx2fc_interface *interface = port->priv;
  816. struct fcoe_ctlr *fip = &interface->ctlr;
  817. struct fc_frame_header *fh = fc_frame_header_get(fp);
  818. switch (op) {
  819. case ELS_FLOGI:
  820. case ELS_FDISC:
  821. return fc_elsct_send(lport, did, fp, op, bnx2fc_flogi_resp,
  822. fip, timeout);
  823. case ELS_LOGO:
  824. /* only hook onto fabric logouts, not port logouts */
  825. if (ntoh24(fh->fh_d_id) != FC_FID_FLOGI)
  826. break;
  827. return fc_elsct_send(lport, did, fp, op, bnx2fc_logo_resp,
  828. fip, timeout);
  829. }
  830. return fc_elsct_send(lport, did, fp, op, resp, arg, timeout);
  831. }