be_cmds.c
/**
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include "be.h"
#include "be_mgmt.h"
#include "be_main.h"
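
/*
 * beiscsi_pci_soft_reset - soft-reset the adapter through its mapped
 * PCI register space.
 *
 * Sets the soft-reset bit and polls (64 x 100 ms) for the hardware to
 * deassert it, then brings the MPU IRAM online and repeats the reset
 * handshake with a shorter poll. Returns 0 on success, -EIO if either
 * poll times out.
 */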
int beiscsi_pci_soft_reset(struct beiscsi_hba *phba)
{
	u32 sreset;
	u8 *pci_reset_offset = NULL;
	u8 *pci_online0_offset = NULL;
	u8 *pci_online1_offset = NULL;
	u32 pconline0 = 0;
	u32 pconline1 = 0;
	u32 i;

	pci_reset_offset = (u8 *)phba->pci_va + BE2_SOFT_RESET;
	pci_online0_offset = (u8 *)phba->pci_va + BE2_PCI_ONLINE0;
	pci_online1_offset = (u8 *)phba->pci_va + BE2_PCI_ONLINE1;

	sreset = readl((void *)pci_reset_offset);
	sreset |= BE2_SET_RESET;
	writel(sreset, (void *)pci_reset_offset);

	i = 0;
	while (sreset & BE2_SET_RESET) {
		if (i > 64)
			break;
		msleep(100);
		sreset = readl((void *)pci_reset_offset);
		i++;
	}
	if (sreset & BE2_SET_RESET) {
		printk(KERN_ERR "Soft Reset did not deassert\n");
		return -EIO;
	}

	pconline1 = BE2_MPU_IRAM_ONLINE;
	writel(pconline0, (void *)pci_online0_offset);
	writel(pconline1, (void *)pci_online1_offset);

	sreset = BE2_SET_RESET;
	writel(sreset, (void *)pci_reset_offset);

	i = 0;
	while (sreset & BE2_SET_RESET) {
		if (i > 64)
			break;
		msleep(1);
		sreset = readl((void *)pci_reset_offset);
		i++;
	}
	if (sreset & BE2_SET_RESET) {
		printk(KERN_ERR "MPU Online Soft Reset did not deassert\n");
		return -EIO;
	}
	return 0;
}
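
/*
 * be_chk_reset_complete - wait for the firmware to come back after a reset.
 *
 * Polls the MPU semaphore register. Judging by the loop below, bit 31
 * appears to act as an error flag and the low 16 bits reaching 0xC000
 * as the ready indication; both readings are inferred from the code,
 * not from a datasheet.
 */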
int be_chk_reset_complete(struct beiscsi_hba *phba)
{
	unsigned int num_loop;
	u8 *mpu_sem = NULL;
	u32 status;

	num_loop = 1000;
	mpu_sem = (u8 *)phba->csr_va + MPU_EP_SEMAPHORE;
	msleep(5000);

	while (num_loop) {
		status = readl((void *)mpu_sem);
		if ((status & 0x80000000) || (status & 0x0000FFFF) == 0xC000)
			break;
		msleep(60);
		num_loop--;
	}

	if ((status & 0x80000000) || (!num_loop)) {
		printk(KERN_ERR "Failed in be_chk_reset_complete "
		       "status = 0x%x\n", status);
		return -EIO;
	}
	return 0;
}
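
/*
 * be_mcc_notify - ring the MCC queue doorbell.
 *
 * Writes the MCC queue id together with a posted-entry count of one,
 * telling the firmware that a new WRB is available on the ring.
 */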
void be_mcc_notify(struct beiscsi_hba *phba)
{
	struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
	u32 val = 0;

	val |= mccq->id & DB_MCCQ_RING_ID_MASK;
	val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
	iowrite32(val, phba->db_va + DB_MCCQ_OFFSET);
}
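
/*
 * alloc_mcc_tag / free_mcc_tag - manage the pool of MCC command tags.
 *
 * Tags are handed out from a circular array of MAX_MCC_CMD entries; a
 * returned tag of 0 means the pool is exhausted. free_mcc_tag() puts a
 * tag back into the pool under the mailbox lock.
 */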
unsigned int alloc_mcc_tag(struct beiscsi_hba *phba)
{
	unsigned int tag = 0;

	if (phba->ctrl.mcc_tag_available) {
		tag = phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index];
		phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index] = 0;
		phba->ctrl.mcc_numtag[tag] = 0;
	}
	if (tag) {
		phba->ctrl.mcc_tag_available--;
		if (phba->ctrl.mcc_alloc_index == (MAX_MCC_CMD - 1))
			phba->ctrl.mcc_alloc_index = 0;
		else
			phba->ctrl.mcc_alloc_index++;
	}
	return tag;
}

void free_mcc_tag(struct be_ctrl_info *ctrl, unsigned int tag)
{
	spin_lock(&ctrl->mbox_lock);
	tag = tag & 0x000000FF;
	ctrl->mcc_tag[ctrl->mcc_free_index] = tag;
	if (ctrl->mcc_free_index == (MAX_MCC_CMD - 1))
		ctrl->mcc_free_index = 0;
	else
		ctrl->mcc_free_index++;
	ctrl->mcc_tag_available++;
	spin_unlock(&ctrl->mbox_lock);
}
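
/*
 * is_link_state_evt - check an async-event trailer for a link-state event.
 *
 * The event code is a bit-field of the completion trailer; shift and mask
 * it out and compare against ASYNC_EVENT_CODE_LINK_STATE.
 */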
bool is_link_state_evt(u32 trailer)
{
	return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
		  ASYNC_TRAILER_EVENT_CODE_MASK) ==
		  ASYNC_EVENT_CODE_LINK_STATE);
}

static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
{
	if (compl->flags != 0) {
		compl->flags = le32_to_cpu(compl->flags);
		WARN_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
		return true;
	} else
		return false;
}

static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
{
	compl->flags = 0;
}

static int be_mcc_compl_process(struct be_ctrl_info *ctrl,
				struct be_mcc_compl *compl)
{
	u16 compl_status, extd_status;

	be_dws_le_to_cpu(compl, 4);

	compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
		       CQE_STATUS_COMPL_MASK;
	if (compl_status != MCC_STATUS_SUCCESS) {
		extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
			      CQE_STATUS_EXTD_MASK;
		dev_err(&ctrl->pdev->dev,
			"error in cmd completion: status(compl/extd)=%d/%d\n",
			compl_status, extd_status);
		return -EBUSY;
	}
	return 0;
}

int be_mcc_compl_process_isr(struct be_ctrl_info *ctrl,
			     struct be_mcc_compl *compl)
{
	u16 compl_status, extd_status;
	unsigned short tag;

	be_dws_le_to_cpu(compl, 4);

	compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
		       CQE_STATUS_COMPL_MASK;
	/* The ctrl.mcc_numtag[tag] is filled with
	 * [31] = valid, [30:24] = Rsvd, [23:16] = wrb, [15:8] = extd_status,
	 * [7:0] = compl_status
	 */
	tag = (compl->tag0 & 0x000000FF);
	extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
		      CQE_STATUS_EXTD_MASK;

	ctrl->mcc_numtag[tag]  = 0x80000000;
	ctrl->mcc_numtag[tag] |= (compl->tag0 & 0x00FF0000);
	ctrl->mcc_numtag[tag] |= (extd_status & 0x000000FF) << 8;
	ctrl->mcc_numtag[tag] |= (compl_status & 0x000000FF);
	wake_up_interruptible(&ctrl->mcc_wait[tag]);
	return 0;
}

static struct be_mcc_compl *be_mcc_compl_get(struct beiscsi_hba *phba)
{
	struct be_queue_info *mcc_cq = &phba->ctrl.mcc_obj.cq;
	struct be_mcc_compl *compl = queue_tail_node(mcc_cq);

	if (be_mcc_compl_is_new(compl)) {
		queue_tail_inc(mcc_cq);
		return compl;
	}
	return NULL;
}

static void be2iscsi_fail_session(struct iscsi_cls_session *cls_session)
{
	iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
}
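
/*
 * beiscsi_async_link_state_process - handle an async link-state event.
 *
 * On link down, mark the adapter state and fail every active iSCSI
 * session so the transport layer can recover them; on link up, simply
 * mark the adapter up.
 */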
void beiscsi_async_link_state_process(struct beiscsi_hba *phba,
				      struct be_async_event_link_state *evt)
{
	switch (evt->port_link_status) {
	case ASYNC_EVENT_LINK_DOWN:
		SE_DEBUG(DBG_LVL_1, "Link Down on Physical Port %d\n",
			 evt->physical_port);
		phba->state |= BE_ADAPTER_LINK_DOWN;
		iscsi_host_for_each_session(phba->shost,
					    be2iscsi_fail_session);
		break;
	case ASYNC_EVENT_LINK_UP:
		phba->state = BE_ADAPTER_UP;
		SE_DEBUG(DBG_LVL_1, "Link UP on Physical Port %d\n",
			 evt->physical_port);
		break;
	default:
		SE_DEBUG(DBG_LVL_1, "Unexpected Async Notification %d on "
			 "Physical Port %d\n",
			 evt->port_link_status,
			 evt->physical_port);
	}
}

static void beiscsi_cq_notify(struct beiscsi_hba *phba, u16 qid, bool arm,
			      u16 num_popped)
{
	u32 val = 0;

	val |= qid & DB_CQ_RING_ID_MASK;
	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, phba->db_va + DB_CQ_OFFSET);
}
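
/*
 * beiscsi_process_mcc - drain the MCC completion queue.
 *
 * Walks every new CQE under mcc_cq_lock: async entries are decoded as
 * events, command completions are processed and accounted, and the CQ
 * doorbell is finally rung (re-armed) with the number of popped entries.
 */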
int beiscsi_process_mcc(struct beiscsi_hba *phba)
{
	struct be_mcc_compl *compl;
	int num = 0, status = 0;
	struct be_ctrl_info *ctrl = &phba->ctrl;

	spin_lock_bh(&phba->ctrl.mcc_cq_lock);
	while ((compl = be_mcc_compl_get(phba))) {
		if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
			/* Interpret flags as an async trailer */
			if (is_link_state_evt(compl->flags))
				/* Interpret compl as an async link evt */
				beiscsi_async_link_state_process(phba,
				   (struct be_async_event_link_state *) compl);
			else
				SE_DEBUG(DBG_LVL_1,
					 "Unsupported Async Event, flags"
					 " = 0x%08x\n", compl->flags);
		} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
			status = be_mcc_compl_process(ctrl, compl);
			atomic_dec(&phba->ctrl.mcc_obj.q.used);
		}
		be_mcc_compl_use(compl);
		num++;
	}

	if (num)
		beiscsi_cq_notify(phba, phba->ctrl.mcc_obj.cq.id, true, num);

	spin_unlock_bh(&phba->ctrl.mcc_cq_lock);
	return status;
}

/* Wait till no more pending mcc requests are present */
static int be_mcc_wait_compl(struct beiscsi_hba *phba)
{
	int i, status;

	for (i = 0; i < mcc_timeout; i++) {
		status = beiscsi_process_mcc(phba);
		if (status)
			return status;

		if (atomic_read(&phba->ctrl.mcc_obj.q.used) == 0)
			break;
		udelay(100);
	}
	if (i == mcc_timeout) {
		dev_err(&phba->pcidev->dev, "mccq poll timed out\n");
		return -EBUSY;
	}
	return 0;
}

/* Notify MCC requests and wait for completion */
int be_mcc_notify_wait(struct beiscsi_hba *phba)
{
	be_mcc_notify(phba);
	return be_mcc_wait_compl(phba);
}
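
/*
 * be_mbox_db_ready_wait - poll the mailbox doorbell ready bit.
 *
 * Busy-waits in 5 us steps, backing off to 2 ms delays once 50 us have
 * accumulated, and gives up with -EBUSY after roughly 12 s.
 */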
static int be_mbox_db_ready_wait(struct be_ctrl_info *ctrl)
{
#define long_delay 2000
	void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
	int cnt = 0, wait = 5;	/* in usecs */
	u32 ready;

	do {
		ready = ioread32(db) & MPU_MAILBOX_DB_RDY_MASK;
		if (ready)
			break;

		if (cnt > 12000000) {
			dev_err(&ctrl->pdev->dev, "mbox_db poll timed out\n");
			return -EBUSY;
		}

		if (cnt > 50) {
			wait = long_delay;
			mdelay(long_delay / 1000);
		} else
			udelay(wait);
		cnt += wait;
	} while (true);
	return 0;
}
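
/*
 * be_mbox_notify - post the bootstrap mailbox to the firmware.
 *
 * The mailbox DMA address is written to the doorbell in two steps (the
 * upper address bits with the HI flag set, then the lower bits), waiting
 * for the ready bit after each write. The embedded completion entry is
 * then validated and consumed.
 */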
int be_mbox_notify(struct be_ctrl_info *ctrl)
{
	int status;
	u32 val = 0;
	void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
	struct be_dma_mem *mbox_mem = &ctrl->mbox_mem;
	struct be_mcc_mailbox *mbox = mbox_mem->va;
	struct be_mcc_compl *compl = &mbox->compl;

	val &= ~MPU_MAILBOX_DB_RDY_MASK;
	val |= MPU_MAILBOX_DB_HI_MASK;
	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_wait(ctrl);
	if (status != 0) {
		SE_DEBUG(DBG_LVL_1, " be_mbox_db_ready_wait failed\n");
		return status;
	}
	val = 0;
	val &= ~MPU_MAILBOX_DB_RDY_MASK;
	val &= ~MPU_MAILBOX_DB_HI_MASK;
	val |= (u32) (mbox_mem->dma >> 4) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_wait(ctrl);
	if (status != 0) {
		SE_DEBUG(DBG_LVL_1, " be_mbox_db_ready_wait failed\n");
		return status;
	}
	if (be_mcc_compl_is_new(compl)) {
		status = be_mcc_compl_process(ctrl, &mbox->compl);
		be_mcc_compl_use(compl);
		if (status) {
			SE_DEBUG(DBG_LVL_1, "After be_mcc_compl_process\n");
			return status;
		}
	} else {
		dev_err(&ctrl->pdev->dev, "invalid mailbox completion\n");
		return -EBUSY;
	}
	return 0;
}

/*
 * Insert the mailbox address into the doorbell in two steps
 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
 */
static int be_mbox_notify_wait(struct beiscsi_hba *phba)
{
	int status;
	u32 val = 0;
	void __iomem *db = phba->ctrl.db + MPU_MAILBOX_DB_OFFSET;
	struct be_dma_mem *mbox_mem = &phba->ctrl.mbox_mem;
	struct be_mcc_mailbox *mbox = mbox_mem->va;
	struct be_mcc_compl *compl = &mbox->compl;
	struct be_ctrl_info *ctrl = &phba->ctrl;

	val |= MPU_MAILBOX_DB_HI_MASK;
	/* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
	iowrite32(val, db);

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(ctrl);
	if (status != 0)
		return status;

	val = 0;
	/* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
	val |= (u32)(mbox_mem->dma >> 4) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_wait(ctrl);
	if (status != 0)
		return status;

	/* A cq entry has been made now */
	if (be_mcc_compl_is_new(compl)) {
		status = be_mcc_compl_process(ctrl, &mbox->compl);
		be_mcc_compl_use(compl);
		if (status)
			return status;
	} else {
		dev_err(&phba->pcidev->dev, "invalid mailbox completion\n");
		return -EBUSY;
	}
	return 0;
}
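
/*
 * be_wrb_hdr_prepare - fill the common WRB header.
 *
 * Marks the WRB either as carrying an embedded payload or as referencing
 * sge_cnt external scatter-gather entries, then converts the first two
 * words to little-endian for the hardware.
 */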
void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
			bool embedded, u8 sge_cnt)
{
	if (embedded)
		wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
	else
		wrb->embedded |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
				 MCC_WRB_SGE_CNT_SHIFT;
	wrb->payload_length = payload_len;
	be_dws_cpu_to_le(wrb, 8);
}

void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
			u8 subsystem, u8 opcode, int cmd_len)
{
	req_hdr->opcode = opcode;
	req_hdr->subsystem = subsystem;
	req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
	req_hdr->timeout = 120;
}

static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
				      struct be_dma_mem *mem)
{
	int i, buf_pages;
	u64 dma = (u64) mem->dma;

	buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
	for (i = 0; i < buf_pages; i++) {
		pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
		pages[i].hi = cpu_to_le32(upper_32_bits(dma));
		dma += PAGE_SIZE_4K;
	}
}
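
/*
 * eq_delay_to_mult - convert an EQ delay in usecs to a delay multiplier.
 *
 * The hardware takes a 0-1023 multiplier derived from the target
 * interrupt rate: multiplier = round((MAX_INTR_RATE - rate) / rate),
 * where rate = 1000000 / usec_delay. A zero delay disables moderation.
 */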
static u32 eq_delay_to_mult(u32 usec_delay)
{
#define MAX_INTR_RATE 651042
	const u32 round = 10;
	u32 multiplier;

	if (usec_delay == 0)
		multiplier = 0;
	else {
		u32 interrupt_rate = 1000000 / usec_delay;

		if (interrupt_rate == 0)
			multiplier = 1023;
		else {
			multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
			multiplier /= interrupt_rate;
			multiplier = (multiplier + round / 2) / round;
			multiplier = min(multiplier, (u32) 1023);
		}
	}
	return multiplier;
}

struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem)
{
	return &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
}
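
/*
 * wrb_from_mccq - take the next free WRB from the MCC queue.
 *
 * The caller must guarantee space in the ring (BUG_ON otherwise). The
 * queue head index is stashed in bits 23:16 of tag0 so that
 * be_mcc_compl_process_isr() can recover which WRB a response belongs to.
 */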
struct be_mcc_wrb *wrb_from_mccq(struct beiscsi_hba *phba)
{
	struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
	struct be_mcc_wrb *wrb;

	BUG_ON(atomic_read(&mccq->used) >= mccq->len);
	wrb = queue_head_node(mccq);
	memset(wrb, 0, sizeof(*wrb));
	wrb->tag0 = (mccq->head & 0x000000FF) << 16;
	queue_head_inc(mccq);
	atomic_inc(&mccq->used);
	return wrb;
}
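
/*
 * beiscsi_cmd_eq_create - create an event queue via the bootstrap mailbox.
 *
 * Builds an embedded COMMON_EQ_CREATE request (queue size, PCI function,
 * delay multiplier, page addresses) and, on success, records the EQ id
 * returned by the firmware.
 */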
int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl,
			  struct be_queue_info *eq, int eq_delay)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_eq_create *req = embedded_payload(wrb);
	struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
	struct be_dma_mem *q_mem = &eq->dma_mem;
	int status;

	SE_DEBUG(DBG_LVL_8, "In beiscsi_cmd_eq_create\n");
	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			   OPCODE_COMMON_EQ_CREATE, sizeof(*req));

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_eq_context, func, req->context,
		      PCI_FUNC(ctrl->pdev->devfn));
	AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
	AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
	AMAP_SET_BITS(struct amap_eq_context, count, req->context,
		      __ilog2_u32(eq->len / 256));
	AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
		      eq_delay_to_mult(eq_delay));
	be_dws_cpu_to_le(req->context, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify(ctrl);
	if (!status) {
		eq->id = le16_to_cpu(resp->eq_id);
		eq->created = true;
	}
	spin_unlock(&ctrl->mbox_lock);
	return status;
}
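
/*
 * be_cmd_fw_initialize - send the firmware initialization pattern.
 *
 * The first mailbox command is a fixed byte pattern (0xFF 0x12 0x34 ...)
 * that, going by the endian_check naming below, lets the firmware verify
 * the host's endianness before any real commands are issued.
 */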
int be_cmd_fw_initialize(struct be_ctrl_info *ctrl)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	int status;
	u8 *endian_check;

	SE_DEBUG(DBG_LVL_8, "In be_cmd_fw_initialize\n");
	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	endian_check = (u8 *) wrb;
	*endian_check++ = 0xFF;
	*endian_check++ = 0x12;
	*endian_check++ = 0x34;
	*endian_check++ = 0xFF;
	*endian_check++ = 0xFF;
	*endian_check++ = 0x56;
	*endian_check++ = 0x78;
	*endian_check++ = 0xFF;
	be_dws_cpu_to_le(wrb, sizeof(*wrb));

	status = be_mbox_notify(ctrl);
	if (status)
		SE_DEBUG(DBG_LVL_1, "be_cmd_fw_initialize Failed\n");

	spin_unlock(&ctrl->mbox_lock);
	return status;
}
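
/*
 * beiscsi_cmd_cq_create - create a completion queue bound to an EQ.
 *
 * Fills the CQ context (coalescing watermark, no-delay flag, encoded
 * length, owning EQ) and arms the CQ at creation; the firmware-assigned
 * CQ id is saved on success.
 */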
int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
			  struct be_queue_info *cq, struct be_queue_info *eq,
			  bool sol_evts, bool no_delay, int coalesce_wm)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_cq_create *req = embedded_payload(wrb);
	struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
	struct be_dma_mem *q_mem = &cq->dma_mem;
	void *ctxt = &req->context;
	int status;

	SE_DEBUG(DBG_LVL_8, "In beiscsi_cmd_cq_create\n");
	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			   OPCODE_COMMON_CQ_CREATE, sizeof(*req));
	if (!q_mem->va)
		SE_DEBUG(DBG_LVL_1, "uninitialized q_mem->va\n");

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_cq_context, coalescwm, ctxt, coalesce_wm);
	AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
	AMAP_SET_BITS(struct amap_cq_context, count, ctxt,
		      __ilog2_u32(cq->len / 256));
	AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
	AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
	AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
	AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
	AMAP_SET_BITS(struct amap_cq_context, func, ctxt,
		      PCI_FUNC(ctrl->pdev->devfn));
	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify(ctrl);
	if (!status) {
		cq->id = le16_to_cpu(resp->cq_id);
		cq->created = true;
	} else
		SE_DEBUG(DBG_LVL_1, "In be_cmd_cq_create, status=0x%08x\n",
			 status);
	spin_unlock(&ctrl->mbox_lock);
	return status;
}
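
/*
 * be_encoded_q_len - encode a queue length for the ring context.
 *
 * The ring size field is log2(len) + 1; the maximum encoding (16) wraps
 * to 0, which appears to be the hardware's code for the largest size.
 */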
static u32 be_encoded_q_len(int q_len)
{
	u32 len_encoded = fls(q_len);	/* log2(len) + 1 */

	if (len_encoded == 16)
		len_encoded = 0;
	return len_encoded;
}
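
/*
 * beiscsi_cmd_mccq_create - create the MCC queue.
 *
 * Programs the MCC ring context (PCI function id, encoded ring size and
 * the CQ that receives MCC completions) through the bootstrap mailbox.
 */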
int beiscsi_cmd_mccq_create(struct beiscsi_hba *phba,
			    struct be_queue_info *mccq,
			    struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcc_create *req;
	struct be_dma_mem *q_mem = &mccq->dma_mem;
	struct be_ctrl_info *ctrl;
	void *ctxt;
	int status;

	spin_lock(&phba->ctrl.mbox_lock);
	ctrl = &phba->ctrl;
	wrb = wrb_from_mbox(&ctrl->mbox_mem);
	memset(wrb, 0, sizeof(*wrb));
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			   OPCODE_COMMON_MCC_CREATE, sizeof(*req));

	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);

	AMAP_SET_BITS(struct amap_mcc_context, fid, ctxt,
		      PCI_FUNC(phba->pcidev->devfn));
	AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt,
		      be_encoded_q_len(mccq->len));
	AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(phba);
	if (!status) {
		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);

		mccq->id = le16_to_cpu(resp->id);
		mccq->created = true;
	}
	spin_unlock(&phba->ctrl.mbox_lock);
	return status;
}
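
/*
 * beiscsi_cmd_q_destroy - tear down a queue of any supported type.
 *
 * Maps the queue type to the matching subsystem/opcode pair; SGL pages
 * have no ring id, so req->id is only filled for real rings. An unknown
 * type is a driver bug (BUG()).
 */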
int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
			  int queue_type)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_q_destroy *req = embedded_payload(wrb);
	u8 subsys = 0, opcode = 0;
	int status;

	SE_DEBUG(DBG_LVL_8, "In beiscsi_cmd_q_destroy\n");
	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));
	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	switch (queue_type) {
	case QTYPE_EQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_EQ_DESTROY;
		break;
	case QTYPE_CQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_CQ_DESTROY;
		break;
	case QTYPE_MCCQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_MCC_DESTROY;
		break;
	case QTYPE_WRBQ:
		subsys = CMD_SUBSYSTEM_ISCSI;
		opcode = OPCODE_COMMON_ISCSI_WRBQ_DESTROY;
		break;
	case QTYPE_DPDUQ:
		subsys = CMD_SUBSYSTEM_ISCSI;
		opcode = OPCODE_COMMON_ISCSI_DEFQ_DESTROY;
		break;
	case QTYPE_SGL:
		subsys = CMD_SUBSYSTEM_ISCSI;
		opcode = OPCODE_COMMON_ISCSI_CFG_REMOVE_SGL_PAGES;
		break;
	default:
		spin_unlock(&ctrl->mbox_lock);
		BUG();
		return -ENXIO;
	}
	be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
	if (queue_type != QTYPE_SGL)
		req->id = cpu_to_le16(q->id);

	status = be_mbox_notify(ctrl);

	spin_unlock(&ctrl->mbox_lock);
	return status;
}
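
/*
 * be_cmd_create_default_pdu_queue - create the default PDU ring.
 *
 * Issues an ISCSI_DEFQ_CREATE with the encoded ring size, default buffer
 * entry size and the CQ that should receive its completions; the
 * firmware-assigned id is stored in dq on success.
 */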
int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
				    struct be_queue_info *cq,
				    struct be_queue_info *dq, int length,
				    int entry_size)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_defq_create_req *req = embedded_payload(wrb);
	struct be_dma_mem *q_mem = &dq->dma_mem;
	void *ctxt = &req->context;
	int status;

	SE_DEBUG(DBG_LVL_8, "In be_cmd_create_default_pdu_queue\n");
	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
			   OPCODE_COMMON_ISCSI_DEFQ_CREATE, sizeof(*req));

	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
	AMAP_SET_BITS(struct amap_be_default_pdu_context, rx_pdid, ctxt, 0);
	AMAP_SET_BITS(struct amap_be_default_pdu_context, rx_pdid_valid, ctxt,
		      1);
	AMAP_SET_BITS(struct amap_be_default_pdu_context, pci_func_id, ctxt,
		      PCI_FUNC(ctrl->pdev->devfn));
	AMAP_SET_BITS(struct amap_be_default_pdu_context, ring_size, ctxt,
		      be_encoded_q_len(length / sizeof(struct phys_addr)));
	AMAP_SET_BITS(struct amap_be_default_pdu_context, default_buffer_size,
		      ctxt, entry_size);
	AMAP_SET_BITS(struct amap_be_default_pdu_context, cq_id_recv, ctxt,
		      cq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify(ctrl);
	if (!status) {
		struct be_defq_create_resp *resp = embedded_payload(wrb);

		dq->id = le16_to_cpu(resp->id);
		dq->created = true;
	}
	spin_unlock(&ctrl->mbox_lock);
	return status;
}
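
/*
 * be_cmd_wrbq_create - create a WRB queue.
 *
 * The firmware response carries the connection id (cid), which is stored
 * as the queue id.
 */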
int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem,
		       struct be_queue_info *wrbq)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_wrbq_create_req *req = embedded_payload(wrb);
	struct be_wrbq_create_resp *resp = embedded_payload(wrb);
	int status;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
			   OPCODE_COMMON_ISCSI_WRBQ_CREATE, sizeof(*req));
	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify(ctrl);
	if (!status) {
		wrbq->id = le16_to_cpu(resp->cid);
		wrbq->created = true;
	}
	spin_unlock(&ctrl->mbox_lock);
	return status;
}
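
/*
 * be_cmd_iscsi_post_sgl_pages - post SGL page addresses to the firmware.
 *
 * Large regions are posted in chunks, each limited to the number of
 * phys_addr slots in one request; the page offset and remaining count
 * advance between iterations. A num_pages value of 0xff appears to be a
 * special marker that is preserved in each chunked request. On failure,
 * the partially posted SGL pages are removed again.
 */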
int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl,
				struct be_dma_mem *q_mem,
				u32 page_offset, u32 num_pages)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_post_sgl_pages_req *req = embedded_payload(wrb);
	int status;
	unsigned int curr_pages;
	u32 internal_page_offset = 0;
	u32 temp_num_pages = num_pages;

	if (num_pages == 0xff)
		num_pages = 1;

	spin_lock(&ctrl->mbox_lock);
	do {
		memset(wrb, 0, sizeof(*wrb));
		be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
		be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
				   OPCODE_COMMON_ISCSI_CFG_POST_SGL_PAGES,
				   sizeof(*req));
		curr_pages = BE_NUMBER_OF_FIELD(struct be_post_sgl_pages_req,
						pages);
		req->num_pages = min(num_pages, curr_pages);
		req->page_offset = page_offset;
		be_cmd_page_addrs_prepare(req->pages, req->num_pages, q_mem);
		q_mem->dma = q_mem->dma + (req->num_pages * PAGE_SIZE);
		internal_page_offset += req->num_pages;
		page_offset += req->num_pages;
		num_pages -= req->num_pages;

		if (temp_num_pages == 0xff)
			req->num_pages = temp_num_pages;

		status = be_mbox_notify(ctrl);
		if (status) {
			SE_DEBUG(DBG_LVL_1,
				 "FW CMD to map iscsi frags failed.\n");
			goto error;
		}
	} while (num_pages > 0);
error:
	spin_unlock(&ctrl->mbox_lock);
	if (status != 0)
		beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
	return status;
}
int beiscsi_cmd_reset_function(struct beiscsi_hba *phba)
{
	struct be_ctrl_info *ctrl = &phba->ctrl;
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_post_sgl_pages_req *req = embedded_payload(wrb);
	int status;

	spin_lock(&ctrl->mbox_lock);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			   OPCODE_COMMON_FUNCTION_RESET, sizeof(*req));
	status = be_mbox_notify_wait(phba);

	spin_unlock(&ctrl->mbox_lock);
	return status;
}