bfa_ioc_ct.c

/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include "bfad_drv.h"
#include "bfa_ioc.h"
#include "bfi_reg.h"
#include "bfa_defs.h"

BFA_TRC_FILE(CNA, IOC_CT);

#define bfa_ioc_ct_sync_pos(__ioc) \
                ((uint32_t) (1 << bfa_ioc_pcifn(__ioc)))
#define BFA_IOC_SYNC_REQD_SH            16
#define bfa_ioc_ct_get_sync_ackd(__val)   (__val & 0x0000ffff)
#define bfa_ioc_ct_clear_sync_ackd(__val) (__val & 0xffff0000)
#define bfa_ioc_ct_get_sync_reqd(__val)   (__val >> BFA_IOC_SYNC_REQD_SH)
#define bfa_ioc_ct_sync_reqd_pos(__ioc) \
                (bfa_ioc_ct_sync_pos(__ioc) << BFA_IOC_SYNC_REQD_SH)

/*
 * forward declarations
 */
static bfa_boolean_t bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_notify_fail(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc);
static bfa_boolean_t bfa_ioc_ct_sync_start(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_sync_join(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_sync_leave(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_sync_ack(struct bfa_ioc_s *ioc);
static bfa_boolean_t bfa_ioc_ct_sync_complete(struct bfa_ioc_s *ioc);

static struct bfa_ioc_hwif_s hwif_ct;
static struct bfa_ioc_hwif_s hwif_ct2;

/*
 * Return true if firmware of current driver matches the running firmware.
 */
static bfa_boolean_t
bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc)
{
        enum bfi_ioc_state ioc_fwstate;
        u32 usecnt;
        struct bfi_ioc_image_hdr_s fwhdr;

        /*
         * If bios boot (flash based) -- do not increment usage count
         */
        if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) <
                                                BFA_IOC_FWIMG_MINSZ)
                return BFA_TRUE;

        bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
        usecnt = readl(ioc->ioc_regs.ioc_usage_reg);

        /*
         * If usage count is 0, always return TRUE.
         */
        if (usecnt == 0) {
                writel(1, ioc->ioc_regs.ioc_usage_reg);
                readl(ioc->ioc_regs.ioc_usage_sem_reg);
                writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
                writel(0, ioc->ioc_regs.ioc_fail_sync);
                bfa_trc(ioc, usecnt);
                return BFA_TRUE;
        }

        ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
        bfa_trc(ioc, ioc_fwstate);

        /*
         * The use count cannot be non-zero while the chip is in the
         * uninitialized state.
         */
        WARN_ON(ioc_fwstate == BFI_IOC_UNINIT);

        /*
         * Check if another driver with a different firmware is active
         */
        bfa_ioc_fwver_get(ioc, &fwhdr);
        if (!bfa_ioc_fwver_cmp(ioc, &fwhdr)) {
                readl(ioc->ioc_regs.ioc_usage_sem_reg);
                writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
                bfa_trc(ioc, usecnt);
                return BFA_FALSE;
        }

        /*
         * Same firmware version. Increment the reference count.
         */
        usecnt++;
        writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
        readl(ioc->ioc_regs.ioc_usage_sem_reg);
        writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
        bfa_trc(ioc, usecnt);
        return BFA_TRUE;
}
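
/*
 * Decrement the shared firmware usage count under the usage semaphore,
 * releasing this function's reference on the running firmware.
 */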
static void
bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc)
{
        u32 usecnt;

        /*
         * If bios boot (flash based) -- do not decrement usage count
         */
        if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) <
                                                BFA_IOC_FWIMG_MINSZ)
                return;

        /*
         * decrement usage count
         */
        bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
        usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
        WARN_ON(usecnt == 0);

        usecnt--;
        writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
        bfa_trc(ioc, usecnt);

        readl(ioc->ioc_regs.ioc_usage_sem_reg);
        writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
}

/*
 * Notify other functions on HB failure.
 */
static void
bfa_ioc_ct_notify_fail(struct bfa_ioc_s *ioc)
{
        if (bfa_ioc_is_cna(ioc)) {
                writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt);
                writel(__FW_INIT_HALT_P, ioc->ioc_regs.alt_ll_halt);
                /* Wait for halt to take effect */
                readl(ioc->ioc_regs.ll_halt);
                readl(ioc->ioc_regs.alt_ll_halt);
        } else {
                writel(~0U, ioc->ioc_regs.err_set);
                readl(ioc->ioc_regs.err_set);
        }
}

/*
 * Host to LPU mailbox message addresses
 */
static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } ct_fnreg[] = {
        { HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 },
        { HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 },
        { HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2 },
        { HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 }
};

/*
 * Host <-> LPU mailbox command/status registers - port 0
 */
static struct { u32 hfn, lpu; } ct_p0reg[] = {
        { HOSTFN0_LPU0_CMD_STAT, LPU0_HOSTFN0_CMD_STAT },
        { HOSTFN1_LPU0_CMD_STAT, LPU0_HOSTFN1_CMD_STAT },
        { HOSTFN2_LPU0_CMD_STAT, LPU0_HOSTFN2_CMD_STAT },
        { HOSTFN3_LPU0_CMD_STAT, LPU0_HOSTFN3_CMD_STAT }
};

/*
 * Host <-> LPU mailbox command/status registers - port 1
 */
static struct { u32 hfn, lpu; } ct_p1reg[] = {
        { HOSTFN0_LPU1_CMD_STAT, LPU1_HOSTFN0_CMD_STAT },
        { HOSTFN1_LPU1_CMD_STAT, LPU1_HOSTFN1_CMD_STAT },
        { HOSTFN2_LPU1_CMD_STAT, LPU1_HOSTFN2_CMD_STAT },
        { HOSTFN3_LPU1_CMD_STAT, LPU1_HOSTFN3_CMD_STAT }
};

static struct { uint32_t hfn_mbox, lpu_mbox, hfn_pgn, hfn, lpu, lpu_read; }
        ct2_reg[] = {
        { CT2_HOSTFN_LPU0_MBOX0, CT2_LPU0_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM,
          CT2_HOSTFN_LPU0_CMD_STAT, CT2_LPU0_HOSTFN_CMD_STAT,
          CT2_HOSTFN_LPU0_READ_STAT},
        { CT2_HOSTFN_LPU1_MBOX0, CT2_LPU1_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM,
          CT2_HOSTFN_LPU1_CMD_STAT, CT2_LPU1_HOSTFN_CMD_STAT,
          CT2_HOSTFN_LPU1_READ_STAT},
};
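
/*
 * Initialize the CT register address map for this PCI function,
 * based on the port it is mapped to.
 */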
static void
bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc)
{
        void __iomem *rb;
        int pcifn = bfa_ioc_pcifn(ioc);

        rb = bfa_ioc_bar0(ioc);

        ioc->ioc_regs.hfn_mbox = rb + ct_fnreg[pcifn].hfn_mbox;
        ioc->ioc_regs.lpu_mbox = rb + ct_fnreg[pcifn].lpu_mbox;
        ioc->ioc_regs.host_page_num_fn = rb + ct_fnreg[pcifn].hfn_pgn;

        if (ioc->port_id == 0) {
                ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
                ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
                ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG;
                ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p0reg[pcifn].hfn;
                ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p0reg[pcifn].lpu;
                ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
                ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
        } else {
                ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG);
                ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
                ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC0_STATE_REG;
                ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p1reg[pcifn].hfn;
                ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p1reg[pcifn].lpu;
                ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
                ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
        }

        /*
         * PSS control registers
         */
        ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
        ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG);
        ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_LCLK_CTL_REG);
        ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_SCLK_CTL_REG);

        /*
         * IOC semaphore registers and serialization
         */
        ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG);
        ioc->ioc_regs.ioc_usage_sem_reg = (rb + HOST_SEM1_REG);
        ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG);
        ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT);
        ioc->ioc_regs.ioc_fail_sync = (rb + BFA_IOC_FAIL_SYNC);

        /*
         * sram memory access
         */
        ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
        ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;

        /*
         * err set reg : for notification of hb failure in fcmode
         */
        ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
}
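
/*
 * Initialize the CT2 register address map for this PCI function.
 */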
static void
bfa_ioc_ct2_reg_init(struct bfa_ioc_s *ioc)
{
        void __iomem *rb;
        int port = bfa_ioc_portid(ioc);

        rb = bfa_ioc_bar0(ioc);

        ioc->ioc_regs.hfn_mbox = rb + ct2_reg[port].hfn_mbox;
        ioc->ioc_regs.lpu_mbox = rb + ct2_reg[port].lpu_mbox;
        ioc->ioc_regs.host_page_num_fn = rb + ct2_reg[port].hfn_pgn;
        ioc->ioc_regs.hfn_mbox_cmd = rb + ct2_reg[port].hfn;
        ioc->ioc_regs.lpu_mbox_cmd = rb + ct2_reg[port].lpu;
        ioc->ioc_regs.lpu_read_stat = rb + ct2_reg[port].lpu_read;

        if (port == 0) {
                ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC0_HBEAT_REG;
                ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG;
                ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG;
                ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
                ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
        } else {
                ioc->ioc_regs.heartbeat = (rb + CT2_BFA_IOC1_HBEAT_REG);
                ioc->ioc_regs.ioc_fwstate = (rb + CT2_BFA_IOC1_STATE_REG);
                ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG;
                ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
                ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
        }

        /*
         * PSS control registers
         */
        ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
        ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG);
        ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + CT2_APP_PLL_LCLK_CTL_REG);
        ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + CT2_APP_PLL_SCLK_CTL_REG);

        /*
         * IOC semaphore registers and serialization
         */
        ioc->ioc_regs.ioc_sem_reg = (rb + CT2_HOST_SEM0_REG);
        ioc->ioc_regs.ioc_usage_sem_reg = (rb + CT2_HOST_SEM1_REG);
        ioc->ioc_regs.ioc_init_sem_reg = (rb + CT2_HOST_SEM2_REG);
        ioc->ioc_regs.ioc_usage_reg = (rb + CT2_BFA_FW_USE_COUNT);
        ioc->ioc_regs.ioc_fail_sync = (rb + CT2_BFA_IOC_FAIL_SYNC);

        /*
         * sram memory access
         */
        ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
        ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;

        /*
         * err set reg : for notification of hb failure in fcmode
         */
        ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
}

/*
 * Initialize IOC to port mapping.
 */
#define FNC_PERS_FN_SHIFT(__fn) ((__fn) * 8)
static void
bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc)
{
        void __iomem *rb = ioc->pcidev.pci_bar_kva;
        u32 r32;

        /*
         * For catapult, base port id on personality register and IOC type
         */
        r32 = readl(rb + FNC_PERS_REG);
        r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc));
        ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH;

        bfa_trc(ioc, bfa_ioc_pcifn(ioc));
        bfa_trc(ioc, ioc->port_id);
}
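
/*
 * For catapult-2, map the port id from the personality register.
 */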
static void
bfa_ioc_ct2_map_port(struct bfa_ioc_s *ioc)
{
        void __iomem *rb = ioc->pcidev.pci_bar_kva;
        u32 r32;

        r32 = readl(rb + CT2_HOSTFN_PERSONALITY0);
        ioc->port_id = ((r32 & __FC_LL_PORT_MAP__MK) >> __FC_LL_PORT_MAP__SH);

        bfa_trc(ioc, bfa_ioc_pcifn(ioc));
        bfa_trc(ioc, ioc->port_id);
}

/*
 * Set interrupt mode for a function: INTX or MSIX
 */
static void
bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
{
        void __iomem *rb = ioc->pcidev.pci_bar_kva;
        u32 r32, mode;

        r32 = readl(rb + FNC_PERS_REG);
        bfa_trc(ioc, r32);

        mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) &
                __F0_INTX_STATUS;

        /*
         * If already in desired mode, do not change anything
         */
        if ((!msix && mode) || (msix && !mode))
                return;

        if (msix)
                mode = __F0_INTX_STATUS_MSIX;
        else
                mode = __F0_INTX_STATUS_INTA;

        r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
        r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
        bfa_trc(ioc, r32);

        writel(r32, rb + FNC_PERS_REG);
}
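
/*
 * Report the LPU read status for this function; clear it if set.
 */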
bfa_boolean_t
bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc_s *ioc)
{
        u32 r32;

        r32 = readl(ioc->ioc_regs.lpu_read_stat);
        if (r32) {
                writel(1, ioc->ioc_regs.lpu_read_stat);
                return BFA_TRUE;
        }

        return BFA_FALSE;
}

/*
 * Cleanup hw semaphore and usecnt registers
 */
static void
bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc)
{
        if (bfa_ioc_is_cna(ioc)) {
                bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
                writel(0, ioc->ioc_regs.ioc_usage_reg);
                readl(ioc->ioc_regs.ioc_usage_sem_reg);
                writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
        }

        /*
         * Read the hw sem reg to make sure that it is locked
         * before we clear it. If it is not locked, writing 1
         * will lock it instead of clearing it.
         */
        readl(ioc->ioc_regs.ioc_sem_reg);
        writel(1, ioc->ioc_regs.ioc_sem_reg);
}
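
/*
 * Driver load time handling for the failure sync protocol.
 */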
static bfa_boolean_t
bfa_ioc_ct_sync_start(struct bfa_ioc_s *ioc)
{
        uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
        uint32_t sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);

        /*
         * Driver load time. If the sync required bit for this PCI fn
         * is set, it is due to an unclean exit by the driver for this
         * PCI fn in the previous incarnation. Whoever comes here first
         * should clean it up, no matter which PCI fn.
         */
        if (sync_reqd & bfa_ioc_ct_sync_pos(ioc)) {
                writel(0, ioc->ioc_regs.ioc_fail_sync);
                writel(1, ioc->ioc_regs.ioc_usage_reg);
                writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
                writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
                return BFA_TRUE;
        }

        return bfa_ioc_ct_sync_complete(ioc);
}

/*
 * Synchronized IOC failure processing routines
 */
static void
bfa_ioc_ct_sync_join(struct bfa_ioc_s *ioc)
{
        uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
        uint32_t sync_pos = bfa_ioc_ct_sync_reqd_pos(ioc);

        writel((r32 | sync_pos), ioc->ioc_regs.ioc_fail_sync);
}

static void
bfa_ioc_ct_sync_leave(struct bfa_ioc_s *ioc)
{
        uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
        uint32_t sync_msk = bfa_ioc_ct_sync_reqd_pos(ioc) |
                        bfa_ioc_ct_sync_pos(ioc);

        writel((r32 & ~sync_msk), ioc->ioc_regs.ioc_fail_sync);
}

static void
bfa_ioc_ct_sync_ack(struct bfa_ioc_s *ioc)
{
        uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);

        writel((r32 | bfa_ioc_ct_sync_pos(ioc)),
                ioc->ioc_regs.ioc_fail_sync);
}

static bfa_boolean_t
bfa_ioc_ct_sync_complete(struct bfa_ioc_s *ioc)
{
        uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
        uint32_t sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);
        uint32_t sync_ackd = bfa_ioc_ct_get_sync_ackd(r32);
        uint32_t tmp_ackd;

        if (sync_ackd == 0)
                return BFA_TRUE;

        /*
         * The check below is to see whether any other PCI fn
         * has reinitialized the ASIC (reset sync_ackd bits)
         * and failed again while this IOC was waiting for hw
         * semaphore (in bfa_iocpf_sm_semwait()).
         */
        tmp_ackd = sync_ackd;
        if ((sync_reqd & bfa_ioc_ct_sync_pos(ioc)) &&
                        !(sync_ackd & bfa_ioc_ct_sync_pos(ioc)))
                sync_ackd |= bfa_ioc_ct_sync_pos(ioc);

        if (sync_reqd == sync_ackd) {
                writel(bfa_ioc_ct_clear_sync_ackd(r32),
                        ioc->ioc_regs.ioc_fail_sync);
                writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
                writel(BFI_IOC_FAIL, ioc->ioc_regs.alt_ioc_fwstate);
                return BFA_TRUE;
        }

        /*
         * If another PCI fn reinitialized and failed again while
         * this IOC was waiting for hw sem, the sync_ackd bit for
         * this IOC needs to be set again to allow reinitialization.
         */
        if (tmp_ackd != sync_ackd)
                writel((r32 | sync_ackd), ioc->ioc_regs.ioc_fail_sync);

        return BFA_FALSE;
}

/*
 * Called from bfa_ioc_attach() to map asic specific calls.
 */
static void
bfa_ioc_set_ctx_hwif(struct bfa_ioc_s *ioc, struct bfa_ioc_hwif_s *hwif)
{
        hwif->ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
        hwif->ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
        hwif->ioc_notify_fail = bfa_ioc_ct_notify_fail;
        hwif->ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
        hwif->ioc_sync_start = bfa_ioc_ct_sync_start;
        hwif->ioc_sync_join = bfa_ioc_ct_sync_join;
        hwif->ioc_sync_leave = bfa_ioc_ct_sync_leave;
        hwif->ioc_sync_ack = bfa_ioc_ct_sync_ack;
        hwif->ioc_sync_complete = bfa_ioc_ct_sync_complete;
}

/*
 * Called from bfa_ioc_attach() to map asic specific calls.
 */
void
bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc)
{
        bfa_ioc_set_ctx_hwif(ioc, &hwif_ct);

        hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
        hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
        hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
        hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
        ioc->ioc_hwif = &hwif_ct;
}

/*
 * Called from bfa_ioc_attach() to map asic specific calls.
 */
void
bfa_ioc_set_ct2_hwif(struct bfa_ioc_s *ioc)
{
        bfa_ioc_set_ctx_hwif(ioc, &hwif_ct2);

        hwif_ct2.ioc_pll_init = bfa_ioc_ct2_pll_init;
        hwif_ct2.ioc_reg_init = bfa_ioc_ct2_reg_init;
        hwif_ct2.ioc_map_port = bfa_ioc_ct2_map_port;
        hwif_ct2.ioc_lpu_read_stat = bfa_ioc_ct2_lpu_read_stat;
        hwif_ct2.ioc_isr_mode_set = NULL;
        ioc->ioc_hwif = &hwif_ct2;
}

/*
 * Workaround for MSI-X resource allocation for catapult-2 with no asic block
 */
#define HOSTFN_MSIX_DEFAULT             64
#define HOSTFN_MSIX_VT_INDEX_MBOX_ERR   0x30138
#define HOSTFN_MSIX_VT_OFST_NUMVT       0x3013c
#define __MSIX_VT_NUMVT__MK             0x003ff800
#define __MSIX_VT_NUMVT__SH             11
#define __MSIX_VT_NUMVT_(_v)            ((_v) << __MSIX_VT_NUMVT__SH)
#define __MSIX_VT_OFST_                 0x000007ff
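
/*
 * Program a default per-function MSI-X vector range if the asic block
 * has not already configured one.
 */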
void
bfa_ioc_ct2_poweron(struct bfa_ioc_s *ioc)
{
        void __iomem *rb = ioc->pcidev.pci_bar_kva;
        u32 r32;

        r32 = readl(rb + HOSTFN_MSIX_VT_OFST_NUMVT);
        if (r32 & __MSIX_VT_NUMVT__MK) {
                writel(r32 & __MSIX_VT_OFST_,
                        rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
                return;
        }

        writel(__MSIX_VT_NUMVT_(HOSTFN_MSIX_DEFAULT - 1) |
                HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc),
                rb + HOSTFN_MSIX_VT_OFST_NUMVT);
        writel(HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc),
                rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
}
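
/*
 * CT PLL init: program the operating mode, mask and clear interrupts,
 * bring the s_clk/l_clk PLLs and LMEM out of reset, and run the memory BIST.
 */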
bfa_status_t
bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode mode)
{
        u32 pll_sclk, pll_fclk, r32;
        bfa_boolean_t fcmode = (mode == BFI_ASIC_MODE_FC);

        pll_sclk = __APP_PLL_SCLK_LRESETN | __APP_PLL_SCLK_ENARST |
                __APP_PLL_SCLK_RSEL200500 | __APP_PLL_SCLK_P0_1(3U) |
                __APP_PLL_SCLK_JITLMT0_1(3U) |
                __APP_PLL_SCLK_CNTLMT0_1(1U);
        pll_fclk = __APP_PLL_LCLK_LRESETN | __APP_PLL_LCLK_ENARST |
                __APP_PLL_LCLK_RSEL200500 | __APP_PLL_LCLK_P0_1(3U) |
                __APP_PLL_LCLK_JITLMT0_1(3U) |
                __APP_PLL_LCLK_CNTLMT0_1(1U);

        if (fcmode) {
                writel(0, (rb + OP_MODE));
                writel(__APP_EMS_CMLCKSEL | __APP_EMS_REFCKBUFEN2 |
                        __APP_EMS_CHANNEL_SEL, (rb + ETH_MAC_SER_REG));
        } else {
                writel(__GLOBAL_FCOE_MODE, (rb + OP_MODE));
                writel(__APP_EMS_REFCKBUFEN1, (rb + ETH_MAC_SER_REG));
        }
        writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG));
        writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG));
        writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
        writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
        writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
        writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
        writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
        writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
        writel(pll_sclk | __APP_PLL_SCLK_LOGIC_SOFT_RESET,
                rb + APP_PLL_SCLK_CTL_REG);
        writel(pll_fclk | __APP_PLL_LCLK_LOGIC_SOFT_RESET,
                rb + APP_PLL_LCLK_CTL_REG);
        writel(pll_sclk | __APP_PLL_SCLK_LOGIC_SOFT_RESET |
                __APP_PLL_SCLK_ENABLE, rb + APP_PLL_SCLK_CTL_REG);
        writel(pll_fclk | __APP_PLL_LCLK_LOGIC_SOFT_RESET |
                __APP_PLL_LCLK_ENABLE, rb + APP_PLL_LCLK_CTL_REG);
        readl(rb + HOSTFN0_INT_MSK);
        udelay(2000);
        writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
        writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
        writel(pll_sclk | __APP_PLL_SCLK_ENABLE, rb + APP_PLL_SCLK_CTL_REG);
        writel(pll_fclk | __APP_PLL_LCLK_ENABLE, rb + APP_PLL_LCLK_CTL_REG);

        if (!fcmode) {
                writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P0));
                writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P1));
        }
        r32 = readl((rb + PSS_CTL_REG));
        r32 &= ~__PSS_LMEM_RESET;
        writel(r32, (rb + PSS_CTL_REG));
        udelay(1000);
        if (!fcmode) {
                writel(0, (rb + PMM_1T_RESET_REG_P0));
                writel(0, (rb + PMM_1T_RESET_REG_P1));
        }

        writel(__EDRAM_BISTR_START, (rb + MBIST_CTL_REG));
        udelay(1000);
        r32 = readl((rb + MBIST_STAT_REG));
        writel(0, (rb + MBIST_CTL_REG));
        return BFA_STATUS_OK;
}

static void
bfa_ioc_ct2_sclk_init(void __iomem *rb)
{
        u32 r32;

        /*
         * put s_clk PLL and PLL FSM in reset
         */
        r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
        r32 &= ~(__APP_PLL_SCLK_ENABLE | __APP_PLL_SCLK_LRESETN);
        r32 |= (__APP_PLL_SCLK_ENARST | __APP_PLL_SCLK_BYPASS |
                __APP_PLL_SCLK_LOGIC_SOFT_RESET);
        writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG));

        /*
         * Ignore mode and program for the max clock (which is FC16)
         * Firmware/NFC will do the PLL init appropriately
         */
        r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
        r32 &= ~(__APP_PLL_SCLK_REFCLK_SEL | __APP_PLL_SCLK_CLK_DIV2);
        writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG));

        /*
         * while doing PLL init don't clock gate ethernet subsystem
         */
        r32 = readl((rb + CT2_CHIP_MISC_PRG));
        writel(r32 | __ETH_CLK_ENABLE_PORT0, (rb + CT2_CHIP_MISC_PRG));

        r32 = readl((rb + CT2_PCIE_MISC_REG));
        writel(r32 | __ETH_CLK_ENABLE_PORT1, (rb + CT2_PCIE_MISC_REG));

        /*
         * set sclk value
         */
        r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
        r32 &= (__P_SCLK_PLL_LOCK | __APP_PLL_SCLK_REFCLK_SEL |
                __APP_PLL_SCLK_CLK_DIV2);
        writel(r32 | 0x1061731b, (rb + CT2_APP_PLL_SCLK_CTL_REG));

        /*
         * poll for s_clk lock or delay 1ms
         */
        udelay(1000);
}

static void
bfa_ioc_ct2_lclk_init(void __iomem *rb)
{
        u32 r32;

        /*
         * put l_clk PLL and PLL FSM in reset
         */
        r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
        r32 &= ~(__APP_PLL_LCLK_ENABLE | __APP_PLL_LCLK_LRESETN);
        r32 |= (__APP_PLL_LCLK_ENARST | __APP_PLL_LCLK_BYPASS |
                __APP_PLL_LCLK_LOGIC_SOFT_RESET);
        writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));

        /*
         * set LPU speed (set for FC16 which will work for other modes)
         */
        r32 = readl((rb + CT2_CHIP_MISC_PRG));
        writel(r32, (rb + CT2_CHIP_MISC_PRG));

        /*
         * set LPU half speed (set for FC16 which will work for other modes)
         */
        r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
        writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));

        /*
         * set lclk for mode (set for FC16)
         */
        r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
        r32 &= (__P_LCLK_PLL_LOCK | __APP_LPUCLK_HALFSPEED);
        r32 |= 0x20c1731b;
        writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));

        /*
         * poll for l_clk lock or delay 1ms
         */
        udelay(1000);
}
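
/*
 * Release LMEM reset and run the eDRAM memory BIST.
 */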
static void
bfa_ioc_ct2_mem_init(void __iomem *rb)
{
        u32 r32;

        r32 = readl((rb + PSS_CTL_REG));
        r32 &= ~__PSS_LMEM_RESET;
        writel(r32, (rb + PSS_CTL_REG));
        udelay(1000);

        writel(__EDRAM_BISTR_START, (rb + CT2_MBIST_CTL_REG));
        udelay(1000);
        writel(0, (rb + CT2_MBIST_CTL_REG));
}

void
bfa_ioc_ct2_mac_reset(void __iomem *rb)
{
        u32 r32;

        bfa_ioc_ct2_sclk_init(rb);
        bfa_ioc_ct2_lclk_init(rb);

        /*
         * release soft reset on s_clk
         */
        r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
        writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET,
                (rb + CT2_APP_PLL_SCLK_CTL_REG));

        /*
         * release soft reset on l_clk
         */
        r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
        writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
                (rb + CT2_APP_PLL_LCLK_CTL_REG));

        /* put port0, port1 MAC & AHB in reset */
        writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET),
                rb + CT2_CSI_MAC_CONTROL_REG(0));
        writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET),
                rb + CT2_CSI_MAC_CONTROL_REG(1));
}
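
/*
 * Helpers to test for and resume a halted NFC controller during
 * CT2 PLL initialization.
 */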
#define CT2_NFC_MAX_DELAY       1000
#define CT2_NFC_VER_VALID       0x143
#define BFA_IOC_PLL_POLL        1000000

static bfa_boolean_t
bfa_ioc_ct2_nfc_halted(void __iomem *rb)
{
        u32 r32;

        r32 = readl(rb + CT2_NFC_CSR_SET_REG);
        if (r32 & __NFC_CONTROLLER_HALTED)
                return BFA_TRUE;

        return BFA_FALSE;
}

static void
bfa_ioc_ct2_nfc_resume(void __iomem *rb)
{
        u32 r32;
        int i;

        writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_CLR_REG);
        for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
                r32 = readl(rb + CT2_NFC_CSR_SET_REG);
                if (!(r32 & __NFC_CONTROLLER_HALTED))
                        return;
                udelay(1000);
        }
        WARN_ON(1);
}

bfa_status_t
bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode mode)
{
        u32 wgn, r32, nfc_ver, i;

        wgn = readl(rb + CT2_WGN_STATUS);
        nfc_ver = readl(rb + CT2_RSC_GPR15_REG);

        if ((wgn == (__A2T_AHB_LOAD | __WGN_READY)) &&
            (nfc_ver >= CT2_NFC_VER_VALID)) {
                if (bfa_ioc_ct2_nfc_halted(rb))
                        bfa_ioc_ct2_nfc_resume(rb);

                writel(__RESET_AND_START_SCLK_LCLK_PLLS,
                        rb + CT2_CSI_FW_CTL_SET_REG);

                for (i = 0; i < BFA_IOC_PLL_POLL; i++) {
                        r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
                        if (r32 & __RESET_AND_START_SCLK_LCLK_PLLS)
                                break;
                }
                WARN_ON(!(r32 & __RESET_AND_START_SCLK_LCLK_PLLS));

                for (i = 0; i < BFA_IOC_PLL_POLL; i++) {
                        r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
                        if (!(r32 & __RESET_AND_START_SCLK_LCLK_PLLS))
                                break;
                }
                WARN_ON(r32 & __RESET_AND_START_SCLK_LCLK_PLLS);
                udelay(1000);

                r32 = readl(rb + CT2_CSI_FW_CTL_REG);
                WARN_ON(r32 & __RESET_AND_START_SCLK_LCLK_PLLS);
        } else {
                writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_SET_REG);
                for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
                        r32 = readl(rb + CT2_NFC_CSR_SET_REG);
                        if (r32 & __NFC_CONTROLLER_HALTED)
                                break;
                        udelay(1000);
                }

                bfa_ioc_ct2_mac_reset(rb);
                bfa_ioc_ct2_sclk_init(rb);
                bfa_ioc_ct2_lclk_init(rb);

                /*
                 * release soft reset on s_clk
                 */
                r32 = readl(rb + CT2_APP_PLL_SCLK_CTL_REG);
                writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET,
                        (rb + CT2_APP_PLL_SCLK_CTL_REG));

                /*
                 * release soft reset on l_clk
                 */
                r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
                writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
                        (rb + CT2_APP_PLL_LCLK_CTL_REG));
        }

        /*
         * Announce flash device presence, if flash was corrupted.
         */
        if (wgn == (__WGN_READY | __GLBL_PF_VF_CFG_RDY)) {
                r32 = readl(rb + PSS_GPIO_OUT_REG);
                writel(r32 & ~1, (rb + PSS_GPIO_OUT_REG));
                r32 = readl(rb + PSS_GPIO_OE_REG);
                writel(r32 | 1, (rb + PSS_GPIO_OE_REG));
        }

        /*
         * Mask the interrupts and clear any
         * pending interrupts.
         */
        writel(1, (rb + CT2_LPU0_HOSTFN_MBOX0_MSK));
        writel(1, (rb + CT2_LPU1_HOSTFN_MBOX0_MSK));

        /* For first time initialization, no need to clear interrupts */
        r32 = readl(rb + HOST_SEM5_REG);
        if (r32 & 0x1) {
                r32 = readl(rb + CT2_LPU0_HOSTFN_CMD_STAT);
                if (r32 == 1) {
                        writel(1, rb + CT2_LPU0_HOSTFN_CMD_STAT);
                        readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
                }
                r32 = readl(rb + CT2_LPU1_HOSTFN_CMD_STAT);
                if (r32 == 1) {
                        writel(1, rb + CT2_LPU1_HOSTFN_CMD_STAT);
                        readl(rb + CT2_LPU1_HOSTFN_CMD_STAT);
                }
        }

        bfa_ioc_ct2_mem_init(rb);
        writel(BFI_IOC_UNINIT, rb + CT2_BFA_IOC0_STATE_REG);
        writel(BFI_IOC_UNINIT, rb + CT2_BFA_IOC1_STATE_REG);
        return BFA_STATUS_OK;
}