cn23xx_vf_device.c
/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 * Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more details.
 ***********************************************************************/
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "cn23xx_vf_device.h"
#include "octeon_main.h"
#include "octeon_mailbox.h"

u32 cn23xx_vf_get_oq_ticks(struct octeon_device *oct, u32 time_intr_in_us)
{
        /* This gives the SLI clock per microsec */
        u32 oqticks_per_us = (u32)oct->pfvf_hsword.coproc_tics_per_us;

        /* This gives the clock cycles per millisecond */
        oqticks_per_us *= 1000;

        /* This gives the oq ticks (1024 core clock cycles) per millisecond */
        oqticks_per_us /= 1024;

        /* time_intr is in microseconds. The next 2 steps give the oq ticks
         * corresponding to time_intr.
         */
        oqticks_per_us *= time_intr_in_us;
        oqticks_per_us /= 1000;

        return oqticks_per_us;
}
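
/* Worked example of the conversion above (illustrative only; the 800 MHz
 * coprocessor clock is an assumed figure, not a datasheet value):
 *
 *      coproc_tics_per_us = 800        ticks per microsecond
 *      800 * 1000 / 1024  = 781        oq ticks per millisecond
 *      with time_intr_in_us = 100:
 *      781 * 100 / 1000   = 78         oq ticks for a 100 us interval
 *
 * 78 is the kind of value a caller such as cn23xx_enable_vf_interrupt()
 * shifts into the time field of SLI_OQ_PKT_INT_LEVELS.
 */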

static int cn23xx_vf_reset_io_queues(struct octeon_device *oct, u32 num_queues)
{
        u32 loop = BUSY_READING_REG_VF_LOOP_COUNT;
        int ret_val = 0;
        u32 q_no;
        u64 d64;

        for (q_no = 0; q_no < num_queues; q_no++) {
                /* set RST bit to 1. This bit applies to both IQ and OQ */
                d64 = octeon_read_csr64(oct,
                                        CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no));
                d64 |= CN23XX_PKT_INPUT_CTL_RST;
                octeon_write_csr64(oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no),
                                   d64);
        }

        /* wait until the RST bit is clear or the RST and QUIET bits are set */
        for (q_no = 0; q_no < num_queues; q_no++) {
                u64 reg_val = octeon_read_csr64(oct,
                                        CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no));
                while ((READ_ONCE(reg_val) & CN23XX_PKT_INPUT_CTL_RST) &&
                       !(READ_ONCE(reg_val) & CN23XX_PKT_INPUT_CTL_QUIET) &&
                       loop) {
                        WRITE_ONCE(reg_val, octeon_read_csr64(
                            oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no)));
                        loop--;
                }
                if (!loop) {
                        dev_err(&oct->pci_dev->dev,
                                "clearing the reset reg failed or setting the quiet reg failed for qno: %u\n",
                                q_no);
                        return -1;
                }
                WRITE_ONCE(reg_val, READ_ONCE(reg_val) &
                           ~CN23XX_PKT_INPUT_CTL_RST);
                octeon_write_csr64(oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no),
                                   READ_ONCE(reg_val));

                WRITE_ONCE(reg_val, octeon_read_csr64(
                    oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no)));
                if (READ_ONCE(reg_val) & CN23XX_PKT_INPUT_CTL_RST) {
                        dev_err(&oct->pci_dev->dev,
                                "clearing the reset failed for qno: %u\n",
                                q_no);
                        ret_val = -1;
                }
        }

        return ret_val;
}
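
/* Global input-ring setup for this VF (a summary of the function below):
 * each of the rings_per_vf rings is reset, its doorbell and instruction
 * counter are cleared, the default CN23XX_PKT_INPUT_CTL_MASK is written
 * to PKT_CONTROL, and the PKT_IN_DONE watermark that raises PI_INT is
 * programmed from the configured IQ interrupt packet threshold.
 */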
static int cn23xx_vf_setup_global_input_regs(struct octeon_device *oct)
{
        struct octeon_cn23xx_vf *cn23xx = (struct octeon_cn23xx_vf *)oct->chip;
        struct octeon_instr_queue *iq;
        u64 q_no, intr_threshold;
        u64 d64;

        if (cn23xx_vf_reset_io_queues(oct, oct->sriov_info.rings_per_vf))
                return -1;

        for (q_no = 0; q_no < (oct->sriov_info.rings_per_vf); q_no++) {
                void __iomem *inst_cnt_reg;

                octeon_write_csr64(oct, CN23XX_VF_SLI_IQ_DOORBELL(q_no),
                                   0xFFFFFFFF);
                iq = oct->instr_queue[q_no];

                if (iq)
                        inst_cnt_reg = iq->inst_cnt_reg;
                else
                        inst_cnt_reg = (u8 *)oct->mmio[0].hw_addr +
                                       CN23XX_VF_SLI_IQ_INSTR_COUNT64(q_no);

                d64 = octeon_read_csr64(oct,
                                        CN23XX_VF_SLI_IQ_INSTR_COUNT64(q_no));

                d64 &= 0xEFFFFFFFFFFFFFFFL;

                octeon_write_csr64(oct, CN23XX_VF_SLI_IQ_INSTR_COUNT64(q_no),
                                   d64);

                /* Select ES, RO, NS, RDSIZE, DPTR Format#0 for
                 * the Input Queues
                 */
                octeon_write_csr64(oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no),
                                   CN23XX_PKT_INPUT_CTL_MASK);

                /* set the wmark level to trigger PI_INT */
                intr_threshold = CFG_GET_IQ_INTR_PKT(cn23xx->conf) &
                                 CN23XX_PKT_IN_DONE_WMARK_MASK;
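
                /* Read-modify-write: clear the old WMARK field in the
                 * instruction count register, then OR in the new threshold
                 * at CN23XX_PKT_IN_DONE_WMARK_BIT_POS.
                 */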
                writeq((readq(inst_cnt_reg) &
                        ~(CN23XX_PKT_IN_DONE_WMARK_MASK <<
                          CN23XX_PKT_IN_DONE_WMARK_BIT_POS)) |
                       (intr_threshold << CN23XX_PKT_IN_DONE_WMARK_BIT_POS),
                       inst_cnt_reg);
        }
        return 0;
}

static void cn23xx_vf_setup_global_output_regs(struct octeon_device *oct)
{
        u32 reg_val;
        u32 q_no;

        for (q_no = 0; q_no < (oct->sriov_info.rings_per_vf); q_no++) {
                octeon_write_csr(oct, CN23XX_VF_SLI_OQ_PKTS_CREDIT(q_no),
                                 0xFFFFFFFF);

                reg_val =
                    octeon_read_csr(oct, CN23XX_VF_SLI_OQ_PKTS_SENT(q_no));

                reg_val &= 0xEFFFFFFFFFFFFFFFL;

                reg_val =
                    octeon_read_csr(oct, CN23XX_VF_SLI_OQ_PKT_CONTROL(q_no));

                /* set IPTR & DPTR */
                reg_val |=
                    (CN23XX_PKT_OUTPUT_CTL_IPTR | CN23XX_PKT_OUTPUT_CTL_DPTR);

                /* reset BMODE */
                reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_BMODE);

                /* No Relaxed Ordering, No Snoop, 64-bit Byte swap
                 * for Output Queue ScatterList; reset ROR_P, NSR_P
                 */
                reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ROR_P);
                reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_NSR_P);

#ifdef __LITTLE_ENDIAN_BITFIELD
                reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ES_P);
#else
                reg_val |= (CN23XX_PKT_OUTPUT_CTL_ES_P);
#endif
                /* No Relaxed Ordering, No Snoop, 64-bit Byte swap
                 * for Output Queue Data; reset ROR, NSR
                 */
                reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ROR);
                reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_NSR);
                /* set the ES bit */
                reg_val |= (CN23XX_PKT_OUTPUT_CTL_ES);

                /* write all the selected settings */
                octeon_write_csr(oct, CN23XX_VF_SLI_OQ_PKT_CONTROL(q_no),
                                 reg_val);
        }
}

static int cn23xx_setup_vf_device_regs(struct octeon_device *oct)
{
        if (cn23xx_vf_setup_global_input_regs(oct))
                return -1;

        cn23xx_vf_setup_global_output_regs(oct);

        return 0;
}

static void cn23xx_setup_vf_iq_regs(struct octeon_device *oct, u32 iq_no)
{
        struct octeon_instr_queue *iq = oct->instr_queue[iq_no];
        u64 pkt_in_done;

        /* Write the start of the input queue's ring and its size */
        octeon_write_csr64(oct, CN23XX_VF_SLI_IQ_BASE_ADDR64(iq_no),
                           iq->base_addr_dma);
        octeon_write_csr(oct, CN23XX_VF_SLI_IQ_SIZE(iq_no), iq->max_count);

        /* Remember the doorbell & instruction count register addr
         * for this queue
         */
        iq->doorbell_reg =
            (u8 *)oct->mmio[0].hw_addr + CN23XX_VF_SLI_IQ_DOORBELL(iq_no);
        iq->inst_cnt_reg =
            (u8 *)oct->mmio[0].hw_addr + CN23XX_VF_SLI_IQ_INSTR_COUNT64(iq_no);
        dev_dbg(&oct->pci_dev->dev, "InstQ[%d]:dbell reg @ 0x%p instcnt_reg @ 0x%p\n",
                iq_no, iq->doorbell_reg, iq->inst_cnt_reg);

        /* Store the current instruction counter (used in flush_iq
         * calculation)
         */
        pkt_in_done = readq(iq->inst_cnt_reg);

        if (oct->msix_on) {
                /* Set CINT_ENB to enable IQ interrupt */
                writeq((pkt_in_done | CN23XX_INTR_CINT_ENB),
                       iq->inst_cnt_reg);
        }
        iq->reset_instr_cnt = 0;
}

static void cn23xx_setup_vf_oq_regs(struct octeon_device *oct, u32 oq_no)
{
        struct octeon_droq *droq = oct->droq[oq_no];

        octeon_write_csr64(oct, CN23XX_VF_SLI_OQ_BASE_ADDR64(oq_no),
                           droq->desc_ring_dma);
        octeon_write_csr(oct, CN23XX_VF_SLI_OQ_SIZE(oq_no), droq->max_count);
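
        /* Pack droq->buffer_size into the low 16 bits of BUFF_INFO_SIZE
         * and the receive-header size (OCT_RH_SIZE) into bits 31:16, as
         * the expression below shows.
         */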
        octeon_write_csr(oct, CN23XX_VF_SLI_OQ_BUFF_INFO_SIZE(oq_no),
                         (droq->buffer_size | (OCT_RH_SIZE << 16)));

        /* Get the mapped address of the pkt_sent and pkts_credit regs */
        droq->pkts_sent_reg =
            (u8 *)oct->mmio[0].hw_addr + CN23XX_VF_SLI_OQ_PKTS_SENT(oq_no);
        droq->pkts_credit_reg =
            (u8 *)oct->mmio[0].hw_addr + CN23XX_VF_SLI_OQ_PKTS_CREDIT(oq_no);
}

static void cn23xx_vf_mbox_thread(struct work_struct *work)
{
        struct cavium_wk *wk = (struct cavium_wk *)work;
        struct octeon_mbox *mbox = (struct octeon_mbox *)wk->ctxptr;

        octeon_mbox_process_message(mbox);
}

static int cn23xx_free_vf_mbox(struct octeon_device *oct)
{
        cancel_delayed_work_sync(&oct->mbox[0]->mbox_poll_wk.work);
        vfree(oct->mbox[0]);
        return 0;
}

static int cn23xx_setup_vf_mbox(struct octeon_device *oct)
{
        struct octeon_mbox *mbox = NULL;

        mbox = vmalloc(sizeof(*mbox));
        if (!mbox)
                return 1;

        memset(mbox, 0, sizeof(struct octeon_mbox));

        spin_lock_init(&mbox->lock);

        mbox->oct_dev = oct;

        mbox->q_no = 0;

        mbox->state = OCTEON_MBOX_STATE_IDLE;

        /* VF mbox interrupt reg */
        mbox->mbox_int_reg =
            (u8 *)oct->mmio[0].hw_addr + CN23XX_VF_SLI_PKT_MBOX_INT(0);
        /* VF reads from SIG0 reg */
        mbox->mbox_read_reg =
            (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_PKT_PF_VF_MBOX_SIG(0, 0);
        /* VF writes into SIG1 reg */
        mbox->mbox_write_reg =
            (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_PKT_PF_VF_MBOX_SIG(0, 1);

        INIT_DELAYED_WORK(&mbox->mbox_poll_wk.work,
                          cn23xx_vf_mbox_thread);

        mbox->mbox_poll_wk.ctxptr = mbox;

        oct->mbox[0] = mbox;

        writeq(OCTEON_PFVFSIG, mbox->mbox_read_reg);

        return 0;
}

static int cn23xx_enable_vf_io_queues(struct octeon_device *oct)
{
        u32 q_no;

        for (q_no = 0; q_no < oct->num_iqs; q_no++) {
                u64 reg_val;

                /* set the corresponding IQ IS_64B bit */
                if (oct->io_qmask.iq64B & BIT_ULL(q_no)) {
                        reg_val = octeon_read_csr64(
                            oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no));
                        reg_val |= CN23XX_PKT_INPUT_CTL_IS_64B;
                        octeon_write_csr64(
                            oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no), reg_val);
                }

                /* set the corresponding IQ ENB bit */
                if (oct->io_qmask.iq & BIT_ULL(q_no)) {
                        reg_val = octeon_read_csr64(
                            oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no));
                        reg_val |= CN23XX_PKT_INPUT_CTL_RING_ENB;
                        octeon_write_csr64(
                            oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no), reg_val);
                }
        }
        for (q_no = 0; q_no < oct->num_oqs; q_no++) {
                u32 reg_val;

                /* set the corresponding OQ ENB bit */
                if (oct->io_qmask.oq & BIT_ULL(q_no)) {
                        reg_val = octeon_read_csr(
                            oct, CN23XX_VF_SLI_OQ_PKT_CONTROL(q_no));
                        reg_val |= CN23XX_PKT_OUTPUT_CTL_RING_ENB;
                        octeon_write_csr(
                            oct, CN23XX_VF_SLI_OQ_PKT_CONTROL(q_no), reg_val);
                }
        }

        return 0;
}

static void cn23xx_disable_vf_io_queues(struct octeon_device *oct)
{
        u32 num_queues = oct->num_iqs;

        /* per HRM, rings can only be disabled via reset operation,
         * NOT via SLI_PKT()_INPUT/OUTPUT_CONTROL[ENB]
         */
        if (num_queues < oct->num_oqs)
                num_queues = oct->num_oqs;

        cn23xx_vf_reset_io_queues(oct, num_queues);
}

void cn23xx_vf_ask_pf_to_do_flr(struct octeon_device *oct)
{
        struct octeon_mbox_cmd mbox_cmd;

        mbox_cmd.msg.u64 = 0;
        mbox_cmd.msg.s.type = OCTEON_MBOX_REQUEST;
        mbox_cmd.msg.s.resp_needed = 0;
        mbox_cmd.msg.s.cmd = OCTEON_VF_FLR_REQUEST;
        mbox_cmd.msg.s.len = 1;
        mbox_cmd.q_no = 0;
        mbox_cmd.recv_len = 0;
        mbox_cmd.recv_status = 0;
        mbox_cmd.fn = NULL;
        mbox_cmd.fn_arg = 0;

        octeon_mbox_write(oct, &mbox_cmd);
}

static void octeon_pfvf_hs_callback(struct octeon_device *oct,
                                    struct octeon_mbox_cmd *cmd,
                                    void *arg)
{
        u32 major = 0;

        memcpy((uint8_t *)&oct->pfvf_hsword, cmd->msg.s.params,
               CN23XX_MAILBOX_MSGPARAM_SIZE);
        if (cmd->recv_len > 1) {
                major = ((struct lio_version *)(cmd->data))->major;
                major = major << 16;
        }

        atomic_set((atomic_t *)arg, major | 1);
}
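
/* PF/VF handshake overview (a summary of the function below): the VF sends
 * an OCTEON_VF_ACTIVE mailbox request carrying its LIQUIDIO_BASE_*_VERSION.
 * The PF's reply lands in octeon_pfvf_hs_callback() above, which copies the
 * handshake word (pkind, coproc_tics_per_us, ...) into oct->pfvf_hsword and
 * packs (pf_major_version << 16) | 1 into the status atomic that the loop
 * below polls. A zero status after ~100000 iterations means timeout; a PF
 * major version that differs from the VF's means incompatible drivers.
 */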
int cn23xx_octeon_pfvf_handshake(struct octeon_device *oct)
{
        struct octeon_mbox_cmd mbox_cmd;
        u32 q_no, count = 0;
        atomic_t status;
        u32 pfmajor;
        u32 vfmajor;
        u32 ret;

        /* Sending VF_ACTIVE indication to the PF driver */
        dev_dbg(&oct->pci_dev->dev, "requesting info from pf\n");

        mbox_cmd.msg.u64 = 0;
        mbox_cmd.msg.s.type = OCTEON_MBOX_REQUEST;
        mbox_cmd.msg.s.resp_needed = 1;
        mbox_cmd.msg.s.cmd = OCTEON_VF_ACTIVE;
        mbox_cmd.msg.s.len = 2;
        mbox_cmd.data[0] = 0;
        ((struct lio_version *)&mbox_cmd.data[0])->major =
            LIQUIDIO_BASE_MAJOR_VERSION;
        ((struct lio_version *)&mbox_cmd.data[0])->minor =
            LIQUIDIO_BASE_MINOR_VERSION;
        ((struct lio_version *)&mbox_cmd.data[0])->micro =
            LIQUIDIO_BASE_MICRO_VERSION;
        mbox_cmd.q_no = 0;
        mbox_cmd.recv_len = 0;
        mbox_cmd.recv_status = 0;
        mbox_cmd.fn = (octeon_mbox_callback_t)octeon_pfvf_hs_callback;
        mbox_cmd.fn_arg = &status;

        /* clear status before the mailbox write so the callback cannot
         * race it
         */
        atomic_set(&status, 0);

        /* Interrupts are not enabled at this point.
         * Enable them with default oq ticks
         */
        oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);

        octeon_mbox_write(oct, &mbox_cmd);

        do {
                schedule_timeout_uninterruptible(1);
        } while ((!atomic_read(&status)) && (count++ < 100000));

        /* Disable the interrupts so that they will be re-enabled
         * with the oq ticks received from the PF
         */
        oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

        ret = atomic_read(&status);
        if (!ret) {
                dev_err(&oct->pci_dev->dev, "octeon_pfvf_handshake timeout\n");
                return 1;
        }

        for (q_no = 0 ; q_no < oct->num_iqs ; q_no++)
                oct->instr_queue[q_no]->txpciq.s.pkind = oct->pfvf_hsword.pkind;

        vfmajor = LIQUIDIO_BASE_MAJOR_VERSION;
        pfmajor = ret >> 16;
        if (pfmajor != vfmajor) {
                dev_err(&oct->pci_dev->dev,
                        "VF LiquidIO driver (major version %d) is not compatible with LiquidIO PF driver (major version %d)\n",
                        vfmajor, pfmajor);
                return 1;
        }

        dev_dbg(&oct->pci_dev->dev,
                "VF LiquidIO driver (major version %d), LiquidIO PF driver (major version %d)\n",
                vfmajor, pfmajor);

        dev_dbg(&oct->pci_dev->dev, "got data from pf pkind is %d\n",
                oct->pfvf_hsword.pkind);

        return 0;
}

static void cn23xx_handle_vf_mbox_intr(struct octeon_ioq_vector *ioq_vector)
{
        struct octeon_device *oct = ioq_vector->oct_dev;
        u64 mbox_int_val;

        if (!ioq_vector->droq_index) {
                /* read and clear by writing 1 */
                mbox_int_val = readq(oct->mbox[0]->mbox_int_reg);
                writeq(mbox_int_val, oct->mbox[0]->mbox_int_reg);
                if (octeon_mbox_read(oct->mbox[0]))
                        schedule_delayed_work(&oct->mbox[0]->mbox_poll_wk.work,
                                              msecs_to_jiffies(0));
        }
}

static u64 cn23xx_vf_msix_interrupt_handler(void *dev)
{
        struct octeon_ioq_vector *ioq_vector = (struct octeon_ioq_vector *)dev;
        struct octeon_device *oct = ioq_vector->oct_dev;
        struct octeon_droq *droq = oct->droq[ioq_vector->droq_index];
        u64 pkts_sent;
        u64 ret = 0;

        dev_dbg(&oct->pci_dev->dev, "In %s octeon_dev @ %p\n", __func__, oct);
        pkts_sent = readq(droq->pkts_sent_reg);

        /* If our device has interrupted, then proceed. Also check
         * for all f's if interrupt was triggered on an error
         * and the PCI read fails.
         */
        if (!pkts_sent || (pkts_sent == 0xFFFFFFFFFFFFFFFFULL))
                return ret;

        /* Write count reg in sli_pkt_cnts to clear these int. */
        if ((pkts_sent & CN23XX_INTR_PO_INT) ||
            (pkts_sent & CN23XX_INTR_PI_INT)) {
                if (pkts_sent & CN23XX_INTR_PO_INT)
                        ret |= MSIX_PO_INT;
        }

        if (pkts_sent & CN23XX_INTR_PI_INT)
                /* We will clear the count when we update the read_index. */
                ret |= MSIX_PI_INT;

        if (pkts_sent & CN23XX_INTR_MBOX_INT) {
                cn23xx_handle_vf_mbox_intr(ioq_vector);
                ret |= MSIX_MBOX_INT;
        }

        return ret;
}

static u32 cn23xx_update_read_index(struct octeon_instr_queue *iq)
{
        u32 pkt_in_done = readl(iq->inst_cnt_reg);
        u32 last_done;
        u32 new_idx;

        last_done = pkt_in_done - iq->pkt_in_done;
        iq->pkt_in_done = pkt_in_done;

        /* Modulo of the new index with the IQ size will give us
         * the new index. The iq->reset_instr_cnt is always zero for
         * cn23xx, so no extra adjustments are needed.
         */
        new_idx = (iq->octeon_read_index +
                   (u32)(last_done & CN23XX_PKT_IN_DONE_CNT_MASK)) %
                  iq->max_count;

        return new_idx;
}
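
/* Illustrative example of the wrap-around above (the values are made up):
 * with max_count = 1024, octeon_read_index = 1020 and last_done = 10,
 * new_idx = (1020 + 10) % 1024 = 6, i.e. the read index wraps past the
 * end of the ring. The unsigned subtraction of the two counters likewise
 * stays correct when the hardware counter wraps through zero.
 */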

static void cn23xx_enable_vf_interrupt(struct octeon_device *oct, u8 intr_flag)
{
        struct octeon_cn23xx_vf *cn23xx = (struct octeon_cn23xx_vf *)oct->chip;
        u32 q_no, time_threshold;

        if (intr_flag & OCTEON_OUTPUT_INTR) {
                for (q_no = 0; q_no < oct->num_oqs; q_no++) {
                        /* Set up interrupt packet and time thresholds
                         * for all the OQs
                         */
                        time_threshold = cn23xx_vf_get_oq_ticks(
                            oct, (u32)CFG_GET_OQ_INTR_TIME(cn23xx->conf));

                        octeon_write_csr64(
                            oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no),
                            (CFG_GET_OQ_INTR_PKT(cn23xx->conf) |
                             ((u64)time_threshold << 32)));
                }
        }
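
        /* Note: the IQ loop below iterates over num_oqs; on the VF every
         * ring is an IQ/OQ pair (rings_per_vf sizes both sets of queues
         * in this file), so the two counts are expected to match.
         */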
        if (intr_flag & OCTEON_INPUT_INTR) {
                for (q_no = 0; q_no < oct->num_oqs; q_no++) {
                        /* Set CINT_ENB to enable IQ interrupt */
                        octeon_write_csr64(
                            oct, CN23XX_VF_SLI_IQ_INSTR_COUNT64(q_no),
                            ((octeon_read_csr64(
                                  oct, CN23XX_VF_SLI_IQ_INSTR_COUNT64(q_no)) &
                              ~CN23XX_PKT_IN_DONE_CNT_MASK) |
                             CN23XX_INTR_CINT_ENB));
                }
        }

        /* Set queue-0 MBOX_ENB to enable VF mailbox interrupt */
        if (intr_flag & OCTEON_MBOX_INTR) {
                octeon_write_csr64(
                    oct, CN23XX_VF_SLI_PKT_MBOX_INT(0),
                    (octeon_read_csr64(oct, CN23XX_VF_SLI_PKT_MBOX_INT(0)) |
                     CN23XX_INTR_MBOX_ENB));
        }
}

static void cn23xx_disable_vf_interrupt(struct octeon_device *oct, u8 intr_flag)
{
        u32 q_no;

        if (intr_flag & OCTEON_OUTPUT_INTR) {
                for (q_no = 0; q_no < oct->num_oqs; q_no++) {
                        /* Write all 1's in INT_LEVEL reg to disable PO_INT */
                        octeon_write_csr64(
                            oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no),
                            0x3fffffffffffff);
                }
        }
        if (intr_flag & OCTEON_INPUT_INTR) {
                for (q_no = 0; q_no < oct->num_oqs; q_no++) {
                        octeon_write_csr64(
                            oct, CN23XX_VF_SLI_IQ_INSTR_COUNT64(q_no),
                            (octeon_read_csr64(
                                 oct, CN23XX_VF_SLI_IQ_INSTR_COUNT64(q_no)) &
                             ~(CN23XX_INTR_CINT_ENB |
                               CN23XX_PKT_IN_DONE_CNT_MASK)));
                }
        }

        if (intr_flag & OCTEON_MBOX_INTR) {
                octeon_write_csr64(
                    oct, CN23XX_VF_SLI_PKT_MBOX_INT(0),
                    (octeon_read_csr64(oct, CN23XX_VF_SLI_PKT_MBOX_INT(0)) &
                     ~CN23XX_INTR_MBOX_ENB));
        }
}

int cn23xx_setup_octeon_vf_device(struct octeon_device *oct)
{
        struct octeon_cn23xx_vf *cn23xx = (struct octeon_cn23xx_vf *)oct->chip;
        u32 rings_per_vf, ring_flag;
        u64 reg_val;

        if (octeon_map_pci_barx(oct, 0, 0))
                return 1;

        /* INPUT_CONTROL[RPVF] gives the VF IOq count */
        reg_val = octeon_read_csr64(oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(0));

        oct->pf_num = (reg_val >> CN23XX_PKT_INPUT_CTL_PF_NUM_POS) &
                      CN23XX_PKT_INPUT_CTL_PF_NUM_MASK;
        oct->vf_num = (reg_val >> CN23XX_PKT_INPUT_CTL_VF_NUM_POS) &
                      CN23XX_PKT_INPUT_CTL_VF_NUM_MASK;

        reg_val = reg_val >> CN23XX_PKT_INPUT_CTL_RPVF_POS;

        rings_per_vf = reg_val & CN23XX_PKT_INPUT_CTL_RPVF_MASK;

        ring_flag = 0;

        cn23xx->conf = oct_get_config_info(oct, LIO_23XX);
        if (!cn23xx->conf) {
                dev_err(&oct->pci_dev->dev, "%s No Config found for CN23XX\n",
                        __func__);
                octeon_unmap_pci_barx(oct, 0);
                return 1;
        }

        if (oct->sriov_info.rings_per_vf > rings_per_vf) {
                dev_warn(&oct->pci_dev->dev,
                         "num_queues:%d greater than PF configured rings_per_vf:%d. Reducing to %d.\n",
                         oct->sriov_info.rings_per_vf, rings_per_vf,
                         rings_per_vf);
                oct->sriov_info.rings_per_vf = rings_per_vf;
        } else {
                if (rings_per_vf > num_present_cpus()) {
                        dev_warn(&oct->pci_dev->dev,
                                 "PF configured rings_per_vf:%d greater than num_cpu:%d. Using rings_per_vf:%d equal to num cpus\n",
                                 rings_per_vf,
                                 num_present_cpus(),
                                 num_present_cpus());
                        oct->sriov_info.rings_per_vf =
                                num_present_cpus();
                } else {
                        oct->sriov_info.rings_per_vf = rings_per_vf;
                }
        }

        oct->fn_list.setup_iq_regs = cn23xx_setup_vf_iq_regs;
        oct->fn_list.setup_oq_regs = cn23xx_setup_vf_oq_regs;
        oct->fn_list.setup_mbox = cn23xx_setup_vf_mbox;
        oct->fn_list.free_mbox = cn23xx_free_vf_mbox;

        oct->fn_list.msix_interrupt_handler = cn23xx_vf_msix_interrupt_handler;

        oct->fn_list.setup_device_regs = cn23xx_setup_vf_device_regs;
        oct->fn_list.update_iq_read_idx = cn23xx_update_read_index;

        oct->fn_list.enable_interrupt = cn23xx_enable_vf_interrupt;
        oct->fn_list.disable_interrupt = cn23xx_disable_vf_interrupt;

        oct->fn_list.enable_io_queues = cn23xx_enable_vf_io_queues;
        oct->fn_list.disable_io_queues = cn23xx_disable_vf_io_queues;

        return 0;
}
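
/* A sketch (not this driver's actual code) of how a caller would use the
 * fn_list ops installed above during VF bring-up; the real sequence lives
 * in the LiquidIO VF main driver and differs in detail:
 *
 *      if (cn23xx_setup_octeon_vf_device(oct))
 *              return 1;
 *      oct->fn_list.setup_device_regs(oct);    // global IQ/OQ setup
 *      oct->fn_list.setup_mbox(oct);           // queue-0 PF/VF mailbox
 *      if (cn23xx_octeon_pfvf_handshake(oct))  // version/pkind exchange
 *              return 1;
 *      oct->fn_list.enable_io_queues(oct);
 *      oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);
 */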

void cn23xx_dump_vf_iq_regs(struct octeon_device *oct)
{
        u32 regval, q_no;

        dev_dbg(&oct->pci_dev->dev, "SLI_IQ_DOORBELL_0 [0x%x]: 0x%016llx\n",
                CN23XX_VF_SLI_IQ_DOORBELL(0),
                CVM_CAST64(octeon_read_csr64(
                    oct, CN23XX_VF_SLI_IQ_DOORBELL(0))));

        dev_dbg(&oct->pci_dev->dev, "SLI_IQ_BASEADDR_0 [0x%x]: 0x%016llx\n",
                CN23XX_VF_SLI_IQ_BASE_ADDR64(0),
                CVM_CAST64(octeon_read_csr64(
                    oct, CN23XX_VF_SLI_IQ_BASE_ADDR64(0))));

        dev_dbg(&oct->pci_dev->dev, "SLI_IQ_FIFO_RSIZE_0 [0x%x]: 0x%016llx\n",
                CN23XX_VF_SLI_IQ_SIZE(0),
                CVM_CAST64(octeon_read_csr64(oct, CN23XX_VF_SLI_IQ_SIZE(0))));

        for (q_no = 0; q_no < oct->sriov_info.rings_per_vf; q_no++) {
                dev_dbg(&oct->pci_dev->dev, "SLI_PKT[%d]_INPUT_CTL [0x%x]: 0x%016llx\n",
                        q_no, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no),
                        CVM_CAST64(octeon_read_csr64(
                            oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no))));
        }

        pci_read_config_dword(oct->pci_dev, CN23XX_CONFIG_PCIE_DEVCTL, &regval);
        dev_dbg(&oct->pci_dev->dev, "Config DevCtl [0x%x]: 0x%08x\n",
                CN23XX_CONFIG_PCIE_DEVCTL, regval);
}