nic_main.c

  1. /*
  2. * Copyright (C) 2015 Cavium, Inc.
  3. *
  4. * This program is free software; you can redistribute it and/or modify it
  5. * under the terms of version 2 of the GNU General Public License
  6. * as published by the Free Software Foundation.
  7. */
  8. #include <linux/module.h>
  9. #include <linux/interrupt.h>
  10. #include <linux/pci.h>
  11. #include <linux/etherdevice.h>
  12. #include <linux/of.h>
  13. #include <linux/if_vlan.h>
  14. #include "nic_reg.h"
  15. #include "nic.h"
  16. #include "q_struct.h"
  17. #include "thunder_bgx.h"
  18. #define DRV_NAME "thunder-nic"
  19. #define DRV_VERSION "1.0"
  20. struct hw_info {
  21. u8 bgx_cnt;
  22. u8 chans_per_lmac;
  23. u8 chans_per_bgx; /* Rx/Tx chans */
  24. u8 chans_per_rgx;
  25. u8 chans_per_lbk;
  26. u16 cpi_cnt;
  27. u16 rssi_cnt;
  28. u16 rss_ind_tbl_size;
  29. u16 tl4_cnt;
  30. u16 tl3_cnt;
  31. u8 tl2_cnt;
  32. u8 tl1_cnt;
  33. bool tl1_per_bgx; /* TL1 per BGX or per LMAC */
  34. };
  35. struct nicpf {
  36. struct pci_dev *pdev;
  37. struct hw_info *hw;
  38. u8 node;
  39. unsigned int flags;
  40. u8 num_vf_en; /* No of VF enabled */
  41. bool vf_enabled[MAX_NUM_VFS_SUPPORTED];
  42. void __iomem *reg_base; /* Register start address */
  43. u8 num_sqs_en; /* Secondary qsets enabled */
  44. u64 nicvf[MAX_NUM_VFS_SUPPORTED];
  45. u8 vf_sqs[MAX_NUM_VFS_SUPPORTED][MAX_SQS_PER_VF];
  46. u8 pqs_vf[MAX_NUM_VFS_SUPPORTED];
  47. bool sqs_used[MAX_NUM_VFS_SUPPORTED];
  48. struct pkind_cfg pkind;
  49. #define NIC_SET_VF_LMAC_MAP(bgx, lmac) (((bgx & 0xF) << 4) | (lmac & 0xF))
  50. #define NIC_GET_BGX_FROM_VF_LMAC_MAP(map) ((map >> 4) & 0xF)
  51. #define NIC_GET_LMAC_FROM_VF_LMAC_MAP(map) (map & 0xF)
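/* Worked example for the map above: bgx = 1, lmac = 2 is packed as
 * map = 0x12; the two GET macros then recover 1 and 2 from that value.
 */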
  52. u8 *vf_lmac_map;
  53. struct delayed_work dwork;
  54. struct workqueue_struct *check_link;
  55. u8 *link;
  56. u8 *duplex;
  57. u32 *speed;
  58. u16 cpi_base[MAX_NUM_VFS_SUPPORTED];
  59. u16 rssi_base[MAX_NUM_VFS_SUPPORTED];
  60. bool mbx_lock[MAX_NUM_VFS_SUPPORTED];
  61. /* MSI-X */
  62. bool msix_enabled;
  63. u8 num_vec;
  64. struct msix_entry *msix_entries;
  65. bool irq_allocated[NIC_PF_MSIX_VECTORS];
  66. char irq_name[NIC_PF_MSIX_VECTORS][20];
  67. };
  68. /* Supported devices */
  69. static const struct pci_device_id nic_id_table[] = {
  70. { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_NIC_PF) },
  71. { 0, } /* end of table */
  72. };
  73. MODULE_AUTHOR("Sunil Goutham");
  74. MODULE_DESCRIPTION("Cavium Thunder NIC Physical Function Driver");
  75. MODULE_LICENSE("GPL v2");
  76. MODULE_VERSION(DRV_VERSION);
  77. MODULE_DEVICE_TABLE(pci, nic_id_table);
  78. /* The Cavium ThunderX network controller can *only* be found in SoCs
  79. * containing the ThunderX ARM64 CPU implementation. All accesses to the device
  80. * registers on this platform are implicitly strongly ordered with respect
  81. * to memory accesses. So writeq_relaxed() and readq_relaxed() are safe to use
  82. * with no memory barriers in this driver. The readq()/writeq() functions add
  83. * explicit ordering operations which in this case are redundant, and only
  84. * add overhead.
  85. */
  86. /* Register read/write APIs */
  87. static void nic_reg_write(struct nicpf *nic, u64 offset, u64 val)
  88. {
  89. writeq_relaxed(val, nic->reg_base + offset);
  90. }
  91. static u64 nic_reg_read(struct nicpf *nic, u64 offset)
  92. {
  93. return readq_relaxed(nic->reg_base + offset);
  94. }
  95. /* PF -> VF mailbox communication APIs */
  96. static void nic_enable_mbx_intr(struct nicpf *nic)
  97. {
  98. int vf_cnt = pci_sriov_get_totalvfs(nic->pdev);
  99. #define INTR_MASK(vfs) ((vfs < 64) ? (BIT_ULL(vfs) - 1) : (~0ull))
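/* e.g. INTR_MASK(8) = BIT_ULL(8) - 1 = 0xFF (one bit per VF); for 64 or
 * more VFs the full 64-bit mask (~0ull) is used for this register.
 */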
  100. /* Clear it, to avoid spurious interrupts (if any) */
  101. nic_reg_write(nic, NIC_PF_MAILBOX_INT, INTR_MASK(vf_cnt));
  102. /* Enable mailbox interrupt for all VFs */
  103. nic_reg_write(nic, NIC_PF_MAILBOX_ENA_W1S, INTR_MASK(vf_cnt));
  104. /* One mailbox intr enable reg per 64 VFs */
  105. if (vf_cnt > 64) {
  106. nic_reg_write(nic, NIC_PF_MAILBOX_INT + sizeof(u64),
  107. INTR_MASK(vf_cnt - 64));
  108. nic_reg_write(nic, NIC_PF_MAILBOX_ENA_W1S + sizeof(u64),
  109. INTR_MASK(vf_cnt - 64));
  110. }
  111. }
  112. static void nic_clear_mbx_intr(struct nicpf *nic, int vf, int mbx_reg)
  113. {
  114. nic_reg_write(nic, NIC_PF_MAILBOX_INT + (mbx_reg << 3), BIT_ULL(vf));
  115. }
  116. static u64 nic_get_mbx_addr(int vf)
  117. {
  118. return NIC_PF_VF_0_127_MAILBOX_0_1 + (vf << NIC_VF_NUM_SHIFT);
  119. }
  120. /* Send a mailbox message to VF
  121. * @vf: VF to which this message is to be sent
  122. * @mbx: Message to be sent
  123. */
  124. static void nic_send_msg_to_vf(struct nicpf *nic, int vf, union nic_mbx *mbx)
  125. {
  126. void __iomem *mbx_addr = nic->reg_base + nic_get_mbx_addr(vf);
  127. u64 *msg = (u64 *)mbx;
  128. /* In first-revision HW, the mbox interrupt is triggered
  129. * when PF writes to MBOX(1), in next revisions when
  130. * PF writes to MBOX(0)
  131. */
  132. if (pass1_silicon(nic->pdev)) {
  133. /* see the comment for nic_reg_write()/nic_reg_read()
  134. * functions above
  135. */
  136. writeq_relaxed(msg[0], mbx_addr);
  137. writeq_relaxed(msg[1], mbx_addr + 8);
  138. } else {
  139. writeq_relaxed(msg[1], mbx_addr + 8);
  140. writeq_relaxed(msg[0], mbx_addr);
  141. }
  142. }
  143. /* Responds to VF's READY message with VF's
  144. * ID, node, MAC address, etc.
  145. * @vf: VF which sent READY message
  146. */
  147. static void nic_mbx_send_ready(struct nicpf *nic, int vf)
  148. {
  149. union nic_mbx mbx = {};
  150. int bgx_idx, lmac;
  151. const char *mac;
  152. mbx.nic_cfg.msg = NIC_MBOX_MSG_READY;
  153. mbx.nic_cfg.vf_id = vf;
  154. mbx.nic_cfg.tns_mode = NIC_TNS_BYPASS_MODE;
  155. if (vf < nic->num_vf_en) {
  156. bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
  157. lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
  158. mac = bgx_get_lmac_mac(nic->node, bgx_idx, lmac);
  159. if (mac)
  160. ether_addr_copy((u8 *)&mbx.nic_cfg.mac_addr, mac);
  161. }
  162. mbx.nic_cfg.sqs_mode = (vf >= nic->num_vf_en) ? true : false;
  163. mbx.nic_cfg.node_id = nic->node;
  164. mbx.nic_cfg.loopback_supported = vf < nic->num_vf_en;
  165. nic_send_msg_to_vf(nic, vf, &mbx);
  166. }
  167. /* ACKs VF's mailbox message
  168. * @vf: VF to which the ACK is to be sent
  169. */
  170. static void nic_mbx_send_ack(struct nicpf *nic, int vf)
  171. {
  172. union nic_mbx mbx = {};
  173. mbx.msg.msg = NIC_MBOX_MSG_ACK;
  174. nic_send_msg_to_vf(nic, vf, &mbx);
  175. }
  176. /* NACKs VF's mailbox message to indicate that the PF is unable to
  177. * complete the requested action
  178. * @vf: VF to which the NACK is to be sent
  179. */
  180. static void nic_mbx_send_nack(struct nicpf *nic, int vf)
  181. {
  182. union nic_mbx mbx = {};
  183. mbx.msg.msg = NIC_MBOX_MSG_NACK;
  184. nic_send_msg_to_vf(nic, vf, &mbx);
  185. }
  186. /* Flush all in flight receive packets to memory and
  187. * bring down an active RQ
  188. */
  189. static int nic_rcv_queue_sw_sync(struct nicpf *nic)
  190. {
  191. u16 timeout = ~0x00;
  192. nic_reg_write(nic, NIC_PF_SW_SYNC_RX, 0x01);
  193. /* Wait till sync cycle is finished */
  194. while (timeout) {
  195. if (nic_reg_read(nic, NIC_PF_SW_SYNC_RX_DONE) & 0x1)
  196. break;
  197. timeout--;
  198. }
  199. nic_reg_write(nic, NIC_PF_SW_SYNC_RX, 0x00);
  200. if (!timeout) {
  201. dev_err(&nic->pdev->dev, "Receive queue software sync failed");
  202. return 1;
  203. }
  204. return 0;
  205. }
  206. /* Get BGX Rx/Tx stats and respond to VF's request */
  207. static void nic_get_bgx_stats(struct nicpf *nic, struct bgx_stats_msg *bgx)
  208. {
  209. int bgx_idx, lmac;
  210. union nic_mbx mbx = {};
  211. bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[bgx->vf_id]);
  212. lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[bgx->vf_id]);
  213. mbx.bgx_stats.msg = NIC_MBOX_MSG_BGX_STATS;
  214. mbx.bgx_stats.vf_id = bgx->vf_id;
  215. mbx.bgx_stats.rx = bgx->rx;
  216. mbx.bgx_stats.idx = bgx->idx;
  217. if (bgx->rx)
  218. mbx.bgx_stats.stats = bgx_get_rx_stats(nic->node, bgx_idx,
  219. lmac, bgx->idx);
  220. else
  221. mbx.bgx_stats.stats = bgx_get_tx_stats(nic->node, bgx_idx,
  222. lmac, bgx->idx);
  223. nic_send_msg_to_vf(nic, bgx->vf_id, &mbx);
  224. }
  225. /* Update hardware min/max frame size */
  226. static int nic_update_hw_frs(struct nicpf *nic, int new_frs, int vf)
  227. {
  228. int bgx, lmac, lmac_cnt;
  229. u64 lmac_credits;
  230. if ((new_frs > NIC_HW_MAX_FRS) || (new_frs < NIC_HW_MIN_FRS))
  231. return 1;
  232. bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
  233. lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
  234. lmac += bgx * MAX_LMAC_PER_BGX;
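/* Flatten (bgx, lmac) into the global LMAC index used to address the
 * per-LMAC NIC_PF_LMAC_* registers below.
 */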
  235. new_frs += VLAN_ETH_HLEN + ETH_FCS_LEN + 4;
  236. /* Update corresponding LMAC credits */
  237. lmac_cnt = bgx_get_lmac_count(nic->node, bgx);
  238. lmac_credits = nic_reg_read(nic, NIC_PF_LMAC_0_7_CREDIT + (lmac * 8));
  239. lmac_credits &= ~(0xFFFFFULL << 12);
  240. lmac_credits |= (((((48 * 1024) / lmac_cnt) - new_frs) / 16) << 12);
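/* The 20-bit credit field at bits [31:12] (cleared above) is expressed in
 * 16-byte units: the per-LMAC share of the 48KB BGX Tx buffer minus one
 * maximum-sized frame.
 */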
  241. nic_reg_write(nic, NIC_PF_LMAC_0_7_CREDIT + (lmac * 8), lmac_credits);
  242. /* Enforce MTU in HW
  243. * This config is supported only from 88xx pass 2.0 onwards.
  244. */
  245. if (!pass1_silicon(nic->pdev))
  246. nic_reg_write(nic,
  247. NIC_PF_LMAC_0_7_CFG2 + (lmac * 8), new_frs);
  248. return 0;
  249. }
  250. /* Set minimum transmit packet size */
  251. static void nic_set_tx_pkt_pad(struct nicpf *nic, int size)
  252. {
  253. int lmac, max_lmac;
  254. u16 sdevid;
  255. u64 lmac_cfg;
  256. /* There is an issue in HW wherein, while sending GSO-sized
  257. * pkts as part of TSO, if the pkt len falls below this size the
  258. * NIC will zero-pad the packet and also update the IP total length.
  259. * Hence set this value to less than the min pkt size of MAC+IP+TCP
  260. * headers; BGX will do the padding to transmit a 64 byte pkt.
  261. */
  262. if (size > 52)
  263. size = 52;
  264. pci_read_config_word(nic->pdev, PCI_SUBSYSTEM_ID, &sdevid);
  265. /* 81xx's RGX has only one LMAC */
  266. if (sdevid == PCI_SUBSYS_DEVID_81XX_NIC_PF)
  267. max_lmac = ((nic->hw->bgx_cnt - 1) * MAX_LMAC_PER_BGX) + 1;
  268. else
  269. max_lmac = nic->hw->bgx_cnt * MAX_LMAC_PER_BGX;
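/* NIC_PF_LMAC_0_7_CFG bits [5:2] hold the min packet size in units of
 * 4 bytes, hence the (size / 4) << 2 written below.
 */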
  270. for (lmac = 0; lmac < max_lmac; lmac++) {
  271. lmac_cfg = nic_reg_read(nic, NIC_PF_LMAC_0_7_CFG | (lmac << 3));
  272. lmac_cfg &= ~(0xF << 2);
  273. lmac_cfg |= ((size / 4) << 2);
  274. nic_reg_write(nic, NIC_PF_LMAC_0_7_CFG | (lmac << 3), lmac_cfg);
  275. }
  276. }
  277. /* Function to check number of LMACs present and set VF::LMAC mapping.
  278. * Mapping will be used while initializing channels.
  279. */
  280. static void nic_set_lmac_vf_mapping(struct nicpf *nic)
  281. {
  282. unsigned int bgx_map = bgx_get_map(nic->node);
  283. int bgx, next_bgx_lmac = 0;
  284. int lmac, lmac_cnt = 0;
  285. u64 lmac_credit;
  286. nic->num_vf_en = 0;
  287. for (bgx = 0; bgx < nic->hw->bgx_cnt; bgx++) {
  288. if (!(bgx_map & (1 << bgx)))
  289. continue;
  290. lmac_cnt = bgx_get_lmac_count(nic->node, bgx);
  291. for (lmac = 0; lmac < lmac_cnt; lmac++)
  292. nic->vf_lmac_map[next_bgx_lmac++] =
  293. NIC_SET_VF_LMAC_MAP(bgx, lmac);
  294. nic->num_vf_en += lmac_cnt;
  295. /* Program LMAC credits */
  296. lmac_credit = (1ull << 1); /* channel credit enable */
  297. lmac_credit |= (0x1ff << 2); /* Max outstanding pkt count */
  298. /* 48KB BGX Tx buffer size, each unit is of size 16 bytes */
  299. lmac_credit |= (((((48 * 1024) / lmac_cnt) -
  300. NIC_HW_MAX_FRS) / 16) << 12);
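/* Worked example: with 4 LMACs on this BGX each LMAC gets 12KB of Tx
 * buffer, so the credit value is (12KB - NIC_HW_MAX_FRS) / 16, placed at
 * bit 12.
 */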
  301. lmac = bgx * MAX_LMAC_PER_BGX;
  302. for (; lmac < lmac_cnt + (bgx * MAX_LMAC_PER_BGX); lmac++)
  303. nic_reg_write(nic,
  304. NIC_PF_LMAC_0_7_CREDIT + (lmac * 8),
  305. lmac_credit);
  306. /* On CN81XX there are only 8 VFs but the max possible number of
  307. * interfaces is 9.
  308. */
  309. if (nic->num_vf_en >= pci_sriov_get_totalvfs(nic->pdev)) {
  310. nic->num_vf_en = pci_sriov_get_totalvfs(nic->pdev);
  311. break;
  312. }
  313. }
  314. }
  315. static void nic_free_lmacmem(struct nicpf *nic)
  316. {
  317. kfree(nic->vf_lmac_map);
  318. kfree(nic->link);
  319. kfree(nic->duplex);
  320. kfree(nic->speed);
  321. }
  322. static int nic_get_hw_info(struct nicpf *nic)
  323. {
  324. u8 max_lmac;
  325. u16 sdevid;
  326. struct hw_info *hw = nic->hw;
  327. pci_read_config_word(nic->pdev, PCI_SUBSYSTEM_ID, &sdevid);
  328. switch (sdevid) {
  329. case PCI_SUBSYS_DEVID_88XX_NIC_PF:
  330. hw->bgx_cnt = MAX_BGX_PER_CN88XX;
  331. hw->chans_per_lmac = 16;
  332. hw->chans_per_bgx = 128;
  333. hw->cpi_cnt = 2048;
  334. hw->rssi_cnt = 4096;
  335. hw->rss_ind_tbl_size = NIC_MAX_RSS_IDR_TBL_SIZE;
  336. hw->tl3_cnt = 256;
  337. hw->tl2_cnt = 64;
  338. hw->tl1_cnt = 2;
  339. hw->tl1_per_bgx = true;
  340. break;
  341. case PCI_SUBSYS_DEVID_81XX_NIC_PF:
  342. hw->bgx_cnt = MAX_BGX_PER_CN81XX;
  343. hw->chans_per_lmac = 8;
  344. hw->chans_per_bgx = 32;
  345. hw->chans_per_rgx = 8;
  346. hw->chans_per_lbk = 24;
  347. hw->cpi_cnt = 512;
  348. hw->rssi_cnt = 256;
  349. hw->rss_ind_tbl_size = 32; /* Max RSSI / Max interfaces */
  350. hw->tl3_cnt = 64;
  351. hw->tl2_cnt = 16;
  352. hw->tl1_cnt = 10;
  353. hw->tl1_per_bgx = false;
  354. break;
  355. case PCI_SUBSYS_DEVID_83XX_NIC_PF:
  356. hw->bgx_cnt = MAX_BGX_PER_CN83XX;
  357. hw->chans_per_lmac = 8;
  358. hw->chans_per_bgx = 32;
  359. hw->chans_per_lbk = 64;
  360. hw->cpi_cnt = 2048;
  361. hw->rssi_cnt = 1024;
  362. hw->rss_ind_tbl_size = 64; /* Max RSSI / Max interfaces */
  363. hw->tl3_cnt = 256;
  364. hw->tl2_cnt = 64;
  365. hw->tl1_cnt = 18;
  366. hw->tl1_per_bgx = false;
  367. break;
  368. }
  369. hw->tl4_cnt = MAX_QUEUES_PER_QSET * pci_sriov_get_totalvfs(nic->pdev);
  370. /* Allocate memory for LMAC tracking elements */
  371. max_lmac = hw->bgx_cnt * MAX_LMAC_PER_BGX;
  372. nic->vf_lmac_map = kmalloc_array(max_lmac, sizeof(u8), GFP_KERNEL);
  373. if (!nic->vf_lmac_map)
  374. goto error;
  375. nic->link = kmalloc_array(max_lmac, sizeof(u8), GFP_KERNEL);
  376. if (!nic->link)
  377. goto error;
  378. nic->duplex = kmalloc_array(max_lmac, sizeof(u8), GFP_KERNEL);
  379. if (!nic->duplex)
  380. goto error;
  381. nic->speed = kmalloc_array(max_lmac, sizeof(u32), GFP_KERNEL);
  382. if (!nic->speed)
  383. goto error;
  384. return 0;
  385. error:
  386. nic_free_lmacmem(nic);
  387. return -ENOMEM;
  388. }
  389. #define BGX0_BLOCK 8
  390. #define BGX1_BLOCK 9
  391. static int nic_init_hw(struct nicpf *nic)
  392. {
  393. int i, err;
  394. u64 cqm_cfg;
  395. /* Get HW capability info */
  396. err = nic_get_hw_info(nic);
  397. if (err)
  398. return err;
  399. /* Enable NIC HW block */
  400. nic_reg_write(nic, NIC_PF_CFG, 0x3);
  401. /* Enable backpressure */
  402. nic_reg_write(nic, NIC_PF_BP_CFG, (1ULL << 6) | 0x03);
  403. /* TNS and TNS bypass modes are present only on 88xx */
  404. if (nic->pdev->subsystem_device == PCI_SUBSYS_DEVID_88XX_NIC_PF) {
  405. /* Disable TNS mode on both interfaces */
  406. nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG,
  407. (NIC_TNS_BYPASS_MODE << 7) | BGX0_BLOCK);
  408. nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG | (1 << 8),
  409. (NIC_TNS_BYPASS_MODE << 7) | BGX1_BLOCK);
  410. }
  411. nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG,
  412. (1ULL << 63) | BGX0_BLOCK);
  413. nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG + (1 << 8),
  414. (1ULL << 63) | BGX1_BLOCK);
  415. /* PKIND configuration */
  416. nic->pkind.minlen = 0;
  417. nic->pkind.maxlen = NIC_HW_MAX_FRS + VLAN_ETH_HLEN + ETH_FCS_LEN + 4;
  418. nic->pkind.lenerr_en = 1;
  419. nic->pkind.rx_hdr = 0;
  420. nic->pkind.hdr_sl = 0;
  421. for (i = 0; i < NIC_MAX_PKIND; i++)
  422. nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG | (i << 3),
  423. *(u64 *)&nic->pkind);
  424. nic_set_tx_pkt_pad(nic, NIC_HW_MIN_FRS);
  425. /* Timer config */
  426. nic_reg_write(nic, NIC_PF_INTR_TIMER_CFG, NICPF_CLK_PER_INT_TICK);
  427. /* Enable VLAN ethertype matching and stripping */
  428. nic_reg_write(nic, NIC_PF_RX_ETYPE_0_7,
  429. (2 << 19) | (ETYPE_ALG_VLAN_STRIP << 16) | ETH_P_8021Q);
  430. /* Check if HW expected value is higher (could be in future chips) */
  431. cqm_cfg = nic_reg_read(nic, NIC_PF_CQM_CFG);
  432. if (cqm_cfg < NICPF_CQM_MIN_DROP_LEVEL)
  433. nic_reg_write(nic, NIC_PF_CQM_CFG, NICPF_CQM_MIN_DROP_LEVEL);
  434. return 0;
  435. }
  436. /* Channel parse index configuration */
  437. static void nic_config_cpi(struct nicpf *nic, struct cpi_cfg_msg *cfg)
  438. {
  439. struct hw_info *hw = nic->hw;
  440. u32 vnic, bgx, lmac, chan;
  441. u32 padd, cpi_count = 0;
  442. u64 cpi_base, cpi, rssi_base, rssi;
  443. u8 qset, rq_idx = 0;
  444. vnic = cfg->vf_id;
  445. bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);
  446. lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);
  447. chan = (lmac * hw->chans_per_lmac) + (bgx * hw->chans_per_bgx);
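/* e.g. on 88xx (16 channels per LMAC, 128 per BGX), BGX1/LMAC2 maps to
 * Rx channel 2 * 16 + 1 * 128 = 160.
 */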
  448. cpi_base = vnic * NIC_MAX_CPI_PER_LMAC;
  449. rssi_base = vnic * hw->rss_ind_tbl_size;
  450. /* Rx channel configuration */
  451. nic_reg_write(nic, NIC_PF_CHAN_0_255_RX_BP_CFG | (chan << 3),
  452. (1ull << 63) | (vnic << 0));
  453. nic_reg_write(nic, NIC_PF_CHAN_0_255_RX_CFG | (chan << 3),
  454. ((u64)cfg->cpi_alg << 62) | (cpi_base << 48));
  455. if (cfg->cpi_alg == CPI_ALG_NONE)
  456. cpi_count = 1;
  457. else if (cfg->cpi_alg == CPI_ALG_VLAN) /* 3 bits of PCP */
  458. cpi_count = 8;
  459. else if (cfg->cpi_alg == CPI_ALG_VLAN16) /* 3 bits PCP + DEI */
  460. cpi_count = 16;
  461. else if (cfg->cpi_alg == CPI_ALG_DIFF) /* 6bits DSCP */
  462. cpi_count = NIC_MAX_CPI_PER_LMAC;
  463. /* RSS Qset, Qidx mapping */
  464. qset = cfg->vf_id;
  465. rssi = rssi_base;
  466. for (; rssi < (rssi_base + cfg->rq_cnt); rssi++) {
  467. nic_reg_write(nic, NIC_PF_RSSI_0_4097_RQ | (rssi << 3),
  468. (qset << 3) | rq_idx);
  469. rq_idx++;
  470. }
  471. rssi = 0;
  472. cpi = cpi_base;
  473. for (; cpi < (cpi_base + cpi_count); cpi++) {
  474. /* Determine port to channel adder */
  475. if (cfg->cpi_alg != CPI_ALG_DIFF)
  476. padd = cpi % cpi_count;
  477. else
  478. padd = cpi % 8; /* 3 bits CS out of 6bits DSCP */
  479. /* Leave RSS_SIZE as '0' to disable RSS */
  480. if (pass1_silicon(nic->pdev)) {
  481. nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi << 3),
  482. (vnic << 24) | (padd << 16) |
  483. (rssi_base + rssi));
  484. } else {
  485. /* Set MPI_ALG to '0' to disable MCAM parsing */
  486. nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi << 3),
  487. (padd << 16));
  488. /* MPI index is same as CPI if MPI_ALG is not enabled */
  489. nic_reg_write(nic, NIC_PF_MPI_0_2047_CFG | (cpi << 3),
  490. (vnic << 24) | (rssi_base + rssi));
  491. }
  492. if ((rssi + 1) >= cfg->rq_cnt)
  493. continue;
  494. if (cfg->cpi_alg == CPI_ALG_VLAN)
  495. rssi++;
  496. else if (cfg->cpi_alg == CPI_ALG_VLAN16)
  497. rssi = ((cpi - cpi_base) & 0xe) >> 1;
  498. else if (cfg->cpi_alg == CPI_ALG_DIFF)
  499. rssi = ((cpi - cpi_base) & 0x38) >> 3;
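/* Derived from the masks above: for VLAN16 the DEI bit is dropped, so two
 * consecutive CPIs share one RSSI; for DIFF only the 3-bit class selector
 * advances the RSSI, so eight CPIs share one.
 */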
  500. }
  501. nic->cpi_base[cfg->vf_id] = cpi_base;
  502. nic->rssi_base[cfg->vf_id] = rssi_base;
  503. }
  504. /* Responds to VF with its RSS indirection table size */
  505. static void nic_send_rss_size(struct nicpf *nic, int vf)
  506. {
  507. union nic_mbx mbx = {};
  508. u64 *msg;
  509. msg = (u64 *)&mbx;
  510. mbx.rss_size.msg = NIC_MBOX_MSG_RSS_SIZE;
  511. mbx.rss_size.ind_tbl_size = nic->hw->rss_ind_tbl_size;
  512. nic_send_msg_to_vf(nic, vf, &mbx);
  513. }
  514. /* Receive side scaling configuration
  515. * configures:
  516. * - RSS index
  517. * - indirection table, i.e. hash::RQ mapping
  518. * - number of hash bits to consider
  519. */
  520. static void nic_config_rss(struct nicpf *nic, struct rss_cfg_msg *cfg)
  521. {
  522. u8 qset, idx = 0;
  523. u64 cpi_cfg, cpi_base, rssi_base, rssi;
  524. u64 idx_addr;
  525. rssi_base = nic->rssi_base[cfg->vf_id] + cfg->tbl_offset;
  526. rssi = rssi_base;
  527. qset = cfg->vf_id;
  528. for (; rssi < (rssi_base + cfg->tbl_len); rssi++) {
  529. u8 svf = cfg->ind_tbl[idx] >> 3;
  530. if (svf)
  531. qset = nic->vf_sqs[cfg->vf_id][svf - 1];
  532. else
  533. qset = cfg->vf_id;
  534. nic_reg_write(nic, NIC_PF_RSSI_0_4097_RQ | (rssi << 3),
  535. (qset << 3) | (cfg->ind_tbl[idx] & 0x7));
  536. idx++;
  537. }
  538. cpi_base = nic->cpi_base[cfg->vf_id];
  539. if (pass1_silicon(nic->pdev))
  540. idx_addr = NIC_PF_CPI_0_2047_CFG;
  541. else
  542. idx_addr = NIC_PF_MPI_0_2047_CFG;
  543. cpi_cfg = nic_reg_read(nic, idx_addr | (cpi_base << 3));
  544. cpi_cfg &= ~(0xFULL << 20);
  545. cpi_cfg |= (cfg->hash_bits << 20);
  546. nic_reg_write(nic, idx_addr | (cpi_base << 3), cpi_cfg);
  547. }
  548. /* 4 level transmit side scheduler configuration
  549. * for TNS bypass mode
  550. *
  551. * Sample configuration for SQ0 on 88xx
  552. * VNIC0-SQ0 -> TL4(0) -> TL3[0] -> TL2[0] -> TL1[0] -> BGX0
  553. * VNIC1-SQ0 -> TL4(8) -> TL3[2] -> TL2[0] -> TL1[0] -> BGX0
  554. * VNIC2-SQ0 -> TL4(16) -> TL3[4] -> TL2[1] -> TL1[0] -> BGX0
  555. * VNIC3-SQ0 -> TL4(24) -> TL3[6] -> TL2[1] -> TL1[0] -> BGX0
  556. * VNIC4-SQ0 -> TL4(512) -> TL3[128] -> TL2[32] -> TL1[1] -> BGX1
  557. * VNIC5-SQ0 -> TL4(520) -> TL3[130] -> TL2[32] -> TL1[1] -> BGX1
  558. * VNIC6-SQ0 -> TL4(528) -> TL3[132] -> TL2[33] -> TL1[1] -> BGX1
  559. * VNIC7-SQ0 -> TL4(536) -> TL3[134] -> TL2[33] -> TL1[1] -> BGX1
  560. */
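/* A rough check of the table above, assuming 8 queues per Qset and 128
 * total VFs on 88xx (so tl4_cnt = 1024, bgx_cnt = 2): VNIC4-SQ0 sits on
 * BGX1/LMAC0, giving tl4 = 1 * (1024 / 2) + 0 * 8 + 0 = 512, i.e. TL4(512).
 */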
  561. static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic,
  562. struct sq_cfg_msg *sq)
  563. {
  564. struct hw_info *hw = nic->hw;
  565. u32 bgx, lmac, chan;
  566. u32 tl2, tl3, tl4;
  567. u32 rr_quantum;
  568. u8 sq_idx = sq->sq_num;
  569. u8 pqs_vnic;
  570. int svf;
  571. if (sq->sqs_mode)
  572. pqs_vnic = nic->pqs_vf[vnic];
  573. else
  574. pqs_vnic = vnic;
  575. bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[pqs_vnic]);
  576. lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[pqs_vnic]);
  577. /* 24 bytes for FCS, IPG and preamble */
  578. rr_quantum = ((NIC_HW_MAX_FRS + 24) / 4);
  579. /* For 88xx TL4s 0-511 transmit via BGX0 and
  580. * TL4s 512-1023 transmit via BGX1.
  581. */
  582. if (hw->tl1_per_bgx) {
  583. tl4 = bgx * (hw->tl4_cnt / hw->bgx_cnt);
  584. if (!sq->sqs_mode) {
  585. tl4 += (lmac * MAX_QUEUES_PER_QSET);
  586. } else {
  587. for (svf = 0; svf < MAX_SQS_PER_VF; svf++) {
  588. if (nic->vf_sqs[pqs_vnic][svf] == vnic)
  589. break;
  590. }
  591. tl4 += (MAX_LMAC_PER_BGX * MAX_QUEUES_PER_QSET);
  592. tl4 += (lmac * MAX_QUEUES_PER_QSET * MAX_SQS_PER_VF);
  593. tl4 += (svf * MAX_QUEUES_PER_QSET);
  594. }
  595. } else {
  596. tl4 = (vnic * MAX_QUEUES_PER_QSET);
  597. }
  598. tl4 += sq_idx;
  599. tl3 = tl4 / (hw->tl4_cnt / hw->tl3_cnt);
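/* On 88xx (tl3_cnt = 256, tl4_cnt = 1024 assuming 128 VFs) four
 * consecutive TL4s feed one TL3; tl2 = tl3 >> 2 further below then groups
 * four TL3s per TL2.
 */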
  600. nic_reg_write(nic, NIC_PF_QSET_0_127_SQ_0_7_CFG2 |
  601. ((u64)vnic << NIC_QS_ID_SHIFT) |
  602. ((u32)sq_idx << NIC_Q_NUM_SHIFT), tl4);
  603. nic_reg_write(nic, NIC_PF_TL4_0_1023_CFG | (tl4 << 3),
  604. ((u64)vnic << 27) | ((u32)sq_idx << 24) | rr_quantum);
  605. nic_reg_write(nic, NIC_PF_TL3_0_255_CFG | (tl3 << 3), rr_quantum);
  606. /* On 88xx channels 0-127 are for BGX0 and
  607. * channels 128-255 for BGX1.
  608. *
  609. * On 81xx/83xx the TL3_CHAN reg should be configured with the channel
  610. * within the LMAC i.e. 0-7 and not the actual channel number like on 88xx
  611. */
  612. chan = (lmac * hw->chans_per_lmac) + (bgx * hw->chans_per_bgx);
  613. if (hw->tl1_per_bgx)
  614. nic_reg_write(nic, NIC_PF_TL3_0_255_CHAN | (tl3 << 3), chan);
  615. else
  616. nic_reg_write(nic, NIC_PF_TL3_0_255_CHAN | (tl3 << 3), 0);
  617. /* Enable backpressure on the channel */
  618. nic_reg_write(nic, NIC_PF_CHAN_0_255_TX_CFG | (chan << 3), 1);
  619. tl2 = tl3 >> 2;
  620. nic_reg_write(nic, NIC_PF_TL3A_0_63_CFG | (tl2 << 3), tl2);
  621. nic_reg_write(nic, NIC_PF_TL2_0_63_CFG | (tl2 << 3), rr_quantum);
  622. /* No priorities as of now */
  623. nic_reg_write(nic, NIC_PF_TL2_0_63_PRI | (tl2 << 3), 0x00);
  624. /* Unlike 88xx, where TL2s 0-31 transmit to TL1 '0' and the rest to TL1 '1',
  625. * on 81xx/83xx TL2 needs to be configured to transmit to one of the
  626. * possible LMACs.
  627. *
  628. * This register doesn't exist on 88xx.
  629. */
  630. if (!hw->tl1_per_bgx)
  631. nic_reg_write(nic, NIC_PF_TL2_LMAC | (tl2 << 3),
  632. lmac + (bgx * MAX_LMAC_PER_BGX));
  633. }
  634. /* Send primary nicvf pointer to secondary QS's VF */
  635. static void nic_send_pnicvf(struct nicpf *nic, int sqs)
  636. {
  637. union nic_mbx mbx = {};
  638. mbx.nicvf.msg = NIC_MBOX_MSG_PNICVF_PTR;
  639. mbx.nicvf.nicvf = nic->nicvf[nic->pqs_vf[sqs]];
  640. nic_send_msg_to_vf(nic, sqs, &mbx);
  641. }
  642. /* Send SQS's nicvf pointer to primary QS's VF */
  643. static void nic_send_snicvf(struct nicpf *nic, struct nicvf_ptr *nicvf)
  644. {
  645. union nic_mbx mbx = {};
  646. int sqs_id = nic->vf_sqs[nicvf->vf_id][nicvf->sqs_id];
  647. mbx.nicvf.msg = NIC_MBOX_MSG_SNICVF_PTR;
  648. mbx.nicvf.sqs_id = nicvf->sqs_id;
  649. mbx.nicvf.nicvf = nic->nicvf[sqs_id];
  650. nic_send_msg_to_vf(nic, nicvf->vf_id, &mbx);
  651. }
  652. /* Find next available Qset that can be assigned as a
  653. * secondary Qset to a VF.
  654. */
  655. static int nic_nxt_avail_sqs(struct nicpf *nic)
  656. {
  657. int sqs;
  658. for (sqs = 0; sqs < nic->num_sqs_en; sqs++) {
  659. if (!nic->sqs_used[sqs])
  660. nic->sqs_used[sqs] = true;
  661. else
  662. continue;
  663. return sqs + nic->num_vf_en;
  664. }
  665. return -1;
  666. }
  667. /* Allocate additional Qsets for requested VF */
  668. static void nic_alloc_sqs(struct nicpf *nic, struct sqs_alloc *sqs)
  669. {
  670. union nic_mbx mbx = {};
  671. int idx, alloc_qs = 0;
  672. int sqs_id;
  673. if (!nic->num_sqs_en)
  674. goto send_mbox;
  675. for (idx = 0; idx < sqs->qs_count; idx++) {
  676. sqs_id = nic_nxt_avail_sqs(nic);
  677. if (sqs_id < 0)
  678. break;
  679. nic->vf_sqs[sqs->vf_id][idx] = sqs_id;
  680. nic->pqs_vf[sqs_id] = sqs->vf_id;
  681. alloc_qs++;
  682. }
  683. send_mbox:
  684. mbx.sqs_alloc.msg = NIC_MBOX_MSG_ALLOC_SQS;
  685. mbx.sqs_alloc.vf_id = sqs->vf_id;
  686. mbx.sqs_alloc.qs_count = alloc_qs;
  687. nic_send_msg_to_vf(nic, sqs->vf_id, &mbx);
  688. }
  689. static int nic_config_loopback(struct nicpf *nic, struct set_loopback *lbk)
  690. {
  691. int bgx_idx, lmac_idx;
  692. if (lbk->vf_id >= nic->num_vf_en)
  693. return -1;
  694. bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lbk->vf_id]);
  695. lmac_idx = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lbk->vf_id]);
  696. bgx_lmac_internal_loopback(nic->node, bgx_idx, lmac_idx, lbk->enable);
  697. /* Enable moving average calculation.
  698. * Keep the LVL/AVG delay at the HW-enforced minimum so that not too many
  699. * packets sneak in between average calculations.
  700. */
  701. nic_reg_write(nic, NIC_PF_CQ_AVG_CFG,
  702. (BIT_ULL(20) | 0x2ull << 14 | 0x1));
  703. nic_reg_write(nic, NIC_PF_RRM_AVG_CFG,
  704. (BIT_ULL(20) | 0x3ull << 14 | 0x1));
  705. return 0;
  706. }
  707. /* Reset statistics counters */
  708. static int nic_reset_stat_counters(struct nicpf *nic,
  709. int vf, struct reset_stat_cfg *cfg)
  710. {
  711. int i, stat, qnum;
  712. u64 reg_addr;
  713. for (i = 0; i < RX_STATS_ENUM_LAST; i++) {
  714. if (cfg->rx_stat_mask & BIT(i)) {
  715. reg_addr = NIC_PF_VNIC_0_127_RX_STAT_0_13 |
  716. (vf << NIC_QS_ID_SHIFT) |
  717. (i << 3);
  718. nic_reg_write(nic, reg_addr, 0);
  719. }
  720. }
  721. for (i = 0; i < TX_STATS_ENUM_LAST; i++) {
  722. if (cfg->tx_stat_mask & BIT(i)) {
  723. reg_addr = NIC_PF_VNIC_0_127_TX_STAT_0_4 |
  724. (vf << NIC_QS_ID_SHIFT) |
  725. (i << 3);
  726. nic_reg_write(nic, reg_addr, 0);
  727. }
  728. }
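/* RQ/SQ stat mask layout: bit (2 * qnum + stat) selects STAT_<stat> of
 * queue <qnum>, covering 8 queues with 2 stat registers each.
 */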
  729. for (i = 0; i <= 15; i++) {
  730. qnum = i >> 1;
  731. stat = i & 1 ? 1 : 0;
  732. reg_addr = (vf << NIC_QS_ID_SHIFT) |
  733. (qnum << NIC_Q_NUM_SHIFT) | (stat << 3);
  734. if (cfg->rq_stat_mask & BIT(i)) {
  735. reg_addr |= NIC_PF_QSET_0_127_RQ_0_7_STAT_0_1;
  736. nic_reg_write(nic, reg_addr, 0);
  737. }
  738. if (cfg->sq_stat_mask & BIT(i)) {
  739. reg_addr |= NIC_PF_QSET_0_127_SQ_0_7_STAT_0_1;
  740. nic_reg_write(nic, reg_addr, 0);
  741. }
  742. }
  743. return 0;
  744. }
  745. static void nic_enable_tunnel_parsing(struct nicpf *nic, int vf)
  746. {
  747. u64 prot_def = (IPV6_PROT << 32) | (IPV4_PROT << 16) | ET_PROT;
  748. u64 vxlan_prot_def = (IPV6_PROT_DEF << 32) |
  749. (IPV4_PROT_DEF) << 16 | ET_PROT_DEF;
  750. /* Configure tunnel parsing parameters */
  751. nic_reg_write(nic, NIC_PF_RX_GENEVE_DEF,
  752. (1ULL << 63 | UDP_GENEVE_PORT_NUM));
  753. nic_reg_write(nic, NIC_PF_RX_GENEVE_PROT_DEF,
  754. ((7ULL << 61) | prot_def));
  755. nic_reg_write(nic, NIC_PF_RX_NVGRE_PROT_DEF,
  756. ((7ULL << 61) | prot_def));
  757. nic_reg_write(nic, NIC_PF_RX_VXLAN_DEF_0_1,
  758. ((1ULL << 63) | UDP_VXLAN_PORT_NUM));
  759. nic_reg_write(nic, NIC_PF_RX_VXLAN_PROT_DEF,
  760. ((0xfULL << 60) | vxlan_prot_def));
  761. }
  762. static void nic_enable_vf(struct nicpf *nic, int vf, bool enable)
  763. {
  764. int bgx, lmac;
  765. nic->vf_enabled[vf] = enable;
  766. if (vf >= nic->num_vf_en)
  767. return;
  768. bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
  769. lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
  770. bgx_lmac_rx_tx_enable(nic->node, bgx, lmac, enable);
  771. }
  772. static void nic_pause_frame(struct nicpf *nic, int vf, struct pfc *cfg)
  773. {
  774. int bgx, lmac;
  775. struct pfc pfc;
  776. union nic_mbx mbx = {};
  777. if (vf >= nic->num_vf_en)
  778. return;
  779. bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
  780. lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
  781. if (cfg->get) {
  782. bgx_lmac_get_pfc(nic->node, bgx, lmac, &pfc);
  783. mbx.pfc.msg = NIC_MBOX_MSG_PFC;
  784. mbx.pfc.autoneg = pfc.autoneg;
  785. mbx.pfc.fc_rx = pfc.fc_rx;
  786. mbx.pfc.fc_tx = pfc.fc_tx;
  787. nic_send_msg_to_vf(nic, vf, &mbx);
  788. } else {
  789. bgx_lmac_set_pfc(nic->node, bgx, lmac, cfg);
  790. nic_mbx_send_ack(nic, vf);
  791. }
  792. }
  793. /* Interrupt handler to handle mailbox messages from VFs */
  794. static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
  795. {
  796. union nic_mbx mbx = {};
  797. u64 *mbx_data;
  798. u64 mbx_addr;
  799. u64 reg_addr;
  800. u64 cfg;
  801. int bgx, lmac;
  802. int i;
  803. int ret = 0;
  804. nic->mbx_lock[vf] = true;
  805. mbx_addr = nic_get_mbx_addr(vf);
  806. mbx_data = (u64 *)&mbx;
  807. for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) {
  808. *mbx_data = nic_reg_read(nic, mbx_addr);
  809. mbx_data++;
  810. mbx_addr += sizeof(u64);
  811. }
  812. dev_dbg(&nic->pdev->dev, "%s: Mailbox msg 0x%02x from VF%d\n",
  813. __func__, mbx.msg.msg, vf);
  814. switch (mbx.msg.msg) {
  815. case NIC_MBOX_MSG_READY:
  816. nic_mbx_send_ready(nic, vf);
  817. if (vf < nic->num_vf_en) {
  818. nic->link[vf] = 0;
  819. nic->duplex[vf] = 0;
  820. nic->speed[vf] = 0;
  821. }
  822. goto unlock;
  823. case NIC_MBOX_MSG_QS_CFG:
  824. reg_addr = NIC_PF_QSET_0_127_CFG |
  825. (mbx.qs.num << NIC_QS_ID_SHIFT);
  826. cfg = mbx.qs.cfg;
  827. /* Check if it's a secondary Qset */
  828. if (vf >= nic->num_vf_en) {
  829. cfg = cfg & (~0x7FULL);
  830. /* Assign this Qset to primary Qset's VF */
  831. cfg |= nic->pqs_vf[vf];
  832. }
  833. nic_reg_write(nic, reg_addr, cfg);
  834. break;
  835. case NIC_MBOX_MSG_RQ_CFG:
  836. reg_addr = NIC_PF_QSET_0_127_RQ_0_7_CFG |
  837. (mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
  838. (mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
  839. nic_reg_write(nic, reg_addr, mbx.rq.cfg);
  840. /* Enable CQE_RX2_S extension in CQE_RX descriptor.
  841. * This is appended by default on 81xx/83xx chips; for consistency,
  842. * enable the same on 88xx pass2,
  843. * where it was introduced.
  844. */
  845. if (pass2_silicon(nic->pdev))
  846. nic_reg_write(nic, NIC_PF_RX_CFG, 0x01);
  847. if (!pass1_silicon(nic->pdev))
  848. nic_enable_tunnel_parsing(nic, vf);
  849. break;
  850. case NIC_MBOX_MSG_RQ_BP_CFG:
  851. reg_addr = NIC_PF_QSET_0_127_RQ_0_7_BP_CFG |
  852. (mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
  853. (mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
  854. nic_reg_write(nic, reg_addr, mbx.rq.cfg);
  855. break;
  856. case NIC_MBOX_MSG_RQ_SW_SYNC:
  857. ret = nic_rcv_queue_sw_sync(nic);
  858. break;
  859. case NIC_MBOX_MSG_RQ_DROP_CFG:
  860. reg_addr = NIC_PF_QSET_0_127_RQ_0_7_DROP_CFG |
  861. (mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
  862. (mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
  863. nic_reg_write(nic, reg_addr, mbx.rq.cfg);
  864. break;
  865. case NIC_MBOX_MSG_SQ_CFG:
  866. reg_addr = NIC_PF_QSET_0_127_SQ_0_7_CFG |
  867. (mbx.sq.qs_num << NIC_QS_ID_SHIFT) |
  868. (mbx.sq.sq_num << NIC_Q_NUM_SHIFT);
  869. nic_reg_write(nic, reg_addr, mbx.sq.cfg);
  870. nic_tx_channel_cfg(nic, mbx.qs.num, &mbx.sq);
  871. break;
  872. case NIC_MBOX_MSG_SET_MAC:
  873. if (vf >= nic->num_vf_en) {
  874. ret = -1; /* NACK */
  875. break;
  876. }
  877. lmac = mbx.mac.vf_id;
  878. bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]);
  879. lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]);
  880. bgx_set_lmac_mac(nic->node, bgx, lmac, mbx.mac.mac_addr);
  881. break;
  882. case NIC_MBOX_MSG_SET_MAX_FRS:
  883. ret = nic_update_hw_frs(nic, mbx.frs.max_frs,
  884. mbx.frs.vf_id);
  885. break;
  886. case NIC_MBOX_MSG_CPI_CFG:
  887. nic_config_cpi(nic, &mbx.cpi_cfg);
  888. break;
  889. case NIC_MBOX_MSG_RSS_SIZE:
  890. nic_send_rss_size(nic, vf);
  891. goto unlock;
  892. case NIC_MBOX_MSG_RSS_CFG:
  893. case NIC_MBOX_MSG_RSS_CFG_CONT:
  894. nic_config_rss(nic, &mbx.rss_cfg);
  895. break;
  896. case NIC_MBOX_MSG_CFG_DONE:
  897. /* Last message of VF config msg sequence */
  898. nic_enable_vf(nic, vf, true);
  899. goto unlock;
  900. case NIC_MBOX_MSG_SHUTDOWN:
  901. /* First msg in VF teardown sequence */
  902. if (vf >= nic->num_vf_en)
  903. nic->sqs_used[vf - nic->num_vf_en] = false;
  904. nic->pqs_vf[vf] = 0;
  905. nic_enable_vf(nic, vf, false);
  906. break;
  907. case NIC_MBOX_MSG_ALLOC_SQS:
  908. nic_alloc_sqs(nic, &mbx.sqs_alloc);
  909. goto unlock;
  910. case NIC_MBOX_MSG_NICVF_PTR:
  911. nic->nicvf[vf] = mbx.nicvf.nicvf;
  912. break;
  913. case NIC_MBOX_MSG_PNICVF_PTR:
  914. nic_send_pnicvf(nic, vf);
  915. goto unlock;
  916. case NIC_MBOX_MSG_SNICVF_PTR:
  917. nic_send_snicvf(nic, &mbx.nicvf);
  918. goto unlock;
  919. case NIC_MBOX_MSG_BGX_STATS:
  920. nic_get_bgx_stats(nic, &mbx.bgx_stats);
  921. goto unlock;
  922. case NIC_MBOX_MSG_LOOPBACK:
  923. ret = nic_config_loopback(nic, &mbx.lbk);
  924. break;
  925. case NIC_MBOX_MSG_RESET_STAT_COUNTER:
  926. ret = nic_reset_stat_counters(nic, vf, &mbx.reset_stat);
  927. break;
  928. case NIC_MBOX_MSG_PFC:
  929. nic_pause_frame(nic, vf, &mbx.pfc);
  930. goto unlock;
  931. default:
  932. dev_err(&nic->pdev->dev,
  933. "Invalid msg from VF%d, msg 0x%x\n", vf, mbx.msg.msg);
  934. break;
  935. }
  936. if (!ret) {
  937. nic_mbx_send_ack(nic, vf);
  938. } else if (mbx.msg.msg != NIC_MBOX_MSG_READY) {
  939. dev_err(&nic->pdev->dev, "NACK for MBOX 0x%02x from VF %d\n",
  940. mbx.msg.msg, vf);
  941. nic_mbx_send_nack(nic, vf);
  942. }
  943. unlock:
  944. nic->mbx_lock[vf] = false;
  945. }
  946. static irqreturn_t nic_mbx_intr_handler(int irq, void *nic_irq)
  947. {
  948. struct nicpf *nic = (struct nicpf *)nic_irq;
  949. int mbx;
  950. u64 intr;
  951. u8 vf, vf_per_mbx_reg = 64;
  952. if (irq == nic->msix_entries[NIC_PF_INTR_ID_MBOX0].vector)
  953. mbx = 0;
  954. else
  955. mbx = 1;
  956. intr = nic_reg_read(nic, NIC_PF_MAILBOX_INT + (mbx << 3));
  957. dev_dbg(&nic->pdev->dev, "PF interrupt Mbox%d 0x%llx\n", mbx, intr);
  958. for (vf = 0; vf < vf_per_mbx_reg; vf++) {
  959. if (intr & (1ULL << vf)) {
  960. dev_dbg(&nic->pdev->dev, "Intr from VF %d\n",
  961. vf + (mbx * vf_per_mbx_reg));
  962. nic_handle_mbx_intr(nic, vf + (mbx * vf_per_mbx_reg));
  963. nic_clear_mbx_intr(nic, vf, mbx);
  964. }
  965. }
  966. return IRQ_HANDLED;
  967. }
  968. static int nic_enable_msix(struct nicpf *nic)
  969. {
  970. int i, ret;
  971. nic->num_vec = pci_msix_vec_count(nic->pdev);
  972. nic->msix_entries = kmalloc_array(nic->num_vec,
  973. sizeof(struct msix_entry),
  974. GFP_KERNEL);
  975. if (!nic->msix_entries)
  976. return -ENOMEM;
  977. for (i = 0; i < nic->num_vec; i++)
  978. nic->msix_entries[i].entry = i;
  979. ret = pci_enable_msix(nic->pdev, nic->msix_entries, nic->num_vec);
  980. if (ret) {
  981. dev_err(&nic->pdev->dev,
  982. "Request for #%d msix vectors failed, returned %d\n",
  983. nic->num_vec, ret);
  984. kfree(nic->msix_entries);
  985. return ret;
  986. }
  987. nic->msix_enabled = 1;
  988. return 0;
  989. }
  990. static void nic_disable_msix(struct nicpf *nic)
  991. {
  992. if (nic->msix_enabled) {
  993. pci_disable_msix(nic->pdev);
  994. kfree(nic->msix_entries);
  995. nic->msix_enabled = 0;
  996. nic->num_vec = 0;
  997. }
  998. }
  999. static void nic_free_all_interrupts(struct nicpf *nic)
  1000. {
  1001. int irq;
  1002. for (irq = 0; irq < nic->num_vec; irq++) {
  1003. if (nic->irq_allocated[irq])
  1004. free_irq(nic->msix_entries[irq].vector, nic);
  1005. nic->irq_allocated[irq] = false;
  1006. }
  1007. }
  1008. static int nic_register_interrupts(struct nicpf *nic)
  1009. {
  1010. int i, ret;
  1011. /* Enable MSI-X */
  1012. ret = nic_enable_msix(nic);
  1013. if (ret)
  1014. return ret;
  1015. /* Register mailbox interrupt handler */
  1016. for (i = NIC_PF_INTR_ID_MBOX0; i < nic->num_vec; i++) {
  1017. sprintf(nic->irq_name[i],
  1018. "NICPF Mbox%d", (i - NIC_PF_INTR_ID_MBOX0));
  1019. ret = request_irq(nic->msix_entries[i].vector,
  1020. nic_mbx_intr_handler, 0,
  1021. nic->irq_name[i], nic);
  1022. if (ret)
  1023. goto fail;
  1024. nic->irq_allocated[i] = true;
  1025. }
  1026. /* Enable mailbox interrupt */
  1027. nic_enable_mbx_intr(nic);
  1028. return 0;
  1029. fail:
  1030. dev_err(&nic->pdev->dev, "Request irq failed\n");
  1031. nic_free_all_interrupts(nic);
  1032. nic_disable_msix(nic);
  1033. return ret;
  1034. }
  1035. static void nic_unregister_interrupts(struct nicpf *nic)
  1036. {
  1037. nic_free_all_interrupts(nic);
  1038. nic_disable_msix(nic);
  1039. }
  1040. static int nic_num_sqs_en(struct nicpf *nic, int vf_en)
  1041. {
  1042. int pos, sqs_per_vf = MAX_SQS_PER_VF_SINGLE_NODE;
  1043. u16 total_vf;
  1044. /* Secondary Qsets are needed only if CPU count is
  1045. * more than MAX_QUEUES_PER_QSET.
  1046. */
  1047. if (num_online_cpus() <= MAX_QUEUES_PER_QSET)
  1048. return 0;
  1049. /* Check if it's a multi-node environment */
  1050. if (nr_node_ids > 1)
  1051. sqs_per_vf = MAX_SQS_PER_VF;
  1052. pos = pci_find_ext_capability(nic->pdev, PCI_EXT_CAP_ID_SRIOV);
  1053. pci_read_config_word(nic->pdev, (pos + PCI_SRIOV_TOTAL_VF), &total_vf);
  1054. return min(total_vf - vf_en, vf_en * sqs_per_vf);
  1055. }
  1056. static int nic_sriov_init(struct pci_dev *pdev, struct nicpf *nic)
  1057. {
  1058. int pos = 0;
  1059. int vf_en;
  1060. int err;
  1061. u16 total_vf_cnt;
  1062. pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
  1063. if (!pos) {
  1064. dev_err(&pdev->dev, "SRIOV capability is not found in PCIe config space\n");
  1065. return -ENODEV;
  1066. }
  1067. pci_read_config_word(pdev, (pos + PCI_SRIOV_TOTAL_VF), &total_vf_cnt);
  1068. if (total_vf_cnt < nic->num_vf_en)
  1069. nic->num_vf_en = total_vf_cnt;
  1070. if (!total_vf_cnt)
  1071. return 0;
  1072. vf_en = nic->num_vf_en;
  1073. nic->num_sqs_en = nic_num_sqs_en(nic, nic->num_vf_en);
  1074. vf_en += nic->num_sqs_en;
  1075. err = pci_enable_sriov(pdev, vf_en);
  1076. if (err) {
  1077. dev_err(&pdev->dev, "SRIOV enable failed, num VF is %d\n",
  1078. vf_en);
  1079. nic->num_vf_en = 0;
  1080. return err;
  1081. }
  1082. dev_info(&pdev->dev, "SRIOV enabled, number of VF available %d\n",
  1083. vf_en);
  1084. nic->flags |= NIC_SRIOV_ENABLED;
  1085. return 0;
  1086. }
  1087. /* Poll for BGX LMAC link status and update the corresponding VF
  1088. * if there is a change. Valid only if the internal L2 switch
  1089. * is not present; otherwise the VF link is always treated as up
  1090. */
  1091. static void nic_poll_for_link(struct work_struct *work)
  1092. {
  1093. union nic_mbx mbx = {};
  1094. struct nicpf *nic;
  1095. struct bgx_link_status link;
  1096. u8 vf, bgx, lmac;
  1097. nic = container_of(work, struct nicpf, dwork.work);
  1098. mbx.link_status.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE;
  1099. for (vf = 0; vf < nic->num_vf_en; vf++) {
  1100. /* Poll only if VF is UP */
  1101. if (!nic->vf_enabled[vf])
  1102. continue;
  1103. /* Get BGX, LMAC indices for the VF */
  1104. bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
  1105. lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
  1106. /* Get interface link status */
  1107. bgx_get_lmac_link_state(nic->node, bgx, lmac, &link);
  1108. /* Inform VF only if link status changed */
  1109. if (nic->link[vf] == link.link_up)
  1110. continue;
  1111. if (!nic->mbx_lock[vf]) {
  1112. nic->link[vf] = link.link_up;
  1113. nic->duplex[vf] = link.duplex;
  1114. nic->speed[vf] = link.speed;
  1115. /* Send a mbox message to VF with current link status */
  1116. mbx.link_status.link_up = link.link_up;
  1117. mbx.link_status.duplex = link.duplex;
  1118. mbx.link_status.speed = link.speed;
  1119. mbx.link_status.mac_type = link.mac_type;
  1120. nic_send_msg_to_vf(nic, vf, &mbx);
  1121. }
  1122. }
  1123. queue_delayed_work(nic->check_link, &nic->dwork, HZ * 2);
  1124. }
  1125. static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
  1126. {
  1127. struct device *dev = &pdev->dev;
  1128. struct nicpf *nic;
  1129. int err;
  1130. BUILD_BUG_ON(sizeof(union nic_mbx) > 16);
  1131. nic = devm_kzalloc(dev, sizeof(*nic), GFP_KERNEL);
  1132. if (!nic)
  1133. return -ENOMEM;
  1134. nic->hw = devm_kzalloc(dev, sizeof(struct hw_info), GFP_KERNEL);
  1135. if (!nic->hw) {
  1136. devm_kfree(dev, nic);
  1137. return -ENOMEM;
  1138. }
  1139. pci_set_drvdata(pdev, nic);
  1140. nic->pdev = pdev;
  1141. err = pci_enable_device(pdev);
  1142. if (err) {
  1143. dev_err(dev, "Failed to enable PCI device\n");
  1144. pci_set_drvdata(pdev, NULL);
  1145. return err;
  1146. }
  1147. err = pci_request_regions(pdev, DRV_NAME);
  1148. if (err) {
  1149. dev_err(dev, "PCI request regions failed 0x%x\n", err);
  1150. goto err_disable_device;
  1151. }
  1152. err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
  1153. if (err) {
  1154. dev_err(dev, "Unable to get usable DMA configuration\n");
  1155. goto err_release_regions;
  1156. }
  1157. err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
  1158. if (err) {
  1159. dev_err(dev, "Unable to get 48-bit DMA for consistent allocations\n");
  1160. goto err_release_regions;
  1161. }
  1162. /* MAP PF's configuration registers */
  1163. nic->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
  1164. if (!nic->reg_base) {
  1165. dev_err(dev, "Cannot map config register space, aborting\n");
  1166. err = -ENOMEM;
  1167. goto err_release_regions;
  1168. }
  1169. nic->node = nic_get_node_id(pdev);
  1170. /* Initialize hardware */
  1171. err = nic_init_hw(nic);
  1172. if (err)
  1173. goto err_release_regions;
  1174. nic_set_lmac_vf_mapping(nic);
  1175. /* Register interrupts */
  1176. err = nic_register_interrupts(nic);
  1177. if (err)
  1178. goto err_release_regions;
  1179. /* Configure SRIOV */
  1180. err = nic_sriov_init(pdev, nic);
  1181. if (err)
  1182. goto err_unregister_interrupts;
  1183. /* Register a physical link status poll fn() */
  1184. nic->check_link = alloc_workqueue("check_link_status",
  1185. WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
  1186. if (!nic->check_link) {
  1187. err = -ENOMEM;
  1188. goto err_disable_sriov;
  1189. }
  1190. INIT_DELAYED_WORK(&nic->dwork, nic_poll_for_link);
  1191. queue_delayed_work(nic->check_link, &nic->dwork, 0);
  1192. return 0;
  1193. err_disable_sriov:
  1194. if (nic->flags & NIC_SRIOV_ENABLED)
  1195. pci_disable_sriov(pdev);
  1196. err_unregister_interrupts:
  1197. nic_unregister_interrupts(nic);
  1198. err_release_regions:
  1199. pci_release_regions(pdev);
  1200. err_disable_device:
  1201. nic_free_lmacmem(nic);
  1202. devm_kfree(dev, nic->hw);
  1203. devm_kfree(dev, nic);
  1204. pci_disable_device(pdev);
  1205. pci_set_drvdata(pdev, NULL);
  1206. return err;
  1207. }
  1208. static void nic_remove(struct pci_dev *pdev)
  1209. {
  1210. struct nicpf *nic = pci_get_drvdata(pdev);
  1211. if (nic->flags & NIC_SRIOV_ENABLED)
  1212. pci_disable_sriov(pdev);
  1213. if (nic->check_link) {
  1214. /* Destroy work Queue */
  1215. cancel_delayed_work_sync(&nic->dwork);
  1216. destroy_workqueue(nic->check_link);
  1217. }
  1218. nic_unregister_interrupts(nic);
  1219. pci_release_regions(pdev);
  1220. nic_free_lmacmem(nic);
  1221. devm_kfree(&pdev->dev, nic->hw);
  1222. devm_kfree(&pdev->dev, nic);
  1223. pci_disable_device(pdev);
  1224. pci_set_drvdata(pdev, NULL);
  1225. }
  1226. static struct pci_driver nic_driver = {
  1227. .name = DRV_NAME,
  1228. .id_table = nic_id_table,
  1229. .probe = nic_probe,
  1230. .remove = nic_remove,
  1231. };
  1232. static int __init nic_init_module(void)
  1233. {
  1234. pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);
  1235. return pci_register_driver(&nic_driver);
  1236. }
  1237. static void __exit nic_cleanup_module(void)
  1238. {
  1239. pci_unregister_driver(&nic_driver);
  1240. }
  1241. module_init(nic_init_module);
  1242. module_exit(nic_cleanup_module);