/* nic.h */
  1. /*
  2. * Copyright (C) 2015 Cavium, Inc.
  3. *
  4. * This program is free software; you can redistribute it and/or modify it
  5. * under the terms of version 2 of the GNU General Public License
  6. * as published by the Free Software Foundation.
  7. */
  8. #ifndef NIC_H
  9. #define NIC_H
  10. #include <linux/netdevice.h>
  11. #include <linux/interrupt.h>
  12. #include <linux/pci.h>
  13. #include "thunder_bgx.h"
  14. /* PCI device IDs */
  15. #define PCI_DEVICE_ID_THUNDER_NIC_PF 0xA01E
  16. #define PCI_DEVICE_ID_THUNDER_PASS1_NIC_VF 0x0011
  17. #define PCI_DEVICE_ID_THUNDER_NIC_VF 0xA034
  18. #define PCI_DEVICE_ID_THUNDER_BGX 0xA026
  19. /* Subsystem device IDs */
  20. #define PCI_SUBSYS_DEVID_88XX_NIC_PF 0xA11E
  21. #define PCI_SUBSYS_DEVID_81XX_NIC_PF 0xA21E
  22. #define PCI_SUBSYS_DEVID_83XX_NIC_PF 0xA31E
  23. #define PCI_SUBSYS_DEVID_88XX_PASS1_NIC_VF 0xA11E
  24. #define PCI_SUBSYS_DEVID_88XX_NIC_VF 0xA134
  25. #define PCI_SUBSYS_DEVID_81XX_NIC_VF 0xA234
  26. #define PCI_SUBSYS_DEVID_83XX_NIC_VF 0xA334
  27. /* PCI BAR nos */
  28. #define PCI_CFG_REG_BAR_NUM 0
  29. #define PCI_MSIX_REG_BAR_NUM 4
  30. /* NIC SRIOV VF count */
  31. #define MAX_NUM_VFS_SUPPORTED 128
  32. #define DEFAULT_NUM_VF_ENABLED 8
  33. #define NIC_TNS_BYPASS_MODE 0
  34. #define NIC_TNS_MODE 1
  35. /* NIC priv flags */
  36. #define NIC_SRIOV_ENABLED BIT(0)
  37. /* Min/Max packet size */
  38. #define NIC_HW_MIN_FRS 64
  39. #define NIC_HW_MAX_FRS 9190 /* Excluding L2 header and FCS */
  40. /* Max pkinds */
  41. #define NIC_MAX_PKIND 16
  42. /* Max when CPI_ALG is IP diffserv */
  43. #define NIC_MAX_CPI_PER_LMAC 64
  44. /* NIC VF Interrupts */
  45. #define NICVF_INTR_CQ 0
  46. #define NICVF_INTR_SQ 1
  47. #define NICVF_INTR_RBDR 2
  48. #define NICVF_INTR_PKT_DROP 3
  49. #define NICVF_INTR_TCP_TIMER 4
  50. #define NICVF_INTR_MBOX 5
  51. #define NICVF_INTR_QS_ERR 6
  52. #define NICVF_INTR_CQ_SHIFT 0
  53. #define NICVF_INTR_SQ_SHIFT 8
  54. #define NICVF_INTR_RBDR_SHIFT 16
  55. #define NICVF_INTR_PKT_DROP_SHIFT 20
  56. #define NICVF_INTR_TCP_TIMER_SHIFT 21
  57. #define NICVF_INTR_MBOX_SHIFT 22
  58. #define NICVF_INTR_QS_ERR_SHIFT 23
  59. #define NICVF_INTR_CQ_MASK (0xFF << NICVF_INTR_CQ_SHIFT)
  60. #define NICVF_INTR_SQ_MASK (0xFF << NICVF_INTR_SQ_SHIFT)
  61. #define NICVF_INTR_RBDR_MASK (0x03 << NICVF_INTR_RBDR_SHIFT)
  62. #define NICVF_INTR_PKT_DROP_MASK BIT(NICVF_INTR_PKT_DROP_SHIFT)
  63. #define NICVF_INTR_TCP_TIMER_MASK BIT(NICVF_INTR_TCP_TIMER_SHIFT)
  64. #define NICVF_INTR_MBOX_MASK BIT(NICVF_INTR_MBOX_SHIFT)
  65. #define NICVF_INTR_QS_ERR_MASK BIT(NICVF_INTR_QS_ERR_SHIFT)
  66. /* MSI-X interrupts */
  67. #define NIC_PF_MSIX_VECTORS 10
  68. #define NIC_VF_MSIX_VECTORS 20
  69. #define NIC_PF_INTR_ID_ECC0_SBE 0
  70. #define NIC_PF_INTR_ID_ECC0_DBE 1
  71. #define NIC_PF_INTR_ID_ECC1_SBE 2
  72. #define NIC_PF_INTR_ID_ECC1_DBE 3
  73. #define NIC_PF_INTR_ID_ECC2_SBE 4
  74. #define NIC_PF_INTR_ID_ECC2_DBE 5
  75. #define NIC_PF_INTR_ID_ECC3_SBE 6
  76. #define NIC_PF_INTR_ID_ECC3_DBE 7
  77. #define NIC_PF_INTR_ID_MBOX0 8
  78. #define NIC_PF_INTR_ID_MBOX1 9
  79. /* Minimum FIFO level before all packets for the CQ are dropped
  80. *
  81. * This value ensures that once a packet has been "accepted"
  82. * for reception it will not get dropped due to non-availability
  83. * of CQ descriptor. An errata in HW mandates this value to be
  84. * atleast 0x100.
  85. */
  86. #define NICPF_CQM_MIN_DROP_LEVEL 0x100
  87. /* Global timer for CQ timer thresh interrupts
  88. * Calculated for SCLK of 700Mhz
  89. * value written should be a 1/16th of what is expected
  90. *
  91. * 1 tick per 0.025usec
  92. */
  93. #define NICPF_CLK_PER_INT_TICK 1
  94. /* Time to wait before we decide that a SQ is stuck.
  95. *
  96. * Since both pkt rx and tx notifications are done with same CQ,
  97. * when packets are being received at very high rate (eg: L2 forwarding)
  98. * then freeing transmitted skbs will be delayed and watchdog
  99. * will kick in, resetting interface. Hence keeping this value high.
  100. */
  101. #define NICVF_TX_TIMEOUT (50 * HZ)
  102. struct nicvf_cq_poll {
  103. struct nicvf *nicvf;
  104. u8 cq_idx; /* Completion queue index */
  105. struct napi_struct napi;
  106. };
  107. #define NIC_MAX_RSS_HASH_BITS 8
  108. #define NIC_MAX_RSS_IDR_TBL_SIZE (1 << NIC_MAX_RSS_HASH_BITS)
  109. #define RSS_HASH_KEY_SIZE 5 /* 320 bit key */
  110. struct nicvf_rss_info {
  111. bool enable;
  112. #define RSS_L2_EXTENDED_HASH_ENA BIT(0)
  113. #define RSS_IP_HASH_ENA BIT(1)
  114. #define RSS_TCP_HASH_ENA BIT(2)
  115. #define RSS_TCP_SYN_DIS BIT(3)
  116. #define RSS_UDP_HASH_ENA BIT(4)
  117. #define RSS_L4_EXTENDED_HASH_ENA BIT(5)
  118. #define RSS_ROCE_ENA BIT(6)
  119. #define RSS_L3_BI_DIRECTION_ENA BIT(7)
  120. #define RSS_L4_BI_DIRECTION_ENA BIT(8)
  121. u64 cfg;
  122. u8 hash_bits;
  123. u16 rss_size;
  124. u8 ind_tbl[NIC_MAX_RSS_IDR_TBL_SIZE];
  125. u64 key[RSS_HASH_KEY_SIZE];
  126. } ____cacheline_aligned_in_smp;
  127. enum rx_stats_reg_offset {
  128. RX_OCTS = 0x0,
  129. RX_UCAST = 0x1,
  130. RX_BCAST = 0x2,
  131. RX_MCAST = 0x3,
  132. RX_RED = 0x4,
  133. RX_RED_OCTS = 0x5,
  134. RX_ORUN = 0x6,
  135. RX_ORUN_OCTS = 0x7,
  136. RX_FCS = 0x8,
  137. RX_L2ERR = 0x9,
  138. RX_DRP_BCAST = 0xa,
  139. RX_DRP_MCAST = 0xb,
  140. RX_DRP_L3BCAST = 0xc,
  141. RX_DRP_L3MCAST = 0xd,
  142. RX_STATS_ENUM_LAST,
  143. };
  144. enum tx_stats_reg_offset {
  145. TX_OCTS = 0x0,
  146. TX_UCAST = 0x1,
  147. TX_BCAST = 0x2,
  148. TX_MCAST = 0x3,
  149. TX_DROP = 0x4,
  150. TX_STATS_ENUM_LAST,
  151. };
  152. struct nicvf_hw_stats {
  153. u64 rx_bytes;
  154. u64 rx_frames;
  155. u64 rx_ucast_frames;
  156. u64 rx_bcast_frames;
  157. u64 rx_mcast_frames;
  158. u64 rx_drops;
  159. u64 rx_drop_red;
  160. u64 rx_drop_red_bytes;
  161. u64 rx_drop_overrun;
  162. u64 rx_drop_overrun_bytes;
  163. u64 rx_drop_bcast;
  164. u64 rx_drop_mcast;
  165. u64 rx_drop_l3_bcast;
  166. u64 rx_drop_l3_mcast;
  167. u64 rx_fcs_errors;
  168. u64 rx_l2_errors;
  169. u64 tx_bytes;
  170. u64 tx_frames;
  171. u64 tx_ucast_frames;
  172. u64 tx_bcast_frames;
  173. u64 tx_mcast_frames;
  174. u64 tx_drops;
  175. };
  176. struct nicvf_drv_stats {
  177. /* CQE Rx errs */
  178. u64 rx_bgx_truncated_pkts;
  179. u64 rx_jabber_errs;
  180. u64 rx_fcs_errs;
  181. u64 rx_bgx_errs;
  182. u64 rx_prel2_errs;
  183. u64 rx_l2_hdr_malformed;
  184. u64 rx_oversize;
  185. u64 rx_undersize;
  186. u64 rx_l2_len_mismatch;
  187. u64 rx_l2_pclp;
  188. u64 rx_ip_ver_errs;
  189. u64 rx_ip_csum_errs;
  190. u64 rx_ip_hdr_malformed;
  191. u64 rx_ip_payload_malformed;
  192. u64 rx_ip_ttl_errs;
  193. u64 rx_l3_pclp;
  194. u64 rx_l4_malformed;
  195. u64 rx_l4_csum_errs;
  196. u64 rx_udp_len_errs;
  197. u64 rx_l4_port_errs;
  198. u64 rx_tcp_flag_errs;
  199. u64 rx_tcp_offset_errs;
  200. u64 rx_l4_pclp;
  201. u64 rx_truncated_pkts;
  202. /* CQE Tx errs */
  203. u64 tx_desc_fault;
  204. u64 tx_hdr_cons_err;
  205. u64 tx_subdesc_err;
  206. u64 tx_max_size_exceeded;
  207. u64 tx_imm_size_oflow;
  208. u64 tx_data_seq_err;
  209. u64 tx_mem_seq_err;
  210. u64 tx_lock_viol;
  211. u64 tx_data_fault;
  212. u64 tx_tstmp_conflict;
  213. u64 tx_tstmp_timeout;
  214. u64 tx_mem_fault;
  215. u64 tx_csum_overlap;
  216. u64 tx_csum_overflow;
  217. /* driver debug stats */
  218. u64 rcv_buffer_alloc_failures;
  219. u64 tx_tso;
  220. u64 tx_timeout;
  221. u64 txq_stop;
  222. u64 txq_wake;
  223. struct u64_stats_sync syncp;
  224. };
  225. struct nicvf {
  226. struct nicvf *pnicvf;
  227. struct net_device *netdev;
  228. struct pci_dev *pdev;
  229. void __iomem *reg_base;
  230. #define MAX_QUEUES_PER_QSET 8
  231. struct queue_set *qs;
  232. struct nicvf_cq_poll *napi[8];
  233. u8 vf_id;
  234. u8 sqs_id;
  235. bool sqs_mode;
  236. bool hw_tso;
  237. bool t88;
  238. /* Receive buffer alloc */
  239. u32 rb_page_offset;
  240. u16 rb_pageref;
  241. bool rb_alloc_fail;
  242. bool rb_work_scheduled;
  243. struct page *rb_page;
  244. struct delayed_work rbdr_work;
  245. struct tasklet_struct rbdr_task;
  246. /* Secondary Qset */
  247. u8 sqs_count;
  248. #define MAX_SQS_PER_VF_SINGLE_NODE 5
  249. #define MAX_SQS_PER_VF 11
  250. struct nicvf *snicvf[MAX_SQS_PER_VF];
  251. /* Queue count */
  252. u8 rx_queues;
  253. u8 tx_queues;
  254. u8 max_queues;
  255. u8 node;
  256. u8 cpi_alg;
  257. bool link_up;
  258. u8 duplex;
  259. u32 speed;
  260. bool tns_mode;
  261. bool loopback_supported;
  262. struct nicvf_rss_info rss_info;
  263. struct tasklet_struct qs_err_task;
  264. struct work_struct reset_task;
  265. /* Interrupt coalescing settings */
  266. u32 cq_coalesce_usecs;
  267. u32 msg_enable;
  268. /* Stats */
  269. struct nicvf_hw_stats hw_stats;
  270. struct nicvf_drv_stats __percpu *drv_stats;
  271. struct bgx_stats bgx_stats;
  272. /* MSI-X */
  273. bool msix_enabled;
  274. u8 num_vec;
  275. struct msix_entry msix_entries[NIC_VF_MSIX_VECTORS];
  276. char irq_name[NIC_VF_MSIX_VECTORS][IFNAMSIZ + 15];
  277. bool irq_allocated[NIC_VF_MSIX_VECTORS];
  278. cpumask_var_t affinity_mask[NIC_VF_MSIX_VECTORS];
  279. /* VF <-> PF mailbox communication */
  280. bool pf_acked;
  281. bool pf_nacked;
  282. bool set_mac_pending;
  283. } ____cacheline_aligned_in_smp;
  284. /* PF <--> VF Mailbox communication
  285. * Eight 64bit registers are shared between PF and VF.
  286. * Separate set for each VF.
  287. * Writing '1' into last register mbx7 means end of message.
  288. */
  289. /* PF <--> VF mailbox communication */
  290. #define NIC_PF_VF_MAILBOX_SIZE 2
  291. #define NIC_MBOX_MSG_TIMEOUT 2000 /* ms */
  292. /* Mailbox message types */
  293. #define NIC_MBOX_MSG_READY 0x01 /* Is PF ready to rcv msgs */
  294. #define NIC_MBOX_MSG_ACK 0x02 /* ACK the message received */
  295. #define NIC_MBOX_MSG_NACK 0x03 /* NACK the message received */
  296. #define NIC_MBOX_MSG_QS_CFG 0x04 /* Configure Qset */
  297. #define NIC_MBOX_MSG_RQ_CFG 0x05 /* Configure receive queue */
  298. #define NIC_MBOX_MSG_SQ_CFG 0x06 /* Configure Send queue */
  299. #define NIC_MBOX_MSG_RQ_DROP_CFG 0x07 /* Configure receive queue */
  300. #define NIC_MBOX_MSG_SET_MAC 0x08 /* Add MAC ID to DMAC filter */
  301. #define NIC_MBOX_MSG_SET_MAX_FRS 0x09 /* Set max frame size */
  302. #define NIC_MBOX_MSG_CPI_CFG 0x0A /* Config CPI, RSSI */
  303. #define NIC_MBOX_MSG_RSS_SIZE 0x0B /* Get RSS indir_tbl size */
  304. #define NIC_MBOX_MSG_RSS_CFG 0x0C /* Config RSS table */
  305. #define NIC_MBOX_MSG_RSS_CFG_CONT 0x0D /* RSS config continuation */
  306. #define NIC_MBOX_MSG_RQ_BP_CFG 0x0E /* RQ backpressure config */
  307. #define NIC_MBOX_MSG_RQ_SW_SYNC 0x0F /* Flush inflight pkts to RQ */
  308. #define NIC_MBOX_MSG_BGX_STATS 0x10 /* Get stats from BGX */
  309. #define NIC_MBOX_MSG_BGX_LINK_CHANGE 0x11 /* BGX:LMAC link status */
  310. #define NIC_MBOX_MSG_ALLOC_SQS 0x12 /* Allocate secondary Qset */
  311. #define NIC_MBOX_MSG_NICVF_PTR 0x13 /* Send nicvf ptr to PF */
  312. #define NIC_MBOX_MSG_PNICVF_PTR 0x14 /* Get primary qset nicvf ptr */
  313. #define NIC_MBOX_MSG_SNICVF_PTR 0x15 /* Send sqet nicvf ptr to PVF */
  314. #define NIC_MBOX_MSG_LOOPBACK 0x16 /* Set interface in loopback */
  315. #define NIC_MBOX_MSG_RESET_STAT_COUNTER 0x17 /* Reset statistics counters */
  316. #define NIC_MBOX_MSG_CFG_DONE 0xF0 /* VF configuration done */
  317. #define NIC_MBOX_MSG_SHUTDOWN 0xF1 /* VF is being shutdown */
  318. struct nic_cfg_msg {
  319. u8 msg;
  320. u8 vf_id;
  321. u8 node_id;
  322. u8 tns_mode:1;
  323. u8 sqs_mode:1;
  324. u8 loopback_supported:1;
  325. u8 mac_addr[ETH_ALEN];
  326. };
  327. /* Qset configuration */
  328. struct qs_cfg_msg {
  329. u8 msg;
  330. u8 num;
  331. u8 sqs_count;
  332. u64 cfg;
  333. };
  334. /* Receive queue configuration */
  335. struct rq_cfg_msg {
  336. u8 msg;
  337. u8 qs_num;
  338. u8 rq_num;
  339. u64 cfg;
  340. };
  341. /* Send queue configuration */
  342. struct sq_cfg_msg {
  343. u8 msg;
  344. u8 qs_num;
  345. u8 sq_num;
  346. bool sqs_mode;
  347. u64 cfg;
  348. };
  349. /* Set VF's MAC address */
  350. struct set_mac_msg {
  351. u8 msg;
  352. u8 vf_id;
  353. u8 mac_addr[ETH_ALEN];
  354. };
  355. /* Set Maximum frame size */
  356. struct set_frs_msg {
  357. u8 msg;
  358. u8 vf_id;
  359. u16 max_frs;
  360. };
  361. /* Set CPI algorithm type */
  362. struct cpi_cfg_msg {
  363. u8 msg;
  364. u8 vf_id;
  365. u8 rq_cnt;
  366. u8 cpi_alg;
  367. };
  368. /* Get RSS table size */
  369. struct rss_sz_msg {
  370. u8 msg;
  371. u8 vf_id;
  372. u16 ind_tbl_size;
  373. };
  374. /* Set RSS configuration */
  375. struct rss_cfg_msg {
  376. u8 msg;
  377. u8 vf_id;
  378. u8 hash_bits;
  379. u8 tbl_len;
  380. u8 tbl_offset;
  381. #define RSS_IND_TBL_LEN_PER_MBX_MSG 8
  382. u8 ind_tbl[RSS_IND_TBL_LEN_PER_MBX_MSG];
  383. };
  384. struct bgx_stats_msg {
  385. u8 msg;
  386. u8 vf_id;
  387. u8 rx;
  388. u8 idx;
  389. u64 stats;
  390. };
  391. /* Physical interface link status */
  392. struct bgx_link_status {
  393. u8 msg;
  394. u8 link_up;
  395. u8 duplex;
  396. u32 speed;
  397. };
  398. /* Get Extra Qset IDs */
  399. struct sqs_alloc {
  400. u8 msg;
  401. u8 vf_id;
  402. u8 qs_count;
  403. };
  404. struct nicvf_ptr {
  405. u8 msg;
  406. u8 vf_id;
  407. bool sqs_mode;
  408. u8 sqs_id;
  409. u64 nicvf;
  410. };
  411. /* Set interface in loopback mode */
  412. struct set_loopback {
  413. u8 msg;
  414. u8 vf_id;
  415. bool enable;
  416. };
  417. /* Reset statistics counters */
  418. struct reset_stat_cfg {
  419. u8 msg;
  420. /* Bitmap to select NIC_PF_VNIC(vf_id)_RX_STAT(0..13) */
  421. u16 rx_stat_mask;
  422. /* Bitmap to select NIC_PF_VNIC(vf_id)_TX_STAT(0..4) */
  423. u8 tx_stat_mask;
  424. /* Bitmap to select NIC_PF_QS(0..127)_RQ(0..7)_STAT(0..1)
  425. * bit14, bit15 NIC_PF_QS(vf_id)_RQ7_STAT(0..1)
  426. * bit12, bit13 NIC_PF_QS(vf_id)_RQ6_STAT(0..1)
  427. * ..
  428. * bit2, bit3 NIC_PF_QS(vf_id)_RQ1_STAT(0..1)
  429. * bit0, bit1 NIC_PF_QS(vf_id)_RQ0_STAT(0..1)
  430. */
  431. u16 rq_stat_mask;
  432. /* Bitmap to select NIC_PF_QS(0..127)_SQ(0..7)_STAT(0..1)
  433. * bit14, bit15 NIC_PF_QS(vf_id)_SQ7_STAT(0..1)
  434. * bit12, bit13 NIC_PF_QS(vf_id)_SQ6_STAT(0..1)
  435. * ..
  436. * bit2, bit3 NIC_PF_QS(vf_id)_SQ1_STAT(0..1)
  437. * bit0, bit1 NIC_PF_QS(vf_id)_SQ0_STAT(0..1)
  438. */
  439. u16 sq_stat_mask;
  440. };
  441. /* 128 bit shared memory between PF and each VF */
  442. union nic_mbx {
  443. struct { u8 msg; } msg;
  444. struct nic_cfg_msg nic_cfg;
  445. struct qs_cfg_msg qs;
  446. struct rq_cfg_msg rq;
  447. struct sq_cfg_msg sq;
  448. struct set_mac_msg mac;
  449. struct set_frs_msg frs;
  450. struct cpi_cfg_msg cpi_cfg;
  451. struct rss_sz_msg rss_size;
  452. struct rss_cfg_msg rss_cfg;
  453. struct bgx_stats_msg bgx_stats;
  454. struct bgx_link_status link_status;
  455. struct sqs_alloc sqs_alloc;
  456. struct nicvf_ptr nicvf;
  457. struct set_loopback lbk;
  458. struct reset_stat_cfg reset_stat;
  459. };
  460. #define NIC_NODE_ID_MASK 0x03
  461. #define NIC_NODE_ID_SHIFT 44
  462. static inline int nic_get_node_id(struct pci_dev *pdev)
  463. {
  464. u64 addr = pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM);
  465. return ((addr >> NIC_NODE_ID_SHIFT) & NIC_NODE_ID_MASK);
  466. }
  467. static inline bool pass1_silicon(struct pci_dev *pdev)
  468. {
  469. return (pdev->revision < 8) &&
  470. (pdev->subsystem_device == PCI_SUBSYS_DEVID_88XX_NIC_PF);
  471. }
  472. static inline bool pass2_silicon(struct pci_dev *pdev)
  473. {
  474. return (pdev->revision >= 8) &&
  475. (pdev->subsystem_device == PCI_SUBSYS_DEVID_88XX_NIC_PF);
  476. }
  477. int nicvf_set_real_num_queues(struct net_device *netdev,
  478. int tx_queues, int rx_queues);
  479. int nicvf_open(struct net_device *netdev);
  480. int nicvf_stop(struct net_device *netdev);
  481. int nicvf_send_msg_to_pf(struct nicvf *vf, union nic_mbx *mbx);
  482. void nicvf_config_rss(struct nicvf *nic);
  483. void nicvf_set_rss_key(struct nicvf *nic);
  484. void nicvf_set_ethtool_ops(struct net_device *netdev);
  485. void nicvf_update_stats(struct nicvf *nic);
  486. void nicvf_update_lmac_stats(struct nicvf *nic);
  487. #endif /* NIC_H */