/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2011 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
#include "nic.h"
#include "regs.h"
#include "io.h"
#include "workarounds.h"

/**************************************************************************
 *
 * Configurable values
 *
 **************************************************************************
 */

/* This is set to 16 for a good reason. In summary, if larger than
 * 16, the descriptor cache holds more than a default socket
 * buffer's worth of packets (for UDP we can only have at most one
 * socket buffer's worth outstanding). This combined with the fact
 * that we only get 1 TX event per descriptor cache means the NIC
 * goes idle.
 */
#define TX_DC_ENTRIES 16
#define TX_DC_ENTRIES_ORDER 1

#define RX_DC_ENTRIES 64
#define RX_DC_ENTRIES_ORDER 3
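
/* In each case the values appear to follow entries = 8 << order
 * (16 = 8 << 1, 64 = 8 << 3), which is how the hardware encodes the
 * descriptor-cache size; treat that relationship as an observation
 * from these constants rather than a documented contract.
 */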

/* If EFX_MAX_INT_ERRORS internal errors occur within
 * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
 * disable it.
 */
#define EFX_INT_ERROR_EXPIRE 3600
#define EFX_MAX_INT_ERRORS 5

/* We poll for events every FLUSH_INTERVAL ms, and check FLUSH_POLL_COUNT times
 */
#define EFX_FLUSH_INTERVAL 10
#define EFX_FLUSH_POLL_COUNT 100
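/* i.e. efx_nic_flush_queues() below sleeps for at most
 * 100 * 10 ms = 1 second in total before giving up.
 */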

/* Size and alignment of special buffers (4KB) */
#define EFX_BUF_SIZE 4096

/* Depth of RX flush request fifo */
#define EFX_RX_FLUSH_COUNT 4

/* Generated event code for efx_generate_test_event() */
#define EFX_CHANNEL_MAGIC_TEST(_channel)	\
	(0x00010100 + (_channel)->channel)

/* Generated event code for efx_generate_fill_event() */
#define EFX_CHANNEL_MAGIC_FILL(_channel)	\
	(0x00010200 + (_channel)->channel)
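
/* Both magic values encode the channel number in their low bits, with
 * 0x000101xx marking a self-test event and 0x000102xx a refill
 * request, so efx_handle_generated_event() can match an event back to
 * the channel that generated it.
 */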

/**************************************************************************
 *
 * Solarstorm hardware access
 *
 **************************************************************************/

static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
				     unsigned int index)
{
	efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
			value, index);
}

/* Read the current event from the event queue */
static inline efx_qword_t *efx_event(struct efx_channel *channel,
				     unsigned int index)
{
	return ((efx_qword_t *) (channel->eventq.addr)) +
		(index & channel->eventq_mask);
}

/* See if an event is present
 *
 * We check both the high and low dword of the event for all ones. We
 * wrote all ones when we cleared the event, and no valid event can
 * have all ones in either its high or low dwords. This approach is
 * robust against reordering.
 *
 * Note that using a single 64-bit comparison is incorrect; even
 * though the CPU read will be atomic, the DMA write may not be.
 */
static inline int efx_event_present(efx_qword_t *event)
{
	return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
		 EFX_DWORD_IS_ALL_ONES(event->dword[1]));
}
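
/* Returns true if owords @a and @b differ in any bit position
 * selected by @mask; bits outside the mask are ignored.
 */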
static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
				     const efx_oword_t *mask)
{
	return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
		((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
}

int efx_nic_test_registers(struct efx_nic *efx,
			   const struct efx_nic_register_test *regs,
			   size_t n_regs)
{
	unsigned address = 0, i, j;
	efx_oword_t mask, imask, original, reg, buf;

	/* Falcon should be in loopback to isolate the XMAC from the PHY */
	WARN_ON(!LOOPBACK_INTERNAL(efx));

	for (i = 0; i < n_regs; ++i) {
		address = regs[i].address;
		mask = imask = regs[i].mask;
		EFX_INVERT_OWORD(imask);

		efx_reado(efx, &original, address);

		/* bit sweep on and off */
		for (j = 0; j < 128; j++) {
			if (!EFX_EXTRACT_OWORD32(mask, j, j))
				continue;

			/* Test this testable bit can be set in isolation */
			EFX_AND_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 1);

			efx_writeo(efx, &reg, address);
			efx_reado(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;

			/* Test this testable bit can be cleared in isolation */
			EFX_OR_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 0);

			efx_writeo(efx, &reg, address);
			efx_reado(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;
		}

		efx_writeo(efx, &original, address);
	}

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev,
		  "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
		  " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
		  EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
	return -EIO;
}

/**************************************************************************
 *
 * Special buffer handling
 * Special buffers are used for event queues and the TX and RX
 * descriptor rings.
 *
 *************************************************************************/

/*
 * Initialise a special buffer
 *
 * This will define a buffer (previously allocated via
 * efx_alloc_special_buffer()) in the buffer table, allowing
 * it to be used for event queues, descriptor rings etc.
 */
static void
efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	efx_qword_t buf_desc;
	int index;
	dma_addr_t dma_addr;
	int i;

	EFX_BUG_ON_PARANOID(!buffer->addr);

	/* Write buffer descriptors to NIC */
	for (i = 0; i < buffer->entries; i++) {
		index = buffer->index + i;
		dma_addr = buffer->dma_addr + (i * 4096);
		netif_dbg(efx, probe, efx->net_dev,
			  "mapping special buffer %d at %llx\n",
			  index, (unsigned long long)dma_addr);
		EFX_POPULATE_QWORD_3(buf_desc,
				     FRF_AZ_BUF_ADR_REGION, 0,
				     FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
				     FRF_AZ_BUF_OWNER_ID_FBUF, 0);
		efx_write_buf_tbl(efx, &buf_desc, index);
	}
}

/* Unmaps a buffer and clears the buffer table entries */
static void
efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	efx_oword_t buf_tbl_upd;
	unsigned int start = buffer->index;
	unsigned int end = (buffer->index + buffer->entries - 1);

	if (!buffer->entries)
		return;

	netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n",
		  buffer->index, buffer->index + buffer->entries - 1);

	EFX_POPULATE_OWORD_4(buf_tbl_upd,
			     FRF_AZ_BUF_UPD_CMD, 0,
			     FRF_AZ_BUF_CLR_CMD, 1,
			     FRF_AZ_BUF_CLR_END_ID, end,
			     FRF_AZ_BUF_CLR_START_ID, start);
	efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
}

/*
 * Allocate a new special buffer
 *
 * This allocates memory for a new buffer, clears it and allocates a
 * new buffer ID range. It does not write into the buffer table.
 *
 * This call will allocate 4KB buffers, since 8KB buffers can't be
 * used for event queues and descriptor rings.
 */
static int efx_alloc_special_buffer(struct efx_nic *efx,
				    struct efx_special_buffer *buffer,
				    unsigned int len)
{
	len = ALIGN(len, EFX_BUF_SIZE);

	buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
					  &buffer->dma_addr, GFP_KERNEL);
	if (!buffer->addr)
		return -ENOMEM;
	buffer->len = len;
	buffer->entries = len / EFX_BUF_SIZE;
	BUG_ON(buffer->dma_addr & (EFX_BUF_SIZE - 1));

	/* All zeros is a potentially valid event so memset to 0xff */
	memset(buffer->addr, 0xff, len);

	/* Select new buffer ID */
	buffer->index = efx->next_buffer_table;
	efx->next_buffer_table += buffer->entries;

	netif_dbg(efx, probe, efx->net_dev,
		  "allocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->dma_addr, len,
		  buffer->addr, (u64)virt_to_phys(buffer->addr));

	return 0;
}
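
/* Note that buffer IDs are handed out monotonically from
 * efx->next_buffer_table; efx_free_special_buffer() below releases
 * the memory but does not return the ID range to any pool.
 */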
static void
efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	if (!buffer->addr)
		return;

	netif_dbg(efx, hw, efx->net_dev,
		  "deallocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->dma_addr, buffer->len,
		  buffer->addr, (u64)virt_to_phys(buffer->addr));

	dma_free_coherent(&efx->pci_dev->dev, buffer->len, buffer->addr,
			  buffer->dma_addr);
	buffer->addr = NULL;
	buffer->entries = 0;
}

/**************************************************************************
 *
 * Generic buffer handling
 * These buffers are used for interrupt status and MAC stats
 *
 **************************************************************************/

int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
			 unsigned int len)
{
	buffer->addr = pci_alloc_consistent(efx->pci_dev, len,
					    &buffer->dma_addr);
	if (!buffer->addr)
		return -ENOMEM;
	buffer->len = len;
	memset(buffer->addr, 0, len);
	return 0;
}

void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
{
	if (buffer->addr) {
		pci_free_consistent(efx->pci_dev, buffer->len,
				    buffer->addr, buffer->dma_addr);
		buffer->addr = NULL;
	}
}
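
/* Unlike special buffers, generic buffers are never entered in the
 * buffer table; the NIC is given their DMA addresses directly.
 */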

/**************************************************************************
 *
 * TX path
 *
 **************************************************************************/

/* Returns a pointer to the specified transmit descriptor in the TX
 * descriptor queue belonging to the specified channel.
 */
static inline efx_qword_t *
efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
{
	return ((efx_qword_t *) (tx_queue->txd.addr)) + index;
}

/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
static inline void efx_notify_tx_desc(struct efx_tx_queue *tx_queue)
{
	unsigned write_ptr;
	efx_dword_t reg;

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(tx_queue->efx, &reg,
			FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
}

/* Write pointer and first descriptor for TX descriptor ring */
static inline void efx_push_tx_desc(struct efx_tx_queue *tx_queue,
				    const efx_qword_t *txd)
{
	unsigned write_ptr;
	efx_oword_t reg;

	BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0);
	BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0);

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true,
			     FRF_AZ_TX_DESC_WPTR, write_ptr);
	reg.qword[0] = *txd;
	efx_writeo_page(tx_queue->efx, &reg,
			FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);
}
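
/* Decide whether TX push may be used for this doorbell.  The TX
 * completion path (in tx.c) records, in empty_read_count, the write
 * count at which it last saw the queue drain completely, tagged with
 * EFX_EMPTY_COUNT_VALID; pushing the first descriptor along with the
 * doorbell is only useful, and only attempted, if nothing has been
 * written since then.
 */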
static inline bool
efx_may_push_tx_desc(struct efx_tx_queue *tx_queue, unsigned int write_count)
{
	unsigned empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);

	if (empty_read_count == 0)
		return false;

	tx_queue->empty_read_count = 0;
	return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0;
}

/* For each entry inserted into the software descriptor ring, create a
 * descriptor in the hardware TX descriptor ring (in host memory), and
 * write a doorbell.
 */
void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;
	efx_qword_t *txd;
	unsigned write_ptr;
	unsigned old_write_count = tx_queue->write_count;

	BUG_ON(tx_queue->write_count == tx_queue->insert_count);

	do {
		write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[write_ptr];
		txd = efx_tx_desc(tx_queue, write_ptr);
		++tx_queue->write_count;

		/* Create TX descriptor ring entry */
		EFX_POPULATE_QWORD_4(*txd,
				     FSF_AZ_TX_KER_CONT, buffer->continuation,
				     FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
				     FSF_AZ_TX_KER_BUF_REGION, 0,
				     FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
	} while (tx_queue->write_count != tx_queue->insert_count);

	wmb(); /* Ensure descriptors are written before they are fetched */

	if (efx_may_push_tx_desc(tx_queue, old_write_count)) {
		txd = efx_tx_desc(tx_queue,
				  old_write_count & tx_queue->ptr_mask);
		efx_push_tx_desc(tx_queue, txd);
		++tx_queue->pushes;
	} else {
		efx_notify_tx_desc(tx_queue);
	}
}

/* Allocate hardware resources for a TX queue */
int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned entries;

	entries = tx_queue->ptr_mask + 1;
	return efx_alloc_special_buffer(efx, &tx_queue->txd,
					entries * sizeof(efx_qword_t));
}

void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t reg;

	tx_queue->flushed = FLUSH_NONE;

	/* Pin TX descriptor ring */
	efx_init_special_buffer(efx, &tx_queue->txd);

	/* Push TX descriptor ring to card */
	EFX_POPULATE_OWORD_10(reg,
			      FRF_AZ_TX_DESCQ_EN, 1,
			      FRF_AZ_TX_ISCSI_DDIG_EN, 0,
			      FRF_AZ_TX_ISCSI_HDIG_EN, 0,
			      FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
			      FRF_AZ_TX_DESCQ_EVQ_ID,
			      tx_queue->channel->channel,
			      FRF_AZ_TX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
			      FRF_AZ_TX_DESCQ_SIZE,
			      __ffs(tx_queue->txd.entries),
			      FRF_AZ_TX_DESCQ_TYPE, 0,
			      FRF_BZ_TX_NON_IP_DROP_DIS, 1);

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS,
				    !csum);
	}

	efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
		/* Only 128 bits in this register */
		BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128);

		efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
		if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
			clear_bit_le(tx_queue->queue, (void *)&reg);
		else
			set_bit_le(tx_queue->queue, (void *)&reg);
		efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
	}

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		EFX_POPULATE_OWORD_1(reg,
				     FRF_BZ_TX_PACE,
				     (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
				     FFE_BZ_TX_PACE_OFF :
				     FFE_BZ_TX_PACE_RESERVED);
		efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL,
				 tx_queue->queue);
	}
}

static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t tx_flush_descq;

	tx_queue->flushed = FLUSH_PENDING;

	/* Post a flush command */
	EFX_POPULATE_OWORD_2(tx_flush_descq,
			     FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
	efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
}

void efx_nic_fini_tx(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t tx_desc_ptr;

	/* The queue should have been flushed */
	WARN_ON(tx_queue->flushed != FLUSH_DONE);

	/* Remove TX descriptor ring from card */
	EFX_ZERO_OWORD(tx_desc_ptr);
	efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	/* Unpin TX descriptor ring */
	efx_fini_special_buffer(efx, &tx_queue->txd);
}

/* Free buffers backing TX queue */
void efx_nic_remove_tx(struct efx_tx_queue *tx_queue)
{
	efx_free_special_buffer(tx_queue->efx, &tx_queue->txd);
}

/**************************************************************************
 *
 * RX path
 *
 **************************************************************************/

/* Returns a pointer to the specified descriptor in the RX descriptor queue */
static inline efx_qword_t *
efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
{
	return ((efx_qword_t *) (rx_queue->rxd.addr)) + index;
}

/* This creates an entry in the RX descriptor queue */
static inline void
efx_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index)
{
	struct efx_rx_buffer *rx_buf;
	efx_qword_t *rxd;

	rxd = efx_rx_desc(rx_queue, index);
	rx_buf = efx_rx_buffer(rx_queue, index);
	EFX_POPULATE_QWORD_3(*rxd,
			     FSF_AZ_RX_KER_BUF_SIZE,
			     rx_buf->len -
			     rx_queue->efx->type->rx_buffer_padding,
			     FSF_AZ_RX_KER_BUF_REGION, 0,
			     FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
}

/* This writes to the RX_DESC_WPTR register for the specified receive
 * descriptor ring.
 */
void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	efx_dword_t reg;
	unsigned write_ptr;

	while (rx_queue->notified_count != rx_queue->added_count) {
		efx_build_rx_desc(
			rx_queue,
			rx_queue->notified_count & rx_queue->ptr_mask);
		++rx_queue->notified_count;
	}

	wmb();
	write_ptr = rx_queue->added_count & rx_queue->ptr_mask;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0,
			efx_rx_queue_index(rx_queue));
}

int efx_nic_probe_rx(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned entries;

	entries = rx_queue->ptr_mask + 1;
	return efx_alloc_special_buffer(efx, &rx_queue->rxd,
					entries * sizeof(efx_qword_t));
}

void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;
	bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0;
	bool iscsi_digest_en = is_b0;

	netif_dbg(efx, hw, efx->net_dev,
		  "RX queue %d ring in special buffers %d-%d\n",
		  efx_rx_queue_index(rx_queue), rx_queue->rxd.index,
		  rx_queue->rxd.index + rx_queue->rxd.entries - 1);

	rx_queue->flushed = FLUSH_NONE;

	/* Pin RX descriptor ring */
	efx_init_special_buffer(efx, &rx_queue->rxd);

	/* Push RX descriptor ring to card */
	EFX_POPULATE_OWORD_10(rx_desc_ptr,
			      FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
			      FRF_AZ_RX_DESCQ_EVQ_ID,
			      efx_rx_queue_channel(rx_queue)->channel,
			      FRF_AZ_RX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_RX_DESCQ_LABEL,
			      efx_rx_queue_index(rx_queue),
			      FRF_AZ_RX_DESCQ_SIZE,
			      __ffs(rx_queue->rxd.entries),
			      FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
			      /* For >=B0 this is scatter so disable */
			      FRF_AZ_RX_DESCQ_JUMBO, !is_b0,
			      FRF_AZ_RX_DESCQ_EN, 1);
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 efx_rx_queue_index(rx_queue));
}

static void efx_flush_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	efx_oword_t rx_flush_descq;

	rx_queue->flushed = FLUSH_PENDING;

	/* Post a flush command */
	EFX_POPULATE_OWORD_2(rx_flush_descq,
			     FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_RX_FLUSH_DESCQ,
			     efx_rx_queue_index(rx_queue));
	efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
}

void efx_nic_fini_rx(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;

	/* The queue should already have been flushed */
	WARN_ON(rx_queue->flushed != FLUSH_DONE);

	/* Remove RX descriptor ring from card */
	EFX_ZERO_OWORD(rx_desc_ptr);
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 efx_rx_queue_index(rx_queue));

	/* Unpin RX descriptor ring */
	efx_fini_special_buffer(efx, &rx_queue->rxd);
}

/* Free buffers backing RX queue */
void efx_nic_remove_rx(struct efx_rx_queue *rx_queue)
{
	efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
}

/**************************************************************************
 *
 * Event queue processing
 * Event queues are processed by per-channel tasklets.
 *
 **************************************************************************/

/* Update a channel's event queue's read pointer (RPTR) register
 *
 * This writes the EVQ_RPTR_REG register for the specified channel's
 * event queue.
 */
void efx_nic_eventq_read_ack(struct efx_channel *channel)
{
	efx_dword_t reg;
	struct efx_nic *efx = channel->efx;

	EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
			     channel->eventq_read_ptr & channel->eventq_mask);
	efx_writed_table(efx, &reg, efx->type->evq_rptr_tbl_base,
			 channel->channel);
}

/* Use HW to insert a SW defined event */
static void efx_generate_event(struct efx_channel *channel, efx_qword_t *event)
{
	efx_oword_t drv_ev_reg;

	BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 ||
		     FRF_AZ_DRV_EV_DATA_WIDTH != 64);
	drv_ev_reg.u32[0] = event->u32[0];
	drv_ev_reg.u32[1] = event->u32[1];
	drv_ev_reg.u32[2] = 0;
	drv_ev_reg.u32[3] = 0;
	EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, channel->channel);
	efx_writeo(channel->efx, &drv_ev_reg, FR_AZ_DRV_EV);
}

/* Handle a transmit completion event
 *
 * The NIC batches TX completion events; the message we receive is of
 * the form "complete all TX events up to this index".
 */
static int
efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
{
	unsigned int tx_ev_desc_ptr;
	unsigned int tx_ev_q_label;
	struct efx_tx_queue *tx_queue;
	struct efx_nic *efx = channel->efx;
	int tx_packets = 0;

	if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
		/* Transmit completion */
		tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = efx_channel_get_tx_queue(
			channel, tx_ev_q_label % EFX_TXQ_TYPES);
		tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
			      tx_queue->ptr_mask);
		channel->irq_mod_score += tx_packets;
		efx_xmit_done(tx_queue, tx_ev_desc_ptr);
	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
		/* Rewrite the FIFO write pointer */
		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = efx_channel_get_tx_queue(
			channel, tx_ev_q_label % EFX_TXQ_TYPES);

		if (efx_dev_registered(efx))
			netif_tx_lock(efx->net_dev);
		efx_notify_tx_desc(tx_queue);
		if (efx_dev_registered(efx))
			netif_tx_unlock(efx->net_dev);
	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR) &&
		   EFX_WORKAROUND_10727(efx)) {
		efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
	} else {
		netif_err(efx, tx_err, efx->net_dev,
			  "channel %d unexpected TX event "
			  EFX_QWORD_FMT"\n", channel->channel,
			  EFX_QWORD_VAL(*event));
	}

	return tx_packets;
}

/* Detect errors included in the rx_evt_pkt_ok bit. */
static void efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
				 const efx_qword_t *event,
				 bool *rx_ev_pkt_ok,
				 bool *discard)
{
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	struct efx_nic *efx = rx_queue->efx;
	bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
	bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
	bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
	bool rx_ev_other_err, rx_ev_pause_frm;
	bool rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned rx_ev_pkt_type;

	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
	rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
	rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE);
	rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
						 FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
	rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
						  FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR);
	rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event,
						   FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);
	rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR);
	rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC);
	rx_ev_drib_nib = ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) ?
			  0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB));
	rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);

	/* Every error apart from tobe_disc and pause_frm */
	rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
			   rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
			   rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);

	/* Count errors that are not in MAC stats. Ignore expected
	 * checksum errors during self-test. */
	if (rx_ev_frm_trunc)
		++channel->n_rx_frm_trunc;
	else if (rx_ev_tobe_disc)
		++channel->n_rx_tobe_disc;
	else if (!efx->loopback_selftest) {
		if (rx_ev_ip_hdr_chksum_err)
			++channel->n_rx_ip_hdr_chksum_err;
		else if (rx_ev_tcp_udp_chksum_err)
			++channel->n_rx_tcp_udp_chksum_err;
	}

	/* The frame must be discarded if any of these are true. */
	*discard = (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
		    rx_ev_tobe_disc | rx_ev_pause_frm);

	/* TOBE_DISC is expected on unicast mismatches; don't print out an
	 * error message. FRM_TRUNC indicates RXDP dropped the packet due
	 * to a FIFO overflow.
	 */
#ifdef EFX_ENABLE_DEBUG
	if (rx_ev_other_err && net_ratelimit()) {
		netif_dbg(efx, rx_err, efx->net_dev,
			  " RX queue %d unexpected RX event "
			  EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
			  efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event),
			  rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
			  rx_ev_ip_hdr_chksum_err ?
			  " [IP_HDR_CHKSUM_ERR]" : "",
			  rx_ev_tcp_udp_chksum_err ?
			  " [TCP_UDP_CHKSUM_ERR]" : "",
			  rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
			  rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
			  rx_ev_drib_nib ? " [DRIB_NIB]" : "",
			  rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
			  rx_ev_pause_frm ? " [PAUSE]" : "");
	}
#endif
}

/* Handle receive events that are not in-order. */
static void
efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned expected, dropped;

	expected = rx_queue->removed_count & rx_queue->ptr_mask;
	dropped = (index - expected) & rx_queue->ptr_mask;
	netif_info(efx, rx_err, efx->net_dev,
		   "dropped %d events (index=%d expected=%d)\n",
		   dropped, index, expected);

	efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
			   RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
}

/* Handle a packet received event
 *
 * The NIC gives a "discard" flag if it's a unicast packet with the
 * wrong destination address
 * Also "is multicast" and "matches multicast filter" flags can be used to
 * discard non-matching multicast packets.
 */
static void
efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
{
	unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
	unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned expected_ptr;
	bool rx_ev_pkt_ok, discard = false, checksummed;
	struct efx_rx_queue *rx_queue;

	/* Basic packet information */
	rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
	rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT));
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP) != 1);
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
		channel->channel);

	rx_queue = efx_channel_get_rx_queue(channel);

	rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
	expected_ptr = rx_queue->removed_count & rx_queue->ptr_mask;
	if (unlikely(rx_ev_desc_ptr != expected_ptr))
		efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr);

	if (likely(rx_ev_pkt_ok)) {
		/* If packet is marked as OK and packet type is TCP/IP or
		 * UDP/IP, then we can rely on the hardware checksum.
		 */
		checksummed =
			rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP ||
			rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP;
	} else {
		efx_handle_rx_not_ok(rx_queue, event, &rx_ev_pkt_ok, &discard);
		checksummed = false;
	}

	/* Detect multicast packets that didn't match the filter */
	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
	if (rx_ev_mcast_pkt) {
		unsigned int rx_ev_mcast_hash_match =
			EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);

		if (unlikely(!rx_ev_mcast_hash_match)) {
			++channel->n_rx_mcast_mismatch;
			discard = true;
		}
	}

	channel->irq_mod_score += 2;

	/* Handle received packet */
	efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt,
		      checksummed, discard);
}

static void
efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	unsigned code;

	code = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
	if (code == EFX_CHANNEL_MAGIC_TEST(channel))
		; /* ignore */
	else if (code == EFX_CHANNEL_MAGIC_FILL(channel))
		/* The queue must be empty, so we won't receive any rx
		 * events, so efx_process_channel() won't refill the
		 * queue. Refill it here */
		efx_fast_push_rx_descriptors(efx_channel_get_rx_queue(channel));
	else
		netif_dbg(efx, hw, efx->net_dev, "channel %d received "
			  "generated event "EFX_QWORD_FMT"\n",
			  channel->channel, EFX_QWORD_VAL(*event));
}

static void
efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	unsigned int ev_sub_code;
	unsigned int ev_sub_data;

	ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE);
	ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);

	switch (ev_sub_code) {
	case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AZ_EVQ_INIT_DONE_EV:
		netif_dbg(efx, hw, efx->net_dev,
			  "channel %d EVQ %d initialised\n",
			  channel->channel, ev_sub_data);
		break;
	case FSE_AZ_SRM_UPD_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d SRAM update done\n", channel->channel);
		break;
	case FSE_AZ_WAKE_UP_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d RXQ %d wakeup event\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AZ_TIMER_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d RX queue %d timer expired\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AA_RX_RECOVER_EV:
		netif_err(efx, rx_err, efx->net_dev,
			  "channel %d seen DRIVER RX_RESET event. "
			  "Resetting.\n", channel->channel);
		atomic_inc(&efx->rx_reset);
		efx_schedule_reset(efx,
				   EFX_WORKAROUND_6555(efx) ?
				   RESET_TYPE_RX_RECOVERY :
				   RESET_TYPE_DISABLE);
		break;
	case FSE_BZ_RX_DSC_ERROR_EV:
		netif_err(efx, rx_err, efx->net_dev,
			  "RX DMA Q %d reports descriptor fetch error."
			  " RX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
		efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH);
		break;
	case FSE_BZ_TX_DSC_ERROR_EV:
		netif_err(efx, tx_err, efx->net_dev,
			  "TX DMA Q %d reports descriptor fetch error."
			  " TX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
		efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
		break;
	default:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d unknown driver event code %d "
			   "data %04x\n", channel->channel, ev_sub_code,
			   ev_sub_data);
		break;
	}
}
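
/* Process events on a channel's event queue, handling up to @budget
 * received packets.  Returns the number of RX packets handled
 * ("spent"); the NAPI poll routine in efx.c is expected to re-arm the
 * event queue once this falls short of the budget.
 */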
int efx_nic_process_eventq(struct efx_channel *channel, int budget)
{
	struct efx_nic *efx = channel->efx;
	unsigned int read_ptr;
	efx_qword_t event, *p_event;
	int ev_code;
	int tx_packets = 0;
	int spent = 0;

	read_ptr = channel->eventq_read_ptr;

	for (;;) {
		p_event = efx_event(channel, read_ptr);
		event = *p_event;

		if (!efx_event_present(&event))
			/* End of events */
			break;

		netif_vdbg(channel->efx, intr, channel->efx->net_dev,
			   "channel %d event is "EFX_QWORD_FMT"\n",
			   channel->channel, EFX_QWORD_VAL(event));

		/* Clear this event by marking it all ones */
		EFX_SET_QWORD(*p_event);

		++read_ptr;

		ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);

		switch (ev_code) {
		case FSE_AZ_EV_CODE_RX_EV:
			efx_handle_rx_event(channel, &event);
			if (++spent == budget)
				goto out;
			break;
		case FSE_AZ_EV_CODE_TX_EV:
			tx_packets += efx_handle_tx_event(channel, &event);
			if (tx_packets > efx->txq_entries) {
				spent = budget;
				goto out;
			}
			break;
		case FSE_AZ_EV_CODE_DRV_GEN_EV:
			efx_handle_generated_event(channel, &event);
			break;
		case FSE_AZ_EV_CODE_DRIVER_EV:
			efx_handle_driver_event(channel, &event);
			break;
		case FSE_CZ_EV_CODE_MCDI_EV:
			efx_mcdi_process_event(channel, &event);
			break;
		case FSE_AZ_EV_CODE_GLOBAL_EV:
			if (efx->type->handle_global_event &&
			    efx->type->handle_global_event(channel, &event))
				break;
			/* else fall through */
		default:
			netif_err(channel->efx, hw, channel->efx->net_dev,
				  "channel %d unknown event type %d (data "
				  EFX_QWORD_FMT ")\n", channel->channel,
				  ev_code, EFX_QWORD_VAL(event));
		}
	}

out:
	channel->eventq_read_ptr = read_ptr;
	return spent;
}

/* Check whether an event is present in the eventq at the current
 * read pointer. Only useful for self-test.
 */
bool efx_nic_event_present(struct efx_channel *channel)
{
	return efx_event_present(efx_event(channel, channel->eventq_read_ptr));
}

/* Allocate buffer table entries for event queue */
int efx_nic_probe_eventq(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	unsigned entries;

	entries = channel->eventq_mask + 1;
	return efx_alloc_special_buffer(efx, &channel->eventq,
					entries * sizeof(efx_qword_t));
}

void efx_nic_init_eventq(struct efx_channel *channel)
{
	efx_oword_t reg;
	struct efx_nic *efx = channel->efx;

	netif_dbg(efx, hw, efx->net_dev,
		  "channel %d event queue in special buffers %d-%d\n",
		  channel->channel, channel->eventq.index,
		  channel->eventq.index + channel->eventq.entries - 1);

	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
		EFX_POPULATE_OWORD_3(reg,
				     FRF_CZ_TIMER_Q_EN, 1,
				     FRF_CZ_HOST_NOTIFY_MODE, 0,
				     FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
		efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
	}

	/* Pin event queue buffer */
	efx_init_special_buffer(efx, &channel->eventq);

	/* Fill event queue with all ones (i.e. empty events) */
	memset(channel->eventq.addr, 0xff, channel->eventq.len);

	/* Push event queue to card */
	EFX_POPULATE_OWORD_3(reg,
			     FRF_AZ_EVQ_EN, 1,
			     FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
			     FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
	efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
			 channel->channel);

	efx->type->push_irq_moderation(channel);
}

void efx_nic_fini_eventq(struct efx_channel *channel)
{
	efx_oword_t reg;
	struct efx_nic *efx = channel->efx;

	/* Remove event queue from card */
	EFX_ZERO_OWORD(reg);
	efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
			 channel->channel);
	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
		efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);

	/* Unpin event queue */
	efx_fini_special_buffer(efx, &channel->eventq);
}

/* Free buffers backing event queue */
void efx_nic_remove_eventq(struct efx_channel *channel)
{
	efx_free_special_buffer(channel->efx, &channel->eventq);
}

void efx_nic_generate_test_event(struct efx_channel *channel)
{
	unsigned int magic = EFX_CHANNEL_MAGIC_TEST(channel);
	efx_qword_t test_event;

	EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE,
			     FSE_AZ_EV_CODE_DRV_GEN_EV,
			     FSF_AZ_DRV_GEN_EV_MAGIC, magic);
	efx_generate_event(channel, &test_event);
}

void efx_nic_generate_fill_event(struct efx_channel *channel)
{
	unsigned int magic = EFX_CHANNEL_MAGIC_FILL(channel);
	efx_qword_t test_event;

	EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE,
			     FSE_AZ_EV_CODE_DRV_GEN_EV,
			     FSF_AZ_DRV_GEN_EV_MAGIC, magic);
	efx_generate_event(channel, &test_event);
}

/**************************************************************************
 *
 * Flush handling
 *
 **************************************************************************/
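
/* Scan channel 0's event queue for TX/RX flush-done driver events and
 * update each queue's flushed state accordingly.  Any other event seen
 * here is thrown away, which is safe because the queues are about to
 * be torn down anyway.
 */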
static void efx_poll_flush_events(struct efx_nic *efx)
{
	struct efx_channel *channel = efx_get_channel(efx, 0);
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	unsigned int read_ptr = channel->eventq_read_ptr;
	unsigned int end_ptr = read_ptr + channel->eventq_mask - 1;

	do {
		efx_qword_t *event = efx_event(channel, read_ptr);
		int ev_code, ev_sub_code, ev_queue;
		bool ev_failed;

		if (!efx_event_present(event))
			break;

		ev_code = EFX_QWORD_FIELD(*event, FSF_AZ_EV_CODE);
		ev_sub_code = EFX_QWORD_FIELD(*event,
					      FSF_AZ_DRIVER_EV_SUBCODE);
		if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV &&
		    ev_sub_code == FSE_AZ_TX_DESCQ_FLS_DONE_EV) {
			ev_queue = EFX_QWORD_FIELD(*event,
						   FSF_AZ_DRIVER_EV_SUBDATA);
			if (ev_queue < EFX_TXQ_TYPES * efx->n_tx_channels) {
				tx_queue = efx_get_tx_queue(
					efx, ev_queue / EFX_TXQ_TYPES,
					ev_queue % EFX_TXQ_TYPES);
				tx_queue->flushed = FLUSH_DONE;
			}
		} else if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV &&
			   ev_sub_code == FSE_AZ_RX_DESCQ_FLS_DONE_EV) {
			ev_queue = EFX_QWORD_FIELD(
				*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
			ev_failed = EFX_QWORD_FIELD(
				*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
			if (ev_queue < efx->n_rx_channels) {
				rx_queue = efx_get_rx_queue(efx, ev_queue);
				rx_queue->flushed =
					ev_failed ? FLUSH_FAILED : FLUSH_DONE;
			}
		}

		/* We're about to destroy the queue anyway, so
		 * it's ok to throw away every non-flush event */
		EFX_SET_QWORD(*event);

		++read_ptr;
	} while (read_ptr != end_ptr);

	channel->eventq_read_ptr = read_ptr;
}

/* Handle tx and rx flushes at the same time, since they run in
 * parallel in the hardware and there's no reason for us to
 * serialise them */
int efx_nic_flush_queues(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	int i, tx_pending, rx_pending;

	/* If necessary prepare the hardware for flushing */
	efx->type->prepare_flush(efx);

	/* Flush all tx queues in parallel */
	efx_for_each_channel(channel, efx) {
		efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
			if (tx_queue->initialised)
				efx_flush_tx_queue(tx_queue);
		}
	}

	/* The hardware supports four concurrent rx flushes, each of which may
	 * need to be retried if there is an outstanding descriptor fetch */
	for (i = 0; i < EFX_FLUSH_POLL_COUNT; ++i) {
		rx_pending = tx_pending = 0;
		efx_for_each_channel(channel, efx) {
			efx_for_each_channel_rx_queue(rx_queue, channel) {
				if (rx_queue->flushed == FLUSH_PENDING)
					++rx_pending;
			}
		}
		efx_for_each_channel(channel, efx) {
			efx_for_each_channel_rx_queue(rx_queue, channel) {
				if (rx_pending == EFX_RX_FLUSH_COUNT)
					break;
				if (rx_queue->flushed == FLUSH_FAILED ||
				    rx_queue->flushed == FLUSH_NONE) {
					efx_flush_rx_queue(rx_queue);
					++rx_pending;
				}
			}
			efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
				if (tx_queue->initialised &&
				    tx_queue->flushed != FLUSH_DONE)
					++tx_pending;
			}
		}

		if (rx_pending == 0 && tx_pending == 0)
			return 0;

		msleep(EFX_FLUSH_INTERVAL);
		efx_poll_flush_events(efx);
	}

	/* Mark the queues as all flushed. We're going to return failure
	 * leading to a reset, or fake up success anyway */
	efx_for_each_channel(channel, efx) {
		efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
			if (tx_queue->initialised &&
			    tx_queue->flushed != FLUSH_DONE)
				netif_err(efx, hw, efx->net_dev,
					  "tx queue %d flush command timed out\n",
					  tx_queue->queue);
			tx_queue->flushed = FLUSH_DONE;
		}
		efx_for_each_channel_rx_queue(rx_queue, channel) {
			if (rx_queue->flushed != FLUSH_DONE)
				netif_err(efx, hw, efx->net_dev,
					  "rx queue %d flush command timed out\n",
					  efx_rx_queue_index(rx_queue));
			rx_queue->flushed = FLUSH_DONE;
		}
	}

	return -ETIMEDOUT;
}

/**************************************************************************
 *
 * Hardware interrupts
 * The hardware interrupt handler does very little work; all the event
 * queue processing is carried out by per-channel tasklets.
 *
 **************************************************************************/

/* Enable/disable/generate interrupts */
static inline void efx_nic_interrupts(struct efx_nic *efx,
				      bool enabled, bool force)
{
	efx_oword_t int_en_reg_ker;

	EFX_POPULATE_OWORD_3(int_en_reg_ker,
			     FRF_AZ_KER_INT_LEVE_SEL, efx->fatal_irq_level,
			     FRF_AZ_KER_INT_KER, force,
			     FRF_AZ_DRV_INT_EN_KER, enabled);
	efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
}

void efx_nic_enable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));
	wmb(); /* Ensure interrupt vector is clear before interrupts enabled */

	/* Enable interrupts */
	efx_nic_interrupts(efx, true, false);

	/* Force processing of all the channels to get the EVQ RPTRs up to
	   date */
	efx_for_each_channel(channel, efx)
		efx_schedule_channel(channel);
}

void efx_nic_disable_interrupts(struct efx_nic *efx)
{
	/* Disable interrupts */
	efx_nic_interrupts(efx, false, false);
}

/* Generate a test interrupt
 * Interrupt must already have been enabled, otherwise nasty things
 * may happen.
 */
void efx_nic_generate_interrupt(struct efx_nic *efx)
{
	efx_nic_interrupts(efx, true, true);
}

/* Process a fatal interrupt
 * Disable bus mastering ASAP and schedule a reset
 */
irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t *int_ker = efx->irq_status.addr;
	efx_oword_t fatal_intr;
	int error, mem_perr;

	efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
	error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);
	netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status "
		  EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
		  EFX_OWORD_VAL(fatal_intr),
		  error ? "disabling bus mastering" : "no recognised error");

	/* If this is a memory parity error dump which blocks are offending */
	mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) ||
		    EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER));
	if (mem_perr) {
		efx_oword_t reg;
		efx_reado(efx, &reg, FR_AZ_MEM_STAT);
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n",
			  EFX_OWORD_VAL(reg));
	}

	/* Disable both devices */
	pci_clear_master(efx->pci_dev);
	if (efx_nic_is_dual_func(efx))
		pci_clear_master(nic_data->pci_dev2);
	efx_nic_disable_interrupts(efx);

	/* Count errors and reset or disable the NIC accordingly */
	if (efx->int_error_count == 0 ||
	    time_after(jiffies, efx->int_error_expire)) {
		efx->int_error_count = 0;
		efx->int_error_expire =
			jiffies + EFX_INT_ERROR_EXPIRE * HZ;
	}
	if (++efx->int_error_count < EFX_MAX_INT_ERRORS) {
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR - reset scheduled\n");
		efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
	} else {
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR - max number of errors seen. "
			  "NIC will be disabled\n");
		efx_schedule_reset(efx, RESET_TYPE_DISABLE);
	}

	return IRQ_HANDLED;
}

/* Handle a legacy interrupt
 * Acknowledges the interrupt and schedules event queue processing.
 */
  1207. static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
  1208. {
  1209. struct efx_nic *efx = dev_id;
  1210. efx_oword_t *int_ker = efx->irq_status.addr;
  1211. irqreturn_t result = IRQ_NONE;
  1212. struct efx_channel *channel;
  1213. efx_dword_t reg;
  1214. u32 queues;
  1215. int syserr;
  1216. /* Could this be ours? If interrupts are disabled then the
  1217. * channel state may not be valid.
  1218. */
  1219. if (!efx->legacy_irq_enabled)
  1220. return result;
  1221. /* Read the ISR which also ACKs the interrupts */
  1222. efx_readd(efx, &reg, FR_BZ_INT_ISR0);
  1223. queues = EFX_EXTRACT_DWORD(reg, 0, 31);
  1224. /* Check to see if we have a serious error condition */
  1225. if (queues & (1U << efx->fatal_irq_level)) {
  1226. syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
  1227. if (unlikely(syserr))
  1228. return efx_nic_fatal_interrupt(efx);
  1229. }
  1230. if (queues != 0) {
  1231. if (EFX_WORKAROUND_15783(efx))
  1232. efx->irq_zero_count = 0;
  1233. /* Schedule processing of any interrupting queues */
  1234. efx_for_each_channel(channel, efx) {
  1235. if (queues & 1)
  1236. efx_schedule_channel(channel);
  1237. queues >>= 1;
  1238. }
  1239. result = IRQ_HANDLED;
  1240. } else if (EFX_WORKAROUND_15783(efx)) {
  1241. efx_qword_t *event;
  1242. /* We can't return IRQ_HANDLED more than once on seeing ISR=0
  1243. * because this might be a shared interrupt. */
  1244. if (efx->irq_zero_count++ == 0)
  1245. result = IRQ_HANDLED;
  1246. /* Ensure we schedule or rearm all event queues */
  1247. efx_for_each_channel(channel, efx) {
  1248. event = efx_event(channel, channel->eventq_read_ptr);
  1249. if (efx_event_present(event))
  1250. efx_schedule_channel(channel);
  1251. else
  1252. efx_nic_eventq_read_ack(channel);
  1253. }
  1254. }
  1255. if (result == IRQ_HANDLED) {
  1256. efx->last_irq_cpu = raw_smp_processor_id();
  1257. netif_vdbg(efx, intr, efx->net_dev,
  1258. "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
  1259. irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
  1260. }
  1261. return result;
  1262. }
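/* Editorial sketch, not part of the original driver: the queues bitmask
 * walk above visits channels in order, one bit per channel, shifting the
 * mask right each step.  For example, queues == 0x5 (binary 101) would
 * schedule channels 0 and 2 only:
 */
static void __maybe_unused efx_example_queues_walk(u32 queues)
{
	unsigned int ch;

	for (ch = 0; queues != 0; ch++, queues >>= 1)
		if (queues & 1)
			pr_debug("would schedule channel %u\n", ch);
}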
/* Handle an MSI interrupt
 *
 * Handle an MSI hardware interrupt.  This routine schedules event
 * queue processing.  No interrupt acknowledgement cycle is necessary.
 * Also, we never need to check that the interrupt is for us, since
 * MSI interrupts cannot be shared.
 */
static irqreturn_t efx_msi_interrupt(int irq, void *dev_id)
{
	struct efx_channel *channel = *(struct efx_channel **)dev_id;
	struct efx_nic *efx = channel->efx;
	efx_oword_t *int_ker = efx->irq_status.addr;
	int syserr;

	efx->last_irq_cpu = raw_smp_processor_id();
	netif_vdbg(efx, intr, efx->net_dev,
		   "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
		   irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));

	/* Check to see if we have a serious error condition */
	if (channel->channel == efx->fatal_irq_level) {
		syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
		if (unlikely(syserr))
			return efx_nic_fatal_interrupt(efx);
	}

	/* Schedule processing of the channel */
	efx_schedule_channel(channel);

	return IRQ_HANDLED;
}
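/* Note (editorial, not part of the original driver): dev_id here is the
 * address of the efx->channel[] slot, not the channel pointer itself,
 * hence the double-pointer cast above.  efx_nic_init_interrupt() below
 * registers the handler with
 *
 *	request_irq(channel->irq, efx_msi_interrupt, ...,
 *		    &efx->channel[channel->channel]);
 *
 * so the slot can be re-pointed at a reallocated channel structure
 * without re-registering the IRQ (an assumption about the intent).
 */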
/* Set up the RSS indirection table.
 * This maps from the hash value of the packet to the RX queue.
 */
void efx_nic_push_rx_indir_table(struct efx_nic *efx)
{
	size_t i = 0;
	efx_dword_t dword;

	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
		return;

	BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
		     FR_BZ_RX_INDIRECTION_TBL_ROWS);

	for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
		EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
				     efx->rx_indir_table[i]);
		efx_writed_table(efx, &dword, FR_BZ_RX_INDIRECTION_TBL, i);
	}
}
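/* Editorial sketch, not part of the original driver: a plausible way for
 * a caller to spread the hash space over its RX channels before pushing
 * the table (efx->n_rx_channels as the RX channel count is an
 * assumption about the caller's state):
 */
static void __maybe_unused efx_example_fill_indir_table(struct efx_nic *efx)
{
	size_t i;

	/* Round-robin the indirection entries over the RX queues */
	for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
		efx->rx_indir_table[i] = i % efx->n_rx_channels;
	efx_nic_push_rx_indir_table(efx);
}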
/* Hook interrupt handler(s)
 * Try MSI and then legacy interrupts.
 */
int efx_nic_init_interrupt(struct efx_nic *efx)
{
	struct efx_channel *channel;
	unsigned int n_irqs;
	int rc;

	if (!EFX_INT_MODE_USE_MSI(efx)) {
		irq_handler_t handler;
		if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
			handler = efx_legacy_interrupt;
		else
			handler = falcon_legacy_interrupt_a1;

		rc = request_irq(efx->legacy_irq, handler, IRQF_SHARED,
				 efx->name, efx);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "failed to hook legacy IRQ %d\n",
				  efx->pci_dev->irq);
			goto fail1;
		}
		return 0;
	}

	/* Hook MSI or MSI-X interrupt */
	n_irqs = 0;
	efx_for_each_channel(channel, efx) {
		rc = request_irq(channel->irq, efx_msi_interrupt,
				 IRQF_PROBE_SHARED, /* Not shared */
				 efx->channel_name[channel->channel],
				 &efx->channel[channel->channel]);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "failed to hook IRQ %d\n", channel->irq);
			goto fail2;
		}
		++n_irqs;
	}

	return 0;

 fail2:
	/* Only free the IRQs that were successfully hooked */
	efx_for_each_channel(channel, efx) {
		if (n_irqs-- == 0)
			break;
		free_irq(channel->irq, &efx->channel[channel->channel]);
	}
 fail1:
	return rc;
}
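/* Note (editorial, not part of the original driver): IRQF_PROBE_SHARED
 * above does not make the vector shared; it only tells the IRQ core
 * that a sharing mismatch is expected, suppressing the warning when
 * request_irq() fails because another driver already owns the line.
 * That is why the in-line comment says "Not shared".
 */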
void efx_nic_fini_interrupt(struct efx_nic *efx)
{
	struct efx_channel *channel;
	efx_oword_t reg;

	/* Disable MSI/MSI-X interrupts */
	efx_for_each_channel(channel, efx) {
		if (channel->irq)
			free_irq(channel->irq, &efx->channel[channel->channel]);
	}

	/* ACK legacy interrupt */
	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
		efx_reado(efx, &reg, FR_BZ_INT_ISR0);
	else
		falcon_irq_ack_a1(efx);

	/* Disable legacy interrupt */
	if (efx->legacy_irq)
		free_irq(efx->legacy_irq, efx);
}
u32 efx_nic_fpga_ver(struct efx_nic *efx)
{
	efx_oword_t altera_build;
	efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
	return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER);
}
void efx_nic_init_common(struct efx_nic *efx)
{
	efx_oword_t temp;

	/* Set positions of descriptor caches in SRAM. */
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR,
			     efx->type->tx_dc_base / 8);
	efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR,
			     efx->type->rx_dc_base / 8);
	efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);

	/* Set TX descriptor cache size. */
	BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER));
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
	efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG);

	/* Set RX descriptor cache size.  Set low watermark to size-8, as
	 * this allows most efficient prefetching.
	 */
	BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER));
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
	efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
	efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);

	/* Program INT_KER address */
	EFX_POPULATE_OWORD_2(temp,
			     FRF_AZ_NORM_INT_VEC_DIS_KER,
			     EFX_INT_MODE_USE_MSI(efx),
			     FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
	efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER);

	if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx))
		/* Use an interrupt level unused by event queues */
		efx->fatal_irq_level = 0x1f;
	else
		/* Use a valid MSI-X vector */
		efx->fatal_irq_level = 0;

	/* Enable all the genuinely fatal interrupts.  (They are still
	 * masked by the overall interrupt mask, controlled by
	 * falcon_interrupts()).
	 *
	 * Note: All other fatal interrupts are enabled
	 */
	EFX_POPULATE_OWORD_3(temp,
			     FRF_AZ_ILL_ADR_INT_KER_EN, 1,
			     FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
			     FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
		EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1);
	EFX_INVERT_OWORD(temp);
	efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);

	efx_nic_push_rx_indir_table(efx);

	/* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
	 * controlled by the RX FIFO fill level.  Set arbitration to one pkt/Q.
	 */
	efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
	/* Enable SW_EV to inherit in char driver - assume harmless here */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
	/* Prefetch threshold 2 => fetch when descriptor cache half empty */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
	/* Disable hardware watchdog which can misfire */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
	/* Squash TX of packets of 16 bytes or less */
	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
		EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
	efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		EFX_POPULATE_OWORD_4(temp,
				     /* Default values */
				     FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
				     FRF_BZ_TX_PACE_SB_AF, 0xb,
				     FRF_BZ_TX_PACE_FB_BASE, 0,
				     /* Allow large pace values in the
				      * fast bin. */
				     FRF_BZ_TX_PACE_BIN_TH,
				     FFE_BZ_TX_PACE_RESERVED);
		efx_writeo(efx, &temp, FR_BZ_TX_PACE);
	}
}
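/* Worked example (editorial, not part of the original driver): the
 * BUILD_BUG_ONs above pin the relation size == 8 << order between each
 * descriptor cache size and its _ORDER constant.  With an order of 3,
 * for instance, the cache holds 8 << 3 == 64 entries and the RX
 * prefetch low watermark is then 64 - 8 == 56 entries.  (The actual
 * _ORDER values are defined earlier in this file.)
 */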
/* Register dump */

#define REGISTER_REVISION_A	1
#define REGISTER_REVISION_B	2
#define REGISTER_REVISION_C	3
#define REGISTER_REVISION_Z	3	/* latest revision */

struct efx_nic_reg {
	u32 offset:24;
	u32 min_revision:2, max_revision:2;
};

#define REGISTER(name, min_rev, max_rev) {				\
	FR_ ## min_rev ## max_rev ## _ ## name,				\
	REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev	\
}
#define REGISTER_AA(name) REGISTER(name, A, A)
#define REGISTER_AB(name) REGISTER(name, A, B)
#define REGISTER_AZ(name) REGISTER(name, A, Z)
#define REGISTER_BB(name) REGISTER(name, B, B)
#define REGISTER_BZ(name) REGISTER(name, B, Z)
#define REGISTER_CZ(name) REGISTER(name, C, Z)
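/* Expansion example (editorial, not part of the original driver):
 *
 *	REGISTER_BZ(RX_FILTER_CTL)
 *
 * token-pastes to
 *
 *	{ FR_BZ_RX_FILTER_CTL, REGISTER_REVISION_B, REGISTER_REVISION_Z }
 *
 * i.e. { FR_BZ_RX_FILTER_CTL, 2, 3 }: the register is dumped on
 * revisions B through Z (the latest).
 */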
static const struct efx_nic_reg efx_nic_regs[] = {
	REGISTER_AZ(ADR_REGION),
	REGISTER_AZ(INT_EN_KER),
	REGISTER_BZ(INT_EN_CHAR),
	REGISTER_AZ(INT_ADR_KER),
	REGISTER_BZ(INT_ADR_CHAR),
	/* INT_ACK_KER is WO */
	/* INT_ISR0 is RC */
	REGISTER_AZ(HW_INIT),
	REGISTER_CZ(USR_EV_CFG),
	REGISTER_AB(EE_SPI_HCMD),
	REGISTER_AB(EE_SPI_HADR),
	REGISTER_AB(EE_SPI_HDATA),
	REGISTER_AB(EE_BASE_PAGE),
	REGISTER_AB(EE_VPD_CFG0),
	/* EE_VPD_SW_CNTL and EE_VPD_SW_DATA are not used */
	/* PMBX_DBG_IADDR and PMBX_DBG_IDATA are indirect */
	/* PCIE_CORE_INDIRECT is indirect */
	REGISTER_AB(NIC_STAT),
	REGISTER_AB(GPIO_CTL),
	REGISTER_AB(GLB_CTL),
	/* FATAL_INTR_KER and FATAL_INTR_CHAR are partly RC */
	REGISTER_BZ(DP_CTRL),
	REGISTER_AZ(MEM_STAT),
	REGISTER_AZ(CS_DEBUG),
	REGISTER_AZ(ALTERA_BUILD),
	REGISTER_AZ(CSR_SPARE),
	REGISTER_AB(PCIE_SD_CTL0123),
	REGISTER_AB(PCIE_SD_CTL45),
	REGISTER_AB(PCIE_PCS_CTL_STAT),
	/* DEBUG_DATA_OUT is not used */
	/* DRV_EV is WO */
	REGISTER_AZ(EVQ_CTL),
	REGISTER_AZ(EVQ_CNT1),
	REGISTER_AZ(EVQ_CNT2),
	REGISTER_AZ(BUF_TBL_CFG),
	REGISTER_AZ(SRM_RX_DC_CFG),
	REGISTER_AZ(SRM_TX_DC_CFG),
	REGISTER_AZ(SRM_CFG),
	/* BUF_TBL_UPD is WO */
	REGISTER_AZ(SRM_UPD_EVQ),
	REGISTER_AZ(SRAM_PARITY),
	REGISTER_AZ(RX_CFG),
	REGISTER_BZ(RX_FILTER_CTL),
	/* RX_FLUSH_DESCQ is WO */
	REGISTER_AZ(RX_DC_CFG),
	REGISTER_AZ(RX_DC_PF_WM),
	REGISTER_BZ(RX_RSS_TKEY),
	/* RX_NODESC_DROP is RC */
	REGISTER_AA(RX_SELF_RST),
	/* RX_DEBUG, RX_PUSH_DROP are not used */
	REGISTER_CZ(RX_RSS_IPV6_REG1),
	REGISTER_CZ(RX_RSS_IPV6_REG2),
	REGISTER_CZ(RX_RSS_IPV6_REG3),
	/* TX_FLUSH_DESCQ is WO */
	REGISTER_AZ(TX_DC_CFG),
	REGISTER_AA(TX_CHKSM_CFG),
	REGISTER_AZ(TX_CFG),
	/* TX_PUSH_DROP is not used */
	REGISTER_AZ(TX_RESERVED),
	REGISTER_BZ(TX_PACE),
	/* TX_PACE_DROP_QID is RC */
	REGISTER_BB(TX_VLAN),
	REGISTER_BZ(TX_IPFIL_PORTEN),
	REGISTER_AB(MD_TXD),
	REGISTER_AB(MD_RXD),
	REGISTER_AB(MD_CS),
	REGISTER_AB(MD_PHY_ADR),
	REGISTER_AB(MD_ID),
	/* MD_STAT is RC */
	REGISTER_AB(MAC_STAT_DMA),
	REGISTER_AB(MAC_CTRL),
	REGISTER_BB(GEN_MODE),
	REGISTER_AB(MAC_MC_HASH_REG0),
	REGISTER_AB(MAC_MC_HASH_REG1),
	REGISTER_AB(GM_CFG1),
	REGISTER_AB(GM_CFG2),
	/* GM_IPG and GM_HD are not used */
	REGISTER_AB(GM_MAX_FLEN),
	/* GM_TEST is not used */
	REGISTER_AB(GM_ADR1),
	REGISTER_AB(GM_ADR2),
	REGISTER_AB(GMF_CFG0),
	REGISTER_AB(GMF_CFG1),
	REGISTER_AB(GMF_CFG2),
	REGISTER_AB(GMF_CFG3),
	REGISTER_AB(GMF_CFG4),
	REGISTER_AB(GMF_CFG5),
	REGISTER_BB(TX_SRC_MAC_CTL),
	REGISTER_AB(XM_ADR_LO),
	REGISTER_AB(XM_ADR_HI),
	REGISTER_AB(XM_GLB_CFG),
	REGISTER_AB(XM_TX_CFG),
	REGISTER_AB(XM_RX_CFG),
	REGISTER_AB(XM_MGT_INT_MASK),
	REGISTER_AB(XM_FC),
	REGISTER_AB(XM_PAUSE_TIME),
	REGISTER_AB(XM_TX_PARAM),
	REGISTER_AB(XM_RX_PARAM),
	/* XM_MGT_INT_MSK (note no 'A') is RC */
	REGISTER_AB(XX_PWR_RST),
	REGISTER_AB(XX_SD_CTL),
	REGISTER_AB(XX_TXDRV_CTL),
	/* XX_PRBS_CTL, XX_PRBS_CHK and XX_PRBS_ERR are not used */
	/* XX_CORE_STAT is partly RC */
};
struct efx_nic_reg_table {
	u32 offset:24;
	u32 min_revision:2, max_revision:2;
	u32 step:6, rows:21;
};

#define REGISTER_TABLE_DIMENSIONS(_, offset, min_rev, max_rev, step, rows) { \
	offset,								\
	REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev,	\
	step, rows							\
}
#define REGISTER_TABLE(name, min_rev, max_rev)				\
	REGISTER_TABLE_DIMENSIONS(					\
		name, FR_ ## min_rev ## max_rev ## _ ## name,		\
		min_rev, max_rev,					\
		FR_ ## min_rev ## max_rev ## _ ## name ## _STEP,	\
		FR_ ## min_rev ## max_rev ## _ ## name ## _ROWS)
#define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, A, A)
#define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, A, Z)
#define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, B, B)
#define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, B, Z)
#define REGISTER_TABLE_BB_CZ(name)					\
	REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, B, B,		\
				  FR_BZ_ ## name ## _STEP,		\
				  FR_BB_ ## name ## _ROWS),		\
	REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, C, Z,		\
				  FR_BZ_ ## name ## _STEP,		\
				  FR_CZ_ ## name ## _ROWS)
#define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, C, Z)
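/* Expansion example (editorial, not part of the original driver): the B
 * and C revisions of such a table share an offset and step but may
 * differ in row count, so REGISTER_TABLE_BB_CZ(TIMER_TBL) emits two
 * adjacent array entries:
 *
 *	{ FR_BZ_TIMER_TBL, 2, 2, FR_BZ_TIMER_TBL_STEP, FR_BB_TIMER_TBL_ROWS },
 *	{ FR_BZ_TIMER_TBL, 3, 3, FR_BZ_TIMER_TBL_STEP, FR_CZ_TIMER_TBL_ROWS },
 */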
static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
	/* DRIVER is not used */
	/* EVQ_RPTR, TIMER_COMMAND, USR_EV and {RX,TX}_DESC_UPD are WO */
	REGISTER_TABLE_BB(TX_IPFIL_TBL),
	REGISTER_TABLE_BB(TX_SRC_MAC_TBL),
	REGISTER_TABLE_AA(RX_DESC_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(RX_DESC_PTR_TBL),
	REGISTER_TABLE_AA(TX_DESC_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(TX_DESC_PTR_TBL),
	REGISTER_TABLE_AA(EVQ_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(EVQ_PTR_TBL),
	/* We can't reasonably read all of the buffer table (up to 8MB!).
	 * However this driver will only use a few entries.  Reading
	 * 1K entries allows for some expansion of queue count and
	 * size before we need to change the version. */
	REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL_KER, FR_AA_BUF_FULL_TBL_KER,
				  A, A, 8, 1024),
	REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL,
				  B, Z, 8, 1024),
	REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0),
	REGISTER_TABLE_BB_CZ(TIMER_TBL),
	REGISTER_TABLE_BB_CZ(TX_PACE_TBL),
	REGISTER_TABLE_BZ(RX_INDIRECTION_TBL),
	/* TX_FILTER_TBL0 is huge and not used by this driver */
	REGISTER_TABLE_CZ(TX_MAC_FILTER_TBL0),
	REGISTER_TABLE_CZ(MC_TREG_SMEM),
	/* MSIX_PBA_TABLE is not mapped */
	/* SRM_DBG is not mapped (and is redundant with BUF_FULL_TBL) */
	REGISTER_TABLE_BZ(RX_FILTER_TBL0),
};
size_t efx_nic_get_regs_len(struct efx_nic *efx)
{
	const struct efx_nic_reg *reg;
	const struct efx_nic_reg_table *table;
	size_t len = 0;

	for (reg = efx_nic_regs;
	     reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
	     reg++)
		if (efx->type->revision >= reg->min_revision &&
		    efx->type->revision <= reg->max_revision)
			len += sizeof(efx_oword_t);

	for (table = efx_nic_reg_tables;
	     table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
	     table++)
		if (efx->type->revision >= table->min_revision &&
		    efx->type->revision <= table->max_revision)
			len += table->rows * min_t(size_t, table->step, 16);

	return len;
}
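/* Note (editorial, not part of the original driver): the
 * min_t(size_t, table->step, 16) above matches efx_nic_get_regs()
 * below, which stores at most 16 bytes (one efx_oword_t) per row.  A
 * step larger than 16 only means rows are spaced further apart in the
 * register map (the interleaved case), not that more data is captured
 * per row.
 */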
void efx_nic_get_regs(struct efx_nic *efx, void *buf)
{
	const struct efx_nic_reg *reg;
	const struct efx_nic_reg_table *table;

	for (reg = efx_nic_regs;
	     reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
	     reg++) {
		if (efx->type->revision >= reg->min_revision &&
		    efx->type->revision <= reg->max_revision) {
			efx_reado(efx, (efx_oword_t *)buf, reg->offset);
			buf += sizeof(efx_oword_t);
		}
	}

	for (table = efx_nic_reg_tables;
	     table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
	     table++) {
		size_t size, i;

		if (!(efx->type->revision >= table->min_revision &&
		      efx->type->revision <= table->max_revision))
			continue;

		size = min_t(size_t, table->step, 16);

		for (i = 0; i < table->rows; i++) {
			switch (table->step) {
			case 4: /* 32-bit register or SRAM */
				efx_readd_table(efx, buf, table->offset, i);
				break;
			case 8: /* 64-bit SRAM */
				efx_sram_readq(efx,
					       efx->membase + table->offset,
					       buf, i);
				break;
			case 16: /* 128-bit register */
				efx_reado_table(efx, buf, table->offset, i);
				break;
			case 32: /* 128-bit register, interleaved */
				efx_reado_table(efx, buf, table->offset, 2 * i);
				break;
			default:
				WARN_ON(1);
				return;
			}
			buf += size;
		}
	}
}