/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2011 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/notifier.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/in.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/topology.h>
#include <linux/gfp.h>
#include <linux/cpu_rmap.h>
#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "selftest.h"
#include "mcdi.h"
#include "workarounds.h"

/**************************************************************************
 *
 * Type name strings
 *
 **************************************************************************
 */

/* Loopback mode names (see LOOPBACK_MODE()) */
const unsigned int efx_loopback_mode_max = LOOPBACK_MAX;
const char *const efx_loopback_mode_names[] = {
	[LOOPBACK_NONE]		= "NONE",
	[LOOPBACK_DATA]		= "DATAPATH",
	[LOOPBACK_GMAC]		= "GMAC",
	[LOOPBACK_XGMII]	= "XGMII",
	[LOOPBACK_XGXS]		= "XGXS",
	[LOOPBACK_XAUI]		= "XAUI",
	[LOOPBACK_GMII]		= "GMII",
	[LOOPBACK_SGMII]	= "SGMII",
	[LOOPBACK_XGBR]		= "XGBR",
	[LOOPBACK_XFI]		= "XFI",
	[LOOPBACK_XAUI_FAR]	= "XAUI_FAR",
	[LOOPBACK_GMII_FAR]	= "GMII_FAR",
	[LOOPBACK_SGMII_FAR]	= "SGMII_FAR",
	[LOOPBACK_XFI_FAR]	= "XFI_FAR",
	[LOOPBACK_GPHY]		= "GPHY",
	[LOOPBACK_PHYXS]	= "PHYXS",
	[LOOPBACK_PCS]		= "PCS",
	[LOOPBACK_PMAPMD]	= "PMA/PMD",
	[LOOPBACK_XPORT]	= "XPORT",
	[LOOPBACK_XGMII_WS]	= "XGMII_WS",
	[LOOPBACK_XAUI_WS]	= "XAUI_WS",
	[LOOPBACK_XAUI_WS_FAR]	= "XAUI_WS_FAR",
	[LOOPBACK_XAUI_WS_NEAR]	= "XAUI_WS_NEAR",
	[LOOPBACK_GMII_WS]	= "GMII_WS",
	[LOOPBACK_XFI_WS]	= "XFI_WS",
	[LOOPBACK_XFI_WS_FAR]	= "XFI_WS_FAR",
	[LOOPBACK_PHYXS_WS]	= "PHYXS_WS",
};

const unsigned int efx_reset_type_max = RESET_TYPE_MAX;
const char *const efx_reset_type_names[] = {
	[RESET_TYPE_INVISIBLE]		= "INVISIBLE",
	[RESET_TYPE_ALL]		= "ALL",
	[RESET_TYPE_WORLD]		= "WORLD",
	[RESET_TYPE_DISABLE]		= "DISABLE",
	[RESET_TYPE_TX_WATCHDOG]	= "TX_WATCHDOG",
	[RESET_TYPE_INT_ERROR]		= "INT_ERROR",
	[RESET_TYPE_RX_RECOVERY]	= "RX_RECOVERY",
	[RESET_TYPE_RX_DESC_FETCH]	= "RX_DESC_FETCH",
	[RESET_TYPE_TX_DESC_FETCH]	= "TX_DESC_FETCH",
	[RESET_TYPE_TX_SKIP]		= "TX_SKIP",
	[RESET_TYPE_MC_FAILURE]		= "MC_FAILURE",
};

#define EFX_MAX_MTU (9 * 1024)

/* Reset workqueue. If any NIC has a hardware failure then a reset will be
 * queued onto this work queue. This is not a per-nic work queue, because
 * efx_reset_work() acquires the rtnl lock, so resets are naturally serialised.
 */
static struct workqueue_struct *reset_workqueue;

/**************************************************************************
 *
 * Configurable values
 *
 *************************************************************************/

/*
 * Use separate channels for TX and RX events
 *
 * Set this to 1 to use separate channels for TX and RX. It allows us
 * to control interrupt affinity separately for TX and RX.
 *
 * This is only used in MSI-X interrupt mode
 */
static unsigned int separate_tx_channels;
module_param(separate_tx_channels, uint, 0444);
MODULE_PARM_DESC(separate_tx_channels,
		 "Use separate channels for TX and RX");

/* This is the weight assigned to each of the (per-channel) virtual
 * NAPI devices.
 */
static int napi_weight = 64;

/* This is the time (in jiffies) between invocations of the hardware
 * monitor. On Falcon-based NICs, this will:
 * - Check the on-board hardware monitor;
 * - Poll the link state and reconfigure the hardware as necessary.
 */
static unsigned int efx_monitor_interval = 1 * HZ;

/* Initial interrupt moderation settings. They can be modified after
 * module load with ethtool.
 *
 * The default for RX should strike a balance between increasing the
 * round-trip latency and reducing overhead.
 */
static unsigned int rx_irq_mod_usec = 60;

/* Initial interrupt moderation settings. They can be modified after
 * module load with ethtool.
 *
 * This default is chosen to ensure that a 10G link does not go idle
 * while a TX queue is stopped after it has become full. A queue is
 * restarted when it drops below half full. The time this takes (assuming
 * worst case 3 descriptors per packet and 1024 descriptors) is
 * 512 / 3 * 1.2 = 205 usec.
 */
static unsigned int tx_irq_mod_usec = 150;

/* This is the first interrupt mode to try out of:
 * 0 => MSI-X
 * 1 => MSI
 * 2 => legacy
 */
static unsigned int interrupt_mode;

/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
 * i.e. the number of CPUs among which we may distribute simultaneous
 * interrupt handling.
 *
 * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
 * The default (0) means to assign an interrupt to each core.
 */
static unsigned int rss_cpus;
module_param(rss_cpus, uint, 0444);
MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");

static int phy_flash_cfg;
module_param(phy_flash_cfg, int, 0644);
MODULE_PARM_DESC(phy_flash_cfg, "Set PHYs into reflash mode initially");

static unsigned irq_adapt_low_thresh = 8000;
module_param(irq_adapt_low_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_low_thresh,
		 "Threshold score for reducing IRQ moderation");

static unsigned irq_adapt_high_thresh = 16000;
module_param(irq_adapt_high_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_high_thresh,
		 "Threshold score for increasing IRQ moderation");

static unsigned debug = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
			 NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
			 NETIF_MSG_IFUP | NETIF_MSG_RX_ERR |
			 NETIF_MSG_TX_ERR | NETIF_MSG_HW);
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");

/**************************************************************************
 *
 * Utility functions and prototypes
 *
 *************************************************************************/

static void efx_start_interrupts(struct efx_nic *efx, bool may_keep_eventq);
static void efx_stop_interrupts(struct efx_nic *efx, bool may_keep_eventq);
static void efx_remove_channel(struct efx_channel *channel);
static void efx_remove_channels(struct efx_nic *efx);
static const struct efx_channel_type efx_default_channel_type;
static void efx_remove_port(struct efx_nic *efx);
static void efx_init_napi_channel(struct efx_channel *channel);
static void efx_fini_napi(struct efx_nic *efx);
static void efx_fini_napi_channel(struct efx_channel *channel);
static void efx_fini_struct(struct efx_nic *efx);
static void efx_start_all(struct efx_nic *efx);
static void efx_stop_all(struct efx_nic *efx);

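/* Assert that reset-sensitive reconfiguration is serialised: once the
 * NIC is running (or has been disabled), it may only be reconfigured
 * while the RTNL lock is held. */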
#define EFX_ASSERT_RESET_SERIALISED(efx)		\
	do {						\
		if ((efx->state == STATE_RUNNING) ||	\
		    (efx->state == STATE_DISABLED))	\
			ASSERT_RTNL();			\
	} while (0)

/**************************************************************************
 *
 * Event queue processing
 *
 *************************************************************************/

/* Process channel's event queue
 *
 * This function is responsible for processing the event queue of a
 * single channel. The caller must guarantee that this function will
 * never be concurrently called more than once on the same channel,
 * though different channels may be being processed concurrently.
 */
static int efx_process_channel(struct efx_channel *channel, int budget)
{
	int spent;

	if (unlikely(!channel->enabled))
		return 0;

	spent = efx_nic_process_eventq(channel, budget);
	if (spent && efx_channel_has_rx_queue(channel)) {
		struct efx_rx_queue *rx_queue =
			efx_channel_get_rx_queue(channel);

		/* Deliver last RX packet. */
		if (channel->rx_pkt) {
			__efx_rx_packet(channel, channel->rx_pkt);
			channel->rx_pkt = NULL;
		}
		if (rx_queue->enabled) {
			efx_rx_strategy(channel);
			efx_fast_push_rx_descriptors(rx_queue);
		}
	}

	return spent;
}

/* Mark channel as finished processing
 *
 * Note that since we will not receive further interrupts for this
 * channel before we finish processing and call the eventq_read_ack()
 * method, there is no need to use the interrupt hold-off timers.
 */
static inline void efx_channel_processed(struct efx_channel *channel)
{
	/* The interrupt handler for this channel may set work_pending
	 * as soon as we acknowledge the events we've seen. Make sure
	 * it's cleared before then. */
	channel->work_pending = false;
	smp_wmb();

	efx_nic_eventq_read_ack(channel);
}

/* NAPI poll handler
 *
 * NAPI guarantees serialisation of polls of the same device, which
 * provides the guarantee required by efx_process_channel().
 */
static int efx_poll(struct napi_struct *napi, int budget)
{
	struct efx_channel *channel =
		container_of(napi, struct efx_channel, napi_str);
	struct efx_nic *efx = channel->efx;
	int spent;

	netif_vdbg(efx, intr, efx->net_dev,
		   "channel %d NAPI poll executing on CPU %d\n",
		   channel->channel, raw_smp_processor_id());

	spent = efx_process_channel(channel, budget);

	if (spent < budget) {
		if (efx_channel_has_rx_queue(channel) &&
		    efx->irq_rx_adaptive &&
		    unlikely(++channel->irq_count == 1000)) {
			if (unlikely(channel->irq_mod_score <
				     irq_adapt_low_thresh)) {
				if (channel->irq_moderation > 1) {
					channel->irq_moderation -= 1;
					efx->type->push_irq_moderation(channel);
				}
			} else if (unlikely(channel->irq_mod_score >
					    irq_adapt_high_thresh)) {
				if (channel->irq_moderation <
				    efx->irq_rx_moderation) {
					channel->irq_moderation += 1;
					efx->type->push_irq_moderation(channel);
				}
			}
			channel->irq_count = 0;
			channel->irq_mod_score = 0;
		}

		efx_filter_rfs_expire(channel);

		/* There is no race here; although napi_disable() will
		 * only wait for napi_complete(), this isn't a problem
		 * since efx_channel_processed() will have no effect if
		 * interrupts have already been disabled.
		 */
		napi_complete(napi);
		efx_channel_processed(channel);
	}

	return spent;
}

/* Process the eventq of the specified channel immediately on this CPU
 *
 * Disable hardware generated interrupts, wait for any existing
 * processing to finish, then directly poll (and ack) the eventq.
 * Finally reenable NAPI and interrupts.
 *
 * This is for use only during a loopback self-test. It must not
 * deliver any packets up the stack as this can result in deadlock.
 */
void efx_process_channel_now(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;

	BUG_ON(channel->channel >= efx->n_channels);
	BUG_ON(!channel->enabled);
	BUG_ON(!efx->loopback_selftest);

	/* Disable interrupts and wait for ISRs to complete */
	efx_nic_disable_interrupts(efx);
	if (efx->legacy_irq) {
		synchronize_irq(efx->legacy_irq);
		efx->legacy_irq_enabled = false;
	}
	if (channel->irq)
		synchronize_irq(channel->irq);

	/* Wait for any NAPI processing to complete */
	napi_disable(&channel->napi_str);

	/* Poll the channel */
	efx_process_channel(channel, channel->eventq_mask + 1);

	/* Ack the eventq. This may cause an interrupt to be generated
	 * when they are reenabled */
	efx_channel_processed(channel);

	napi_enable(&channel->napi_str);
	if (efx->legacy_irq)
		efx->legacy_irq_enabled = true;
	efx_nic_enable_interrupts(efx);
}

/* Create event queue
 * Event queue memory allocations are done only once. If the channel
 * is reset, the memory buffer will be reused; this guards against
 * errors during channel reset and also simplifies interrupt handling.
 */
static int efx_probe_eventq(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	unsigned long entries;

	netif_dbg(efx, probe, efx->net_dev,
		  "chan %d create event queue\n", channel->channel);

	/* Build an event queue with room for one event per tx and rx buffer,
	 * plus some extra for link state events and MCDI completions. */
	entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128);
	EFX_BUG_ON_PARANOID(entries > EFX_MAX_EVQ_SIZE);
	channel->eventq_mask = max(entries, EFX_MIN_EVQ_SIZE) - 1;

	return efx_nic_probe_eventq(channel);
}

/* Prepare channel's event queue */
static void efx_init_eventq(struct efx_channel *channel)
{
	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "chan %d init event queue\n", channel->channel);

	channel->eventq_read_ptr = 0;
	efx_nic_init_eventq(channel);
}

/* Enable event queue processing and NAPI */
static void efx_start_eventq(struct efx_channel *channel)
{
	netif_dbg(channel->efx, ifup, channel->efx->net_dev,
		  "chan %d start event queue\n", channel->channel);

	/* The interrupt handler for this channel may set work_pending
	 * as soon as we enable it. Make sure it's cleared before
	 * then. Similarly, make sure it sees the enabled flag set.
	 */
	channel->work_pending = false;
	channel->enabled = true;
	smp_wmb();

	napi_enable(&channel->napi_str);
	efx_nic_eventq_read_ack(channel);
}

/* Disable event queue processing and NAPI */
static void efx_stop_eventq(struct efx_channel *channel)
{
	if (!channel->enabled)
		return;

	napi_disable(&channel->napi_str);
	channel->enabled = false;
}

static void efx_fini_eventq(struct efx_channel *channel)
{
	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "chan %d fini event queue\n", channel->channel);

	efx_nic_fini_eventq(channel);
}

static void efx_remove_eventq(struct efx_channel *channel)
{
	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "chan %d remove event queue\n", channel->channel);

	efx_nic_remove_eventq(channel);
}

/**************************************************************************
 *
 * Channel handling
 *
 *************************************************************************/

/* Allocate and initialise a channel structure. */
static struct efx_channel *
efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel)
{
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	int j;

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return NULL;

	channel->efx = efx;
	channel->channel = i;
	channel->type = &efx_default_channel_type;

	for (j = 0; j < EFX_TXQ_TYPES; j++) {
		tx_queue = &channel->tx_queue[j];
		tx_queue->efx = efx;
		tx_queue->queue = i * EFX_TXQ_TYPES + j;
		tx_queue->channel = channel;
	}

	rx_queue = &channel->rx_queue;
	rx_queue->efx = efx;
	setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill,
		    (unsigned long)rx_queue);

	return channel;
}

/* Allocate and initialise a channel structure, copying parameters
 * (but not resources) from an old channel structure.
 */
static struct efx_channel *
efx_copy_channel(const struct efx_channel *old_channel)
{
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	int j;

	channel = kmalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return NULL;

	*channel = *old_channel;

	channel->napi_dev = NULL;
	memset(&channel->eventq, 0, sizeof(channel->eventq));

	for (j = 0; j < EFX_TXQ_TYPES; j++) {
		tx_queue = &channel->tx_queue[j];
		if (tx_queue->channel)
			tx_queue->channel = channel;
		tx_queue->buffer = NULL;
		memset(&tx_queue->txd, 0, sizeof(tx_queue->txd));
	}

	rx_queue = &channel->rx_queue;
	rx_queue->buffer = NULL;
	memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));
	setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill,
		    (unsigned long)rx_queue);

	return channel;
}

static int efx_probe_channel(struct efx_channel *channel)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int rc;

	netif_dbg(channel->efx, probe, channel->efx->net_dev,
		  "creating channel %d\n", channel->channel);

	rc = channel->type->pre_probe(channel);
	if (rc)
		goto fail;

	rc = efx_probe_eventq(channel);
	if (rc)
		goto fail;

	efx_for_each_channel_tx_queue(tx_queue, channel) {
		rc = efx_probe_tx_queue(tx_queue);
		if (rc)
			goto fail;
	}

	efx_for_each_channel_rx_queue(rx_queue, channel) {
		rc = efx_probe_rx_queue(rx_queue);
		if (rc)
			goto fail;
	}

	channel->n_rx_frm_trunc = 0;

	return 0;

fail:
	efx_remove_channel(channel);
	return rc;
}

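/* Build the channel's name: "<ifname>-<n>", or "<ifname>-rx-<n>" /
 * "<ifname>-tx-<n>" when separate TX and RX channels are in use. */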
static void
efx_get_channel_name(struct efx_channel *channel, char *buf, size_t len)
{
	struct efx_nic *efx = channel->efx;
	const char *type;
	int number;

	number = channel->channel;
	if (efx->tx_channel_offset == 0) {
		type = "";
	} else if (channel->channel < efx->tx_channel_offset) {
		type = "-rx";
	} else {
		type = "-tx";
		number -= efx->tx_channel_offset;
	}
	snprintf(buf, len, "%s%s-%d", efx->name, type, number);
}

static void efx_set_channel_names(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		channel->type->get_name(channel,
					efx->channel_name[channel->channel],
					sizeof(efx->channel_name[0]));
}

static int efx_probe_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;
	int rc;

	/* Restart special buffer allocation */
	efx->next_buffer_table = 0;

	/* Probe channels in reverse, so that any 'extra' channels
	 * use the start of the buffer table. This allows the traffic
	 * channels to be resized without moving them or wasting the
	 * entries before them.
	 */
	efx_for_each_channel_rev(channel, efx) {
		rc = efx_probe_channel(channel);
		if (rc) {
			netif_err(efx, probe, efx->net_dev,
				  "failed to create channel %d\n",
				  channel->channel);
			goto fail;
		}
	}
	efx_set_channel_names(efx);

	return 0;

fail:
	efx_remove_channels(efx);
	return rc;
}

/* Channels are shutdown and reinitialised whilst the NIC is running
 * to propagate configuration changes (mtu, checksum offload), or
 * to clear hardware error conditions
 */
static void efx_start_datapath(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	struct efx_channel *channel;

	/* Calculate the rx buffer allocation parameters required to
	 * support the current MTU, including padding for header
	 * alignment and overruns.
	 */
	efx->rx_buffer_len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) +
			      EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
			      efx->type->rx_buffer_hash_size +
			      efx->type->rx_buffer_padding);
	efx->rx_buffer_order = get_order(efx->rx_buffer_len +
					 sizeof(struct efx_rx_page_state));

	/* Initialise the channels */
	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel)
			efx_init_tx_queue(tx_queue);

		/* The rx buffer allocation strategy is MTU dependent */
		efx_rx_strategy(channel);

		efx_for_each_channel_rx_queue(rx_queue, channel) {
			efx_init_rx_queue(rx_queue);
			efx_nic_generate_fill_event(rx_queue);
		}

		WARN_ON(channel->rx_pkt != NULL);
		efx_rx_strategy(channel);
	}

	if (netif_device_present(efx->net_dev))
		netif_tx_wake_all_queues(efx->net_dev);
}

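/* Stop the datapath: flush all DMA queues, let any in-flight NAPI
 * processing finish, then tear down the RX and TX queues. The port
 * must already have been stopped. */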
static void efx_stop_datapath(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	struct pci_dev *dev = efx->pci_dev;
	int rc;

	EFX_ASSERT_RESET_SERIALISED(efx);
	BUG_ON(efx->port_enabled);

	/* Only perform flush if dma is enabled */
	if (dev->is_busmaster) {
		rc = efx_nic_flush_queues(efx);

		if (rc && EFX_WORKAROUND_7803(efx)) {
			/* Schedule a reset to recover from the flush failure. The
			 * descriptor caches reference memory we're about to free,
			 * but falcon_reconfigure_mac_wrapper() won't reconnect
			 * the MACs because of the pending reset. */
			netif_err(efx, drv, efx->net_dev,
				  "Resetting to recover from flush failure\n");
			efx_schedule_reset(efx, RESET_TYPE_ALL);
		} else if (rc) {
			netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
		} else {
			netif_dbg(efx, drv, efx->net_dev,
				  "successfully flushed all queues\n");
		}
	}

	efx_for_each_channel(channel, efx) {
		/* RX packet processing is pipelined, so wait for the
		 * NAPI handler to complete. At least event queue 0
		 * might be kept active by non-data events, so don't
		 * use napi_synchronize() but actually disable NAPI
		 * temporarily.
		 */
		if (efx_channel_has_rx_queue(channel)) {
			efx_stop_eventq(channel);
			efx_start_eventq(channel);
		}

		efx_for_each_channel_rx_queue(rx_queue, channel)
			efx_fini_rx_queue(rx_queue);
		efx_for_each_possible_channel_tx_queue(tx_queue, channel)
			efx_fini_tx_queue(tx_queue);
	}
}

static void efx_remove_channel(struct efx_channel *channel)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;

	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "destroy chan %d\n", channel->channel);

	efx_for_each_channel_rx_queue(rx_queue, channel)
		efx_remove_rx_queue(rx_queue);
	efx_for_each_possible_channel_tx_queue(tx_queue, channel)
		efx_remove_tx_queue(tx_queue);
	efx_remove_eventq(channel);
}

static void efx_remove_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_remove_channel(channel);
}

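/* Resize the RX and TX queues by re-creating the copyable channels,
 * while leaving non-copyable channels (and their buffer table entries)
 * in place. On failure the old entry counts are restored. */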
int
efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
{
	struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
	u32 old_rxq_entries, old_txq_entries;
	unsigned i, next_buffer_table = 0;
	int rc = 0;

	/* Not all channels should be reallocated. We must avoid
	 * reallocating their buffer table entries.
	 */
	efx_for_each_channel(channel, efx) {
		struct efx_rx_queue *rx_queue;
		struct efx_tx_queue *tx_queue;

		if (channel->type->copy)
			continue;
		next_buffer_table = max(next_buffer_table,
					channel->eventq.index +
					channel->eventq.entries);
		efx_for_each_channel_rx_queue(rx_queue, channel)
			next_buffer_table = max(next_buffer_table,
						rx_queue->rxd.index +
						rx_queue->rxd.entries);
		efx_for_each_channel_tx_queue(tx_queue, channel)
			next_buffer_table = max(next_buffer_table,
						tx_queue->txd.index +
						tx_queue->txd.entries);
	}

	efx_device_detach_sync(efx);
	efx_stop_all(efx);
	efx_stop_interrupts(efx, true);

	/* Clone channels (where possible) */
	memset(other_channel, 0, sizeof(other_channel));
	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		if (channel->type->copy)
			channel = channel->type->copy(channel);
		if (!channel) {
			rc = -ENOMEM;
			goto out;
		}
		other_channel[i] = channel;
	}

	/* Swap entry counts and channel pointers */
	old_rxq_entries = efx->rxq_entries;
	old_txq_entries = efx->txq_entries;
	efx->rxq_entries = rxq_entries;
	efx->txq_entries = txq_entries;
	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		efx->channel[i] = other_channel[i];
		other_channel[i] = channel;
	}

	/* Restart buffer table allocation */
	efx->next_buffer_table = next_buffer_table;

	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		if (!channel->type->copy)
			continue;
		rc = efx_probe_channel(channel);
		if (rc)
			goto rollback;
		efx_init_napi_channel(efx->channel[i]);
	}

out:
	/* Destroy unused channel structures */
	for (i = 0; i < efx->n_channels; i++) {
		channel = other_channel[i];
		if (channel && channel->type->copy) {
			efx_fini_napi_channel(channel);
			efx_remove_channel(channel);
			kfree(channel);
		}
	}

	efx_start_interrupts(efx, true);
	efx_start_all(efx);
	netif_device_attach(efx->net_dev);
	return rc;

rollback:
	/* Swap back */
	efx->rxq_entries = old_rxq_entries;
	efx->txq_entries = old_txq_entries;
	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		efx->channel[i] = other_channel[i];
		other_channel[i] = channel;
	}
	goto out;
}

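/* Arm the slow-fill timer: retry pushing RX descriptors from timer
 * context in 100 ms. */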
void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
{
	mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(100));
}

static const struct efx_channel_type efx_default_channel_type = {
	.pre_probe		= efx_channel_dummy_op_int,
	.get_name		= efx_get_channel_name,
	.copy			= efx_copy_channel,
	.keep_eventq		= false,
};

int efx_channel_dummy_op_int(struct efx_channel *channel)
{
	return 0;
}

/**************************************************************************
 *
 * Port handling
 *
 **************************************************************************/

/* This ensures that the kernel is kept informed (via
 * netif_carrier_on/off) of the link status and, through the carrier
 * state, keeps the port's TX queue stopped while the link is down.
 */
void efx_link_status_changed(struct efx_nic *efx)
{
	struct efx_link_state *link_state = &efx->link_state;

	/* SFC Bug 5356: A net_dev notifier is registered, so we must ensure
	 * that no events are triggered between unregister_netdev() and the
	 * driver unloading. A more general condition is that NETDEV_CHANGE
	 * can only be generated between NETDEV_UP and NETDEV_DOWN */
	if (!netif_running(efx->net_dev))
		return;

	if (link_state->up != netif_carrier_ok(efx->net_dev)) {
		efx->n_link_state_changes++;

		if (link_state->up)
			netif_carrier_on(efx->net_dev);
		else
			netif_carrier_off(efx->net_dev);
	}

	/* Status message for kernel log */
	if (link_state->up)
		netif_info(efx, link, efx->net_dev,
			   "link up at %uMbps %s-duplex (MTU %d)%s\n",
			   link_state->speed, link_state->fd ? "full" : "half",
			   efx->net_dev->mtu,
			   (efx->promiscuous ? " [PROMISC]" : ""));
	else
		netif_info(efx, link, efx->net_dev, "link down\n");
}

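/* Update the advertised link modes and derive the wanted flow-control
 * settings from the Pause/Asym_Pause advertising bits. */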
void efx_link_set_advertising(struct efx_nic *efx, u32 advertising)
{
	efx->link_advertising = advertising;
	if (advertising) {
		if (advertising & ADVERTISED_Pause)
			efx->wanted_fc |= (EFX_FC_TX | EFX_FC_RX);
		else
			efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX);
		if (advertising & ADVERTISED_Asym_Pause)
			efx->wanted_fc ^= EFX_FC_TX;
	}
}

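/* Set the wanted flow-control mode and keep the advertised
 * Pause/Asym_Pause bits consistent with it. */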
void efx_link_set_wanted_fc(struct efx_nic *efx, u8 wanted_fc)
{
	efx->wanted_fc = wanted_fc;
	if (efx->link_advertising) {
		if (wanted_fc & EFX_FC_RX)
			efx->link_advertising |= (ADVERTISED_Pause |
						  ADVERTISED_Asym_Pause);
		else
			efx->link_advertising &= ~(ADVERTISED_Pause |
						   ADVERTISED_Asym_Pause);
		if (wanted_fc & EFX_FC_TX)
			efx->link_advertising ^= ADVERTISED_Asym_Pause;
	}
}

static void efx_fini_port(struct efx_nic *efx);

/* Push loopback/power/transmit disable settings to the PHY, and reconfigure
 * the MAC appropriately. All other PHY configuration changes are pushed
 * through phy_op->set_settings(), and pushed asynchronously to the MAC
 * through efx_monitor().
 *
 * Callers must hold the mac_lock
 */
int __efx_reconfigure_port(struct efx_nic *efx)
{
	enum efx_phy_mode phy_mode;
	int rc;

	WARN_ON(!mutex_is_locked(&efx->mac_lock));

	/* Serialise the promiscuous flag with efx_set_rx_mode. */
	netif_addr_lock_bh(efx->net_dev);
	netif_addr_unlock_bh(efx->net_dev);

	/* Disable PHY transmit in mac level loopbacks */
	phy_mode = efx->phy_mode;
	if (LOOPBACK_INTERNAL(efx))
		efx->phy_mode |= PHY_MODE_TX_DISABLED;
	else
		efx->phy_mode &= ~PHY_MODE_TX_DISABLED;

	rc = efx->type->reconfigure_port(efx);

	if (rc)
		efx->phy_mode = phy_mode;

	return rc;
}

/* Reinitialise the MAC to pick up new PHY settings, even if the port is
 * disabled. */
int efx_reconfigure_port(struct efx_nic *efx)
{
	int rc;

	EFX_ASSERT_RESET_SERIALISED(efx);

	mutex_lock(&efx->mac_lock);
	rc = __efx_reconfigure_port(efx);
	mutex_unlock(&efx->mac_lock);

	return rc;
}

/* Asynchronous work item for changing MAC promiscuity and multicast
 * hash. Avoid a drain/rx_ingress enable by reconfiguring the current
 * MAC directly. */
static void efx_mac_work(struct work_struct *data)
{
	struct efx_nic *efx = container_of(data, struct efx_nic, mac_work);

	mutex_lock(&efx->mac_lock);
	if (efx->port_enabled)
		efx->type->reconfigure_mac(efx);
	mutex_unlock(&efx->mac_lock);
}

static int efx_probe_port(struct efx_nic *efx)
{
	int rc;

	netif_dbg(efx, probe, efx->net_dev, "create port\n");

	if (phy_flash_cfg)
		efx->phy_mode = PHY_MODE_SPECIAL;

	/* Connect up MAC/PHY operations table */
	rc = efx->type->probe_port(efx);
	if (rc)
		return rc;

	/* Initialise MAC address to permanent address */
	memcpy(efx->net_dev->dev_addr, efx->net_dev->perm_addr, ETH_ALEN);

	return 0;
}

static int efx_init_port(struct efx_nic *efx)
{
	int rc;

	netif_dbg(efx, drv, efx->net_dev, "init port\n");

	mutex_lock(&efx->mac_lock);

	rc = efx->phy_op->init(efx);
	if (rc)
		goto fail1;

	efx->port_initialized = true;

	/* Reconfigure the MAC before creating dma queues (required for
	 * Falcon/A1 where RX_INGR_EN/TX_DRAIN_EN isn't supported) */
	efx->type->reconfigure_mac(efx);

	/* Ensure the PHY advertises the correct flow control settings */
	rc = efx->phy_op->reconfigure(efx);
	if (rc)
		goto fail2;

	mutex_unlock(&efx->mac_lock);
	return 0;

fail2:
	efx->phy_op->fini(efx);
fail1:
	mutex_unlock(&efx->mac_lock);
	return rc;
}

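/* Allow efx_mac_work() and efx_monitor() to reconfigure the MAC */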
static void efx_start_port(struct efx_nic *efx)
{
	netif_dbg(efx, ifup, efx->net_dev, "start port\n");
	BUG_ON(efx->port_enabled);

	mutex_lock(&efx->mac_lock);
	efx->port_enabled = true;

	/* efx_mac_work() might have been scheduled after efx_stop_port(),
	 * and then cancelled by efx_flush_all() */
	efx->type->reconfigure_mac(efx);

	mutex_unlock(&efx->mac_lock);
}

/* Prevent efx_mac_work() and efx_monitor() from working */
static void efx_stop_port(struct efx_nic *efx)
{
	netif_dbg(efx, ifdown, efx->net_dev, "stop port\n");

	mutex_lock(&efx->mac_lock);
	efx->port_enabled = false;
	mutex_unlock(&efx->mac_lock);

	/* Serialise against efx_set_multicast_list() */
	netif_addr_lock_bh(efx->net_dev);
	netif_addr_unlock_bh(efx->net_dev);
}

static void efx_fini_port(struct efx_nic *efx)
{
	netif_dbg(efx, drv, efx->net_dev, "shut down port\n");

	if (!efx->port_initialized)
		return;

	efx->phy_op->fini(efx);
	efx->port_initialized = false;

	efx->link_state.up = false;
	efx_link_status_changed(efx);
}

static void efx_remove_port(struct efx_nic *efx)
{
	netif_dbg(efx, drv, efx->net_dev, "destroying port\n");

	efx->type->remove_port(efx);
}

/**************************************************************************
 *
 * NIC handling
 *
 **************************************************************************/

/* This configures the PCI device to enable I/O and DMA. */
static int efx_init_io(struct efx_nic *efx)
{
	struct pci_dev *pci_dev = efx->pci_dev;
	dma_addr_t dma_mask = efx->type->max_dma_mask;
	int rc;

	netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");

	rc = pci_enable_device(pci_dev);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "failed to enable PCI device\n");
		goto fail1;
	}

	pci_set_master(pci_dev);

	/* Set the PCI DMA mask. Try all possibilities from our
	 * genuine mask down to 32 bits, because some architectures
	 * (e.g. x86_64 with iommu_sac_force set) will allow 40 bit
	 * masks even though they reject 46 bit masks.
	 */
	while (dma_mask > 0x7fffffffUL) {
		if (pci_dma_supported(pci_dev, dma_mask)) {
			rc = pci_set_dma_mask(pci_dev, dma_mask);
			if (rc == 0)
				break;
		}
		dma_mask >>= 1;
	}
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "could not find a suitable DMA mask\n");
		goto fail2;
	}
	netif_dbg(efx, probe, efx->net_dev,
		  "using DMA mask %llx\n", (unsigned long long) dma_mask);
	rc = pci_set_consistent_dma_mask(pci_dev, dma_mask);
	if (rc) {
		/* pci_set_consistent_dma_mask() is not *allowed* to
		 * fail with a mask that pci_set_dma_mask() accepted,
		 * but just in case...
		 */
		netif_err(efx, probe, efx->net_dev,
			  "failed to set consistent DMA mask\n");
		goto fail2;
	}

	efx->membase_phys = pci_resource_start(efx->pci_dev, EFX_MEM_BAR);
	rc = pci_request_region(pci_dev, EFX_MEM_BAR, "sfc");
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "request for memory BAR failed\n");
		rc = -EIO;
		goto fail3;
	}
	efx->membase = ioremap_nocache(efx->membase_phys,
				       efx->type->mem_map_size);
	if (!efx->membase) {
		netif_err(efx, probe, efx->net_dev,
			  "could not map memory BAR at %llx+%x\n",
			  (unsigned long long)efx->membase_phys,
			  efx->type->mem_map_size);
		rc = -ENOMEM;
		goto fail4;
	}
	netif_dbg(efx, probe, efx->net_dev,
		  "memory BAR at %llx+%x (virtual %p)\n",
		  (unsigned long long)efx->membase_phys,
		  efx->type->mem_map_size, efx->membase);

	return 0;

fail4:
	pci_release_region(efx->pci_dev, EFX_MEM_BAR);
fail3:
	efx->membase_phys = 0;
fail2:
	pci_disable_device(efx->pci_dev);
fail1:
	return rc;
}

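/* Undo efx_init_io(): unmap the memory BAR, release the region and
 * disable the PCI device. */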
static void efx_fini_io(struct efx_nic *efx)
{
	netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n");

	if (efx->membase) {
		iounmap(efx->membase);
		efx->membase = NULL;
	}

	if (efx->membase_phys) {
		pci_release_region(efx->pci_dev, EFX_MEM_BAR);
		efx->membase_phys = 0;
	}

	pci_disable_device(efx->pci_dev);
}

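/* Work out how many RSS channels are wanted: rss_cpus if set, otherwise
 * one per online CPU (counting hyperthread siblings only once), capped
 * to the VF queue count when RSS must also work for SR-IOV VFs. */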
static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
{
	cpumask_var_t thread_mask;
	unsigned int count;
	int cpu;

	if (rss_cpus) {
		count = rss_cpus;
	} else {
		if (unlikely(!zalloc_cpumask_var(&thread_mask, GFP_KERNEL))) {
			netif_warn(efx, probe, efx->net_dev,
				   "RSS disabled due to allocation failure\n");
			return 1;
		}

		count = 0;
		for_each_online_cpu(cpu) {
			if (!cpumask_test_cpu(cpu, thread_mask)) {
				++count;
				cpumask_or(thread_mask, thread_mask,
					   topology_thread_cpumask(cpu));
			}
		}

		free_cpumask_var(thread_mask);
	}

	/* If RSS is requested for the PF *and* VFs then we can't write RSS
	 * table entries that are inaccessible to VFs
	 */
	if (efx_sriov_wanted(efx) && efx_vf_size(efx) > 1 &&
	    count > efx_vf_size(efx)) {
		netif_warn(efx, probe, efx->net_dev,
			   "Reducing number of RSS channels from %u to %u for "
			   "VF support. Increase vf-msix-limit to use more "
			   "channels on the PF.\n",
			   count, efx_vf_size(efx));
		count = efx_vf_size(efx);
	}

	return count;
}

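/* Populate the RX CPU reverse map (used by accelerated RFS) with the
 * MSI-X vector of each RX channel. */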
static int
efx_init_rx_cpu_rmap(struct efx_nic *efx, struct msix_entry *xentries)
{
#ifdef CONFIG_RFS_ACCEL
	unsigned int i;
	int rc;

	efx->net_dev->rx_cpu_rmap = alloc_irq_cpu_rmap(efx->n_rx_channels);
	if (!efx->net_dev->rx_cpu_rmap)
		return -ENOMEM;
	for (i = 0; i < efx->n_rx_channels; i++) {
		rc = irq_cpu_rmap_add(efx->net_dev->rx_cpu_rmap,
				      xentries[i].vector);
		if (rc) {
			free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
			efx->net_dev->rx_cpu_rmap = NULL;
			return rc;
		}
	}
#endif
	return 0;
}

/* Probe the number and type of interrupts we are able to obtain, and
 * the resulting numbers of channels and RX queues.
 */
static int efx_probe_interrupts(struct efx_nic *efx)
{
	unsigned int max_channels =
		min(efx->type->phys_addr_channels, EFX_MAX_CHANNELS);
	unsigned int extra_channels = 0;
	unsigned int i, j;
	int rc;

	for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++)
		if (efx->extra_channel_type[i])
			++extra_channels;

	if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
		struct msix_entry xentries[EFX_MAX_CHANNELS];
		unsigned int n_channels;

		n_channels = efx_wanted_parallelism(efx);
		if (separate_tx_channels)
			n_channels *= 2;
		n_channels += extra_channels;
		n_channels = min(n_channels, max_channels);

		for (i = 0; i < n_channels; i++)
			xentries[i].entry = i;
		rc = pci_enable_msix(efx->pci_dev, xentries, n_channels);
		if (rc > 0) {
			netif_err(efx, drv, efx->net_dev,
				  "WARNING: Insufficient MSI-X vectors"
				  " available (%d < %u).\n", rc, n_channels);
			netif_err(efx, drv, efx->net_dev,
				  "WARNING: Performance may be reduced.\n");
			EFX_BUG_ON_PARANOID(rc >= n_channels);
			n_channels = rc;
			rc = pci_enable_msix(efx->pci_dev, xentries,
					     n_channels);
		}

		if (rc == 0) {
			efx->n_channels = n_channels;
			if (n_channels > extra_channels)
				n_channels -= extra_channels;
			if (separate_tx_channels) {
				efx->n_tx_channels = max(n_channels / 2, 1U);
				efx->n_rx_channels = max(n_channels -
							 efx->n_tx_channels,
							 1U);
			} else {
				efx->n_tx_channels = n_channels;
				efx->n_rx_channels = n_channels;
			}
			rc = efx_init_rx_cpu_rmap(efx, xentries);
			if (rc) {
				pci_disable_msix(efx->pci_dev);
				return rc;
			}
			for (i = 0; i < efx->n_channels; i++)
				efx_get_channel(efx, i)->irq =
					xentries[i].vector;
		} else {
			/* Fall back to single channel MSI */
			efx->interrupt_mode = EFX_INT_MODE_MSI;
			netif_err(efx, drv, efx->net_dev,
				  "could not enable MSI-X\n");
		}
	}

	/* Try single interrupt MSI */
	if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
		efx->n_channels = 1;
		efx->n_rx_channels = 1;
		efx->n_tx_channels = 1;
		rc = pci_enable_msi(efx->pci_dev);
		if (rc == 0) {
			efx_get_channel(efx, 0)->irq = efx->pci_dev->irq;
		} else {
			netif_err(efx, drv, efx->net_dev,
				  "could not enable MSI\n");
			efx->interrupt_mode = EFX_INT_MODE_LEGACY;
		}
	}

	/* Assume legacy interrupts */
	if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
		efx->n_channels = 1 + (separate_tx_channels ? 1 : 0);
		efx->n_rx_channels = 1;
		efx->n_tx_channels = 1;
		efx->legacy_irq = efx->pci_dev->irq;
	}

	/* Assign extra channels if possible */
	j = efx->n_channels;
	for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++) {
		if (!efx->extra_channel_type[i])
			continue;
		if (efx->interrupt_mode != EFX_INT_MODE_MSIX ||
		    efx->n_channels <= extra_channels) {
			efx->extra_channel_type[i]->handle_no_channel(efx);
		} else {
			--j;
			efx_get_channel(efx, j)->type =
				efx->extra_channel_type[i];
		}
	}

	/* RSS might be usable on VFs even if it is disabled on the PF */
	efx->rss_spread = ((efx->n_rx_channels > 1 || !efx_sriov_wanted(efx)) ?
			   efx->n_rx_channels : efx_vf_size(efx));

	return 0;
}

/* Enable interrupts, then probe and start the event queues */
static void efx_start_interrupts(struct efx_nic *efx, bool may_keep_eventq)
{
	struct efx_channel *channel;

	if (efx->legacy_irq)
		efx->legacy_irq_enabled = true;
	efx_nic_enable_interrupts(efx);

	efx_for_each_channel(channel, efx) {
		if (!channel->type->keep_eventq || !may_keep_eventq)
			efx_init_eventq(channel);
		efx_start_eventq(channel);
	}

	efx_mcdi_mode_event(efx);
}

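/* Disable interrupts, wait for any in-flight handlers to finish, then
 * stop (and optionally tear down) the event queues. */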
static void efx_stop_interrupts(struct efx_nic *efx, bool may_keep_eventq)
{
	struct efx_channel *channel;

	efx_mcdi_mode_poll(efx);

	efx_nic_disable_interrupts(efx);
	if (efx->legacy_irq) {
		synchronize_irq(efx->legacy_irq);
		efx->legacy_irq_enabled = false;
	}

	efx_for_each_channel(channel, efx) {
		if (channel->irq)
			synchronize_irq(channel->irq);

		efx_stop_eventq(channel);
		if (!channel->type->keep_eventq || !may_keep_eventq)
			efx_fini_eventq(channel);
	}
}

static void efx_remove_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	/* Remove MSI/MSI-X interrupts */
	efx_for_each_channel(channel, efx)
		channel->irq = 0;
	pci_disable_msi(efx->pci_dev);
	pci_disable_msix(efx->pci_dev);

	/* Remove legacy interrupt */
	efx->legacy_irq = 0;
}

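/* Record the offset of the first TX channel and renumber the TX queues
 * so that they start from zero. */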
static void efx_set_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;

	efx->tx_channel_offset =
		separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0;

	/* We need to adjust the TX queue numbers if we have separate
	 * RX-only and TX-only channels.
	 */
	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel)
			tx_queue->queue -= (efx->tx_channel_offset *
					    EFX_TXQ_TYPES);
	}
}

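/* Probe the NIC: hardware-specific probe, interrupt allocation,
 * channel/queue counts, RSS defaults and initial interrupt moderation. */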
static int efx_probe_nic(struct efx_nic *efx)
{
	size_t i;
	int rc;

	netif_dbg(efx, probe, efx->net_dev, "creating NIC\n");

	/* Carry out hardware-type specific initialisation */
	rc = efx->type->probe(efx);
	if (rc)
		return rc;

	/* Determine the number of channels and queues by trying to hook
	 * in MSI-X interrupts. */
	rc = efx_probe_interrupts(efx);
	if (rc)
		goto fail;

	efx->type->dimension_resources(efx);

	if (efx->n_channels > 1)
		get_random_bytes(&efx->rx_hash_key, sizeof(efx->rx_hash_key));
	for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
		efx->rx_indir_table[i] =
			ethtool_rxfh_indir_default(i, efx->rss_spread);

	efx_set_channels(efx);
	netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
	netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);

	/* Initialise the interrupt moderation settings */
	efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true,
				true);

	return 0;

fail:
	efx->type->remove(efx);
	return rc;
}

static void efx_remove_nic(struct efx_nic *efx)
{
	netif_dbg(efx, drv, efx->net_dev, "destroying NIC\n");

	efx_remove_interrupts(efx);
	efx->type->remove(efx);
}

  1252. /**************************************************************************
  1253. *
  1254. * NIC startup/shutdown
  1255. *
  1256. *************************************************************************/
  1257. static int efx_probe_all(struct efx_nic *efx)
  1258. {
  1259. int rc;
  1260. rc = efx_probe_nic(efx);
  1261. if (rc) {
  1262. netif_err(efx, probe, efx->net_dev, "failed to create NIC\n");
  1263. goto fail1;
  1264. }
  1265. rc = efx_probe_port(efx);
  1266. if (rc) {
  1267. netif_err(efx, probe, efx->net_dev, "failed to create port\n");
  1268. goto fail2;
  1269. }
  1270. BUILD_BUG_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_RXQ_MIN_ENT);
  1271. if (WARN_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_TXQ_MIN_ENT(efx))) {
  1272. rc = -EINVAL;
  1273. goto fail3;
  1274. }
  1275. efx->rxq_entries = efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE;
  1276. rc = efx_probe_filters(efx);
  1277. if (rc) {
  1278. netif_err(efx, probe, efx->net_dev,
  1279. "failed to create filter tables\n");
  1280. goto fail3;
  1281. }
  1282. rc = efx_probe_channels(efx);
  1283. if (rc)
  1284. goto fail4;
  1285. return 0;
  1286. fail4:
  1287. efx_remove_filters(efx);
  1288. fail3:
  1289. efx_remove_port(efx);
  1290. fail2:
  1291. efx_remove_nic(efx);
  1292. fail1:
  1293. return rc;
  1294. }
  1295. /* Called after previous invocation(s) of efx_stop_all, restarts the port,
  1296. * kernel transmit queues and NAPI processing, and ensures that the port is
  1297. * scheduled to be reconfigured. This function is safe to call multiple
  1298. * times when the NIC is in any state.
  1299. */
  1300. static void efx_start_all(struct efx_nic *efx)
  1301. {
  1302. EFX_ASSERT_RESET_SERIALISED(efx);
  1303. /* Check that it is appropriate to restart the interface. All
  1304. * of these flags are safe to read under just the rtnl lock */
  1305. if (efx->port_enabled)
  1306. return;
  1307. if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT))
  1308. return;
  1309. if (!netif_running(efx->net_dev))
  1310. return;
  1311. efx_start_port(efx);
  1312. efx_start_datapath(efx);
  1313. /* Start the hardware monitor if there is one. Otherwise (we're link
  1314. * event driven), we have to poll the PHY because after an event queue
  1315. * flush, we could have a missed a link state change */
  1316. if (efx->type->monitor != NULL) {
  1317. queue_delayed_work(efx->workqueue, &efx->monitor_work,
  1318. efx_monitor_interval);
  1319. } else {
  1320. mutex_lock(&efx->mac_lock);
  1321. if (efx->phy_op->poll(efx))
  1322. efx_link_status_changed(efx);
  1323. mutex_unlock(&efx->mac_lock);
  1324. }
  1325. efx->type->start_stats(efx);
  1326. }
  1327. /* Flush all delayed work. Should only be called when no more delayed work
  1328. * will be scheduled. This doesn't flush pending online resets (efx_reset),
  1329. * since we're holding the rtnl_lock at this point. */
  1330. static void efx_flush_all(struct efx_nic *efx)
  1331. {
  1332. /* Make sure the hardware monitor and event self-test are stopped */
  1333. cancel_delayed_work_sync(&efx->monitor_work);
  1334. efx_selftest_async_cancel(efx);
  1335. /* Stop scheduled port reconfigurations */
  1336. cancel_work_sync(&efx->mac_work);
  1337. }
  1338. /* Quiesce hardware and software without bringing the link down.
  1339. * Safe to call multiple times, when the nic and interface is in any
  1340. * state. The caller is guaranteed to subsequently be in a position
  1341. * to modify any hardware and software state they see fit without
  1342. * taking locks. */
  1343. static void efx_stop_all(struct efx_nic *efx)
  1344. {
  1345. EFX_ASSERT_RESET_SERIALISED(efx);
  1346. /* port_enabled can be read safely under the rtnl lock */
  1347. if (!efx->port_enabled)
  1348. return;
  1349. efx->type->stop_stats(efx);
  1350. efx_stop_port(efx);
  1351. /* Flush efx_mac_work(), refill_workqueue, monitor_work */
  1352. efx_flush_all(efx);
  1353. /* Stop the kernel transmit interface. This is only valid if
  1354. * the device is stopped or detached; otherwise the watchdog
  1355. * may fire immediately.
  1356. */
  1357. WARN_ON(netif_running(efx->net_dev) &&
  1358. netif_device_present(efx->net_dev));
  1359. netif_tx_disable(efx->net_dev);
  1360. efx_stop_datapath(efx);
  1361. }
  1362. static void efx_remove_all(struct efx_nic *efx)
  1363. {
  1364. efx_remove_channels(efx);
  1365. efx_remove_filters(efx);
  1366. efx_remove_port(efx);
  1367. efx_remove_nic(efx);
  1368. }
  1369. /**************************************************************************
  1370. *
  1371. * Interrupt moderation
  1372. *
  1373. **************************************************************************/
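
/* Convert an IRQ moderation time in microseconds to a count of hardware
 * timer ticks of quantum_ns nanoseconds each, rounding any non-zero
 * request up to at least one tick (e.g. 3us with a 5000ns quantum still
 * yields 1 tick rather than 0).
 */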
static unsigned int irq_mod_ticks(unsigned int usecs, unsigned int quantum_ns)
{
	if (usecs == 0)
		return 0;
	if (usecs * 1000 < quantum_ns)
		return 1; /* never round down to 0 */
	return usecs * 1000 / quantum_ns;
}

/* Set interrupt moderation parameters */
int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
			    unsigned int rx_usecs, bool rx_adaptive,
			    bool rx_may_override_tx)
{
	struct efx_channel *channel;
	unsigned int irq_mod_max = DIV_ROUND_UP(efx->type->timer_period_max *
						efx->timer_quantum_ns,
						1000);
	unsigned int tx_ticks;
	unsigned int rx_ticks;

	EFX_ASSERT_RESET_SERIALISED(efx);

	if (tx_usecs > irq_mod_max || rx_usecs > irq_mod_max)
		return -EINVAL;

	tx_ticks = irq_mod_ticks(tx_usecs, efx->timer_quantum_ns);
	rx_ticks = irq_mod_ticks(rx_usecs, efx->timer_quantum_ns);

	if (tx_ticks != rx_ticks && efx->tx_channel_offset == 0 &&
	    !rx_may_override_tx) {
		netif_err(efx, drv, efx->net_dev, "Channels are shared. "
			  "RX and TX IRQ moderation must be equal\n");
		return -EINVAL;
	}

	efx->irq_rx_adaptive = rx_adaptive;
	efx->irq_rx_moderation = rx_ticks;
	efx_for_each_channel(channel, efx) {
		if (efx_channel_has_rx_queue(channel))
			channel->irq_moderation = rx_ticks;
		else if (efx_channel_has_tx_queues(channel))
			channel->irq_moderation = tx_ticks;
	}

	return 0;
}

void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
			    unsigned int *rx_usecs, bool *rx_adaptive)
{
	/* We must round up when converting ticks to microseconds
	 * because we round down when converting the other way.
	 */
	*rx_adaptive = efx->irq_rx_adaptive;
	*rx_usecs = DIV_ROUND_UP(efx->irq_rx_moderation *
				 efx->timer_quantum_ns,
				 1000);

	/* If channels are shared between RX and TX, so is IRQ
	 * moderation.  Otherwise, IRQ moderation is the same for all
	 * TX channels and is not adaptive.
	 */
	if (efx->tx_channel_offset == 0)
		*tx_usecs = *rx_usecs;
	else
		*tx_usecs = DIV_ROUND_UP(
			efx->channel[efx->tx_channel_offset]->irq_moderation *
			efx->timer_quantum_ns,
			1000);
}

/**************************************************************************
 *
 * Hardware monitor
 *
 **************************************************************************/

/* Run periodically off the general workqueue */
static void efx_monitor(struct work_struct *data)
{
	struct efx_nic *efx = container_of(data, struct efx_nic,
					   monitor_work.work);

	netif_vdbg(efx, timer, efx->net_dev,
		   "hardware monitor executing on CPU %d\n",
		   raw_smp_processor_id());
	BUG_ON(efx->type->monitor == NULL);

	/* If the mac_lock is already held then it is likely a port
	 * reconfiguration is already in place, which will likely do
	 * most of the work of monitor() anyway. */
	if (mutex_trylock(&efx->mac_lock)) {
		if (efx->port_enabled)
			efx->type->monitor(efx);
		mutex_unlock(&efx->mac_lock);
	}

	queue_delayed_work(efx->workqueue, &efx->monitor_work,
			   efx_monitor_interval);
}

/**************************************************************************
 *
 * ioctls
 *
 *************************************************************************/

/* Net device ioctl
 * Context: process, rtnl_lock() held.
 */
static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct mii_ioctl_data *data = if_mii(ifr);

	EFX_ASSERT_RESET_SERIALISED(efx);

	/* Convert phy_id from older PRTAD/DEVAD format */
	if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) &&
	    (data->phy_id & 0xfc00) == 0x0400)
		data->phy_id ^= MDIO_PHY_ID_C45 | 0x0400;

	return mdio_mii_ioctl(&efx->mdio, data, cmd);
}

/**************************************************************************
 *
 * NAPI interface
 *
 **************************************************************************/
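
/* Register a NAPI instance for each channel, with efx_poll() as the
 * poll handler and a weight of napi_weight.
 */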
static void efx_init_napi_channel(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;

	channel->napi_dev = efx->net_dev;
	netif_napi_add(channel->napi_dev, &channel->napi_str,
		       efx_poll, napi_weight);
}

static void efx_init_napi(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_init_napi_channel(channel);
}

static void efx_fini_napi_channel(struct efx_channel *channel)
{
	if (channel->napi_dev)
		netif_napi_del(&channel->napi_str);
	channel->napi_dev = NULL;
}

static void efx_fini_napi(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_fini_napi_channel(channel);
}

/**************************************************************************
 *
 * Kernel netpoll interface
 *
 *************************************************************************/

#ifdef CONFIG_NET_POLL_CONTROLLER

/* Although in the common case interrupts will be disabled, this is not
 * guaranteed.  However, all our work happens inside the NAPI callback,
 * so no locking is required.
 */
static void efx_netpoll(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_schedule_channel(channel);
}

#endif

/**************************************************************************
 *
 * Kernel net device interface
 *
 *************************************************************************/

/* Context: process, rtnl_lock() held. */
static int efx_net_open(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	EFX_ASSERT_RESET_SERIALISED(efx);

	netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n",
		  raw_smp_processor_id());

	if (efx->state == STATE_DISABLED)
		return -EIO;
	if (efx->phy_mode & PHY_MODE_SPECIAL)
		return -EBUSY;
	if (efx_mcdi_poll_reboot(efx) && efx_reset(efx, RESET_TYPE_ALL))
		return -EIO;

	/* Notify the kernel of the link state polled during driver load,
	 * before the monitor starts running */
	efx_link_status_changed(efx);

	efx_start_all(efx);
	efx_selftest_async_start(efx);
	return 0;
}

/* Context: process, rtnl_lock() held.
 * Note that the kernel will ignore our return code; this method
 * should really be a void.
 */
static int efx_net_stop(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n",
		  raw_smp_processor_id());

	if (efx->state != STATE_DISABLED) {
		/* Stop the device and flush all the channels */
		efx_stop_all(efx);
	}

	return 0;
}

/* Context: process, dev_base_lock or RTNL held, non-blocking. */
static struct rtnl_link_stats64 *efx_net_stats(struct net_device *net_dev,
					       struct rtnl_link_stats64 *stats)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_mac_stats *mac_stats = &efx->mac_stats;

	spin_lock_bh(&efx->stats_lock);

	efx->type->update_stats(efx);

	stats->rx_packets = mac_stats->rx_packets;
	stats->tx_packets = mac_stats->tx_packets;
	stats->rx_bytes = mac_stats->rx_bytes;
	stats->tx_bytes = mac_stats->tx_bytes;
	stats->rx_dropped = efx->n_rx_nodesc_drop_cnt;
	stats->multicast = mac_stats->rx_multicast;
	stats->collisions = mac_stats->tx_collision;
	stats->rx_length_errors = (mac_stats->rx_gtjumbo +
				   mac_stats->rx_length_error);
	stats->rx_crc_errors = mac_stats->rx_bad;
	stats->rx_frame_errors = mac_stats->rx_align_error;
	stats->rx_fifo_errors = mac_stats->rx_overflow;
	stats->rx_missed_errors = mac_stats->rx_missed;
	stats->tx_window_errors = mac_stats->tx_late_collision;

	stats->rx_errors = (stats->rx_length_errors +
			    stats->rx_crc_errors +
			    stats->rx_frame_errors +
			    mac_stats->rx_symbol_error);
	stats->tx_errors = (stats->tx_window_errors +
			    mac_stats->tx_bad);

	spin_unlock_bh(&efx->stats_lock);

	return stats;
}

/* Context: netif_tx_lock held, BHs disabled. */
static void efx_watchdog(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	netif_err(efx, tx_err, efx->net_dev,
		  "TX stuck with port_enabled=%d: resetting channels\n",
		  efx->port_enabled);

	efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
}

/* Context: process, rtnl_lock() held. */
static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	EFX_ASSERT_RESET_SERIALISED(efx);

	if (new_mtu > EFX_MAX_MTU)
		return -EINVAL;

	netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);

	efx_device_detach_sync(efx);
	efx_stop_all(efx);

	mutex_lock(&efx->mac_lock);
	/* Reconfigure the MAC before enabling the dma queues so that
	 * the RX buffers don't overflow */
	net_dev->mtu = new_mtu;
	efx->type->reconfigure_mac(efx);
	mutex_unlock(&efx->mac_lock);

	efx_start_all(efx);
	netif_device_attach(efx->net_dev);
	return 0;
}
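
/* Context: process, rtnl_lock() held.
 * Validate and install a new station address, tell the SR-IOV code
 * about the change, then push it to the hardware by reconfiguring the
 * MAC under the mac_lock.
 */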
static int efx_set_mac_address(struct net_device *net_dev, void *data)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct sockaddr *addr = data;
	char *new_addr = addr->sa_data;

	EFX_ASSERT_RESET_SERIALISED(efx);

	if (!is_valid_ether_addr(new_addr)) {
		netif_err(efx, drv, efx->net_dev,
			  "invalid ethernet MAC address requested: %pM\n",
			  new_addr);
		return -EADDRNOTAVAIL;
	}

	memcpy(net_dev->dev_addr, new_addr, net_dev->addr_len);
	efx_sriov_mac_address_changed(efx);

	/* Reconfigure the MAC */
	mutex_lock(&efx->mac_lock);
	efx->type->reconfigure_mac(efx);
	mutex_unlock(&efx->mac_lock);

	return 0;
}

/* Context: netif_addr_lock held, BHs disabled. */
static void efx_set_rx_mode(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct netdev_hw_addr *ha;
	union efx_multicast_hash *mc_hash = &efx->multicast_hash;
	u32 crc;
	int bit;

	efx->promiscuous = !!(net_dev->flags & IFF_PROMISC);

	/* Build multicast hash table */
	if (efx->promiscuous || (net_dev->flags & IFF_ALLMULTI)) {
		memset(mc_hash, 0xff, sizeof(*mc_hash));
	} else {
		memset(mc_hash, 0x00, sizeof(*mc_hash));
		netdev_for_each_mc_addr(ha, net_dev) {
			crc = ether_crc_le(ETH_ALEN, ha->addr);
			bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
			set_bit_le(bit, mc_hash->byte);
		}

		/* Broadcast packets go through the multicast hash filter.
		 * ether_crc_le() of the broadcast address is 0xbe2612ff
		 * so we always add bit 0xff to the mask.
		 */
		set_bit_le(0xff, mc_hash->byte);
	}

	if (efx->port_enabled)
		queue_work(efx->workqueue, &efx->mac_work);
	/* Otherwise efx_start_port() will do this */
}
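
/* Context: process, rtnl_lock() held.
 * Only NETIF_F_NTUPLE needs any work here: turning it off invalidates
 * any manually inserted RX filters, so clear them.
 */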
static int efx_set_features(struct net_device *net_dev, netdev_features_t data)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	/* If disabling RX n-tuple filtering, clear existing filters */
	if (net_dev->features & ~data & NETIF_F_NTUPLE)
		efx_filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL);

	return 0;
}

static const struct net_device_ops efx_netdev_ops = {
	.ndo_open		= efx_net_open,
	.ndo_stop		= efx_net_stop,
	.ndo_get_stats64	= efx_net_stats,
	.ndo_tx_timeout		= efx_watchdog,
	.ndo_start_xmit		= efx_hard_start_xmit,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= efx_ioctl,
	.ndo_change_mtu		= efx_change_mtu,
	.ndo_set_mac_address	= efx_set_mac_address,
	.ndo_set_rx_mode	= efx_set_rx_mode,
	.ndo_set_features	= efx_set_features,
#ifdef CONFIG_SFC_SRIOV
	.ndo_set_vf_mac		= efx_sriov_set_vf_mac,
	.ndo_set_vf_vlan	= efx_sriov_set_vf_vlan,
	.ndo_set_vf_spoofchk	= efx_sriov_set_vf_spoofchk,
	.ndo_get_vf_config	= efx_sriov_get_vf_config,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= efx_netpoll,
#endif
	.ndo_setup_tc		= efx_setup_tc,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= efx_filter_rfs,
#endif
};
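
/* The interface name is embedded in the MTD partition names and the
 * channel names, so propagate a rename to both.
 */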
static void efx_update_name(struct efx_nic *efx)
{
	strcpy(efx->name, efx->net_dev->name);
	efx_mtd_rename(efx);
	efx_set_channel_names(efx);
}

static int efx_netdev_event(struct notifier_block *this,
			    unsigned long event, void *ptr)
{
	struct net_device *net_dev = ptr;

	if (net_dev->netdev_ops == &efx_netdev_ops &&
	    event == NETDEV_CHANGENAME)
		efx_update_name(netdev_priv(net_dev));

	return NOTIFY_DONE;
}

static struct notifier_block efx_netdev_notifier = {
	.notifier_call = efx_netdev_event,
};

static ssize_t
show_phy_type(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
	return sprintf(buf, "%d\n", efx->phy_type);
}
/* Read-only: there is no store method, so the mode must not grant
 * write permission. */
static DEVICE_ATTR(phy_type, 0444, show_phy_type, NULL);
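
/* Register the netdev with the kernel.  The RTNL lock is taken
 * internally around name allocation and registration; the phy_type
 * sysfs attribute is created afterwards, once userspace can see the
 * device.
 */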
static int efx_register_netdev(struct efx_nic *efx)
{
	struct net_device *net_dev = efx->net_dev;
	struct efx_channel *channel;
	int rc;

	net_dev->watchdog_timeo = 5 * HZ;
	net_dev->irq = efx->pci_dev->irq;
	net_dev->netdev_ops = &efx_netdev_ops;
	SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops);
	net_dev->gso_max_segs = EFX_TSO_MAX_SEGS;

	rtnl_lock();

	rc = dev_alloc_name(net_dev, net_dev->name);
	if (rc < 0)
		goto fail_locked;
	efx_update_name(efx);

	rc = register_netdevice(net_dev);
	if (rc)
		goto fail_locked;

	efx_for_each_channel(channel, efx) {
		struct efx_tx_queue *tx_queue;
		efx_for_each_channel_tx_queue(tx_queue, channel)
			efx_init_tx_queue_core_txq(tx_queue);
	}

	/* Always start with carrier off; PHY events will detect the link */
	netif_carrier_off(net_dev);

	rtnl_unlock();

	rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type);
	if (rc) {
		netif_err(efx, drv, efx->net_dev,
			  "failed to init net dev attributes\n");
		goto fail_registered;
	}

	return 0;

fail_locked:
	rtnl_unlock();
	netif_err(efx, drv, efx->net_dev, "could not register net dev\n");
	return rc;

fail_registered:
	unregister_netdev(net_dev);
	return rc;
}
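
/* Reverse of efx_register_netdev(): release any skbs still queued for
 * transmission, remove the phy_type attribute and unregister the
 * netdev.
 */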
static void efx_unregister_netdev(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;

	if (!efx->net_dev)
		return;

	BUG_ON(netdev_priv(efx->net_dev) != efx);

	/* Free up any skbs still remaining. This has to happen before
	 * we try to unregister the netdev as running their destructors
	 * may be needed to get the device ref. count to 0. */
	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel)
			efx_release_tx_buffers(tx_queue);
	}

	strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
	device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
	unregister_netdev(efx->net_dev);
}

/**************************************************************************
 *
 * Device reset and suspend
 *
 **************************************************************************/

/* Tears down the entire software state and most of the hardware state
 * before reset. */
void efx_reset_down(struct efx_nic *efx, enum reset_type method)
{
	EFX_ASSERT_RESET_SERIALISED(efx);

	efx_stop_all(efx);
	mutex_lock(&efx->mac_lock);

	efx_stop_interrupts(efx, false);
	if (efx->port_initialized && method != RESET_TYPE_INVISIBLE)
		efx->phy_op->fini(efx);
	efx->type->fini(efx);
}

/* This function will always ensure that the locks acquired in
 * efx_reset_down() are released.  A failure return code indicates
 * that we were unable to reinitialise the hardware, and the
 * driver should be disabled.  If ok is false, then the rx and tx
 * engines are not restarted, pending a RESET_DISABLE. */
int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
{
	int rc;

	EFX_ASSERT_RESET_SERIALISED(efx);

	rc = efx->type->init(efx);
	if (rc) {
		netif_err(efx, drv, efx->net_dev, "failed to initialise NIC\n");
		goto fail;
	}

	if (!ok)
		goto fail;

	if (efx->port_initialized && method != RESET_TYPE_INVISIBLE) {
		rc = efx->phy_op->init(efx);
		if (rc)
			goto fail;
		if (efx->phy_op->reconfigure(efx))
			netif_err(efx, drv, efx->net_dev,
				  "could not restore PHY settings\n");
	}

	efx->type->reconfigure_mac(efx);

	efx_start_interrupts(efx, false);
	efx_restore_filters(efx);
	efx_sriov_reset(efx);

	mutex_unlock(&efx->mac_lock);

	efx_start_all(efx);

	return 0;

fail:
	efx->port_initialized = false;

	mutex_unlock(&efx->mac_lock);

	return rc;
}

/* Reset the NIC using the specified method.  Note that the reset may
 * fail, in which case the card will be left in an unusable state.
 *
 * Caller must hold the rtnl_lock.
 */
int efx_reset(struct efx_nic *efx, enum reset_type method)
{
	int rc, rc2;
	bool disabled;

	netif_info(efx, drv, efx->net_dev, "resetting (%s)\n",
		   RESET_TYPE(method));

	efx_device_detach_sync(efx);
	efx_reset_down(efx, method);

	rc = efx->type->reset(efx, method);
	if (rc) {
		netif_err(efx, drv, efx->net_dev, "failed to reset hardware\n");
		goto out;
	}

	/* Clear flags for the scopes we covered.  We assume the NIC and
	 * driver are now quiescent so that there is no race here.
	 */
	efx->reset_pending &= -(1 << (method + 1));

	/* Reinitialise bus-mastering, which may have been turned off before
	 * the reset was scheduled. This is still appropriate, even in the
	 * RESET_TYPE_DISABLE since this driver generally assumes the hardware
	 * can respond to requests. */
	pci_set_master(efx->pci_dev);

out:
	/* Leave device stopped if necessary */
	disabled = rc || method == RESET_TYPE_DISABLE;
	rc2 = efx_reset_up(efx, method, !disabled);
	if (rc2) {
		disabled = true;
		if (!rc)
			rc = rc2;
	}

	if (disabled) {
		dev_close(efx->net_dev);
		netif_err(efx, drv, efx->net_dev, "has been disabled\n");
		efx->state = STATE_DISABLED;
	} else {
		netif_dbg(efx, drv, efx->net_dev, "reset complete\n");
		netif_device_attach(efx->net_dev);
	}
	return rc;
}

/* The worker thread exists so that code that cannot sleep can
 * schedule a reset for later.
 */
static void efx_reset_work(struct work_struct *data)
{
	struct efx_nic *efx = container_of(data, struct efx_nic, reset_work);
	unsigned long pending = ACCESS_ONCE(efx->reset_pending);

	if (!pending)
		return;

	/* If we're not RUNNING then don't reset. Leave the reset_pending
	 * flags set so that efx_pci_probe_main will be retried */
	if (efx->state != STATE_RUNNING) {
		netif_info(efx, drv, efx->net_dev,
			   "scheduled reset quenched. NIC not RUNNING\n");
		return;
	}

	rtnl_lock();
	(void)efx_reset(efx, fls(pending) - 1);
	rtnl_unlock();
}
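
/* Request a reset from any context.  The reset reason is mapped onto
 * one of the methods handled by efx_reset(), recorded in reset_pending,
 * and the actual reset is deferred to the reset workqueue.
 */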
void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
{
	enum reset_type method;

	switch (type) {
	case RESET_TYPE_INVISIBLE:
	case RESET_TYPE_ALL:
	case RESET_TYPE_WORLD:
	case RESET_TYPE_DISABLE:
		method = type;
		netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n",
			  RESET_TYPE(method));
		break;
	default:
		method = efx->type->map_reset_reason(type);
		netif_dbg(efx, drv, efx->net_dev,
			  "scheduling %s reset for %s\n",
			  RESET_TYPE(method), RESET_TYPE(type));
		break;
	}

	set_bit(method, &efx->reset_pending);

	/* efx_process_channel() will no longer read events once a
	 * reset is scheduled. So switch back to poll'd MCDI completions. */
	efx_mcdi_mode_poll(efx);

	queue_work(reset_workqueue, &efx->reset_work);
}

/**************************************************************************
 *
 * List of NICs we support
 *
 **************************************************************************/

/* PCI device ID table */
static DEFINE_PCI_DEVICE_TABLE(efx_pci_table) = {
	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE,
		    PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0),
	 .driver_data = (unsigned long) &falcon_a1_nic_type},
	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE,
		    PCI_DEVICE_ID_SOLARFLARE_SFC4000B),
	 .driver_data = (unsigned long) &falcon_b0_nic_type},
	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0803),	/* SFC9020 */
	 .driver_data = (unsigned long) &siena_a0_nic_type},
	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0813),	/* SFL9021 */
	 .driver_data = (unsigned long) &siena_a0_nic_type},
	{0}			/* end of list */
};

/**************************************************************************
 *
 * Dummy PHY/MAC operations
 *
 * Can be used for some unimplemented operations
 * Needed so all function pointers are valid and do not have to be tested
 * before use
 *
 **************************************************************************/

int efx_port_dummy_op_int(struct efx_nic *efx)
{
	return 0;
}
void efx_port_dummy_op_void(struct efx_nic *efx) {}

static bool efx_port_dummy_op_poll(struct efx_nic *efx)
{
	return false;
}

static const struct efx_phy_operations efx_dummy_phy_operations = {
	.init		 = efx_port_dummy_op_int,
	.reconfigure	 = efx_port_dummy_op_int,
	.poll		 = efx_port_dummy_op_poll,
	.fini		 = efx_port_dummy_op_void,
};

/**************************************************************************
 *
 * Data housekeeping
 *
 **************************************************************************/

/* This zeroes out and then fills in the invariants in a struct
 * efx_nic (including all sub-structures).
 */
static int efx_init_struct(struct efx_nic *efx, const struct efx_nic_type *type,
			   struct pci_dev *pci_dev, struct net_device *net_dev)
{
	int i;

	/* Initialise common structures */
	memset(efx, 0, sizeof(*efx));
	spin_lock_init(&efx->biu_lock);
#ifdef CONFIG_SFC_MTD
	INIT_LIST_HEAD(&efx->mtd_list);
#endif
	INIT_WORK(&efx->reset_work, efx_reset_work);
	INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor);
	INIT_DELAYED_WORK(&efx->selftest_work, efx_selftest_async_work);
	efx->pci_dev = pci_dev;
	efx->msg_enable = debug;
	efx->state = STATE_INIT;
	strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));

	efx->net_dev = net_dev;
	spin_lock_init(&efx->stats_lock);
	mutex_init(&efx->mac_lock);
	efx->phy_op = &efx_dummy_phy_operations;
	efx->mdio.dev = net_dev;
	INIT_WORK(&efx->mac_work, efx_mac_work);
	init_waitqueue_head(&efx->flush_wq);

	for (i = 0; i < EFX_MAX_CHANNELS; i++) {
		efx->channel[i] = efx_alloc_channel(efx, i, NULL);
		if (!efx->channel[i])
			goto fail;
	}

	efx->type = type;

	EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS);

	/* Higher numbered interrupt modes are less capable! */
	efx->interrupt_mode = max(efx->type->max_interrupt_mode,
				  interrupt_mode);

	/* Would be good to use the net_dev name, but we're too early */
	snprintf(efx->workqueue_name, sizeof(efx->workqueue_name), "sfc%s",
		 pci_name(pci_dev));
	efx->workqueue = create_singlethread_workqueue(efx->workqueue_name);
	if (!efx->workqueue)
		goto fail;

	return 0;

fail:
	efx_fini_struct(efx);
	return -ENOMEM;
}
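
/* Free the channel structures and the driver workqueue allocated by
 * efx_init_struct().  Safe to call on a partially constructed struct,
 * as kfree() ignores NULL pointers and the workqueue is checked first.
 */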
static void efx_fini_struct(struct efx_nic *efx)
{
	int i;

	for (i = 0; i < EFX_MAX_CHANNELS; i++)
		kfree(efx->channel[i]);

	if (efx->workqueue) {
		destroy_workqueue(efx->workqueue);
		efx->workqueue = NULL;
	}
}

/**************************************************************************
 *
 * PCI interface
 *
 **************************************************************************/

/* Main body of final NIC shutdown code
 * This is called only at module unload (or hotplug removal).
 */
static void efx_pci_remove_main(struct efx_nic *efx)
{
#ifdef CONFIG_RFS_ACCEL
	free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
	efx->net_dev->rx_cpu_rmap = NULL;
#endif
	efx_stop_interrupts(efx, false);
	efx_nic_fini_interrupt(efx);
	efx_fini_port(efx);
	efx->type->fini(efx);
	efx_fini_napi(efx);
	efx_remove_all(efx);
}

/* Final NIC shutdown
 * This is called only at module unload (or hotplug removal).
 */
static void efx_pci_remove(struct pci_dev *pci_dev)
{
	struct efx_nic *efx;

	efx = pci_get_drvdata(pci_dev);
	if (!efx)
		return;

	/* Mark the NIC as fini, then stop the interface */
	rtnl_lock();
	efx->state = STATE_FINI;
	dev_close(efx->net_dev);

	/* Allow any queued efx_resets() to complete */
	rtnl_unlock();

	efx_stop_interrupts(efx, false);
	efx_sriov_fini(efx);
	efx_unregister_netdev(efx);

	efx_mtd_remove(efx);

	/* Wait for any scheduled resets to complete. No more will be
	 * scheduled from this point because efx_stop_all() has been
	 * called, we are no longer registered with driverlink, and
	 * the net_device's have been removed. */
	cancel_work_sync(&efx->reset_work);

	efx_pci_remove_main(efx);

	efx_fini_io(efx);
	netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n");

	pci_set_drvdata(pci_dev, NULL);
	efx_fini_struct(efx);
	free_netdev(efx->net_dev);
}

/* NIC VPD information
 * Called during probe to display the part number of the
 * installed NIC.  VPD is potentially very large but this should
 * always appear within the first 512 bytes.
 */
#define SFC_VPD_LEN 512
static void efx_print_product_vpd(struct efx_nic *efx)
{
	struct pci_dev *dev = efx->pci_dev;
	char vpd_data[SFC_VPD_LEN];
	ssize_t vpd_size;
	int i, j;

	/* Get the vpd data from the device */
	vpd_size = pci_read_vpd(dev, 0, sizeof(vpd_data), vpd_data);
	if (vpd_size <= 0) {
		netif_err(efx, drv, efx->net_dev, "Unable to read VPD\n");
		return;
	}

	/* Get the Read only section */
	i = pci_vpd_find_tag(vpd_data, 0, vpd_size, PCI_VPD_LRDT_RO_DATA);
	if (i < 0) {
		netif_err(efx, drv, efx->net_dev, "VPD Read-only not found\n");
		return;
	}

	j = pci_vpd_lrdt_size(&vpd_data[i]);
	i += PCI_VPD_LRDT_TAG_SIZE;
	if (i + j > vpd_size)
		j = vpd_size - i;

	/* Get the Part number */
	i = pci_vpd_find_info_keyword(vpd_data, i, j, "PN");
	if (i < 0) {
		netif_err(efx, drv, efx->net_dev, "Part number not found\n");
		return;
	}

	j = pci_vpd_info_field_size(&vpd_data[i]);
	i += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (i + j > vpd_size) {
		netif_err(efx, drv, efx->net_dev, "Incomplete part number\n");
		return;
	}

	netif_info(efx, drv, efx->net_dev,
		   "Part Number : %.*s\n", j, &vpd_data[i]);
}

/* Main body of NIC initialisation
 * This is called at module load (or hotplug insertion, theoretically).
 */
static int efx_pci_probe_main(struct efx_nic *efx)
{
	int rc;

	/* Do start-of-day initialisation */
	rc = efx_probe_all(efx);
	if (rc)
		goto fail1;

	efx_init_napi(efx);

	rc = efx->type->init(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "failed to initialise NIC\n");
		goto fail3;
	}

	rc = efx_init_port(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "failed to initialise port\n");
		goto fail4;
	}

	rc = efx_nic_init_interrupt(efx);
	if (rc)
		goto fail5;
	efx_start_interrupts(efx, false);

	return 0;

fail5:
	efx_fini_port(efx);
fail4:
	efx->type->fini(efx);
fail3:
	efx_fini_napi(efx);
	efx_remove_all(efx);
fail1:
	return rc;
}

/* NIC initialisation
 *
 * This is called at module load (or hotplug insertion,
 * theoretically).  It sets up PCI mappings, resets the NIC,
 * sets up and registers the network devices with the kernel and hooks
 * the interrupt service routine.  It does not prepare the device for
 * transmission; this is left to the first time one of the network
 * interfaces is brought up (i.e. efx_net_open).
 */
static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
				   const struct pci_device_id *entry)
{
	const struct efx_nic_type *type = (const struct efx_nic_type *) entry->driver_data;
	struct net_device *net_dev;
	struct efx_nic *efx;
	int rc;

	/* Allocate and initialise a struct net_device and struct efx_nic */
	net_dev = alloc_etherdev_mqs(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES,
				     EFX_MAX_RX_QUEUES);
	if (!net_dev)
		return -ENOMEM;
	net_dev->features |= (type->offload_features | NETIF_F_SG |
			      NETIF_F_HIGHDMA | NETIF_F_TSO |
			      NETIF_F_RXCSUM);
	if (type->offload_features & NETIF_F_V6_CSUM)
		net_dev->features |= NETIF_F_TSO6;
	/* Mask for features that also apply to VLAN devices */
	net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG |
				   NETIF_F_HIGHDMA | NETIF_F_ALL_TSO |
				   NETIF_F_RXCSUM);
	/* All offloads can be toggled */
	net_dev->hw_features = net_dev->features & ~NETIF_F_HIGHDMA;
	efx = netdev_priv(net_dev);
	pci_set_drvdata(pci_dev, efx);
	SET_NETDEV_DEV(net_dev, &pci_dev->dev);
	rc = efx_init_struct(efx, type, pci_dev, net_dev);
	if (rc)
		goto fail1;

	netif_info(efx, probe, efx->net_dev,
		   "Solarflare NIC detected\n");

	efx_print_product_vpd(efx);

	/* Set up basic I/O (BAR mappings etc) */
	rc = efx_init_io(efx);
	if (rc)
		goto fail2;

	rc = efx_pci_probe_main(efx);

	/* Serialise against efx_reset().  No more resets will be
	 * scheduled since efx_stop_all() has been called, and we have
	 * not and never have been registered.
	 */
	cancel_work_sync(&efx->reset_work);

	if (rc)
		goto fail3;

	/* If there was a scheduled reset during probe, the NIC is
	 * probably hosed anyway.
	 */
	if (efx->reset_pending) {
		rc = -EIO;
		goto fail4;
	}

	/* Switch to the running state before we expose the device to the OS,
	 * so that dev_open()|efx_start_all() will actually start the device */
	efx->state = STATE_RUNNING;

	rc = efx_register_netdev(efx);
	if (rc)
		goto fail4;

	rc = efx_sriov_init(efx);
	if (rc)
		netif_err(efx, probe, efx->net_dev,
			  "SR-IOV can't be enabled rc %d\n", rc);

	netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n");

	/* Try to create MTDs, but allow this to fail */
	rtnl_lock();
	rc = efx_mtd_probe(efx);
	rtnl_unlock();
	if (rc)
		netif_warn(efx, probe, efx->net_dev,
			   "failed to create MTDs (%d)\n", rc);

	return 0;

fail4:
	efx_pci_remove_main(efx);
fail3:
	efx_fini_io(efx);
fail2:
	efx_fini_struct(efx);
fail1:
	WARN_ON(rc > 0);
	netif_dbg(efx, drv, efx->net_dev, "initialisation failed. rc=%d\n", rc);
	free_netdev(net_dev);
	return rc;
}
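
/* Power management callbacks.  freeze/thaw quiesce and restart the
 * driver without touching PCI power state; poweroff/resume additionally
 * move the device between D3hot and D0, and suspend is implemented as
 * freeze followed by poweroff.
 */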
static int efx_pm_freeze(struct device *dev)
{
	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));

	efx->state = STATE_FINI;

	efx_device_detach_sync(efx);

	efx_stop_all(efx);
	efx_stop_interrupts(efx, false);

	return 0;
}

static int efx_pm_thaw(struct device *dev)
{
	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));

	efx->state = STATE_INIT;

	efx_start_interrupts(efx, false);

	mutex_lock(&efx->mac_lock);
	efx->phy_op->reconfigure(efx);
	mutex_unlock(&efx->mac_lock);

	efx_start_all(efx);

	netif_device_attach(efx->net_dev);

	efx->state = STATE_RUNNING;

	efx->type->resume_wol(efx);

	/* Reschedule any quenched resets scheduled during efx_pm_freeze() */
	queue_work(reset_workqueue, &efx->reset_work);

	return 0;
}

static int efx_pm_poweroff(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct efx_nic *efx = pci_get_drvdata(pci_dev);

	efx->type->fini(efx);

	efx->reset_pending = 0;

	pci_save_state(pci_dev);
	return pci_set_power_state(pci_dev, PCI_D3hot);
}

/* Used for both resume and restore */
static int efx_pm_resume(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct efx_nic *efx = pci_get_drvdata(pci_dev);
	int rc;

	rc = pci_set_power_state(pci_dev, PCI_D0);
	if (rc)
		return rc;
	pci_restore_state(pci_dev);
	rc = pci_enable_device(pci_dev);
	if (rc)
		return rc;
	pci_set_master(efx->pci_dev);
	rc = efx->type->reset(efx, RESET_TYPE_ALL);
	if (rc)
		return rc;
	rc = efx->type->init(efx);
	if (rc)
		return rc;
	efx_pm_thaw(dev);
	return 0;
}

static int efx_pm_suspend(struct device *dev)
{
	int rc;

	efx_pm_freeze(dev);
	rc = efx_pm_poweroff(dev);
	if (rc)
		efx_pm_resume(dev);
	return rc;
}

static const struct dev_pm_ops efx_pm_ops = {
	.suspend	= efx_pm_suspend,
	.resume		= efx_pm_resume,
	.freeze		= efx_pm_freeze,
	.thaw		= efx_pm_thaw,
	.poweroff	= efx_pm_poweroff,
	.restore	= efx_pm_resume,
};

static struct pci_driver efx_pci_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= efx_pci_table,
	.probe		= efx_pci_probe,
	.remove		= efx_pci_remove,
	.driver.pm	= &efx_pm_ops,
};

/**************************************************************************
 *
 * Kernel module interface
 *
 *************************************************************************/

module_param(interrupt_mode, uint, 0444);
MODULE_PARM_DESC(interrupt_mode,
		 "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)");

static int __init efx_init_module(void)
{
	int rc;

	printk(KERN_INFO "Solarflare NET driver v" EFX_DRIVER_VERSION "\n");

	rc = register_netdevice_notifier(&efx_netdev_notifier);
	if (rc)
		goto err_notifier;

	rc = efx_init_sriov();
	if (rc)
		goto err_sriov;

	reset_workqueue = create_singlethread_workqueue("sfc_reset");
	if (!reset_workqueue) {
		rc = -ENOMEM;
		goto err_reset;
	}

	rc = pci_register_driver(&efx_pci_driver);
	if (rc < 0)
		goto err_pci;

	return 0;

err_pci:
	destroy_workqueue(reset_workqueue);
err_reset:
	efx_fini_sriov();
err_sriov:
	unregister_netdevice_notifier(&efx_netdev_notifier);
err_notifier:
	return rc;
}

static void __exit efx_exit_module(void)
{
	printk(KERN_INFO "Solarflare NET driver unloading\n");

	pci_unregister_driver(&efx_pci_driver);
	destroy_workqueue(reset_workqueue);
	efx_fini_sriov();
	unregister_netdevice_notifier(&efx_netdev_notifier);
}

module_init(efx_init_module);
module_exit(efx_exit_module);

MODULE_AUTHOR("Solarflare Communications and "
	      "Michael Brown <mbrown@fensystems.co.uk>");
MODULE_DESCRIPTION("Solarflare Communications network driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, efx_pci_table);