
/*************************************************************************
 * myri10ge.c: Myricom Myri-10G Ethernet driver.
 *
 * Copyright (C) 2005 - 2011 Myricom, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Myricom, Inc. nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * If the eeprom on your board is not recent enough, you will need to get a
 * newer firmware image at:
 *   http://www.myri.com/scs/download-Myri10GE.html
 *
 * Contact Information:
 *   <help@myri.com>
 *   Myricom, Inc., 325N Santa Anita Avenue, Arcadia, CA 91006
 *************************************************************************/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/tcp.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/inet_lro.h>
#include <linux/dca.h>
#include <linux/ip.h>
#include <linux/inet.h>
#include <linux/in.h>
#include <linux/ethtool.h>
#include <linux/firmware.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/crc32.h>
#include <linux/moduleparam.h>
#include <linux/io.h>
#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <net/checksum.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <asm/byteorder.h>
#include <asm/io.h>
#include <asm/processor.h>
#ifdef CONFIG_MTRR
#include <asm/mtrr.h>
#endif

#include "myri10ge_mcp.h"
#include "myri10ge_mcp_gen_header.h"

#define MYRI10GE_VERSION_STR "1.5.3-1.534"

MODULE_DESCRIPTION("Myricom 10G driver (10GbE)");
MODULE_AUTHOR("Maintainer: help@myri.com");
MODULE_VERSION(MYRI10GE_VERSION_STR);
MODULE_LICENSE("Dual BSD/GPL");

#define MYRI10GE_MAX_ETHER_MTU 9014

#define MYRI10GE_ETH_STOPPED 0
#define MYRI10GE_ETH_STOPPING 1
#define MYRI10GE_ETH_STARTING 2
#define MYRI10GE_ETH_RUNNING 3
#define MYRI10GE_ETH_OPEN_FAILED 4

#define MYRI10GE_EEPROM_STRINGS_SIZE 256
#define MYRI10GE_MAX_SEND_DESC_TSO ((65536 / 2048) * 2)
#define MYRI10GE_MAX_LRO_DESCRIPTORS 8
#define MYRI10GE_LRO_MAX_PKTS 64

#define MYRI10GE_NO_CONFIRM_DATA htonl(0xffffffff)
#define MYRI10GE_NO_RESPONSE_RESULT 0xffffffff

#define MYRI10GE_ALLOC_ORDER 0
#define MYRI10GE_ALLOC_SIZE ((1 << MYRI10GE_ALLOC_ORDER) * PAGE_SIZE)
#define MYRI10GE_MAX_FRAGS_PER_FRAME (MYRI10GE_MAX_ETHER_MTU/MYRI10GE_ALLOC_SIZE + 1)

#define MYRI10GE_MAX_SLICES 32
struct myri10ge_rx_buffer_state {
	struct page *page;
	int page_offset;
	DEFINE_DMA_UNMAP_ADDR(bus);
	DEFINE_DMA_UNMAP_LEN(len);
};

struct myri10ge_tx_buffer_state {
	struct sk_buff *skb;
	int last;
	DEFINE_DMA_UNMAP_ADDR(bus);
	DEFINE_DMA_UNMAP_LEN(len);
};

struct myri10ge_cmd {
	u32 data0;
	u32 data1;
	u32 data2;
};

struct myri10ge_rx_buf {
	struct mcp_kreq_ether_recv __iomem *lanai;	/* lanai ptr for recv ring */
	struct mcp_kreq_ether_recv *shadow;	/* host shadow of recv ring */
	struct myri10ge_rx_buffer_state *info;
	struct page *page;
	dma_addr_t bus;
	int page_offset;
	int cnt;
	int fill_cnt;
	int alloc_fail;
	int mask;		/* number of rx slots -1 */
	int watchdog_needed;
};

struct myri10ge_tx_buf {
	struct mcp_kreq_ether_send __iomem *lanai;	/* lanai ptr for sendq */
	__be32 __iomem *send_go;	/* "go" doorbell ptr */
	__be32 __iomem *send_stop;	/* "stop" doorbell ptr */
	struct mcp_kreq_ether_send *req_list;	/* host shadow of sendq */
	char *req_bytes;
	struct myri10ge_tx_buffer_state *info;
	int mask;		/* number of transmit slots -1  */
	int req ____cacheline_aligned;	/* transmit slots submitted     */
	int pkt_start;		/* packets started */
	int stop_queue;
	int linearized;
	int done ____cacheline_aligned;	/* transmit slots completed     */
	int pkt_done;		/* packets completed */
	int wake_queue;
	int queue_active;
};

struct myri10ge_rx_done {
	struct mcp_slot *entry;
	dma_addr_t bus;
	int cnt;
	int idx;
	struct net_lro_mgr lro_mgr;
	struct net_lro_desc lro_desc[MYRI10GE_MAX_LRO_DESCRIPTORS];
};

struct myri10ge_slice_netstats {
	unsigned long rx_packets;
	unsigned long tx_packets;
	unsigned long rx_bytes;
	unsigned long tx_bytes;
	unsigned long rx_dropped;
	unsigned long tx_dropped;
};

struct myri10ge_slice_state {
	struct myri10ge_tx_buf tx;	/* transmit ring        */
	struct myri10ge_rx_buf rx_small;
	struct myri10ge_rx_buf rx_big;
	struct myri10ge_rx_done rx_done;
	struct net_device *dev;
	struct napi_struct napi;
	struct myri10ge_priv *mgp;
	struct myri10ge_slice_netstats stats;
	__be32 __iomem *irq_claim;
	struct mcp_irq_data *fw_stats;
	dma_addr_t fw_stats_bus;
	int watchdog_tx_done;
	int watchdog_tx_req;
	int watchdog_rx_done;
	int stuck;
#ifdef CONFIG_MYRI10GE_DCA
	int cached_dca_tag;
	int cpu;
	__be32 __iomem *dca_tag;
#endif
	char irq_desc[32];
};

struct myri10ge_priv {
	struct myri10ge_slice_state *ss;
	int tx_boundary;	/* boundary transmits cannot cross */
	int num_slices;
	int running;		/* running?             */
	int small_bytes;
	int big_bytes;
	int max_intr_slots;
	struct net_device *dev;
	u8 __iomem *sram;
	int sram_size;
	unsigned long board_span;
	unsigned long iomem_base;
	__be32 __iomem *irq_deassert;
	char *mac_addr_string;
	struct mcp_cmd_response *cmd;
	dma_addr_t cmd_bus;
	struct pci_dev *pdev;
	int msi_enabled;
	int msix_enabled;
	struct msix_entry *msix_vectors;
#ifdef CONFIG_MYRI10GE_DCA
	int dca_enabled;
	int relaxed_order;
#endif
	u32 link_state;
	unsigned int rdma_tags_available;
	int intr_coal_delay;
	__be32 __iomem *intr_coal_delay_ptr;
	int mtrr;
	int wc_enabled;
	int down_cnt;
	wait_queue_head_t down_wq;
	struct work_struct watchdog_work;
	struct timer_list watchdog_timer;
	int watchdog_resets;
	int watchdog_pause;
	int pause;
	bool fw_name_allocated;
	char *fw_name;
	char eeprom_strings[MYRI10GE_EEPROM_STRINGS_SIZE];
	char *product_code_string;
	char fw_version[128];
	int fw_ver_major;
	int fw_ver_minor;
	int fw_ver_tiny;
	int adopted_rx_filter_bug;
	u8 mac_addr[6];		/* eeprom mac address */
	unsigned long serial_number;
	int vendor_specific_offset;
	int fw_multicast_support;
	u32 features;
	u32 max_tso6;
	u32 read_dma;
	u32 write_dma;
	u32 read_write_dma;
	u32 link_changes;
	u32 msg_enable;
	unsigned int board_number;
	int rebooted;
};
static char *myri10ge_fw_unaligned = "myri10ge_ethp_z8e.dat";
static char *myri10ge_fw_aligned = "myri10ge_eth_z8e.dat";
static char *myri10ge_fw_rss_unaligned = "myri10ge_rss_ethp_z8e.dat";
static char *myri10ge_fw_rss_aligned = "myri10ge_rss_eth_z8e.dat";

MODULE_FIRMWARE("myri10ge_ethp_z8e.dat");
MODULE_FIRMWARE("myri10ge_eth_z8e.dat");
MODULE_FIRMWARE("myri10ge_rss_ethp_z8e.dat");
MODULE_FIRMWARE("myri10ge_rss_eth_z8e.dat");

/* Careful: must be accessed under kparam_block_sysfs_write */
static char *myri10ge_fw_name = NULL;
module_param(myri10ge_fw_name, charp, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(myri10ge_fw_name, "Firmware image name");

#define MYRI10GE_MAX_BOARDS 8
static char *myri10ge_fw_names[MYRI10GE_MAX_BOARDS] =
    {[0 ... (MYRI10GE_MAX_BOARDS - 1)] = NULL };
module_param_array_named(myri10ge_fw_names, myri10ge_fw_names, charp, NULL,
			 0444);
MODULE_PARM_DESC(myri10ge_fw_names, "Firmware image names per board");

static int myri10ge_ecrc_enable = 1;
module_param(myri10ge_ecrc_enable, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_ecrc_enable, "Enable Extended CRC on PCI-E");

static int myri10ge_small_bytes = -1;	/* -1 == auto */
module_param(myri10ge_small_bytes, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(myri10ge_small_bytes, "Threshold of small packets");

static int myri10ge_msi = 1;	/* enable msi by default */
module_param(myri10ge_msi, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(myri10ge_msi, "Enable Message Signalled Interrupts");

static int myri10ge_intr_coal_delay = 75;
module_param(myri10ge_intr_coal_delay, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_intr_coal_delay, "Interrupt coalescing delay");

static int myri10ge_flow_control = 1;
module_param(myri10ge_flow_control, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_flow_control, "Pause parameter");

static int myri10ge_deassert_wait = 1;
module_param(myri10ge_deassert_wait, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(myri10ge_deassert_wait,
		 "Wait when deasserting legacy interrupts");

static int myri10ge_force_firmware = 0;
module_param(myri10ge_force_firmware, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_force_firmware,
		 "Force firmware to assume aligned completions");

static int myri10ge_initial_mtu = MYRI10GE_MAX_ETHER_MTU - ETH_HLEN;
module_param(myri10ge_initial_mtu, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_initial_mtu, "Initial MTU");

static int myri10ge_napi_weight = 64;
module_param(myri10ge_napi_weight, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_napi_weight, "Set NAPI weight");

static int myri10ge_watchdog_timeout = 1;
module_param(myri10ge_watchdog_timeout, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_watchdog_timeout, "Set watchdog timeout");

static int myri10ge_max_irq_loops = 1048576;
module_param(myri10ge_max_irq_loops, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_max_irq_loops,
		 "Set stuck legacy IRQ detection threshold");

#define MYRI10GE_MSG_DEFAULT NETIF_MSG_LINK

static int myri10ge_debug = -1;	/* defaults above */
module_param(myri10ge_debug, int, 0);
MODULE_PARM_DESC(myri10ge_debug, "Debug level (0=none,...,16=all)");

static int myri10ge_lro_max_pkts = MYRI10GE_LRO_MAX_PKTS;
module_param(myri10ge_lro_max_pkts, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_lro_max_pkts,
		 "Number of LRO packets to be aggregated");

static int myri10ge_fill_thresh = 256;
module_param(myri10ge_fill_thresh, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(myri10ge_fill_thresh, "Number of empty rx slots allowed");

static int myri10ge_reset_recover = 1;

static int myri10ge_max_slices = 1;
module_param(myri10ge_max_slices, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_max_slices, "Max tx/rx queues");

static int myri10ge_rss_hash = MXGEFW_RSS_HASH_TYPE_SRC_DST_PORT;
module_param(myri10ge_rss_hash, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_rss_hash, "Type of RSS hashing to do");

static int myri10ge_dca = 1;
module_param(myri10ge_dca, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_dca, "Enable DCA if possible");

#define MYRI10GE_FW_OFFSET 1024*1024
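
/* The two macros below split a DMA bus address into the 32-bit halves
 * expected by the firmware; where dma_addr_t is only 32 bits wide the
 * high half evaluates to 0.
 */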
#define MYRI10GE_HIGHPART_TO_U32(X) \
(sizeof (X) == 8) ? ((u32)((u64)(X) >> 32)) : (0)
#define MYRI10GE_LOWPART_TO_U32(X) ((u32)(X))

#define myri10ge_pio_copy(to,from,size) __iowrite64_copy(to,from,size/8)

static void myri10ge_set_multicast_list(struct net_device *dev);
static netdev_tx_t myri10ge_sw_tso(struct sk_buff *skb,
				   struct net_device *dev);

static inline void put_be32(__be32 val, __be32 __iomem * p)
{
	__raw_writel((__force __u32) val, (__force void __iomem *)p);
}

static struct rtnl_link_stats64 *myri10ge_get_stats(struct net_device *dev,
						    struct rtnl_link_stats64 *stats);

static void set_fw_name(struct myri10ge_priv *mgp, char *name, bool allocated)
{
	if (mgp->fw_name_allocated)
		kfree(mgp->fw_name);
	mgp->fw_name = name;
	mgp->fw_name_allocated = allocated;
}
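
/* Post a command to the firmware command mailbox in NIC SRAM and poll
 * the host-resident response block (mgp->cmd) until the firmware DMAs
 * back a result: busy-wait when called from atomic context, sleep
 * otherwise.
 */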
static int
myri10ge_send_cmd(struct myri10ge_priv *mgp, u32 cmd,
		  struct myri10ge_cmd *data, int atomic)
{
	struct mcp_cmd *buf;
	char buf_bytes[sizeof(*buf) + 8];
	struct mcp_cmd_response *response = mgp->cmd;
	char __iomem *cmd_addr = mgp->sram + MXGEFW_ETH_CMD;
	u32 dma_low, dma_high, result, value;
	int sleep_total = 0;

	/* ensure buf is aligned to 8 bytes */
	buf = (struct mcp_cmd *)ALIGN((unsigned long)buf_bytes, 8);

	buf->data0 = htonl(data->data0);
	buf->data1 = htonl(data->data1);
	buf->data2 = htonl(data->data2);
	buf->cmd = htonl(cmd);
	dma_low = MYRI10GE_LOWPART_TO_U32(mgp->cmd_bus);
	dma_high = MYRI10GE_HIGHPART_TO_U32(mgp->cmd_bus);

	buf->response_addr.low = htonl(dma_low);
	buf->response_addr.high = htonl(dma_high);
	response->result = htonl(MYRI10GE_NO_RESPONSE_RESULT);
	mb();
	myri10ge_pio_copy(cmd_addr, buf, sizeof(*buf));

	/* wait up to 15ms. Longest command is the DMA benchmark,
	 * which is capped at 5ms, but runs from a timeout handler
	 * that runs every 7.8ms. So a 15ms timeout leaves us with
	 * a 2.2ms margin
	 */
	if (atomic) {
		/* if atomic is set, do not sleep,
		 * and try to get the completion quickly
		 * (1ms will be enough for those commands) */
		for (sleep_total = 0;
		     sleep_total < 1000 &&
		     response->result == htonl(MYRI10GE_NO_RESPONSE_RESULT);
		     sleep_total += 10) {
			udelay(10);
			mb();
		}
	} else {
		/* use msleep for most commands */
		for (sleep_total = 0;
		     sleep_total < 15 &&
		     response->result == htonl(MYRI10GE_NO_RESPONSE_RESULT);
		     sleep_total++)
			msleep(1);
	}

	result = ntohl(response->result);
	value = ntohl(response->data);
	if (result != MYRI10GE_NO_RESPONSE_RESULT) {
		if (result == 0) {
			data->data0 = value;
			return 0;
		} else if (result == MXGEFW_CMD_UNKNOWN) {
			return -ENOSYS;
		} else if (result == MXGEFW_CMD_ERROR_UNALIGNED) {
			return -E2BIG;
		} else if (result == MXGEFW_CMD_ERROR_RANGE &&
			   cmd == MXGEFW_CMD_ENABLE_RSS_QUEUES &&
			   (data->data1 & MXGEFW_SLICE_ENABLE_MULTIPLE_TX_QUEUES) != 0) {
			return -ERANGE;
		} else {
			dev_err(&mgp->pdev->dev,
				"command %d failed, result = %d\n",
				cmd, result);
			return -ENXIO;
		}
	}

	dev_err(&mgp->pdev->dev, "command %d timed out, result = %d\n",
		cmd, result);
	return -EAGAIN;
}
/*
 * The eeprom strings on the lanaiX have the format
 * SN=x\0
 * MAC=x:x:x:x:x:x\0
 * PT:ddd mmm xx xx:xx:xx xx\0
 * PV:ddd mmm xx xx:xx:xx xx\0
 */
static int myri10ge_read_mac_addr(struct myri10ge_priv *mgp)
{
	char *ptr, *limit;
	int i;

	ptr = mgp->eeprom_strings;
	limit = mgp->eeprom_strings + MYRI10GE_EEPROM_STRINGS_SIZE;

	while (*ptr != '\0' && ptr < limit) {
		if (memcmp(ptr, "MAC=", 4) == 0) {
			ptr += 4;
			mgp->mac_addr_string = ptr;
			for (i = 0; i < 6; i++) {
				if ((ptr + 2) > limit)
					goto abort;
				mgp->mac_addr[i] =
				    simple_strtoul(ptr, &ptr, 16);
				ptr += 1;
			}
		}
		if (memcmp(ptr, "PC=", 3) == 0) {
			ptr += 3;
			mgp->product_code_string = ptr;
		}
		if (memcmp((const void *)ptr, "SN=", 3) == 0) {
			ptr += 3;
			mgp->serial_number = simple_strtoul(ptr, &ptr, 10);
		}
		while (ptr < limit && *ptr++) ;
	}

	return 0;

abort:
	dev_err(&mgp->pdev->dev, "failed to parse eeprom_strings\n");
	return -ENXIO;
}
/*
 * Enable or disable periodic RDMAs from the host to make certain
 * chipsets resend dropped PCIe messages
 */
static void myri10ge_dummy_rdma(struct myri10ge_priv *mgp, int enable)
{
	char __iomem *submit;
	__be32 buf[16] __attribute__ ((__aligned__(8)));
	u32 dma_low, dma_high;
	int i;

	/* clear confirmation addr */
	mgp->cmd->data = 0;
	mb();

	/* send a rdma command to the PCIe engine, and wait for the
	 * response in the confirmation address. The firmware should
	 * write a -1 there to indicate it is alive and well
	 */
	dma_low = MYRI10GE_LOWPART_TO_U32(mgp->cmd_bus);
	dma_high = MYRI10GE_HIGHPART_TO_U32(mgp->cmd_bus);

	buf[0] = htonl(dma_high);	/* confirm addr MSW */
	buf[1] = htonl(dma_low);	/* confirm addr LSW */
	buf[2] = MYRI10GE_NO_CONFIRM_DATA;	/* confirm data */
	buf[3] = htonl(dma_high);	/* dummy addr MSW */
	buf[4] = htonl(dma_low);	/* dummy addr LSW */
	buf[5] = htonl(enable);	/* enable? */

	submit = mgp->sram + MXGEFW_BOOT_DUMMY_RDMA;

	myri10ge_pio_copy(submit, &buf, sizeof(buf));
	for (i = 0; mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA && i < 20; i++)
		msleep(1);
	if (mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA)
		dev_err(&mgp->pdev->dev, "dummy rdma %s failed\n",
			(enable ? "enable" : "disable"));
}

static int
myri10ge_validate_firmware(struct myri10ge_priv *mgp,
			   struct mcp_gen_header *hdr)
{
	struct device *dev = &mgp->pdev->dev;

	/* check firmware type */
	if (ntohl(hdr->mcp_type) != MCP_TYPE_ETH) {
		dev_err(dev, "Bad firmware type: 0x%x\n", ntohl(hdr->mcp_type));
		return -EINVAL;
	}

	/* save firmware version for ethtool */
	strncpy(mgp->fw_version, hdr->version, sizeof(mgp->fw_version));

	sscanf(mgp->fw_version, "%d.%d.%d", &mgp->fw_ver_major,
	       &mgp->fw_ver_minor, &mgp->fw_ver_tiny);

	if (!(mgp->fw_ver_major == MXGEFW_VERSION_MAJOR &&
	      mgp->fw_ver_minor == MXGEFW_VERSION_MINOR)) {
		dev_err(dev, "Found firmware version %s\n", mgp->fw_version);
		dev_err(dev, "Driver needs %d.%d\n", MXGEFW_VERSION_MAJOR,
			MXGEFW_VERSION_MINOR);
		return -EINVAL;
	}
	return 0;
}
static int myri10ge_load_hotplug_firmware(struct myri10ge_priv *mgp, u32 * size)
{
	unsigned crc, reread_crc;
	const struct firmware *fw;
	struct device *dev = &mgp->pdev->dev;
	unsigned char *fw_readback;
	struct mcp_gen_header *hdr;
	size_t hdr_offset;
	int status;
	unsigned i;

	if ((status = request_firmware(&fw, mgp->fw_name, dev)) < 0) {
		dev_err(dev, "Unable to load %s firmware image via hotplug\n",
			mgp->fw_name);
		status = -EINVAL;
		goto abort_with_nothing;
	}

	/* check size */
	if (fw->size >= mgp->sram_size - MYRI10GE_FW_OFFSET ||
	    fw->size < MCP_HEADER_PTR_OFFSET + 4) {
		dev_err(dev, "Firmware size invalid:%d\n", (int)fw->size);
		status = -EINVAL;
		goto abort_with_fw;
	}

	/* check id */
	hdr_offset = ntohl(*(__be32 *) (fw->data + MCP_HEADER_PTR_OFFSET));
	if ((hdr_offset & 3) || hdr_offset + sizeof(*hdr) > fw->size) {
		dev_err(dev, "Bad firmware file\n");
		status = -EINVAL;
		goto abort_with_fw;
	}
	hdr = (void *)(fw->data + hdr_offset);

	status = myri10ge_validate_firmware(mgp, hdr);
	if (status != 0)
		goto abort_with_fw;

	crc = crc32(~0, fw->data, fw->size);
	for (i = 0; i < fw->size; i += 256) {
		myri10ge_pio_copy(mgp->sram + MYRI10GE_FW_OFFSET + i,
				  fw->data + i,
				  min(256U, (unsigned)(fw->size - i)));
		mb();
		readb(mgp->sram);
	}
	fw_readback = vmalloc(fw->size);
	if (!fw_readback) {
		status = -ENOMEM;
		goto abort_with_fw;
	}
	/* corruption checking is good for parity recovery and buggy chipset */
	memcpy_fromio(fw_readback, mgp->sram + MYRI10GE_FW_OFFSET, fw->size);
	reread_crc = crc32(~0, fw_readback, fw->size);
	vfree(fw_readback);
	if (crc != reread_crc) {
		dev_err(dev, "CRC failed(fw-len=%u), got 0x%x (expect 0x%x)\n",
			(unsigned)fw->size, reread_crc, crc);
		status = -EIO;
		goto abort_with_fw;
	}
	*size = (u32) fw->size;

abort_with_fw:
	release_firmware(fw);

abort_with_nothing:
	return status;
}
static int myri10ge_adopt_running_firmware(struct myri10ge_priv *mgp)
{
	struct mcp_gen_header *hdr;
	struct device *dev = &mgp->pdev->dev;
	const size_t bytes = sizeof(struct mcp_gen_header);
	size_t hdr_offset;
	int status;

	/* find running firmware header */
	hdr_offset = swab32(readl(mgp->sram + MCP_HEADER_PTR_OFFSET));

	if ((hdr_offset & 3) || hdr_offset + sizeof(*hdr) > mgp->sram_size) {
		dev_err(dev, "Running firmware has bad header offset (%d)\n",
			(int)hdr_offset);
		return -EIO;
	}

	/* copy header of running firmware from SRAM to host memory to
	 * validate firmware */
	hdr = kmalloc(bytes, GFP_KERNEL);
	if (hdr == NULL) {
		dev_err(dev, "could not malloc firmware hdr\n");
		return -ENOMEM;
	}
	memcpy_fromio(hdr, mgp->sram + hdr_offset, bytes);
	status = myri10ge_validate_firmware(mgp, hdr);
	kfree(hdr);

	/* check to see if adopted firmware has bug where adopting
	 * it will cause broadcasts to be filtered unless the NIC
	 * is kept in ALLMULTI mode */
	if (mgp->fw_ver_major == 1 && mgp->fw_ver_minor == 4 &&
	    mgp->fw_ver_tiny >= 4 && mgp->fw_ver_tiny <= 11) {
		mgp->adopted_rx_filter_bug = 1;
		dev_warn(dev, "Adopting fw %d.%d.%d: "
			 "working around rx filter bug\n",
			 mgp->fw_ver_major, mgp->fw_ver_minor,
			 mgp->fw_ver_tiny);
	}
	return status;
}

static int myri10ge_get_firmware_capabilities(struct myri10ge_priv *mgp)
{
	struct myri10ge_cmd cmd;
	int status;

	/* probe for IPv6 TSO support */
	mgp->features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO;
	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_MAX_TSO6_HDR_SIZE,
				   &cmd, 0);
	if (status == 0) {
		mgp->max_tso6 = cmd.data0;
		mgp->features |= NETIF_F_TSO6;
	}

	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_RX_RING_SIZE, &cmd, 0);
	if (status != 0) {
		dev_err(&mgp->pdev->dev,
			"failed MXGEFW_CMD_GET_RX_RING_SIZE\n");
		return -ENXIO;
	}

	mgp->max_intr_slots = 2 * (cmd.data0 / sizeof(struct mcp_dma_addr));

	return 0;
}
static int myri10ge_load_firmware(struct myri10ge_priv *mgp, int adopt)
{
	char __iomem *submit;
	__be32 buf[16] __attribute__ ((__aligned__(8)));
	u32 dma_low, dma_high, size;
	int status, i;

	size = 0;
	status = myri10ge_load_hotplug_firmware(mgp, &size);
	if (status) {
		if (!adopt)
			return status;
		dev_warn(&mgp->pdev->dev, "hotplug firmware loading failed\n");

		/* Do not attempt to adopt firmware if there
		 * was a bad crc */
		if (status == -EIO)
			return status;

		status = myri10ge_adopt_running_firmware(mgp);
		if (status != 0) {
			dev_err(&mgp->pdev->dev,
				"failed to adopt running firmware\n");
			return status;
		}
		dev_info(&mgp->pdev->dev,
			 "Successfully adopted running firmware\n");
		if (mgp->tx_boundary == 4096) {
			dev_warn(&mgp->pdev->dev,
				 "Using firmware currently running on NIC"
				 ". For optimal\n");
			dev_warn(&mgp->pdev->dev,
				 "performance consider loading optimized "
				 "firmware\n");
			dev_warn(&mgp->pdev->dev, "via hotplug\n");
		}

		set_fw_name(mgp, "adopted", false);
		mgp->tx_boundary = 2048;
		myri10ge_dummy_rdma(mgp, 1);
		status = myri10ge_get_firmware_capabilities(mgp);
		return status;
	}

	/* clear confirmation addr */
	mgp->cmd->data = 0;
	mb();

	/* send a reload command to the bootstrap MCP, and wait for the
	 * response in the confirmation address. The firmware should
	 * write a -1 there to indicate it is alive and well
	 */
	dma_low = MYRI10GE_LOWPART_TO_U32(mgp->cmd_bus);
	dma_high = MYRI10GE_HIGHPART_TO_U32(mgp->cmd_bus);

	buf[0] = htonl(dma_high);	/* confirm addr MSW */
	buf[1] = htonl(dma_low);	/* confirm addr LSW */
	buf[2] = MYRI10GE_NO_CONFIRM_DATA;	/* confirm data */

	/* FIX: All newest firmware should un-protect the bottom of
	 * the sram before handoff. However, the very first interfaces
	 * do not. Therefore the handoff copy must skip the first 8 bytes
	 */
	buf[3] = htonl(MYRI10GE_FW_OFFSET + 8);	/* where the code starts */
	buf[4] = htonl(size - 8);	/* length of code */
	buf[5] = htonl(8);	/* where to copy to */
	buf[6] = htonl(0);	/* where to jump to */

	submit = mgp->sram + MXGEFW_BOOT_HANDOFF;
	myri10ge_pio_copy(submit, &buf, sizeof(buf));
	mb();
	msleep(1);
	mb();
	i = 0;
	while (mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA && i < 9) {
		msleep(1 << i);
		i++;
	}
	if (mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA) {
		dev_err(&mgp->pdev->dev, "handoff failed\n");
		return -ENXIO;
	}
	myri10ge_dummy_rdma(mgp, 1);
	status = myri10ge_get_firmware_capabilities(mgp);

	return status;
}
static int myri10ge_update_mac_address(struct myri10ge_priv *mgp, u8 * addr)
{
	struct myri10ge_cmd cmd;
	int status;

	cmd.data0 = ((addr[0] << 24) | (addr[1] << 16)
		     | (addr[2] << 8) | addr[3]);

	cmd.data1 = ((addr[4] << 8) | (addr[5]));

	status = myri10ge_send_cmd(mgp, MXGEFW_SET_MAC_ADDRESS, &cmd, 0);
	return status;
}

static int myri10ge_change_pause(struct myri10ge_priv *mgp, int pause)
{
	struct myri10ge_cmd cmd;
	int status, ctl;

	ctl = pause ? MXGEFW_ENABLE_FLOW_CONTROL : MXGEFW_DISABLE_FLOW_CONTROL;
	status = myri10ge_send_cmd(mgp, ctl, &cmd, 0);

	if (status) {
		netdev_err(mgp->dev, "Failed to set flow control mode\n");
		return status;
	}
	mgp->pause = pause;
	return 0;
}

static void
myri10ge_change_promisc(struct myri10ge_priv *mgp, int promisc, int atomic)
{
	struct myri10ge_cmd cmd;
	int status, ctl;

	ctl = promisc ? MXGEFW_ENABLE_PROMISC : MXGEFW_DISABLE_PROMISC;
	status = myri10ge_send_cmd(mgp, ctl, &cmd, atomic);
	if (status)
		netdev_err(mgp->dev, "Failed to set promisc mode\n");
}

static int myri10ge_dma_test(struct myri10ge_priv *mgp, int test_type)
{
	struct myri10ge_cmd cmd;
	int status;
	u32 len;
	struct page *dmatest_page;
	dma_addr_t dmatest_bus;
	char *test = " ";

	dmatest_page = alloc_page(GFP_KERNEL);
	if (!dmatest_page)
		return -ENOMEM;
	dmatest_bus = pci_map_page(mgp->pdev, dmatest_page, 0, PAGE_SIZE,
				   DMA_BIDIRECTIONAL);

	/* Run a small DMA test.
	 * The magic multipliers to the length tell the firmware
	 * to do DMA read, write, or read+write tests. The
	 * results are returned in cmd.data0. The upper 16
	 * bits of the return is the number of transfers completed.
	 * The lower 16 bits is the time in 0.5us ticks that the
	 * transfers took to complete.
	 */
	len = mgp->tx_boundary;

	cmd.data0 = MYRI10GE_LOWPART_TO_U32(dmatest_bus);
	cmd.data1 = MYRI10GE_HIGHPART_TO_U32(dmatest_bus);
	cmd.data2 = len * 0x10000;
	status = myri10ge_send_cmd(mgp, test_type, &cmd, 0);
	if (status != 0) {
		test = "read";
		goto abort;
	}
	mgp->read_dma = ((cmd.data0 >> 16) * len * 2) / (cmd.data0 & 0xffff);

	cmd.data0 = MYRI10GE_LOWPART_TO_U32(dmatest_bus);
	cmd.data1 = MYRI10GE_HIGHPART_TO_U32(dmatest_bus);
	cmd.data2 = len * 0x1;
	status = myri10ge_send_cmd(mgp, test_type, &cmd, 0);
	if (status != 0) {
		test = "write";
		goto abort;
	}
	mgp->write_dma = ((cmd.data0 >> 16) * len * 2) / (cmd.data0 & 0xffff);

	cmd.data0 = MYRI10GE_LOWPART_TO_U32(dmatest_bus);
	cmd.data1 = MYRI10GE_HIGHPART_TO_U32(dmatest_bus);
	cmd.data2 = len * 0x10001;
	status = myri10ge_send_cmd(mgp, test_type, &cmd, 0);
	if (status != 0) {
		test = "read/write";
		goto abort;
	}
	mgp->read_write_dma = ((cmd.data0 >> 16) * len * 2 * 2) /
	    (cmd.data0 & 0xffff);

abort:
	pci_unmap_page(mgp->pdev, dmatest_bus, PAGE_SIZE, DMA_BIDIRECTIONAL);
	put_page(dmatest_page);

	if (status != 0 && test_type != MXGEFW_CMD_UNALIGNED_TEST)
		dev_warn(&mgp->pdev->dev, "DMA %s benchmark failed: %d\n",
			 test, status);
	return status;
}
static int myri10ge_reset(struct myri10ge_priv *mgp)
{
	struct myri10ge_cmd cmd;
	struct myri10ge_slice_state *ss;
	int i, status;
	size_t bytes;
#ifdef CONFIG_MYRI10GE_DCA
	unsigned long dca_tag_off;
#endif

	/* try to send a reset command to the card to see if it
	 * is alive */
	memset(&cmd, 0, sizeof(cmd));
	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_RESET, &cmd, 0);
	if (status != 0) {
		dev_err(&mgp->pdev->dev, "failed reset\n");
		return -ENXIO;
	}

	(void)myri10ge_dma_test(mgp, MXGEFW_DMA_TEST);
	/*
	 * Use non-ndis mcp_slot (eg, 4 bytes total,
	 * no toeplitz hash value returned.  Older firmware will
	 * not understand this command, but will use the correct
	 * sized mcp_slot, so we ignore error returns
	 */
	cmd.data0 = MXGEFW_RSS_MCP_SLOT_TYPE_MIN;
	(void)myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_RSS_MCP_SLOT_TYPE, &cmd, 0);

	/* Now exchange information about interrupts  */

	bytes = mgp->max_intr_slots * sizeof(*mgp->ss[0].rx_done.entry);
	cmd.data0 = (u32) bytes;
	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_SIZE, &cmd, 0);

	/*
	 * Even though we already know how many slices are supported
	 * via myri10ge_probe_slices() MXGEFW_CMD_GET_MAX_RSS_QUEUES
	 * has magic side effects, and must be called after a reset.
	 * It must be called prior to calling any RSS related cmds,
	 * including assigning an interrupt queue for anything but
	 * slice 0.  It must also be called *after*
	 * MXGEFW_CMD_SET_INTRQ_SIZE, since the intrq size is used by
	 * the firmware to compute offsets.
	 */
	if (mgp->num_slices > 1) {
		/* ask the maximum number of slices it supports */
		status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_MAX_RSS_QUEUES,
					   &cmd, 0);
		if (status != 0) {
			dev_err(&mgp->pdev->dev,
				"failed to get number of slices\n");
		}

		/*
		 * MXGEFW_CMD_ENABLE_RSS_QUEUES must be called prior
		 * to setting up the interrupt queue DMA
		 */
		cmd.data0 = mgp->num_slices;
		cmd.data1 = MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE;
		if (mgp->dev->real_num_tx_queues > 1)
			cmd.data1 |= MXGEFW_SLICE_ENABLE_MULTIPLE_TX_QUEUES;
		status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ENABLE_RSS_QUEUES,
					   &cmd, 0);

		/* Firmware older than 1.4.32 only supports multiple
		 * RX queues, so if we get an error, first retry using a
		 * single TX queue before giving up */
		if (status != 0 && mgp->dev->real_num_tx_queues > 1) {
			netif_set_real_num_tx_queues(mgp->dev, 1);
			cmd.data0 = mgp->num_slices;
			cmd.data1 = MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE;
			status = myri10ge_send_cmd(mgp,
						   MXGEFW_CMD_ENABLE_RSS_QUEUES,
						   &cmd, 0);
		}

		if (status != 0) {
			dev_err(&mgp->pdev->dev,
				"failed to set number of slices\n");
			return status;
		}
	}
	for (i = 0; i < mgp->num_slices; i++) {
		ss = &mgp->ss[i];
		cmd.data0 = MYRI10GE_LOWPART_TO_U32(ss->rx_done.bus);
		cmd.data1 = MYRI10GE_HIGHPART_TO_U32(ss->rx_done.bus);
		cmd.data2 = i;
		status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_DMA,
					    &cmd, 0);
	}

	status |=
	    myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_IRQ_ACK_OFFSET, &cmd, 0);
	for (i = 0; i < mgp->num_slices; i++) {
		ss = &mgp->ss[i];
		ss->irq_claim =
		    (__iomem __be32 *) (mgp->sram + cmd.data0 + 8 * i);
	}
	status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_IRQ_DEASSERT_OFFSET,
				    &cmd, 0);
	mgp->irq_deassert = (__iomem __be32 *) (mgp->sram + cmd.data0);

	status |= myri10ge_send_cmd
	    (mgp, MXGEFW_CMD_GET_INTR_COAL_DELAY_OFFSET, &cmd, 0);
	mgp->intr_coal_delay_ptr = (__iomem __be32 *) (mgp->sram + cmd.data0);

	if (status != 0) {
		dev_err(&mgp->pdev->dev, "failed set interrupt parameters\n");
		return status;
	}
	put_be32(htonl(mgp->intr_coal_delay), mgp->intr_coal_delay_ptr);

#ifdef CONFIG_MYRI10GE_DCA
	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_DCA_OFFSET, &cmd, 0);
	dca_tag_off = cmd.data0;
	for (i = 0; i < mgp->num_slices; i++) {
		ss = &mgp->ss[i];
		if (status == 0) {
			ss->dca_tag = (__iomem __be32 *)
			    (mgp->sram + dca_tag_off + 4 * i);
		} else {
			ss->dca_tag = NULL;
		}
	}
#endif				/* CONFIG_MYRI10GE_DCA */

	/* reset mcp/driver shared state back to 0 */

	mgp->link_changes = 0;
	for (i = 0; i < mgp->num_slices; i++) {
		ss = &mgp->ss[i];

		memset(ss->rx_done.entry, 0, bytes);
		ss->tx.req = 0;
		ss->tx.done = 0;
		ss->tx.pkt_start = 0;
		ss->tx.pkt_done = 0;
		ss->rx_big.cnt = 0;
		ss->rx_small.cnt = 0;
		ss->rx_done.idx = 0;
		ss->rx_done.cnt = 0;
		ss->tx.wake_queue = 0;
		ss->tx.stop_queue = 0;
	}

	status = myri10ge_update_mac_address(mgp, mgp->dev->dev_addr);
	myri10ge_change_pause(mgp, mgp->pause);
	myri10ge_set_multicast_list(mgp->dev);
	return status;
}

#ifdef CONFIG_MYRI10GE_DCA
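/* Set the PCIe Relaxed Ordering enable bit in the Device Control
 * register to 'on' (0 or 1) and return the previous setting so it
 * can be restored when DCA is torn down.
 */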
static int myri10ge_toggle_relaxed(struct pci_dev *pdev, int on)
{
	int ret, cap, err;
	u16 ctl;

	cap = pci_pcie_cap(pdev);
	if (!cap)
		return 0;

	err = pci_read_config_word(pdev, cap + PCI_EXP_DEVCTL, &ctl);
	if (err)
		return 0;

	ret = (ctl & PCI_EXP_DEVCTL_RELAX_EN) >> 4;
	if (ret != on) {
		ctl &= ~PCI_EXP_DEVCTL_RELAX_EN;
		ctl |= (on << 4);
		pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL, ctl);
	}
	return ret;
}

static void
myri10ge_write_dca(struct myri10ge_slice_state *ss, int cpu, int tag)
{
	ss->cached_dca_tag = tag;
	put_be32(htonl(tag), ss->dca_tag);
}

static inline void myri10ge_update_dca(struct myri10ge_slice_state *ss)
{
	int cpu = get_cpu();
	int tag;

	if (cpu != ss->cpu) {
		tag = dca3_get_tag(&ss->mgp->pdev->dev, cpu);
		if (ss->cached_dca_tag != tag)
			myri10ge_write_dca(ss, cpu, tag);
		ss->cpu = cpu;
	}
	put_cpu();
}

static void myri10ge_setup_dca(struct myri10ge_priv *mgp)
{
	int err, i;
	struct pci_dev *pdev = mgp->pdev;

	if (mgp->ss[0].dca_tag == NULL || mgp->dca_enabled)
		return;

	if (!myri10ge_dca) {
		dev_err(&pdev->dev, "dca disabled by administrator\n");
		return;
	}
	err = dca_add_requester(&pdev->dev);
	if (err) {
		if (err != -ENODEV)
			dev_err(&pdev->dev,
				"dca_add_requester() failed, err=%d\n", err);
		return;
	}
	mgp->relaxed_order = myri10ge_toggle_relaxed(pdev, 0);
	mgp->dca_enabled = 1;
	for (i = 0; i < mgp->num_slices; i++) {
		mgp->ss[i].cpu = -1;
		mgp->ss[i].cached_dca_tag = -1;
		myri10ge_update_dca(&mgp->ss[i]);
	}
}

static void myri10ge_teardown_dca(struct myri10ge_priv *mgp)
{
	struct pci_dev *pdev = mgp->pdev;

	if (!mgp->dca_enabled)
		return;
	mgp->dca_enabled = 0;
	if (mgp->relaxed_order)
		myri10ge_toggle_relaxed(pdev, 1);
	dca_remove_requester(&pdev->dev);
}

static int myri10ge_notify_dca_device(struct device *dev, void *data)
{
	struct myri10ge_priv *mgp;
	unsigned long event;

	mgp = dev_get_drvdata(dev);
	event = *(unsigned long *)data;

	if (event == DCA_PROVIDER_ADD)
		myri10ge_setup_dca(mgp);
	else if (event == DCA_PROVIDER_REMOVE)
		myri10ge_teardown_dca(mgp);
	return 0;
}
#endif				/* CONFIG_MYRI10GE_DCA */
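
/* Hand a group of 8 receive descriptors to the NIC.  The real low
 * address word of the first descriptor is written last, so the
 * firmware should never see a partially-copied group as valid.
 */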
static inline void
myri10ge_submit_8rx(struct mcp_kreq_ether_recv __iomem * dst,
		    struct mcp_kreq_ether_recv *src)
{
	__be32 low;

	low = src->addr_low;
	src->addr_low = htonl(DMA_BIT_MASK(32));
	myri10ge_pio_copy(dst, src, 4 * sizeof(*src));
	mb();
	myri10ge_pio_copy(dst + 4, src + 4, 4 * sizeof(*src));
	mb();
	src->addr_low = low;
	put_be32(low, &dst->addr_low);
	mb();
}
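
/* For VLAN-tagged IPv4/IPv6 frames, hand the hardware checksum to the
 * stack as CHECKSUM_COMPLETE.
 */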
  1036. static inline void myri10ge_vlan_ip_csum(struct sk_buff *skb, __wsum hw_csum)
  1037. {
  1038. struct vlan_hdr *vh = (struct vlan_hdr *)(skb->data);
  1039. if ((skb->protocol == htons(ETH_P_8021Q)) &&
  1040. (vh->h_vlan_encapsulated_proto == htons(ETH_P_IP) ||
  1041. vh->h_vlan_encapsulated_proto == htons(ETH_P_IPV6))) {
  1042. skb->csum = hw_csum;
  1043. skb->ip_summed = CHECKSUM_COMPLETE;
  1044. }
  1045. }
  1046. static inline void
  1047. myri10ge_rx_skb_build(struct sk_buff *skb, u8 * va,
  1048. struct skb_frag_struct *rx_frags, int len, int hlen)
  1049. {
  1050. struct skb_frag_struct *skb_frags;
  1051. skb->len = skb->data_len = len;
  1052. /* attach the page(s) */
  1053. skb_frags = skb_shinfo(skb)->frags;
  1054. while (len > 0) {
  1055. memcpy(skb_frags, rx_frags, sizeof(*skb_frags));
  1056. len -= skb_frag_size(rx_frags);
  1057. skb_frags++;
  1058. rx_frags++;
  1059. skb_shinfo(skb)->nr_frags++;
  1060. }
  1061. /* pskb_may_pull is not available in irq context, but
  1062. * skb_pull() (for ether_pad and eth_type_trans()) requires
  1063. * the beginning of the packet in skb_headlen(), move it
  1064. * manually */
  1065. skb_copy_to_linear_data(skb, va, hlen);
  1066. skb_shinfo(skb)->frags[0].page_offset += hlen;
  1067. skb_frag_size_sub(&skb_shinfo(skb)->frags[0], hlen);
  1068. skb->data_len -= hlen;
  1069. skb->tail += hlen;
  1070. skb_pull(skb, MXGEFW_PAD);
  1071. }
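/* Refill a receive ring. Buffers are carved out of high-order pages
 * (shared via get_page()), and descriptors are copied to the NIC in
 * groups of eight. If a page cannot be allocated and the ring is
 * nearly empty, the ring is flagged for the watchdog to refill
 * later. */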
  1072. static void
  1073. myri10ge_alloc_rx_pages(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
  1074. int bytes, int watchdog)
  1075. {
  1076. struct page *page;
  1077. int idx;
  1078. #if MYRI10GE_ALLOC_SIZE > 4096
  1079. int end_offset;
  1080. #endif
  1081. if (unlikely(rx->watchdog_needed && !watchdog))
  1082. return;
  1083. /* try to refill entire ring */
  1084. while (rx->fill_cnt != (rx->cnt + rx->mask + 1)) {
  1085. idx = rx->fill_cnt & rx->mask;
  1086. if (rx->page_offset + bytes <= MYRI10GE_ALLOC_SIZE) {
  1087. /* we can use part of previous page */
  1088. get_page(rx->page);
  1089. } else {
  1090. /* we need a new page */
  1091. page =
  1092. alloc_pages(GFP_ATOMIC | __GFP_COMP,
  1093. MYRI10GE_ALLOC_ORDER);
  1094. if (unlikely(page == NULL)) {
  1095. if (rx->fill_cnt - rx->cnt < 16)
  1096. rx->watchdog_needed = 1;
  1097. return;
  1098. }
  1099. rx->page = page;
  1100. rx->page_offset = 0;
  1101. rx->bus = pci_map_page(mgp->pdev, page, 0,
  1102. MYRI10GE_ALLOC_SIZE,
  1103. PCI_DMA_FROMDEVICE);
  1104. }
  1105. rx->info[idx].page = rx->page;
  1106. rx->info[idx].page_offset = rx->page_offset;
  1107. /* note that this is the address of the start of the
  1108. * page */
  1109. dma_unmap_addr_set(&rx->info[idx], bus, rx->bus);
  1110. rx->shadow[idx].addr_low =
  1111. htonl(MYRI10GE_LOWPART_TO_U32(rx->bus) + rx->page_offset);
  1112. rx->shadow[idx].addr_high =
  1113. htonl(MYRI10GE_HIGHPART_TO_U32(rx->bus));
  1114. /* start next packet on a cacheline boundary */
  1115. rx->page_offset += SKB_DATA_ALIGN(bytes);
  1116. #if MYRI10GE_ALLOC_SIZE > 4096
  1117. /* don't cross a 4KB boundary */
  1118. end_offset = rx->page_offset + bytes - 1;
  1119. if ((unsigned)(rx->page_offset ^ end_offset) > 4095)
  1120. rx->page_offset = end_offset & ~4095;
  1121. #endif
  1122. rx->fill_cnt++;
  1123. /* copy 8 descriptors to the firmware at a time */
  1124. if ((idx & 7) == 7) {
  1125. myri10ge_submit_8rx(&rx->lanai[idx - 7],
  1126. &rx->shadow[idx - 7]);
  1127. }
  1128. }
  1129. }
  1130. static inline void
  1131. myri10ge_unmap_rx_page(struct pci_dev *pdev,
  1132. struct myri10ge_rx_buffer_state *info, int bytes)
  1133. {
1134. /* unmap the received page if we're the only or last user of it */
  1135. if (bytes >= MYRI10GE_ALLOC_SIZE / 2 ||
  1136. (info->page_offset + 2 * bytes) > MYRI10GE_ALLOC_SIZE) {
  1137. pci_unmap_page(pdev, (dma_unmap_addr(info, bus)
  1138. & ~(MYRI10GE_ALLOC_SIZE - 1)),
  1139. MYRI10GE_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
  1140. }
  1141. }
  1142. #define MYRI10GE_HLEN 64 /* The number of bytes to copy from a
  1143. * page into an skb */
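/* Process one received frame: collect its page fragments, hand the
 * frame to LRO when enabled, otherwise build an skb, set the checksum
 * state and pass it up the stack. Returns 1 if the frame was
 * delivered, 0 if it had to be dropped. */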
  1144. static inline int
  1145. myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum,
  1146. bool lro_enabled)
  1147. {
  1148. struct myri10ge_priv *mgp = ss->mgp;
  1149. struct sk_buff *skb;
  1150. struct skb_frag_struct rx_frags[MYRI10GE_MAX_FRAGS_PER_FRAME];
  1151. struct myri10ge_rx_buf *rx;
  1152. int i, idx, hlen, remainder, bytes;
  1153. struct pci_dev *pdev = mgp->pdev;
  1154. struct net_device *dev = mgp->dev;
  1155. u8 *va;
  1156. if (len <= mgp->small_bytes) {
  1157. rx = &ss->rx_small;
  1158. bytes = mgp->small_bytes;
  1159. } else {
  1160. rx = &ss->rx_big;
  1161. bytes = mgp->big_bytes;
  1162. }
  1163. len += MXGEFW_PAD;
  1164. idx = rx->cnt & rx->mask;
  1165. va = page_address(rx->info[idx].page) + rx->info[idx].page_offset;
  1166. prefetch(va);
  1167. /* Fill skb_frag_struct(s) with data from our receive */
  1168. for (i = 0, remainder = len; remainder > 0; i++) {
  1169. myri10ge_unmap_rx_page(pdev, &rx->info[idx], bytes);
  1170. __skb_frag_set_page(&rx_frags[i], rx->info[idx].page);
  1171. rx_frags[i].page_offset = rx->info[idx].page_offset;
  1172. if (remainder < MYRI10GE_ALLOC_SIZE)
  1173. skb_frag_size_set(&rx_frags[i], remainder);
  1174. else
  1175. skb_frag_size_set(&rx_frags[i], MYRI10GE_ALLOC_SIZE);
  1176. rx->cnt++;
  1177. idx = rx->cnt & rx->mask;
  1178. remainder -= MYRI10GE_ALLOC_SIZE;
  1179. }
  1180. if (lro_enabled) {
  1181. rx_frags[0].page_offset += MXGEFW_PAD;
  1182. skb_frag_size_sub(&rx_frags[0], MXGEFW_PAD);
  1183. len -= MXGEFW_PAD;
  1184. lro_receive_frags(&ss->rx_done.lro_mgr, rx_frags,
  1185. /* opaque, will come back in get_frag_header */
  1186. len, len,
  1187. (void *)(__force unsigned long)csum, csum);
  1188. return 1;
  1189. }
  1190. hlen = MYRI10GE_HLEN > len ? len : MYRI10GE_HLEN;
  1191. /* allocate an skb to attach the page(s) to. This is done
  1192. * after trying LRO, so as to avoid skb allocation overheads */
  1193. skb = netdev_alloc_skb(dev, MYRI10GE_HLEN + 16);
  1194. if (unlikely(skb == NULL)) {
  1195. ss->stats.rx_dropped++;
  1196. do {
  1197. i--;
  1198. __skb_frag_unref(&rx_frags[i]);
  1199. } while (i != 0);
  1200. return 0;
  1201. }
  1202. /* Attach the pages to the skb, and trim off any padding */
  1203. myri10ge_rx_skb_build(skb, va, rx_frags, len, hlen);
  1204. if (skb_frag_size(&skb_shinfo(skb)->frags[0]) <= 0) {
  1205. skb_frag_unref(skb, 0);
  1206. skb_shinfo(skb)->nr_frags = 0;
  1207. } else {
  1208. skb->truesize += bytes * skb_shinfo(skb)->nr_frags;
  1209. }
  1210. skb->protocol = eth_type_trans(skb, dev);
  1211. skb_record_rx_queue(skb, ss - &mgp->ss[0]);
  1212. if (dev->features & NETIF_F_RXCSUM) {
  1213. if ((skb->protocol == htons(ETH_P_IP)) ||
  1214. (skb->protocol == htons(ETH_P_IPV6))) {
  1215. skb->csum = csum;
  1216. skb->ip_summed = CHECKSUM_COMPLETE;
  1217. } else
  1218. myri10ge_vlan_ip_csum(skb, csum);
  1219. }
  1220. netif_receive_skb(skb);
  1221. return 1;
  1222. }
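/* Reclaim transmit descriptors up to the completion index reported by
 * the firmware: free skbs and DMA mappings, update stats, and wake
 * the queue if it was stopped and enough room is available again. */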
  1223. static inline void
  1224. myri10ge_tx_done(struct myri10ge_slice_state *ss, int mcp_index)
  1225. {
  1226. struct pci_dev *pdev = ss->mgp->pdev;
  1227. struct myri10ge_tx_buf *tx = &ss->tx;
  1228. struct netdev_queue *dev_queue;
  1229. struct sk_buff *skb;
  1230. int idx, len;
  1231. while (tx->pkt_done != mcp_index) {
  1232. idx = tx->done & tx->mask;
  1233. skb = tx->info[idx].skb;
  1234. /* Mark as free */
  1235. tx->info[idx].skb = NULL;
  1236. if (tx->info[idx].last) {
  1237. tx->pkt_done++;
  1238. tx->info[idx].last = 0;
  1239. }
  1240. tx->done++;
  1241. len = dma_unmap_len(&tx->info[idx], len);
  1242. dma_unmap_len_set(&tx->info[idx], len, 0);
  1243. if (skb) {
  1244. ss->stats.tx_bytes += skb->len;
  1245. ss->stats.tx_packets++;
  1246. dev_kfree_skb_irq(skb);
  1247. if (len)
  1248. pci_unmap_single(pdev,
  1249. dma_unmap_addr(&tx->info[idx],
  1250. bus), len,
  1251. PCI_DMA_TODEVICE);
  1252. } else {
  1253. if (len)
  1254. pci_unmap_page(pdev,
  1255. dma_unmap_addr(&tx->info[idx],
  1256. bus), len,
  1257. PCI_DMA_TODEVICE);
  1258. }
  1259. }
  1260. dev_queue = netdev_get_tx_queue(ss->dev, ss - ss->mgp->ss);
  1261. /*
  1262. * Make a minimal effort to prevent the NIC from polling an
  1263. * idle tx queue. If we can't get the lock we leave the queue
  1264. * active. In this case, either a thread was about to start
  1265. * using the queue anyway, or we lost a race and the NIC will
  1266. * waste some of its resources polling an inactive queue for a
  1267. * while.
  1268. */
  1269. if ((ss->mgp->dev->real_num_tx_queues > 1) &&
  1270. __netif_tx_trylock(dev_queue)) {
  1271. if (tx->req == tx->done) {
  1272. tx->queue_active = 0;
  1273. put_be32(htonl(1), tx->send_stop);
  1274. mb();
  1275. mmiowb();
  1276. }
  1277. __netif_tx_unlock(dev_queue);
  1278. }
  1279. /* start the queue if we've stopped it */
  1280. if (netif_tx_queue_stopped(dev_queue) &&
  1281. tx->req - tx->done < (tx->mask >> 1) &&
  1282. ss->mgp->running == MYRI10GE_ETH_RUNNING) {
  1283. tx->wake_queue++;
  1284. netif_tx_wake_queue(dev_queue);
  1285. }
  1286. }
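/* NAPI receive work: drain up to 'budget' entries from the slice's
 * receive-completion ring, flush LRO, and restock the small and big
 * receive rings if they have run low. */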
  1287. static inline int
  1288. myri10ge_clean_rx_done(struct myri10ge_slice_state *ss, int budget)
  1289. {
  1290. struct myri10ge_rx_done *rx_done = &ss->rx_done;
  1291. struct myri10ge_priv *mgp = ss->mgp;
  1292. unsigned long rx_bytes = 0;
  1293. unsigned long rx_packets = 0;
  1294. unsigned long rx_ok;
  1295. int idx = rx_done->idx;
  1296. int cnt = rx_done->cnt;
  1297. int work_done = 0;
  1298. u16 length;
  1299. __wsum checksum;
  1300. /*
  1301. * Prevent compiler from generating more than one ->features memory
  1302. * access to avoid theoretical race condition with functions that
  1303. * change NETIF_F_LRO flag at runtime.
  1304. */
  1305. bool lro_enabled = !!(ACCESS_ONCE(mgp->dev->features) & NETIF_F_LRO);
  1306. while (rx_done->entry[idx].length != 0 && work_done < budget) {
  1307. length = ntohs(rx_done->entry[idx].length);
  1308. rx_done->entry[idx].length = 0;
  1309. checksum = csum_unfold(rx_done->entry[idx].checksum);
  1310. rx_ok = myri10ge_rx_done(ss, length, checksum, lro_enabled);
  1311. rx_packets += rx_ok;
  1312. rx_bytes += rx_ok * (unsigned long)length;
  1313. cnt++;
  1314. idx = cnt & (mgp->max_intr_slots - 1);
  1315. work_done++;
  1316. }
  1317. rx_done->idx = idx;
  1318. rx_done->cnt = cnt;
  1319. ss->stats.rx_packets += rx_packets;
  1320. ss->stats.rx_bytes += rx_bytes;
  1321. if (lro_enabled)
  1322. lro_flush_all(&rx_done->lro_mgr);
  1323. /* restock receive rings if needed */
  1324. if (ss->rx_small.fill_cnt - ss->rx_small.cnt < myri10ge_fill_thresh)
  1325. myri10ge_alloc_rx_pages(mgp, &ss->rx_small,
  1326. mgp->small_bytes + MXGEFW_PAD, 0);
  1327. if (ss->rx_big.fill_cnt - ss->rx_big.cnt < myri10ge_fill_thresh)
  1328. myri10ge_alloc_rx_pages(mgp, &ss->rx_big, mgp->big_bytes, 0);
  1329. return work_done;
  1330. }
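/* Inspect the DMA'd slice-0 firmware statistics block for link state
 * changes and RDMA tag exhaustion, and wake anyone waiting on a
 * link-down event. */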
  1331. static inline void myri10ge_check_statblock(struct myri10ge_priv *mgp)
  1332. {
  1333. struct mcp_irq_data *stats = mgp->ss[0].fw_stats;
  1334. if (unlikely(stats->stats_updated)) {
  1335. unsigned link_up = ntohl(stats->link_up);
  1336. if (mgp->link_state != link_up) {
  1337. mgp->link_state = link_up;
  1338. if (mgp->link_state == MXGEFW_LINK_UP) {
  1339. netif_info(mgp, link, mgp->dev, "link up\n");
  1340. netif_carrier_on(mgp->dev);
  1341. mgp->link_changes++;
  1342. } else {
  1343. netif_info(mgp, link, mgp->dev, "link %s\n",
  1344. (link_up == MXGEFW_LINK_MYRINET ?
  1345. "mismatch (Myrinet detected)" :
  1346. "down"));
  1347. netif_carrier_off(mgp->dev);
  1348. mgp->link_changes++;
  1349. }
  1350. }
  1351. if (mgp->rdma_tags_available !=
  1352. ntohl(stats->rdma_tags_available)) {
  1353. mgp->rdma_tags_available =
  1354. ntohl(stats->rdma_tags_available);
  1355. netdev_warn(mgp->dev, "RDMA timed out! %d tags left\n",
  1356. mgp->rdma_tags_available);
  1357. }
  1358. mgp->down_cnt += stats->link_down;
  1359. if (stats->link_down)
  1360. wake_up(&mgp->down_wq);
  1361. }
  1362. }
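/* NAPI poll handler: optionally update DCA tags, clean the receive
 * completion ring, and claim the interrupt via irq_claim when the
 * budget is not exhausted. */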
  1363. static int myri10ge_poll(struct napi_struct *napi, int budget)
  1364. {
  1365. struct myri10ge_slice_state *ss =
  1366. container_of(napi, struct myri10ge_slice_state, napi);
  1367. int work_done;
  1368. #ifdef CONFIG_MYRI10GE_DCA
  1369. if (ss->mgp->dca_enabled)
  1370. myri10ge_update_dca(ss);
  1371. #endif
  1372. /* process as many rx events as NAPI will allow */
  1373. work_done = myri10ge_clean_rx_done(ss, budget);
  1374. if (work_done < budget) {
  1375. napi_complete(napi);
  1376. put_be32(htonl(3), ss->irq_claim);
  1377. }
  1378. return work_done;
  1379. }
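/* Interrupt handler used for MSI-X, MSI and legacy interrupts. MSI-X
 * receive-only slices just schedule NAPI; otherwise this also
 * deasserts legacy interrupts, reaps transmit completions until the
 * firmware clears the valid flag, and (on slice 0) checks the stats
 * block. */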
  1380. static irqreturn_t myri10ge_intr(int irq, void *arg)
  1381. {
  1382. struct myri10ge_slice_state *ss = arg;
  1383. struct myri10ge_priv *mgp = ss->mgp;
  1384. struct mcp_irq_data *stats = ss->fw_stats;
  1385. struct myri10ge_tx_buf *tx = &ss->tx;
  1386. u32 send_done_count;
  1387. int i;
  1388. /* an interrupt on a non-zero receive-only slice is implicitly
  1389. * valid since MSI-X irqs are not shared */
  1390. if ((mgp->dev->real_num_tx_queues == 1) && (ss != mgp->ss)) {
  1391. napi_schedule(&ss->napi);
  1392. return IRQ_HANDLED;
  1393. }
  1394. /* make sure it is our IRQ, and that the DMA has finished */
  1395. if (unlikely(!stats->valid))
  1396. return IRQ_NONE;
  1397. /* low bit indicates receives are present, so schedule
  1398. * napi poll handler */
  1399. if (stats->valid & 1)
  1400. napi_schedule(&ss->napi);
  1401. if (!mgp->msi_enabled && !mgp->msix_enabled) {
  1402. put_be32(0, mgp->irq_deassert);
  1403. if (!myri10ge_deassert_wait)
  1404. stats->valid = 0;
  1405. mb();
  1406. } else
  1407. stats->valid = 0;
  1408. /* Wait for IRQ line to go low, if using INTx */
  1409. i = 0;
  1410. while (1) {
  1411. i++;
  1412. /* check for transmit completes and receives */
  1413. send_done_count = ntohl(stats->send_done_count);
  1414. if (send_done_count != tx->pkt_done)
  1415. myri10ge_tx_done(ss, (int)send_done_count);
  1416. if (unlikely(i > myri10ge_max_irq_loops)) {
  1417. netdev_warn(mgp->dev, "irq stuck?\n");
  1418. stats->valid = 0;
  1419. schedule_work(&mgp->watchdog_work);
  1420. }
  1421. if (likely(stats->valid == 0))
  1422. break;
  1423. cpu_relax();
  1424. barrier();
  1425. }
  1426. /* Only slice 0 updates stats */
  1427. if (ss == mgp->ss)
  1428. myri10ge_check_statblock(mgp);
  1429. put_be32(htonl(3), ss->irq_claim + 1);
  1430. return IRQ_HANDLED;
  1431. }
  1432. static int
  1433. myri10ge_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
  1434. {
  1435. struct myri10ge_priv *mgp = netdev_priv(netdev);
  1436. char *ptr;
  1437. int i;
  1438. cmd->autoneg = AUTONEG_DISABLE;
  1439. ethtool_cmd_speed_set(cmd, SPEED_10000);
  1440. cmd->duplex = DUPLEX_FULL;
  1441. /*
1442. * parse the product code to determine the interface type
  1443. * (CX4, XFP, Quad Ribbon Fiber) by looking at the character
  1444. * after the 3rd dash in the driver's cached copy of the
  1445. * EEPROM's product code string.
  1446. */
  1447. ptr = mgp->product_code_string;
  1448. if (ptr == NULL) {
  1449. netdev_err(netdev, "Missing product code\n");
  1450. return 0;
  1451. }
  1452. for (i = 0; i < 3; i++, ptr++) {
  1453. ptr = strchr(ptr, '-');
  1454. if (ptr == NULL) {
  1455. netdev_err(netdev, "Invalid product code %s\n",
  1456. mgp->product_code_string);
  1457. return 0;
  1458. }
  1459. }
  1460. if (*ptr == '2')
  1461. ptr++;
  1462. if (*ptr == 'R' || *ptr == 'Q' || *ptr == 'S') {
  1463. /* We've found either an XFP, quad ribbon fiber, or SFP+ */
  1464. cmd->port = PORT_FIBRE;
  1465. cmd->supported |= SUPPORTED_FIBRE;
  1466. cmd->advertising |= ADVERTISED_FIBRE;
  1467. } else {
  1468. cmd->port = PORT_OTHER;
  1469. }
  1470. if (*ptr == 'R' || *ptr == 'S')
  1471. cmd->transceiver = XCVR_EXTERNAL;
  1472. else
  1473. cmd->transceiver = XCVR_INTERNAL;
  1474. return 0;
  1475. }
  1476. static void
  1477. myri10ge_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info)
  1478. {
  1479. struct myri10ge_priv *mgp = netdev_priv(netdev);
  1480. strlcpy(info->driver, "myri10ge", sizeof(info->driver));
  1481. strlcpy(info->version, MYRI10GE_VERSION_STR, sizeof(info->version));
  1482. strlcpy(info->fw_version, mgp->fw_version, sizeof(info->fw_version));
  1483. strlcpy(info->bus_info, pci_name(mgp->pdev), sizeof(info->bus_info));
  1484. }
  1485. static int
  1486. myri10ge_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coal)
  1487. {
  1488. struct myri10ge_priv *mgp = netdev_priv(netdev);
  1489. coal->rx_coalesce_usecs = mgp->intr_coal_delay;
  1490. return 0;
  1491. }
  1492. static int
  1493. myri10ge_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coal)
  1494. {
  1495. struct myri10ge_priv *mgp = netdev_priv(netdev);
  1496. mgp->intr_coal_delay = coal->rx_coalesce_usecs;
  1497. put_be32(htonl(mgp->intr_coal_delay), mgp->intr_coal_delay_ptr);
  1498. return 0;
  1499. }
  1500. static void
  1501. myri10ge_get_pauseparam(struct net_device *netdev,
  1502. struct ethtool_pauseparam *pause)
  1503. {
  1504. struct myri10ge_priv *mgp = netdev_priv(netdev);
  1505. pause->autoneg = 0;
  1506. pause->rx_pause = mgp->pause;
  1507. pause->tx_pause = mgp->pause;
  1508. }
  1509. static int
  1510. myri10ge_set_pauseparam(struct net_device *netdev,
  1511. struct ethtool_pauseparam *pause)
  1512. {
  1513. struct myri10ge_priv *mgp = netdev_priv(netdev);
  1514. if (pause->tx_pause != mgp->pause)
  1515. return myri10ge_change_pause(mgp, pause->tx_pause);
  1516. if (pause->rx_pause != mgp->pause)
  1517. return myri10ge_change_pause(mgp, pause->rx_pause);
  1518. if (pause->autoneg != 0)
  1519. return -EINVAL;
  1520. return 0;
  1521. }
  1522. static void
  1523. myri10ge_get_ringparam(struct net_device *netdev,
  1524. struct ethtool_ringparam *ring)
  1525. {
  1526. struct myri10ge_priv *mgp = netdev_priv(netdev);
  1527. ring->rx_mini_max_pending = mgp->ss[0].rx_small.mask + 1;
  1528. ring->rx_max_pending = mgp->ss[0].rx_big.mask + 1;
  1529. ring->rx_jumbo_max_pending = 0;
  1530. ring->tx_max_pending = mgp->ss[0].tx.mask + 1;
  1531. ring->rx_mini_pending = ring->rx_mini_max_pending;
  1532. ring->rx_pending = ring->rx_max_pending;
  1533. ring->rx_jumbo_pending = ring->rx_jumbo_max_pending;
  1534. ring->tx_pending = ring->tx_max_pending;
  1535. }
  1536. static const char myri10ge_gstrings_main_stats[][ETH_GSTRING_LEN] = {
  1537. "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
  1538. "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
  1539. "rx_length_errors", "rx_over_errors", "rx_crc_errors",
  1540. "rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
  1541. "tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
  1542. "tx_heartbeat_errors", "tx_window_errors",
  1543. /* device-specific stats */
  1544. "tx_boundary", "WC", "irq", "MSI", "MSIX",
  1545. "read_dma_bw_MBs", "write_dma_bw_MBs", "read_write_dma_bw_MBs",
  1546. "serial_number", "watchdog_resets",
  1547. #ifdef CONFIG_MYRI10GE_DCA
  1548. "dca_capable_firmware", "dca_device_present",
  1549. #endif
  1550. "link_changes", "link_up", "dropped_link_overflow",
  1551. "dropped_link_error_or_filtered",
  1552. "dropped_pause", "dropped_bad_phy", "dropped_bad_crc32",
  1553. "dropped_unicast_filtered", "dropped_multicast_filtered",
  1554. "dropped_runt", "dropped_overrun", "dropped_no_small_buffer",
  1555. "dropped_no_big_buffer"
  1556. };
  1557. static const char myri10ge_gstrings_slice_stats[][ETH_GSTRING_LEN] = {
  1558. "----------- slice ---------",
  1559. "tx_pkt_start", "tx_pkt_done", "tx_req", "tx_done",
  1560. "rx_small_cnt", "rx_big_cnt",
  1561. "wake_queue", "stop_queue", "tx_linearized",
  1562. "LRO aggregated", "LRO flushed", "LRO avg aggr", "LRO no_desc",
  1563. };
  1564. #define MYRI10GE_NET_STATS_LEN 21
  1565. #define MYRI10GE_MAIN_STATS_LEN ARRAY_SIZE(myri10ge_gstrings_main_stats)
  1566. #define MYRI10GE_SLICE_STATS_LEN ARRAY_SIZE(myri10ge_gstrings_slice_stats)
  1567. static void
  1568. myri10ge_get_strings(struct net_device *netdev, u32 stringset, u8 * data)
  1569. {
  1570. struct myri10ge_priv *mgp = netdev_priv(netdev);
  1571. int i;
  1572. switch (stringset) {
  1573. case ETH_SS_STATS:
  1574. memcpy(data, *myri10ge_gstrings_main_stats,
  1575. sizeof(myri10ge_gstrings_main_stats));
  1576. data += sizeof(myri10ge_gstrings_main_stats);
  1577. for (i = 0; i < mgp->num_slices; i++) {
  1578. memcpy(data, *myri10ge_gstrings_slice_stats,
  1579. sizeof(myri10ge_gstrings_slice_stats));
  1580. data += sizeof(myri10ge_gstrings_slice_stats);
  1581. }
  1582. break;
  1583. }
  1584. }
  1585. static int myri10ge_get_sset_count(struct net_device *netdev, int sset)
  1586. {
  1587. struct myri10ge_priv *mgp = netdev_priv(netdev);
  1588. switch (sset) {
  1589. case ETH_SS_STATS:
  1590. return MYRI10GE_MAIN_STATS_LEN +
  1591. mgp->num_slices * MYRI10GE_SLICE_STATS_LEN;
  1592. default:
  1593. return -EOPNOTSUPP;
  1594. }
  1595. }
  1596. static void
  1597. myri10ge_get_ethtool_stats(struct net_device *netdev,
  1598. struct ethtool_stats *stats, u64 * data)
  1599. {
  1600. struct myri10ge_priv *mgp = netdev_priv(netdev);
  1601. struct myri10ge_slice_state *ss;
  1602. struct rtnl_link_stats64 link_stats;
  1603. int slice;
  1604. int i;
  1605. /* force stats update */
  1606. memset(&link_stats, 0, sizeof(link_stats));
  1607. (void)myri10ge_get_stats(netdev, &link_stats);
  1608. for (i = 0; i < MYRI10GE_NET_STATS_LEN; i++)
  1609. data[i] = ((u64 *)&link_stats)[i];
  1610. data[i++] = (unsigned int)mgp->tx_boundary;
  1611. data[i++] = (unsigned int)mgp->wc_enabled;
  1612. data[i++] = (unsigned int)mgp->pdev->irq;
  1613. data[i++] = (unsigned int)mgp->msi_enabled;
  1614. data[i++] = (unsigned int)mgp->msix_enabled;
  1615. data[i++] = (unsigned int)mgp->read_dma;
  1616. data[i++] = (unsigned int)mgp->write_dma;
  1617. data[i++] = (unsigned int)mgp->read_write_dma;
  1618. data[i++] = (unsigned int)mgp->serial_number;
  1619. data[i++] = (unsigned int)mgp->watchdog_resets;
  1620. #ifdef CONFIG_MYRI10GE_DCA
  1621. data[i++] = (unsigned int)(mgp->ss[0].dca_tag != NULL);
  1622. data[i++] = (unsigned int)(mgp->dca_enabled);
  1623. #endif
  1624. data[i++] = (unsigned int)mgp->link_changes;
  1625. /* firmware stats are useful only in the first slice */
  1626. ss = &mgp->ss[0];
  1627. data[i++] = (unsigned int)ntohl(ss->fw_stats->link_up);
  1628. data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_link_overflow);
  1629. data[i++] =
  1630. (unsigned int)ntohl(ss->fw_stats->dropped_link_error_or_filtered);
  1631. data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_pause);
  1632. data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_bad_phy);
  1633. data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_bad_crc32);
  1634. data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_unicast_filtered);
  1635. data[i++] =
  1636. (unsigned int)ntohl(ss->fw_stats->dropped_multicast_filtered);
  1637. data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_runt);
  1638. data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_overrun);
  1639. data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_no_small_buffer);
  1640. data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_no_big_buffer);
  1641. for (slice = 0; slice < mgp->num_slices; slice++) {
  1642. ss = &mgp->ss[slice];
  1643. data[i++] = slice;
  1644. data[i++] = (unsigned int)ss->tx.pkt_start;
  1645. data[i++] = (unsigned int)ss->tx.pkt_done;
  1646. data[i++] = (unsigned int)ss->tx.req;
  1647. data[i++] = (unsigned int)ss->tx.done;
  1648. data[i++] = (unsigned int)ss->rx_small.cnt;
  1649. data[i++] = (unsigned int)ss->rx_big.cnt;
  1650. data[i++] = (unsigned int)ss->tx.wake_queue;
  1651. data[i++] = (unsigned int)ss->tx.stop_queue;
  1652. data[i++] = (unsigned int)ss->tx.linearized;
  1653. data[i++] = ss->rx_done.lro_mgr.stats.aggregated;
  1654. data[i++] = ss->rx_done.lro_mgr.stats.flushed;
  1655. if (ss->rx_done.lro_mgr.stats.flushed)
  1656. data[i++] = ss->rx_done.lro_mgr.stats.aggregated /
  1657. ss->rx_done.lro_mgr.stats.flushed;
  1658. else
  1659. data[i++] = 0;
  1660. data[i++] = ss->rx_done.lro_mgr.stats.no_desc;
  1661. }
  1662. }
  1663. static void myri10ge_set_msglevel(struct net_device *netdev, u32 value)
  1664. {
  1665. struct myri10ge_priv *mgp = netdev_priv(netdev);
  1666. mgp->msg_enable = value;
  1667. }
  1668. static u32 myri10ge_get_msglevel(struct net_device *netdev)
  1669. {
  1670. struct myri10ge_priv *mgp = netdev_priv(netdev);
  1671. return mgp->msg_enable;
  1672. }
  1673. /*
  1674. * Use a low-level command to change the LED behavior. Rather than
  1675. * blinking (which is the normal case), when identify is used, the
  1676. * yellow LED turns solid.
  1677. */
  1678. static int myri10ge_led(struct myri10ge_priv *mgp, int on)
  1679. {
  1680. struct mcp_gen_header *hdr;
  1681. struct device *dev = &mgp->pdev->dev;
  1682. size_t hdr_off, pattern_off, hdr_len;
  1683. u32 pattern = 0xfffffffe;
  1684. /* find running firmware header */
  1685. hdr_off = swab32(readl(mgp->sram + MCP_HEADER_PTR_OFFSET));
  1686. if ((hdr_off & 3) || hdr_off + sizeof(*hdr) > mgp->sram_size) {
  1687. dev_err(dev, "Running firmware has bad header offset (%d)\n",
  1688. (int)hdr_off);
  1689. return -EIO;
  1690. }
  1691. hdr_len = swab32(readl(mgp->sram + hdr_off +
  1692. offsetof(struct mcp_gen_header, header_length)));
  1693. pattern_off = hdr_off + offsetof(struct mcp_gen_header, led_pattern);
  1694. if (pattern_off >= (hdr_len + hdr_off)) {
  1695. dev_info(dev, "Firmware does not support LED identification\n");
  1696. return -EINVAL;
  1697. }
  1698. if (!on)
  1699. pattern = swab32(readl(mgp->sram + pattern_off + 4));
  1700. writel(htonl(pattern), mgp->sram + pattern_off);
  1701. return 0;
  1702. }
  1703. static int
  1704. myri10ge_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state)
  1705. {
  1706. struct myri10ge_priv *mgp = netdev_priv(netdev);
  1707. int rc;
  1708. switch (state) {
  1709. case ETHTOOL_ID_ACTIVE:
  1710. rc = myri10ge_led(mgp, 1);
  1711. break;
  1712. case ETHTOOL_ID_INACTIVE:
  1713. rc = myri10ge_led(mgp, 0);
  1714. break;
  1715. default:
  1716. rc = -EINVAL;
  1717. }
  1718. return rc;
  1719. }
  1720. static const struct ethtool_ops myri10ge_ethtool_ops = {
  1721. .get_settings = myri10ge_get_settings,
  1722. .get_drvinfo = myri10ge_get_drvinfo,
  1723. .get_coalesce = myri10ge_get_coalesce,
  1724. .set_coalesce = myri10ge_set_coalesce,
  1725. .get_pauseparam = myri10ge_get_pauseparam,
  1726. .set_pauseparam = myri10ge_set_pauseparam,
  1727. .get_ringparam = myri10ge_get_ringparam,
  1728. .get_link = ethtool_op_get_link,
  1729. .get_strings = myri10ge_get_strings,
  1730. .get_sset_count = myri10ge_get_sset_count,
  1731. .get_ethtool_stats = myri10ge_get_ethtool_stats,
  1732. .set_msglevel = myri10ge_set_msglevel,
  1733. .get_msglevel = myri10ge_get_msglevel,
  1734. .set_phys_id = myri10ge_phys_id,
  1735. };
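/* Ask the firmware for the ring sizes, then allocate the host shadow
 * rings and per-entry bookkeeping for one slice and prefill both
 * receive rings. */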
  1736. static int myri10ge_allocate_rings(struct myri10ge_slice_state *ss)
  1737. {
  1738. struct myri10ge_priv *mgp = ss->mgp;
  1739. struct myri10ge_cmd cmd;
  1740. struct net_device *dev = mgp->dev;
  1741. int tx_ring_size, rx_ring_size;
  1742. int tx_ring_entries, rx_ring_entries;
  1743. int i, slice, status;
  1744. size_t bytes;
  1745. /* get ring sizes */
  1746. slice = ss - mgp->ss;
  1747. cmd.data0 = slice;
  1748. status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_RING_SIZE, &cmd, 0);
  1749. tx_ring_size = cmd.data0;
  1750. cmd.data0 = slice;
  1751. status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_RX_RING_SIZE, &cmd, 0);
  1752. if (status != 0)
  1753. return status;
  1754. rx_ring_size = cmd.data0;
  1755. tx_ring_entries = tx_ring_size / sizeof(struct mcp_kreq_ether_send);
  1756. rx_ring_entries = rx_ring_size / sizeof(struct mcp_dma_addr);
  1757. ss->tx.mask = tx_ring_entries - 1;
  1758. ss->rx_small.mask = ss->rx_big.mask = rx_ring_entries - 1;
  1759. status = -ENOMEM;
  1760. /* allocate the host shadow rings */
  1761. bytes = 8 + (MYRI10GE_MAX_SEND_DESC_TSO + 4)
  1762. * sizeof(*ss->tx.req_list);
  1763. ss->tx.req_bytes = kzalloc(bytes, GFP_KERNEL);
  1764. if (ss->tx.req_bytes == NULL)
  1765. goto abort_with_nothing;
  1766. /* ensure req_list entries are aligned to 8 bytes */
  1767. ss->tx.req_list = (struct mcp_kreq_ether_send *)
  1768. ALIGN((unsigned long)ss->tx.req_bytes, 8);
  1769. ss->tx.queue_active = 0;
  1770. bytes = rx_ring_entries * sizeof(*ss->rx_small.shadow);
  1771. ss->rx_small.shadow = kzalloc(bytes, GFP_KERNEL);
  1772. if (ss->rx_small.shadow == NULL)
  1773. goto abort_with_tx_req_bytes;
  1774. bytes = rx_ring_entries * sizeof(*ss->rx_big.shadow);
  1775. ss->rx_big.shadow = kzalloc(bytes, GFP_KERNEL);
  1776. if (ss->rx_big.shadow == NULL)
  1777. goto abort_with_rx_small_shadow;
  1778. /* allocate the host info rings */
  1779. bytes = tx_ring_entries * sizeof(*ss->tx.info);
  1780. ss->tx.info = kzalloc(bytes, GFP_KERNEL);
  1781. if (ss->tx.info == NULL)
  1782. goto abort_with_rx_big_shadow;
  1783. bytes = rx_ring_entries * sizeof(*ss->rx_small.info);
  1784. ss->rx_small.info = kzalloc(bytes, GFP_KERNEL);
  1785. if (ss->rx_small.info == NULL)
  1786. goto abort_with_tx_info;
  1787. bytes = rx_ring_entries * sizeof(*ss->rx_big.info);
  1788. ss->rx_big.info = kzalloc(bytes, GFP_KERNEL);
  1789. if (ss->rx_big.info == NULL)
  1790. goto abort_with_rx_small_info;
  1791. /* Fill the receive rings */
  1792. ss->rx_big.cnt = 0;
  1793. ss->rx_small.cnt = 0;
  1794. ss->rx_big.fill_cnt = 0;
  1795. ss->rx_small.fill_cnt = 0;
  1796. ss->rx_small.page_offset = MYRI10GE_ALLOC_SIZE;
  1797. ss->rx_big.page_offset = MYRI10GE_ALLOC_SIZE;
  1798. ss->rx_small.watchdog_needed = 0;
  1799. ss->rx_big.watchdog_needed = 0;
  1800. if (mgp->small_bytes == 0) {
  1801. ss->rx_small.fill_cnt = ss->rx_small.mask + 1;
  1802. } else {
  1803. myri10ge_alloc_rx_pages(mgp, &ss->rx_small,
  1804. mgp->small_bytes + MXGEFW_PAD, 0);
  1805. }
  1806. if (ss->rx_small.fill_cnt < ss->rx_small.mask + 1) {
  1807. netdev_err(dev, "slice-%d: alloced only %d small bufs\n",
  1808. slice, ss->rx_small.fill_cnt);
  1809. goto abort_with_rx_small_ring;
  1810. }
  1811. myri10ge_alloc_rx_pages(mgp, &ss->rx_big, mgp->big_bytes, 0);
  1812. if (ss->rx_big.fill_cnt < ss->rx_big.mask + 1) {
  1813. netdev_err(dev, "slice-%d: alloced only %d big bufs\n",
  1814. slice, ss->rx_big.fill_cnt);
  1815. goto abort_with_rx_big_ring;
  1816. }
  1817. return 0;
  1818. abort_with_rx_big_ring:
  1819. for (i = ss->rx_big.cnt; i < ss->rx_big.fill_cnt; i++) {
  1820. int idx = i & ss->rx_big.mask;
  1821. myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_big.info[idx],
  1822. mgp->big_bytes);
  1823. put_page(ss->rx_big.info[idx].page);
  1824. }
  1825. abort_with_rx_small_ring:
  1826. if (mgp->small_bytes == 0)
  1827. ss->rx_small.fill_cnt = ss->rx_small.cnt;
  1828. for (i = ss->rx_small.cnt; i < ss->rx_small.fill_cnt; i++) {
  1829. int idx = i & ss->rx_small.mask;
  1830. myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_small.info[idx],
  1831. mgp->small_bytes + MXGEFW_PAD);
  1832. put_page(ss->rx_small.info[idx].page);
  1833. }
  1834. kfree(ss->rx_big.info);
  1835. abort_with_rx_small_info:
  1836. kfree(ss->rx_small.info);
  1837. abort_with_tx_info:
  1838. kfree(ss->tx.info);
  1839. abort_with_rx_big_shadow:
  1840. kfree(ss->rx_big.shadow);
  1841. abort_with_rx_small_shadow:
  1842. kfree(ss->rx_small.shadow);
  1843. abort_with_tx_req_bytes:
  1844. kfree(ss->tx.req_bytes);
  1845. ss->tx.req_bytes = NULL;
  1846. ss->tx.req_list = NULL;
  1847. abort_with_nothing:
  1848. return status;
  1849. }
  1850. static void myri10ge_free_rings(struct myri10ge_slice_state *ss)
  1851. {
  1852. struct myri10ge_priv *mgp = ss->mgp;
  1853. struct sk_buff *skb;
  1854. struct myri10ge_tx_buf *tx;
  1855. int i, len, idx;
  1856. /* If not allocated, skip it */
  1857. if (ss->tx.req_list == NULL)
  1858. return;
  1859. for (i = ss->rx_big.cnt; i < ss->rx_big.fill_cnt; i++) {
  1860. idx = i & ss->rx_big.mask;
  1861. if (i == ss->rx_big.fill_cnt - 1)
  1862. ss->rx_big.info[idx].page_offset = MYRI10GE_ALLOC_SIZE;
  1863. myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_big.info[idx],
  1864. mgp->big_bytes);
  1865. put_page(ss->rx_big.info[idx].page);
  1866. }
  1867. if (mgp->small_bytes == 0)
  1868. ss->rx_small.fill_cnt = ss->rx_small.cnt;
  1869. for (i = ss->rx_small.cnt; i < ss->rx_small.fill_cnt; i++) {
  1870. idx = i & ss->rx_small.mask;
  1871. if (i == ss->rx_small.fill_cnt - 1)
  1872. ss->rx_small.info[idx].page_offset =
  1873. MYRI10GE_ALLOC_SIZE;
  1874. myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_small.info[idx],
  1875. mgp->small_bytes + MXGEFW_PAD);
  1876. put_page(ss->rx_small.info[idx].page);
  1877. }
  1878. tx = &ss->tx;
  1879. while (tx->done != tx->req) {
  1880. idx = tx->done & tx->mask;
  1881. skb = tx->info[idx].skb;
  1882. /* Mark as free */
  1883. tx->info[idx].skb = NULL;
  1884. tx->done++;
  1885. len = dma_unmap_len(&tx->info[idx], len);
  1886. dma_unmap_len_set(&tx->info[idx], len, 0);
  1887. if (skb) {
  1888. ss->stats.tx_dropped++;
  1889. dev_kfree_skb_any(skb);
  1890. if (len)
  1891. pci_unmap_single(mgp->pdev,
  1892. dma_unmap_addr(&tx->info[idx],
  1893. bus), len,
  1894. PCI_DMA_TODEVICE);
  1895. } else {
  1896. if (len)
  1897. pci_unmap_page(mgp->pdev,
  1898. dma_unmap_addr(&tx->info[idx],
  1899. bus), len,
  1900. PCI_DMA_TODEVICE);
  1901. }
  1902. }
  1903. kfree(ss->rx_big.info);
  1904. kfree(ss->rx_small.info);
  1905. kfree(ss->tx.info);
  1906. kfree(ss->rx_big.shadow);
  1907. kfree(ss->rx_small.shadow);
  1908. kfree(ss->tx.req_bytes);
  1909. ss->tx.req_bytes = NULL;
  1910. ss->tx.req_list = NULL;
  1911. }
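/* Install the driver's interrupt handler(s): MSI-X with one vector
 * per slice when several slices are in use, otherwise MSI if it can
 * be enabled, falling back to the shared legacy IRQ. */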
  1912. static int myri10ge_request_irq(struct myri10ge_priv *mgp)
  1913. {
  1914. struct pci_dev *pdev = mgp->pdev;
  1915. struct myri10ge_slice_state *ss;
  1916. struct net_device *netdev = mgp->dev;
  1917. int i;
  1918. int status;
  1919. mgp->msi_enabled = 0;
  1920. mgp->msix_enabled = 0;
  1921. status = 0;
  1922. if (myri10ge_msi) {
  1923. if (mgp->num_slices > 1) {
  1924. status =
  1925. pci_enable_msix(pdev, mgp->msix_vectors,
  1926. mgp->num_slices);
  1927. if (status == 0) {
  1928. mgp->msix_enabled = 1;
  1929. } else {
  1930. dev_err(&pdev->dev,
  1931. "Error %d setting up MSI-X\n", status);
  1932. return status;
  1933. }
  1934. }
  1935. if (mgp->msix_enabled == 0) {
  1936. status = pci_enable_msi(pdev);
  1937. if (status != 0) {
  1938. dev_err(&pdev->dev,
  1939. "Error %d setting up MSI; falling back to xPIC\n",
  1940. status);
  1941. } else {
  1942. mgp->msi_enabled = 1;
  1943. }
  1944. }
  1945. }
  1946. if (mgp->msix_enabled) {
  1947. for (i = 0; i < mgp->num_slices; i++) {
  1948. ss = &mgp->ss[i];
  1949. snprintf(ss->irq_desc, sizeof(ss->irq_desc),
  1950. "%s:slice-%d", netdev->name, i);
  1951. status = request_irq(mgp->msix_vectors[i].vector,
  1952. myri10ge_intr, 0, ss->irq_desc,
  1953. ss);
  1954. if (status != 0) {
  1955. dev_err(&pdev->dev,
  1956. "slice %d failed to allocate IRQ\n", i);
  1957. i--;
  1958. while (i >= 0) {
  1959. free_irq(mgp->msix_vectors[i].vector,
  1960. &mgp->ss[i]);
  1961. i--;
  1962. }
  1963. pci_disable_msix(pdev);
  1964. return status;
  1965. }
  1966. }
  1967. } else {
  1968. status = request_irq(pdev->irq, myri10ge_intr, IRQF_SHARED,
  1969. mgp->dev->name, &mgp->ss[0]);
  1970. if (status != 0) {
  1971. dev_err(&pdev->dev, "failed to allocate IRQ\n");
  1972. if (mgp->msi_enabled)
  1973. pci_disable_msi(pdev);
  1974. }
  1975. }
  1976. return status;
  1977. }
  1978. static void myri10ge_free_irq(struct myri10ge_priv *mgp)
  1979. {
  1980. struct pci_dev *pdev = mgp->pdev;
  1981. int i;
  1982. if (mgp->msix_enabled) {
  1983. for (i = 0; i < mgp->num_slices; i++)
  1984. free_irq(mgp->msix_vectors[i].vector, &mgp->ss[i]);
  1985. } else {
  1986. free_irq(pdev->irq, &mgp->ss[0]);
  1987. }
  1988. if (mgp->msi_enabled)
  1989. pci_disable_msi(pdev);
  1990. if (mgp->msix_enabled)
  1991. pci_disable_msix(pdev);
  1992. }
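/* LRO callback: locate the Ethernet, IP and TCP headers in a received
 * fragment and verify the IP and TCP checksums; returns -1 for frames
 * that cannot be aggregated. */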
  1993. static int
  1994. myri10ge_get_frag_header(struct skb_frag_struct *frag, void **mac_hdr,
  1995. void **ip_hdr, void **tcpudp_hdr,
  1996. u64 * hdr_flags, void *priv)
  1997. {
  1998. struct ethhdr *eh;
  1999. struct vlan_ethhdr *veh;
  2000. struct iphdr *iph;
  2001. u8 *va = skb_frag_address(frag);
  2002. unsigned long ll_hlen;
  2003. /* passed opaque through lro_receive_frags() */
  2004. __wsum csum = (__force __wsum) (unsigned long)priv;
  2005. /* find the mac header, aborting if not IPv4 */
  2006. eh = (struct ethhdr *)va;
  2007. *mac_hdr = eh;
  2008. ll_hlen = ETH_HLEN;
  2009. if (eh->h_proto != htons(ETH_P_IP)) {
  2010. if (eh->h_proto == htons(ETH_P_8021Q)) {
  2011. veh = (struct vlan_ethhdr *)va;
  2012. if (veh->h_vlan_encapsulated_proto != htons(ETH_P_IP))
  2013. return -1;
  2014. ll_hlen += VLAN_HLEN;
  2015. /*
  2016. * HW checksum starts ETH_HLEN bytes into
  2017. * frame, so we must subtract off the VLAN
  2018. * header's checksum before csum can be used
  2019. */
  2020. csum = csum_sub(csum, csum_partial(va + ETH_HLEN,
  2021. VLAN_HLEN, 0));
  2022. } else {
  2023. return -1;
  2024. }
  2025. }
  2026. *hdr_flags = LRO_IPV4;
  2027. iph = (struct iphdr *)(va + ll_hlen);
  2028. *ip_hdr = iph;
  2029. if (iph->protocol != IPPROTO_TCP)
  2030. return -1;
  2031. if (ip_is_fragment(iph))
  2032. return -1;
  2033. *hdr_flags |= LRO_TCP;
  2034. *tcpudp_hdr = (u8 *) (*ip_hdr) + (iph->ihl << 2);
  2035. /* verify the IP checksum */
  2036. if (unlikely(ip_fast_csum((u8 *) iph, iph->ihl)))
  2037. return -1;
  2038. /* verify the checksum */
  2039. if (unlikely(csum_tcpudp_magic(iph->saddr, iph->daddr,
  2040. ntohs(iph->tot_len) - (iph->ihl << 2),
  2041. IPPROTO_TCP, csum)))
  2042. return -1;
  2043. return 0;
  2044. }
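/* Query the firmware for the SRAM offsets of this slice's send and
 * receive rings and record the send_go/send_stop doorbell
 * locations. */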
  2045. static int myri10ge_get_txrx(struct myri10ge_priv *mgp, int slice)
  2046. {
  2047. struct myri10ge_cmd cmd;
  2048. struct myri10ge_slice_state *ss;
  2049. int status;
  2050. ss = &mgp->ss[slice];
  2051. status = 0;
  2052. if (slice == 0 || (mgp->dev->real_num_tx_queues > 1)) {
  2053. cmd.data0 = slice;
  2054. status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_OFFSET,
  2055. &cmd, 0);
  2056. ss->tx.lanai = (struct mcp_kreq_ether_send __iomem *)
  2057. (mgp->sram + cmd.data0);
  2058. }
  2059. cmd.data0 = slice;
  2060. status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SMALL_RX_OFFSET,
  2061. &cmd, 0);
  2062. ss->rx_small.lanai = (struct mcp_kreq_ether_recv __iomem *)
  2063. (mgp->sram + cmd.data0);
  2064. cmd.data0 = slice;
  2065. status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_BIG_RX_OFFSET, &cmd, 0);
  2066. ss->rx_big.lanai = (struct mcp_kreq_ether_recv __iomem *)
  2067. (mgp->sram + cmd.data0);
  2068. ss->tx.send_go = (__iomem __be32 *)
  2069. (mgp->sram + MXGEFW_ETH_SEND_GO + 64 * slice);
  2070. ss->tx.send_stop = (__iomem __be32 *)
  2071. (mgp->sram + MXGEFW_ETH_SEND_STOP + 64 * slice);
  2072. return status;
  2073. }
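/* Tell the firmware where to DMA this slice's interrupt/statistics
 * block, falling back to the obsolete single-slice command (and
 * disabling multicast support) on old firmware. */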
  2074. static int myri10ge_set_stats(struct myri10ge_priv *mgp, int slice)
  2075. {
  2076. struct myri10ge_cmd cmd;
  2077. struct myri10ge_slice_state *ss;
  2078. int status;
  2079. ss = &mgp->ss[slice];
  2080. cmd.data0 = MYRI10GE_LOWPART_TO_U32(ss->fw_stats_bus);
  2081. cmd.data1 = MYRI10GE_HIGHPART_TO_U32(ss->fw_stats_bus);
  2082. cmd.data2 = sizeof(struct mcp_irq_data) | (slice << 16);
  2083. status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_STATS_DMA_V2, &cmd, 0);
  2084. if (status == -ENOSYS) {
  2085. dma_addr_t bus = ss->fw_stats_bus;
  2086. if (slice != 0)
  2087. return -EINVAL;
  2088. bus += offsetof(struct mcp_irq_data, send_done_count);
  2089. cmd.data0 = MYRI10GE_LOWPART_TO_U32(bus);
  2090. cmd.data1 = MYRI10GE_HIGHPART_TO_U32(bus);
  2091. status = myri10ge_send_cmd(mgp,
  2092. MXGEFW_CMD_SET_STATS_DMA_OBSOLETE,
  2093. &cmd, 0);
  2094. /* Firmware cannot support multicast without STATS_DMA_V2 */
  2095. mgp->fw_multicast_support = 0;
  2096. } else {
  2097. mgp->fw_multicast_support = 1;
  2098. }
  2099. return 0;
  2100. }
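/* ndo_open: reset the NIC, configure RSS slices, set up interrupts
 * and per-slice rings, program buffer sizes and MTU into the
 * firmware, and bring the link up. */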
  2101. static int myri10ge_open(struct net_device *dev)
  2102. {
  2103. struct myri10ge_slice_state *ss;
  2104. struct myri10ge_priv *mgp = netdev_priv(dev);
  2105. struct myri10ge_cmd cmd;
  2106. int i, status, big_pow2, slice;
  2107. u8 *itable;
  2108. struct net_lro_mgr *lro_mgr;
  2109. if (mgp->running != MYRI10GE_ETH_STOPPED)
  2110. return -EBUSY;
  2111. mgp->running = MYRI10GE_ETH_STARTING;
  2112. status = myri10ge_reset(mgp);
  2113. if (status != 0) {
  2114. netdev_err(dev, "failed reset\n");
  2115. goto abort_with_nothing;
  2116. }
  2117. if (mgp->num_slices > 1) {
  2118. cmd.data0 = mgp->num_slices;
  2119. cmd.data1 = MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE;
  2120. if (mgp->dev->real_num_tx_queues > 1)
  2121. cmd.data1 |= MXGEFW_SLICE_ENABLE_MULTIPLE_TX_QUEUES;
  2122. status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ENABLE_RSS_QUEUES,
  2123. &cmd, 0);
  2124. if (status != 0) {
  2125. netdev_err(dev, "failed to set number of slices\n");
  2126. goto abort_with_nothing;
  2127. }
  2128. /* setup the indirection table */
  2129. cmd.data0 = mgp->num_slices;
  2130. status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_RSS_TABLE_SIZE,
  2131. &cmd, 0);
  2132. status |= myri10ge_send_cmd(mgp,
  2133. MXGEFW_CMD_GET_RSS_TABLE_OFFSET,
  2134. &cmd, 0);
  2135. if (status != 0) {
  2136. netdev_err(dev, "failed to setup rss tables\n");
  2137. goto abort_with_nothing;
  2138. }
  2139. /* just enable an identity mapping */
  2140. itable = mgp->sram + cmd.data0;
  2141. for (i = 0; i < mgp->num_slices; i++)
  2142. __raw_writeb(i, &itable[i]);
  2143. cmd.data0 = 1;
  2144. cmd.data1 = myri10ge_rss_hash;
  2145. status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_RSS_ENABLE,
  2146. &cmd, 0);
  2147. if (status != 0) {
  2148. netdev_err(dev, "failed to enable slices\n");
  2149. goto abort_with_nothing;
  2150. }
  2151. }
  2152. status = myri10ge_request_irq(mgp);
  2153. if (status != 0)
  2154. goto abort_with_nothing;
  2155. /* decide what small buffer size to use. For good TCP rx
  2156. * performance, it is important to not receive 1514 byte
  2157. * frames into jumbo buffers, as it confuses the socket buffer
  2158. * accounting code, leading to drops and erratic performance.
  2159. */
  2160. if (dev->mtu <= ETH_DATA_LEN)
  2161. /* enough for a TCP header */
  2162. mgp->small_bytes = (128 > SMP_CACHE_BYTES)
  2163. ? (128 - MXGEFW_PAD)
  2164. : (SMP_CACHE_BYTES - MXGEFW_PAD);
  2165. else
  2166. /* enough for a vlan encapsulated ETH_DATA_LEN frame */
  2167. mgp->small_bytes = VLAN_ETH_FRAME_LEN;
  2168. /* Override the small buffer size? */
  2169. if (myri10ge_small_bytes >= 0)
  2170. mgp->small_bytes = myri10ge_small_bytes;
2171. /* Firmware needs the big buffer size as a power of 2. Lie and
2172. * tell it the buffer is larger, because we only use 1
  2173. * buffer/pkt, and the mtu will prevent overruns.
  2174. */
  2175. big_pow2 = dev->mtu + ETH_HLEN + VLAN_HLEN + MXGEFW_PAD;
  2176. if (big_pow2 < MYRI10GE_ALLOC_SIZE / 2) {
  2177. while (!is_power_of_2(big_pow2))
  2178. big_pow2++;
  2179. mgp->big_bytes = dev->mtu + ETH_HLEN + VLAN_HLEN + MXGEFW_PAD;
  2180. } else {
  2181. big_pow2 = MYRI10GE_ALLOC_SIZE;
  2182. mgp->big_bytes = big_pow2;
  2183. }
  2184. /* setup the per-slice data structures */
  2185. for (slice = 0; slice < mgp->num_slices; slice++) {
  2186. ss = &mgp->ss[slice];
  2187. status = myri10ge_get_txrx(mgp, slice);
  2188. if (status != 0) {
  2189. netdev_err(dev, "failed to get ring sizes or locations\n");
  2190. goto abort_with_rings;
  2191. }
  2192. status = myri10ge_allocate_rings(ss);
  2193. if (status != 0)
  2194. goto abort_with_rings;
  2195. /* only firmware which supports multiple TX queues
  2196. * supports setting up the tx stats on non-zero
  2197. * slices */
  2198. if (slice == 0 || mgp->dev->real_num_tx_queues > 1)
  2199. status = myri10ge_set_stats(mgp, slice);
  2200. if (status) {
  2201. netdev_err(dev, "Couldn't set stats DMA\n");
  2202. goto abort_with_rings;
  2203. }
  2204. lro_mgr = &ss->rx_done.lro_mgr;
  2205. lro_mgr->dev = dev;
  2206. lro_mgr->features = LRO_F_NAPI;
  2207. lro_mgr->ip_summed = CHECKSUM_COMPLETE;
  2208. lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY;
  2209. lro_mgr->max_desc = MYRI10GE_MAX_LRO_DESCRIPTORS;
  2210. lro_mgr->lro_arr = ss->rx_done.lro_desc;
  2211. lro_mgr->get_frag_header = myri10ge_get_frag_header;
  2212. lro_mgr->max_aggr = myri10ge_lro_max_pkts;
  2213. lro_mgr->frag_align_pad = 2;
  2214. if (lro_mgr->max_aggr > MAX_SKB_FRAGS)
  2215. lro_mgr->max_aggr = MAX_SKB_FRAGS;
  2216. /* must happen prior to any irq */
  2217. napi_enable(&(ss)->napi);
  2218. }
  2219. /* now give firmware buffers sizes, and MTU */
  2220. cmd.data0 = dev->mtu + ETH_HLEN + VLAN_HLEN;
  2221. status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_MTU, &cmd, 0);
  2222. cmd.data0 = mgp->small_bytes;
  2223. status |=
  2224. myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_SMALL_BUFFER_SIZE, &cmd, 0);
  2225. cmd.data0 = big_pow2;
  2226. status |=
  2227. myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_BIG_BUFFER_SIZE, &cmd, 0);
  2228. if (status) {
  2229. netdev_err(dev, "Couldn't set buffer sizes\n");
  2230. goto abort_with_rings;
  2231. }
  2232. /*
  2233. * Set Linux style TSO mode; this is needed only on newer
  2234. * firmware versions. Older versions default to Linux
  2235. * style TSO
  2236. */
  2237. cmd.data0 = 0;
  2238. status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_TSO_MODE, &cmd, 0);
  2239. if (status && status != -ENOSYS) {
  2240. netdev_err(dev, "Couldn't set TSO mode\n");
  2241. goto abort_with_rings;
  2242. }
  2243. mgp->link_state = ~0U;
  2244. mgp->rdma_tags_available = 15;
  2245. status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ETHERNET_UP, &cmd, 0);
  2246. if (status) {
  2247. netdev_err(dev, "Couldn't bring up link\n");
  2248. goto abort_with_rings;
  2249. }
  2250. mgp->running = MYRI10GE_ETH_RUNNING;
  2251. mgp->watchdog_timer.expires = jiffies + myri10ge_watchdog_timeout * HZ;
  2252. add_timer(&mgp->watchdog_timer);
  2253. netif_tx_wake_all_queues(dev);
  2254. return 0;
  2255. abort_with_rings:
  2256. while (slice) {
  2257. slice--;
  2258. napi_disable(&mgp->ss[slice].napi);
  2259. }
  2260. for (i = 0; i < mgp->num_slices; i++)
  2261. myri10ge_free_rings(&mgp->ss[i]);
  2262. myri10ge_free_irq(mgp);
  2263. abort_with_nothing:
  2264. mgp->running = MYRI10GE_ETH_STOPPED;
  2265. return -ENOMEM;
  2266. }
  2267. static int myri10ge_close(struct net_device *dev)
  2268. {
  2269. struct myri10ge_priv *mgp = netdev_priv(dev);
  2270. struct myri10ge_cmd cmd;
  2271. int status, old_down_cnt;
  2272. int i;
  2273. if (mgp->running != MYRI10GE_ETH_RUNNING)
  2274. return 0;
  2275. if (mgp->ss[0].tx.req_bytes == NULL)
  2276. return 0;
  2277. del_timer_sync(&mgp->watchdog_timer);
  2278. mgp->running = MYRI10GE_ETH_STOPPING;
  2279. for (i = 0; i < mgp->num_slices; i++) {
  2280. napi_disable(&mgp->ss[i].napi);
  2281. }
  2282. netif_carrier_off(dev);
  2283. netif_tx_stop_all_queues(dev);
  2284. if (mgp->rebooted == 0) {
  2285. old_down_cnt = mgp->down_cnt;
  2286. mb();
  2287. status =
  2288. myri10ge_send_cmd(mgp, MXGEFW_CMD_ETHERNET_DOWN, &cmd, 0);
  2289. if (status)
  2290. netdev_err(dev, "Couldn't bring down link\n");
  2291. wait_event_timeout(mgp->down_wq, old_down_cnt != mgp->down_cnt,
  2292. HZ);
  2293. if (old_down_cnt == mgp->down_cnt)
  2294. netdev_err(dev, "never got down irq\n");
  2295. }
  2296. netif_tx_disable(dev);
  2297. myri10ge_free_irq(mgp);
  2298. for (i = 0; i < mgp->num_slices; i++)
  2299. myri10ge_free_rings(&mgp->ss[i]);
  2300. mgp->running = MYRI10GE_ETH_STOPPED;
  2301. return 0;
  2302. }
  2303. /* copy an array of struct mcp_kreq_ether_send's to the mcp. Copy
  2304. * backwards one at a time and handle ring wraps */
  2305. static inline void
  2306. myri10ge_submit_req_backwards(struct myri10ge_tx_buf *tx,
  2307. struct mcp_kreq_ether_send *src, int cnt)
  2308. {
  2309. int idx, starting_slot;
  2310. starting_slot = tx->req;
  2311. while (cnt > 1) {
  2312. cnt--;
  2313. idx = (starting_slot + cnt) & tx->mask;
  2314. myri10ge_pio_copy(&tx->lanai[idx], &src[cnt], sizeof(*src));
  2315. mb();
  2316. }
  2317. }
  2318. /*
  2319. * copy an array of struct mcp_kreq_ether_send's to the mcp. Copy
  2320. * at most 32 bytes at a time, so as to avoid involving the software
  2321. * pio handler in the nic. We re-write the first segment's flags
  2322. * to mark them valid only after writing the entire chain.
  2323. */
  2324. static inline void
  2325. myri10ge_submit_req(struct myri10ge_tx_buf *tx, struct mcp_kreq_ether_send *src,
  2326. int cnt)
  2327. {
  2328. int idx, i;
  2329. struct mcp_kreq_ether_send __iomem *dstp, *dst;
  2330. struct mcp_kreq_ether_send *srcp;
  2331. u8 last_flags;
  2332. idx = tx->req & tx->mask;
  2333. last_flags = src->flags;
  2334. src->flags = 0;
  2335. mb();
  2336. dst = dstp = &tx->lanai[idx];
  2337. srcp = src;
  2338. if ((idx + cnt) < tx->mask) {
  2339. for (i = 0; i < (cnt - 1); i += 2) {
  2340. myri10ge_pio_copy(dstp, srcp, 2 * sizeof(*src));
  2341. mb(); /* force write every 32 bytes */
  2342. srcp += 2;
  2343. dstp += 2;
  2344. }
  2345. } else {
  2346. /* submit all but the first request, and ensure
  2347. * that it is submitted below */
  2348. myri10ge_submit_req_backwards(tx, src, cnt);
  2349. i = 0;
  2350. }
  2351. if (i < cnt) {
  2352. /* submit the first request */
  2353. myri10ge_pio_copy(dstp, srcp, sizeof(*src));
  2354. mb(); /* barrier before setting valid flag */
  2355. }
  2356. /* re-write the last 32-bits with the valid flags */
  2357. src->flags = last_flags;
  2358. put_be32(*((__be32 *) src + 3), (__be32 __iomem *) dst + 3);
  2359. tx->req += cnt;
  2360. mb();
  2361. }
  2362. /*
  2363. * Transmit a packet. We need to split the packet so that a single
  2364. * segment does not cross myri10ge->tx_boundary, so this makes segment
  2365. * counting tricky. So rather than try to count segments up front, we
2366. * just give up if there are too few free segments available to hold a
2367. * reasonably fragmented packet. If we run
  2368. * out of segments while preparing a packet for DMA, we just linearize
  2369. * it and try again.
  2370. */
  2371. static netdev_tx_t myri10ge_xmit(struct sk_buff *skb,
  2372. struct net_device *dev)
  2373. {
  2374. struct myri10ge_priv *mgp = netdev_priv(dev);
  2375. struct myri10ge_slice_state *ss;
  2376. struct mcp_kreq_ether_send *req;
  2377. struct myri10ge_tx_buf *tx;
  2378. struct skb_frag_struct *frag;
  2379. struct netdev_queue *netdev_queue;
  2380. dma_addr_t bus;
  2381. u32 low;
  2382. __be32 high_swapped;
  2383. unsigned int len;
  2384. int idx, last_idx, avail, frag_cnt, frag_idx, count, mss, max_segments;
  2385. u16 pseudo_hdr_offset, cksum_offset, queue;
  2386. int cum_len, seglen, boundary, rdma_count;
  2387. u8 flags, odd_flag;
  2388. queue = skb_get_queue_mapping(skb);
  2389. ss = &mgp->ss[queue];
  2390. netdev_queue = netdev_get_tx_queue(mgp->dev, queue);
  2391. tx = &ss->tx;
  2392. again:
  2393. req = tx->req_list;
  2394. avail = tx->mask - 1 - (tx->req - tx->done);
  2395. mss = 0;
  2396. max_segments = MXGEFW_MAX_SEND_DESC;
  2397. if (skb_is_gso(skb)) {
  2398. mss = skb_shinfo(skb)->gso_size;
  2399. max_segments = MYRI10GE_MAX_SEND_DESC_TSO;
  2400. }
  2401. if ((unlikely(avail < max_segments))) {
  2402. /* we are out of transmit resources */
  2403. tx->stop_queue++;
  2404. netif_tx_stop_queue(netdev_queue);
  2405. return NETDEV_TX_BUSY;
  2406. }
  2407. /* Setup checksum offloading, if needed */
  2408. cksum_offset = 0;
  2409. pseudo_hdr_offset = 0;
  2410. odd_flag = 0;
  2411. flags = (MXGEFW_FLAGS_NO_TSO | MXGEFW_FLAGS_FIRST);
  2412. if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
  2413. cksum_offset = skb_checksum_start_offset(skb);
  2414. pseudo_hdr_offset = cksum_offset + skb->csum_offset;
  2415. /* If the headers are excessively large, then we must
  2416. * fall back to a software checksum */
  2417. if (unlikely(!mss && (cksum_offset > 255 ||
  2418. pseudo_hdr_offset > 127))) {
  2419. if (skb_checksum_help(skb))
  2420. goto drop;
  2421. cksum_offset = 0;
  2422. pseudo_hdr_offset = 0;
  2423. } else {
  2424. odd_flag = MXGEFW_FLAGS_ALIGN_ODD;
  2425. flags |= MXGEFW_FLAGS_CKSUM;
  2426. }
  2427. }
  2428. cum_len = 0;
  2429. if (mss) { /* TSO */
  2430. /* this removes any CKSUM flag from before */
  2431. flags = (MXGEFW_FLAGS_TSO_HDR | MXGEFW_FLAGS_FIRST);
  2432. /* negative cum_len signifies to the
  2433. * send loop that we are still in the
  2434. * header portion of the TSO packet.
  2435. * TSO header can be at most 1KB long */
  2436. cum_len = -(skb_transport_offset(skb) + tcp_hdrlen(skb));
  2437. /* for IPv6 TSO, the checksum offset stores the
  2438. * TCP header length, to save the firmware from
  2439. * the need to parse the headers */
  2440. if (skb_is_gso_v6(skb)) {
  2441. cksum_offset = tcp_hdrlen(skb);
  2442. /* Can only handle headers <= max_tso6 long */
  2443. if (unlikely(-cum_len > mgp->max_tso6))
  2444. return myri10ge_sw_tso(skb, dev);
  2445. }
  2446. /* for TSO, pseudo_hdr_offset holds mss.
  2447. * The firmware figures out where to put
  2448. * the checksum by parsing the header. */
  2449. pseudo_hdr_offset = mss;
  2450. } else
  2451. /* Mark small packets, and pad out tiny packets */
  2452. if (skb->len <= MXGEFW_SEND_SMALL_SIZE) {
  2453. flags |= MXGEFW_FLAGS_SMALL;
  2454. /* pad frames to at least ETH_ZLEN bytes */
  2455. if (unlikely(skb->len < ETH_ZLEN)) {
  2456. if (skb_padto(skb, ETH_ZLEN)) {
  2457. /* The packet is gone, so we must
  2458. * return 0 */
  2459. ss->stats.tx_dropped += 1;
  2460. return NETDEV_TX_OK;
  2461. }
  2462. /* adjust the len to account for the zero pad
  2463. * so that the nic can know how long it is */
  2464. skb->len = ETH_ZLEN;
  2465. }
  2466. }
  2467. /* map the skb for DMA */
  2468. len = skb_headlen(skb);
  2469. idx = tx->req & tx->mask;
  2470. tx->info[idx].skb = skb;
  2471. bus = pci_map_single(mgp->pdev, skb->data, len, PCI_DMA_TODEVICE);
  2472. dma_unmap_addr_set(&tx->info[idx], bus, bus);
  2473. dma_unmap_len_set(&tx->info[idx], len, len);
  2474. frag_cnt = skb_shinfo(skb)->nr_frags;
  2475. frag_idx = 0;
  2476. count = 0;
  2477. rdma_count = 0;
  2478. /* "rdma_count" is the number of RDMAs belonging to the
  2479. * current packet BEFORE the current send request. For
  2480. * non-TSO packets, this is equal to "count".
  2481. * For TSO packets, rdma_count needs to be reset
  2482. * to 0 after a segment cut.
  2483. *
  2484. * The rdma_count field of the send request is
  2485. * the number of RDMAs of the packet starting at
2486. * that request. For TSO send requests with one or more cuts
  2487. * in the middle, this is the number of RDMAs starting
  2488. * after the last cut in the request. All previous
  2489. * segments before the last cut implicitly have 1 RDMA.
  2490. *
  2491. * Since the number of RDMAs is not known beforehand,
  2492. * it must be filled-in retroactively - after each
  2493. * segmentation cut or at the end of the entire packet.
  2494. */
  2495. while (1) {
  2496. /* Break the SKB or Fragment up into pieces which
  2497. * do not cross mgp->tx_boundary */
  2498. low = MYRI10GE_LOWPART_TO_U32(bus);
  2499. high_swapped = htonl(MYRI10GE_HIGHPART_TO_U32(bus));
  2500. while (len) {
  2501. u8 flags_next;
  2502. int cum_len_next;
  2503. if (unlikely(count == max_segments))
  2504. goto abort_linearize;
  2505. boundary =
  2506. (low + mgp->tx_boundary) & ~(mgp->tx_boundary - 1);
  2507. seglen = boundary - low;
  2508. if (seglen > len)
  2509. seglen = len;
  2510. flags_next = flags & ~MXGEFW_FLAGS_FIRST;
  2511. cum_len_next = cum_len + seglen;
  2512. if (mss) { /* TSO */
  2513. (req - rdma_count)->rdma_count = rdma_count + 1;
  2514. if (likely(cum_len >= 0)) { /* payload */
  2515. int next_is_first, chop;
  2516. chop = (cum_len_next > mss);
  2517. cum_len_next = cum_len_next % mss;
  2518. next_is_first = (cum_len_next == 0);
  2519. flags |= chop * MXGEFW_FLAGS_TSO_CHOP;
  2520. flags_next |= next_is_first *
  2521. MXGEFW_FLAGS_FIRST;
  2522. rdma_count |= -(chop | next_is_first);
  2523. rdma_count += chop & !next_is_first;
  2524. } else if (likely(cum_len_next >= 0)) { /* header ends */
  2525. int small;
  2526. rdma_count = -1;
  2527. cum_len_next = 0;
  2528. seglen = -cum_len;
  2529. small = (mss <= MXGEFW_SEND_SMALL_SIZE);
  2530. flags_next = MXGEFW_FLAGS_TSO_PLD |
  2531. MXGEFW_FLAGS_FIRST |
  2532. (small * MXGEFW_FLAGS_SMALL);
  2533. }
  2534. }
  2535. req->addr_high = high_swapped;
  2536. req->addr_low = htonl(low);
  2537. req->pseudo_hdr_offset = htons(pseudo_hdr_offset);
  2538. req->pad = 0; /* complete solid 16-byte block; does this matter? */
  2539. req->rdma_count = 1;
  2540. req->length = htons(seglen);
  2541. req->cksum_offset = cksum_offset;
  2542. req->flags = flags | ((cum_len & 1) * odd_flag);
  2543. low += seglen;
  2544. len -= seglen;
  2545. cum_len = cum_len_next;
  2546. flags = flags_next;
  2547. req++;
  2548. count++;
  2549. rdma_count++;
  2550. if (cksum_offset != 0 && !(mss && skb_is_gso_v6(skb))) {
  2551. if (unlikely(cksum_offset > seglen))
  2552. cksum_offset -= seglen;
  2553. else
  2554. cksum_offset = 0;
  2555. }
  2556. }
  2557. if (frag_idx == frag_cnt)
  2558. break;
  2559. /* map next fragment for DMA */
  2560. idx = (count + tx->req) & tx->mask;
  2561. frag = &skb_shinfo(skb)->frags[frag_idx];
  2562. frag_idx++;
  2563. len = skb_frag_size(frag);
  2564. bus = skb_frag_dma_map(&mgp->pdev->dev, frag, 0, len,
  2565. DMA_TO_DEVICE);
  2566. dma_unmap_addr_set(&tx->info[idx], bus, bus);
  2567. dma_unmap_len_set(&tx->info[idx], len, len);
  2568. }
  2569. (req - rdma_count)->rdma_count = rdma_count;
  2570. if (mss)
  2571. do {
  2572. req--;
  2573. req->flags |= MXGEFW_FLAGS_TSO_LAST;
  2574. } while (!(req->flags & (MXGEFW_FLAGS_TSO_CHOP |
  2575. MXGEFW_FLAGS_FIRST)));
  2576. idx = ((count - 1) + tx->req) & tx->mask;
  2577. tx->info[idx].last = 1;
  2578. myri10ge_submit_req(tx, tx->req_list, count);
  2579. /* if using multiple tx queues, make sure NIC polls the
  2580. * current slice */
  2581. if ((mgp->dev->real_num_tx_queues > 1) && tx->queue_active == 0) {
  2582. tx->queue_active = 1;
  2583. put_be32(htonl(1), tx->send_go);
  2584. mb();
  2585. mmiowb();
  2586. }
  2587. tx->pkt_start++;
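/* Editorial note: stop the queue while fewer than MXGEFW_MAX_SEND_DESC
 * free descriptors remain, so a worst-case request still fits the next
 * time this routine is entered. */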
  2588. if ((avail - count) < MXGEFW_MAX_SEND_DESC) {
  2589. tx->stop_queue++;
  2590. netif_tx_stop_queue(netdev_queue);
  2591. }
  2592. return NETDEV_TX_OK;
  2593. abort_linearize:
  2594. /* Free any DMA resources we've alloced and clear out the skb
  2595. * slot so as to not trip up assertions, and to avoid a
  2596. * double-free if linearizing fails */
  2597. last_idx = (idx + 1) & tx->mask;
  2598. idx = tx->req & tx->mask;
  2599. tx->info[idx].skb = NULL;
  2600. do {
  2601. len = dma_unmap_len(&tx->info[idx], len);
  2602. if (len) {
  2603. if (tx->info[idx].skb != NULL)
  2604. pci_unmap_single(mgp->pdev,
  2605. dma_unmap_addr(&tx->info[idx],
  2606. bus), len,
  2607. PCI_DMA_TODEVICE);
  2608. else
  2609. pci_unmap_page(mgp->pdev,
  2610. dma_unmap_addr(&tx->info[idx],
  2611. bus), len,
  2612. PCI_DMA_TODEVICE);
  2613. dma_unmap_len_set(&tx->info[idx], len, 0);
  2614. tx->info[idx].skb = NULL;
  2615. }
  2616. idx = (idx + 1) & tx->mask;
  2617. } while (idx != last_idx);
  2618. if (skb_is_gso(skb)) {
  2619. netdev_err(mgp->dev, "TSO but wanted to linearize?!?!?\n");
  2620. goto drop;
  2621. }
  2622. if (skb_linearize(skb))
  2623. goto drop;
  2624. tx->linearized++;
  2625. goto again;
  2626. drop:
  2627. dev_kfree_skb_any(skb);
  2628. ss->stats.tx_dropped += 1;
  2629. return NETDEV_TX_OK;
  2630. }
  2631. static netdev_tx_t myri10ge_sw_tso(struct sk_buff *skb,
  2632. struct net_device *dev)
  2633. {
  2634. struct sk_buff *segs, *curr;
  2635. struct myri10ge_priv *mgp = netdev_priv(dev);
  2636. struct myri10ge_slice_state *ss;
  2637. netdev_tx_t status;
  2638. segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO6);
  2639. if (IS_ERR(segs))
  2640. goto drop;
  2641. while (segs) {
  2642. curr = segs;
  2643. segs = segs->next;
  2644. curr->next = NULL;
  2645. status = myri10ge_xmit(curr, dev);
  2646. if (status != 0) {
  2647. dev_kfree_skb_any(curr);
  2648. if (segs != NULL) {
  2649. curr = segs;
  2650. segs = segs->next;
  2651. curr->next = NULL;
  2652. dev_kfree_skb_any(segs);
  2653. }
  2654. goto drop;
  2655. }
  2656. }
  2657. dev_kfree_skb_any(skb);
  2658. return NETDEV_TX_OK;
  2659. drop:
  2660. ss = &mgp->ss[skb_get_queue_mapping(skb)];
  2661. dev_kfree_skb_any(skb);
  2662. ss->stats.tx_dropped += 1;
  2663. return NETDEV_TX_OK;
  2664. }
  2665. static struct rtnl_link_stats64 *myri10ge_get_stats(struct net_device *dev,
  2666. struct rtnl_link_stats64 *stats)
  2667. {
  2668. const struct myri10ge_priv *mgp = netdev_priv(dev);
  2669. const struct myri10ge_slice_netstats *slice_stats;
  2670. int i;
  2671. for (i = 0; i < mgp->num_slices; i++) {
  2672. slice_stats = &mgp->ss[i].stats;
  2673. stats->rx_packets += slice_stats->rx_packets;
  2674. stats->tx_packets += slice_stats->tx_packets;
  2675. stats->rx_bytes += slice_stats->rx_bytes;
  2676. stats->tx_bytes += slice_stats->tx_bytes;
  2677. stats->rx_dropped += slice_stats->rx_dropped;
  2678. stats->tx_dropped += slice_stats->tx_dropped;
  2679. }
  2680. return stats;
  2681. }
  2682. static void myri10ge_set_multicast_list(struct net_device *dev)
  2683. {
  2684. struct myri10ge_priv *mgp = netdev_priv(dev);
  2685. struct myri10ge_cmd cmd;
  2686. struct netdev_hw_addr *ha;
  2687. __be32 data[2] = { 0, 0 };
  2688. int err;
  2689. /* can be called from atomic contexts,
  2690. * pass 1 to force atomicity in myri10ge_send_cmd() */
  2691. myri10ge_change_promisc(mgp, dev->flags & IFF_PROMISC, 1);
  2692. /* This firmware is known to not support multicast */
  2693. if (!mgp->fw_multicast_support)
  2694. return;
  2695. /* Disable multicast filtering */
  2696. err = myri10ge_send_cmd(mgp, MXGEFW_ENABLE_ALLMULTI, &cmd, 1);
  2697. if (err != 0) {
  2698. netdev_err(dev, "Failed MXGEFW_ENABLE_ALLMULTI, error status: %d\n",
  2699. err);
  2700. goto abort;
  2701. }
  2702. if ((dev->flags & IFF_ALLMULTI) || mgp->adopted_rx_filter_bug) {
  2703. /* request to disable multicast filtering, so quit here */
  2704. return;
  2705. }
  2706. /* Flush the filters */
  2707. err = myri10ge_send_cmd(mgp, MXGEFW_LEAVE_ALL_MULTICAST_GROUPS,
  2708. &cmd, 1);
  2709. if (err != 0) {
  2710. netdev_err(dev, "Failed MXGEFW_LEAVE_ALL_MULTICAST_GROUPS, error status: %d\n",
  2711. err);
  2712. goto abort;
  2713. }
  2714. /* Walk the multicast list, and add each address */
  2715. netdev_for_each_mc_addr(ha, dev) {
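/* Editorial note: the 6-byte MAC address is packed into the two 32-bit
 * command words below: data0 carries octets 0-3, and data1 carries
 * octets 4-5 in its upper 16 bits. */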
  2716. memcpy(data, &ha->addr, 6);
  2717. cmd.data0 = ntohl(data[0]);
  2718. cmd.data1 = ntohl(data[1]);
  2719. err = myri10ge_send_cmd(mgp, MXGEFW_JOIN_MULTICAST_GROUP,
  2720. &cmd, 1);
  2721. if (err != 0) {
  2722. netdev_err(dev, "Failed MXGEFW_JOIN_MULTICAST_GROUP, error status:%d %pM\n",
  2723. err, ha->addr);
  2724. goto abort;
  2725. }
  2726. }
  2727. /* Enable multicast filtering */
  2728. err = myri10ge_send_cmd(mgp, MXGEFW_DISABLE_ALLMULTI, &cmd, 1);
  2729. if (err != 0) {
  2730. netdev_err(dev, "Failed MXGEFW_DISABLE_ALLMULTI, error status: %d\n",
  2731. err);
  2732. goto abort;
  2733. }
  2734. return;
  2735. abort:
  2736. return;
  2737. }
  2738. static int myri10ge_set_mac_address(struct net_device *dev, void *addr)
  2739. {
  2740. struct sockaddr *sa = addr;
  2741. struct myri10ge_priv *mgp = netdev_priv(dev);
  2742. int status;
  2743. if (!is_valid_ether_addr(sa->sa_data))
  2744. return -EADDRNOTAVAIL;
  2745. status = myri10ge_update_mac_address(mgp, sa->sa_data);
  2746. if (status != 0) {
  2747. netdev_err(dev, "changing mac address failed with %d\n",
  2748. status);
  2749. return status;
  2750. }
  2751. /* change the dev structure */
  2752. memcpy(dev->dev_addr, sa->sa_data, 6);
  2753. return 0;
  2754. }
  2755. static netdev_features_t myri10ge_fix_features(struct net_device *dev,
  2756. netdev_features_t features)
  2757. {
  2758. if (!(features & NETIF_F_RXCSUM))
  2759. features &= ~NETIF_F_LRO;
  2760. return features;
  2761. }
  2762. static int myri10ge_change_mtu(struct net_device *dev, int new_mtu)
  2763. {
  2764. struct myri10ge_priv *mgp = netdev_priv(dev);
  2765. int error = 0;
  2766. if ((new_mtu < 68) || (ETH_HLEN + new_mtu > MYRI10GE_MAX_ETHER_MTU)) {
  2767. netdev_err(dev, "new mtu (%d) is not valid\n", new_mtu);
  2768. return -EINVAL;
  2769. }
  2770. netdev_info(dev, "changing mtu from %d to %d\n", dev->mtu, new_mtu);
  2771. if (mgp->running) {
  2772. /* if we change the mtu on an active device, we must
  2773. * reset the device so the firmware sees the change */
  2774. myri10ge_close(dev);
  2775. dev->mtu = new_mtu;
  2776. myri10ge_open(dev);
  2777. } else
  2778. dev->mtu = new_mtu;
  2779. return error;
  2780. }
  2781. /*
  2782. * Enable ECRC to align PCI-E Completion packets on an 8-byte boundary.
  2783. * Only do it if the bridge is a root port since we don't want to disturb
  2784. * any other device, except if forced with myri10ge_ecrc_enable > 1.
  2785. */
  2786. static void myri10ge_enable_ecrc(struct myri10ge_priv *mgp)
  2787. {
  2788. struct pci_dev *bridge = mgp->pdev->bus->self;
  2789. struct device *dev = &mgp->pdev->dev;
  2790. int cap;
  2791. unsigned err_cap;
  2792. u16 val;
  2793. u8 ext_type;
  2794. int ret;
  2795. if (!myri10ge_ecrc_enable || !bridge)
  2796. return;
  2797. /* check that the bridge is a root port */
  2798. cap = pci_pcie_cap(bridge);
  2799. pci_read_config_word(bridge, cap + PCI_CAP_FLAGS, &val);
  2800. ext_type = (val & PCI_EXP_FLAGS_TYPE) >> 4;
  2801. if (ext_type != PCI_EXP_TYPE_ROOT_PORT) {
  2802. if (myri10ge_ecrc_enable > 1) {
  2803. struct pci_dev *prev_bridge, *old_bridge = bridge;
  2804. /* Walk the hierarchy up to the root port
  2805. * where ECRC has to be enabled */
  2806. do {
  2807. prev_bridge = bridge;
  2808. bridge = bridge->bus->self;
  2809. if (!bridge || prev_bridge == bridge) {
  2810. dev_err(dev,
  2811. "Failed to find root port"
  2812. " to force ECRC\n");
  2813. return;
  2814. }
  2815. cap = pci_pcie_cap(bridge);
  2816. pci_read_config_word(bridge,
  2817. cap + PCI_CAP_FLAGS, &val);
  2818. ext_type = (val & PCI_EXP_FLAGS_TYPE) >> 4;
  2819. } while (ext_type != PCI_EXP_TYPE_ROOT_PORT);
  2820. dev_info(dev,
  2821. "Forcing ECRC on non-root port %s"
  2822. " (enabling on root port %s)\n",
  2823. pci_name(old_bridge), pci_name(bridge));
  2824. } else {
  2825. dev_err(dev,
  2826. "Not enabling ECRC on non-root port %s\n",
  2827. pci_name(bridge));
  2828. return;
  2829. }
  2830. }
  2831. cap = pci_find_ext_capability(bridge, PCI_EXT_CAP_ID_ERR);
  2832. if (!cap)
  2833. return;
  2834. ret = pci_read_config_dword(bridge, cap + PCI_ERR_CAP, &err_cap);
  2835. if (ret) {
  2836. dev_err(dev, "failed reading ext-conf-space of %s\n",
  2837. pci_name(bridge));
  2838. dev_err(dev, "\t pci=nommconf in use? "
  2839. "or buggy/incomplete/absent ACPI MCFG attr?\n");
  2840. return;
  2841. }
  2842. if (!(err_cap & PCI_ERR_CAP_ECRC_GENC))
  2843. return;
  2844. err_cap |= PCI_ERR_CAP_ECRC_GENE;
  2845. pci_write_config_dword(bridge, cap + PCI_ERR_CAP, err_cap);
  2846. dev_info(dev, "Enabled ECRC on upstream bridge %s\n", pci_name(bridge));
  2847. }
  2848. /*
  2849. * The Lanai Z8E PCI-E interface achieves higher Read-DMA throughput
  2850. * when the PCI-E Completion packets are aligned on an 8-byte
  2851. * boundary. Some PCI-E chip sets always align Completion packets; on
  2852. * the ones that do not, the alignment can be enforced by enabling
  2853. * ECRC generation (if supported).
  2854. *
  2855. * When PCI-E Completion packets are not aligned, it is actually more
  2856. * efficient to limit Read-DMA transactions to 2KB, rather than 4KB.
  2857. *
  2858. * If the driver can neither enable ECRC nor verify that it has
  2859. * already been enabled, then it must use a firmware image which works
  2860. * around unaligned completion packets (myri10ge_rss_ethp_z8e.dat), and it
  2861. * should also ensure that it never gives the device a Read-DMA which is
  2862. * larger than 2KB by setting the tx_boundary to 2KB. If ECRC is
  2863. * enabled, then the driver should use the aligned (myri10ge_rss_eth_z8e.dat)
  2864. * firmware image, and set tx_boundary to 4KB.
  2865. */
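/*
 * Summary of the two configurations chosen below (editorial note,
 * derived from the comment above):
 *
 *   aligned completions   -> myri10ge_fw_aligned,   tx_boundary = 4096
 *   unaligned completions -> myri10ge_fw_unaligned, tx_boundary = 2048
 */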
  2866. static void myri10ge_firmware_probe(struct myri10ge_priv *mgp)
  2867. {
  2868. struct pci_dev *pdev = mgp->pdev;
  2869. struct device *dev = &pdev->dev;
  2870. int status;
  2871. mgp->tx_boundary = 4096;
  2872. /*
  2873. * Verify the max read request size was set to 4KB
  2874. * before trying the test with 4KB.
  2875. */
  2876. status = pcie_get_readrq(pdev);
  2877. if (status < 0) {
  2878. dev_err(dev, "Couldn't read max read req size: %d\n", status);
  2879. goto abort;
  2880. }
  2881. if (status != 4096) {
  2882. dev_warn(dev, "Max Read Request size != 4096 (%d)\n", status);
  2883. mgp->tx_boundary = 2048;
  2884. }
  2885. /*
  2886. * load the optimized firmware (which assumes aligned PCIe
  2887. * completions) in order to see if it works on this host.
  2888. */
  2889. set_fw_name(mgp, myri10ge_fw_aligned, false);
  2890. status = myri10ge_load_firmware(mgp, 1);
  2891. if (status != 0) {
  2892. goto abort;
  2893. }
  2894. /*
  2895. * Enable ECRC if possible
  2896. */
  2897. myri10ge_enable_ecrc(mgp);
  2898. /*
  2899. * Run a DMA test which watches for unaligned completions and
  2900. * aborts on the first one seen.
  2901. */
  2902. status = myri10ge_dma_test(mgp, MXGEFW_CMD_UNALIGNED_TEST);
  2903. if (status == 0)
  2904. return; /* keep the aligned firmware */
  2905. if (status != -E2BIG)
  2906. dev_warn(dev, "DMA test failed: %d\n", status);
  2907. if (status == -ENOSYS)
  2908. dev_warn(dev, "Falling back to ethp! "
  2909. "Please install up to date fw\n");
  2910. abort:
  2911. /* fall back to using the unaligned firmware */
  2912. mgp->tx_boundary = 2048;
  2913. set_fw_name(mgp, myri10ge_fw_unaligned, false);
  2914. }
  2915. static void myri10ge_select_firmware(struct myri10ge_priv *mgp)
  2916. {
  2917. int overridden = 0;
  2918. if (myri10ge_force_firmware == 0) {
  2919. int link_width, exp_cap;
  2920. u16 lnk;
  2921. exp_cap = pci_pcie_cap(mgp->pdev);
  2922. pci_read_config_word(mgp->pdev, exp_cap + PCI_EXP_LNKSTA, &lnk);
  2923. link_width = (lnk >> 4) & 0x3f;
  2924. /* Check to see if Link is less than 8 or if the
  2925. * upstream bridge is known to provide aligned
  2926. * completions */
  2927. if (link_width < 8) {
  2928. dev_info(&mgp->pdev->dev, "PCIE x%d Link\n",
  2929. link_width);
  2930. mgp->tx_boundary = 4096;
  2931. set_fw_name(mgp, myri10ge_fw_aligned, false);
  2932. } else {
  2933. myri10ge_firmware_probe(mgp);
  2934. }
  2935. } else {
  2936. if (myri10ge_force_firmware == 1) {
  2937. dev_info(&mgp->pdev->dev,
  2938. "Assuming aligned completions (forced)\n");
  2939. mgp->tx_boundary = 4096;
  2940. set_fw_name(mgp, myri10ge_fw_aligned, false);
  2941. } else {
  2942. dev_info(&mgp->pdev->dev,
  2943. "Assuming unaligned completions (forced)\n");
  2944. mgp->tx_boundary = 2048;
  2945. set_fw_name(mgp, myri10ge_fw_unaligned, false);
  2946. }
  2947. }
  2948. kparam_block_sysfs_write(myri10ge_fw_name);
  2949. if (myri10ge_fw_name != NULL) {
  2950. char *fw_name = kstrdup(myri10ge_fw_name, GFP_KERNEL);
  2951. if (fw_name) {
  2952. overridden = 1;
  2953. set_fw_name(mgp, fw_name, true);
  2954. }
  2955. }
  2956. kparam_unblock_sysfs_write(myri10ge_fw_name);
  2957. if (mgp->board_number < MYRI10GE_MAX_BOARDS &&
  2958. myri10ge_fw_names[mgp->board_number] != NULL &&
  2959. strlen(myri10ge_fw_names[mgp->board_number])) {
  2960. set_fw_name(mgp, myri10ge_fw_names[mgp->board_number], false);
  2961. overridden = 1;
  2962. }
  2963. if (overridden)
  2964. dev_info(&mgp->pdev->dev, "overriding firmware to %s\n",
  2965. mgp->fw_name);
  2966. }
  2967. static void myri10ge_mask_surprise_down(struct pci_dev *pdev)
  2968. {
  2969. struct pci_dev *bridge = pdev->bus->self;
  2970. int cap;
  2971. u32 mask;
  2972. if (bridge == NULL)
  2973. return;
  2974. cap = pci_find_ext_capability(bridge, PCI_EXT_CAP_ID_ERR);
  2975. if (cap) {
  2976. /* a sram parity error can cause a surprise link
  2977. * down; since we expect and can recover from sram
  2978. * parity errors, mask surprise link down events */
  2979. pci_read_config_dword(bridge, cap + PCI_ERR_UNCOR_MASK, &mask);
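/* 0x20 is PCI_ERR_UNC_SURPDN, the Surprise Down error bit in the
 * AER uncorrectable-error mask (editorial note) */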
  2980. mask |= 0x20;
  2981. pci_write_config_dword(bridge, cap + PCI_ERR_UNCOR_MASK, mask);
  2982. }
  2983. }
  2984. #ifdef CONFIG_PM
  2985. static int myri10ge_suspend(struct pci_dev *pdev, pm_message_t state)
  2986. {
  2987. struct myri10ge_priv *mgp;
  2988. struct net_device *netdev;
  2989. mgp = pci_get_drvdata(pdev);
  2990. if (mgp == NULL)
  2991. return -EINVAL;
  2992. netdev = mgp->dev;
  2993. netif_device_detach(netdev);
  2994. if (netif_running(netdev)) {
  2995. netdev_info(netdev, "closing\n");
  2996. rtnl_lock();
  2997. myri10ge_close(netdev);
  2998. rtnl_unlock();
  2999. }
  3000. myri10ge_dummy_rdma(mgp, 0);
  3001. pci_save_state(pdev);
  3002. pci_disable_device(pdev);
  3003. return pci_set_power_state(pdev, pci_choose_state(pdev, state));
  3004. }
  3005. static int myri10ge_resume(struct pci_dev *pdev)
  3006. {
  3007. struct myri10ge_priv *mgp;
  3008. struct net_device *netdev;
  3009. int status;
  3010. u16 vendor;
  3011. mgp = pci_get_drvdata(pdev);
  3012. if (mgp == NULL)
  3013. return -EINVAL;
  3014. netdev = mgp->dev;
3015. pci_set_power_state(pdev, PCI_D0); /* zeros conf space as a side effect */
  3016. msleep(5); /* give card time to respond */
  3017. pci_read_config_word(mgp->pdev, PCI_VENDOR_ID, &vendor);
  3018. if (vendor == 0xffff) {
  3019. netdev_err(mgp->dev, "device disappeared!\n");
  3020. return -EIO;
  3021. }
  3022. pci_restore_state(pdev);
  3023. status = pci_enable_device(pdev);
  3024. if (status) {
  3025. dev_err(&pdev->dev, "failed to enable device\n");
  3026. return status;
  3027. }
  3028. pci_set_master(pdev);
  3029. myri10ge_reset(mgp);
  3030. myri10ge_dummy_rdma(mgp, 1);
  3031. /* Save configuration space to be restored if the
  3032. * nic resets due to a parity error */
  3033. pci_save_state(pdev);
  3034. if (netif_running(netdev)) {
  3035. rtnl_lock();
  3036. status = myri10ge_open(netdev);
  3037. rtnl_unlock();
  3038. if (status != 0)
  3039. goto abort_with_enabled;
  3040. }
  3041. netif_device_attach(netdev);
  3042. return 0;
  3043. abort_with_enabled:
  3044. pci_disable_device(pdev);
  3045. return -EIO;
  3046. }
  3047. #endif /* CONFIG_PM */
  3048. static u32 myri10ge_read_reboot(struct myri10ge_priv *mgp)
  3049. {
  3050. struct pci_dev *pdev = mgp->pdev;
  3051. int vs = mgp->vendor_specific_offset;
  3052. u32 reboot;
3053. /* enter read32 mode */
3054. pci_write_config_byte(pdev, vs + 0x10, 0x3);
3055. /* read REBOOT_STATUS (0xfffffff0) */
  3056. pci_write_config_dword(pdev, vs + 0x18, 0xfffffff0);
  3057. pci_read_config_dword(pdev, vs + 0x14, &reboot);
  3058. return reboot;
  3059. }
  3060. static void
  3061. myri10ge_check_slice(struct myri10ge_slice_state *ss, int *reset_needed,
  3062. int *busy_slice_cnt, u32 rx_pause_cnt)
  3063. {
  3064. struct myri10ge_priv *mgp = ss->mgp;
  3065. int slice = ss - mgp->ss;
  3066. if (ss->tx.req != ss->tx.done &&
  3067. ss->tx.done == ss->watchdog_tx_done &&
  3068. ss->watchdog_tx_req != ss->watchdog_tx_done) {
  3069. /* nic seems like it might be stuck.. */
  3070. if (rx_pause_cnt != mgp->watchdog_pause) {
  3071. if (net_ratelimit())
  3072. netdev_warn(mgp->dev, "slice %d: TX paused, "
  3073. "check link partner\n", slice);
  3074. } else {
  3075. netdev_warn(mgp->dev,
  3076. "slice %d: TX stuck %d %d %d %d %d %d\n",
  3077. slice, ss->tx.queue_active, ss->tx.req,
  3078. ss->tx.done, ss->tx.pkt_start,
  3079. ss->tx.pkt_done,
  3080. (int)ntohl(mgp->ss[slice].fw_stats->
  3081. send_done_count));
  3082. *reset_needed = 1;
  3083. ss->stuck = 1;
  3084. }
  3085. }
  3086. if (ss->watchdog_tx_done != ss->tx.done ||
  3087. ss->watchdog_rx_done != ss->rx_done.cnt) {
  3088. *busy_slice_cnt += 1;
  3089. }
  3090. ss->watchdog_tx_done = ss->tx.done;
  3091. ss->watchdog_tx_req = ss->tx.req;
  3092. ss->watchdog_rx_done = ss->rx_done.cnt;
  3093. }
  3094. /*
  3095. * This watchdog is used to check whether the board has suffered
  3096. * from a parity error and needs to be recovered.
  3097. */
  3098. static void myri10ge_watchdog(struct work_struct *work)
  3099. {
  3100. struct myri10ge_priv *mgp =
  3101. container_of(work, struct myri10ge_priv, watchdog_work);
  3102. struct myri10ge_slice_state *ss;
  3103. u32 reboot, rx_pause_cnt;
  3104. int status, rebooted;
  3105. int i;
  3106. int reset_needed = 0;
  3107. int busy_slice_cnt = 0;
  3108. u16 cmd, vendor;
  3109. mgp->watchdog_resets++;
  3110. pci_read_config_word(mgp->pdev, PCI_COMMAND, &cmd);
  3111. rebooted = 0;
  3112. if ((cmd & PCI_COMMAND_MASTER) == 0) {
  3113. /* Bus master DMA disabled? Check to see
3114. * if the card rebooted due to a parity error.
  3115. * For now, just report it */
  3116. reboot = myri10ge_read_reboot(mgp);
  3117. netdev_err(mgp->dev, "NIC rebooted (0x%x),%s resetting\n",
  3118. reboot, myri10ge_reset_recover ? "" : " not");
  3119. if (myri10ge_reset_recover == 0)
  3120. return;
  3121. rtnl_lock();
  3122. mgp->rebooted = 1;
  3123. rebooted = 1;
  3124. myri10ge_close(mgp->dev);
  3125. myri10ge_reset_recover--;
  3126. mgp->rebooted = 0;
  3127. /*
  3128. * A rebooted nic will come back with config space as
  3129. * it was after power was applied to PCIe bus.
  3130. * Attempt to restore config space which was saved
  3131. * when the driver was loaded, or the last time the
  3132. * nic was resumed from power saving mode.
  3133. */
  3134. pci_restore_state(mgp->pdev);
  3135. /* save state again for accounting reasons */
  3136. pci_save_state(mgp->pdev);
  3137. } else {
  3138. /* if we get back -1's from our slot, perhaps somebody
  3139. * powered off our card. Don't try to reset it in
  3140. * this case */
  3141. if (cmd == 0xffff) {
  3142. pci_read_config_word(mgp->pdev, PCI_VENDOR_ID, &vendor);
  3143. if (vendor == 0xffff) {
  3144. netdev_err(mgp->dev, "device disappeared!\n");
  3145. return;
  3146. }
  3147. }
  3148. /* Perhaps it is a software error. See if stuck slice
3149. * has recovered; reset if not */
  3150. rx_pause_cnt = ntohl(mgp->ss[0].fw_stats->dropped_pause);
  3151. for (i = 0; i < mgp->num_slices; i++) {
3152. ss = &mgp->ss[i];
  3153. if (ss->stuck) {
  3154. myri10ge_check_slice(ss, &reset_needed,
  3155. &busy_slice_cnt,
  3156. rx_pause_cnt);
  3157. ss->stuck = 0;
  3158. }
  3159. }
  3160. if (!reset_needed) {
  3161. netdev_dbg(mgp->dev, "not resetting\n");
  3162. return;
  3163. }
  3164. netdev_err(mgp->dev, "device timeout, resetting\n");
  3165. }
  3166. if (!rebooted) {
  3167. rtnl_lock();
  3168. myri10ge_close(mgp->dev);
  3169. }
  3170. status = myri10ge_load_firmware(mgp, 1);
  3171. if (status != 0)
  3172. netdev_err(mgp->dev, "failed to load firmware\n");
  3173. else
  3174. myri10ge_open(mgp->dev);
  3175. rtnl_unlock();
  3176. }
  3177. /*
  3178. * We use our own timer routine rather than relying upon
  3179. * netdev->tx_timeout because we have a very large hardware transmit
  3180. * queue. Due to the large queue, the netdev->tx_timeout function
  3181. * cannot detect a NIC with a parity error in a timely fashion if the
  3182. * NIC is lightly loaded.
  3183. */
  3184. static void myri10ge_watchdog_timer(unsigned long arg)
  3185. {
  3186. struct myri10ge_priv *mgp;
  3187. struct myri10ge_slice_state *ss;
  3188. int i, reset_needed, busy_slice_cnt;
  3189. u32 rx_pause_cnt;
  3190. u16 cmd;
  3191. mgp = (struct myri10ge_priv *)arg;
  3192. rx_pause_cnt = ntohl(mgp->ss[0].fw_stats->dropped_pause);
  3193. busy_slice_cnt = 0;
  3194. for (i = 0, reset_needed = 0;
  3195. i < mgp->num_slices && reset_needed == 0; ++i) {
  3196. ss = &mgp->ss[i];
  3197. if (ss->rx_small.watchdog_needed) {
  3198. myri10ge_alloc_rx_pages(mgp, &ss->rx_small,
  3199. mgp->small_bytes + MXGEFW_PAD,
  3200. 1);
  3201. if (ss->rx_small.fill_cnt - ss->rx_small.cnt >=
  3202. myri10ge_fill_thresh)
  3203. ss->rx_small.watchdog_needed = 0;
  3204. }
  3205. if (ss->rx_big.watchdog_needed) {
  3206. myri10ge_alloc_rx_pages(mgp, &ss->rx_big,
  3207. mgp->big_bytes, 1);
  3208. if (ss->rx_big.fill_cnt - ss->rx_big.cnt >=
  3209. myri10ge_fill_thresh)
  3210. ss->rx_big.watchdog_needed = 0;
  3211. }
  3212. myri10ge_check_slice(ss, &reset_needed, &busy_slice_cnt,
  3213. rx_pause_cnt);
  3214. }
  3215. /* if we've sent or received no traffic, poll the NIC to
  3216. * ensure it is still there. Otherwise, we risk not noticing
  3217. * an error in a timely fashion */
  3218. if (busy_slice_cnt == 0) {
  3219. pci_read_config_word(mgp->pdev, PCI_COMMAND, &cmd);
  3220. if ((cmd & PCI_COMMAND_MASTER) == 0) {
  3221. reset_needed = 1;
  3222. }
  3223. }
  3224. mgp->watchdog_pause = rx_pause_cnt;
  3225. if (reset_needed) {
  3226. schedule_work(&mgp->watchdog_work);
  3227. } else {
  3228. /* rearm timer */
  3229. mod_timer(&mgp->watchdog_timer,
  3230. jiffies + myri10ge_watchdog_timeout * HZ);
  3231. }
  3232. }
  3233. static void myri10ge_free_slices(struct myri10ge_priv *mgp)
  3234. {
  3235. struct myri10ge_slice_state *ss;
  3236. struct pci_dev *pdev = mgp->pdev;
  3237. size_t bytes;
  3238. int i;
  3239. if (mgp->ss == NULL)
  3240. return;
  3241. for (i = 0; i < mgp->num_slices; i++) {
  3242. ss = &mgp->ss[i];
  3243. if (ss->rx_done.entry != NULL) {
  3244. bytes = mgp->max_intr_slots *
  3245. sizeof(*ss->rx_done.entry);
  3246. dma_free_coherent(&pdev->dev, bytes,
  3247. ss->rx_done.entry, ss->rx_done.bus);
  3248. ss->rx_done.entry = NULL;
  3249. }
  3250. if (ss->fw_stats != NULL) {
  3251. bytes = sizeof(*ss->fw_stats);
  3252. dma_free_coherent(&pdev->dev, bytes,
  3253. ss->fw_stats, ss->fw_stats_bus);
  3254. ss->fw_stats = NULL;
  3255. }
  3256. netif_napi_del(&ss->napi);
  3257. }
  3258. kfree(mgp->ss);
  3259. mgp->ss = NULL;
  3260. }
  3261. static int myri10ge_alloc_slices(struct myri10ge_priv *mgp)
  3262. {
  3263. struct myri10ge_slice_state *ss;
  3264. struct pci_dev *pdev = mgp->pdev;
  3265. size_t bytes;
  3266. int i;
  3267. bytes = sizeof(*mgp->ss) * mgp->num_slices;
  3268. mgp->ss = kzalloc(bytes, GFP_KERNEL);
  3269. if (mgp->ss == NULL) {
  3270. return -ENOMEM;
  3271. }
  3272. for (i = 0; i < mgp->num_slices; i++) {
  3273. ss = &mgp->ss[i];
  3274. bytes = mgp->max_intr_slots * sizeof(*ss->rx_done.entry);
  3275. ss->rx_done.entry = dma_alloc_coherent(&pdev->dev, bytes,
  3276. &ss->rx_done.bus,
  3277. GFP_KERNEL);
  3278. if (ss->rx_done.entry == NULL)
  3279. goto abort;
  3280. memset(ss->rx_done.entry, 0, bytes);
  3281. bytes = sizeof(*ss->fw_stats);
  3282. ss->fw_stats = dma_alloc_coherent(&pdev->dev, bytes,
  3283. &ss->fw_stats_bus,
  3284. GFP_KERNEL);
  3285. if (ss->fw_stats == NULL)
  3286. goto abort;
  3287. ss->mgp = mgp;
  3288. ss->dev = mgp->dev;
  3289. netif_napi_add(ss->dev, &ss->napi, myri10ge_poll,
  3290. myri10ge_napi_weight);
  3291. }
  3292. return 0;
  3293. abort:
  3294. myri10ge_free_slices(mgp);
  3295. return -ENOMEM;
  3296. }
  3297. /*
  3298. * This function determines the number of slices supported.
3299. * The number of slices is the minimum of the number of CPUs,
3300. * the number of MSI-X irqs supported, and the number of slices
3301. * supported by the firmware.
  3302. */
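/*
 * Illustrative sketch only (editorial addition, never called by the
 * driver): the policy described above amounts to roughly the following,
 * before the MSI-X allocation retries in the function below.
 * "fw_max_queues" and "msix_avail" are hypothetical inputs.
 */
static inline int myri10ge_slice_limit(int fw_max_queues, int msix_avail)
{
	/* take the smallest of the three limits */
	int n = min3((int)num_online_cpus(), fw_max_queues, msix_avail);

	/* the firmware wants a power-of-two slice count */
	return n > 1 ? rounddown_pow_of_two(n) : 1;
}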
  3303. static void myri10ge_probe_slices(struct myri10ge_priv *mgp)
  3304. {
  3305. struct myri10ge_cmd cmd;
  3306. struct pci_dev *pdev = mgp->pdev;
  3307. char *old_fw;
  3308. bool old_allocated;
  3309. int i, status, ncpus, msix_cap;
  3310. mgp->num_slices = 1;
  3311. msix_cap = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
  3312. ncpus = num_online_cpus();
  3313. if (myri10ge_max_slices == 1 || msix_cap == 0 ||
  3314. (myri10ge_max_slices == -1 && ncpus < 2))
  3315. return;
  3316. /* try to load the slice aware rss firmware */
  3317. old_fw = mgp->fw_name;
  3318. old_allocated = mgp->fw_name_allocated;
  3319. /* don't free old_fw if we override it. */
  3320. mgp->fw_name_allocated = false;
  3321. if (myri10ge_fw_name != NULL) {
  3322. dev_info(&mgp->pdev->dev, "overriding rss firmware to %s\n",
  3323. myri10ge_fw_name);
  3324. set_fw_name(mgp, myri10ge_fw_name, false);
  3325. } else if (old_fw == myri10ge_fw_aligned)
  3326. set_fw_name(mgp, myri10ge_fw_rss_aligned, false);
  3327. else
  3328. set_fw_name(mgp, myri10ge_fw_rss_unaligned, false);
  3329. status = myri10ge_load_firmware(mgp, 0);
  3330. if (status != 0) {
  3331. dev_info(&pdev->dev, "Rss firmware not found\n");
  3332. if (old_allocated)
  3333. kfree(old_fw);
  3334. return;
  3335. }
  3336. /* hit the board with a reset to ensure it is alive */
  3337. memset(&cmd, 0, sizeof(cmd));
  3338. status = myri10ge_send_cmd(mgp, MXGEFW_CMD_RESET, &cmd, 0);
  3339. if (status != 0) {
  3340. dev_err(&mgp->pdev->dev, "failed reset\n");
  3341. goto abort_with_fw;
  3342. }
  3343. mgp->max_intr_slots = cmd.data0 / sizeof(struct mcp_slot);
  3344. /* tell it the size of the interrupt queues */
  3345. cmd.data0 = mgp->max_intr_slots * sizeof(struct mcp_slot);
  3346. status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_SIZE, &cmd, 0);
  3347. if (status != 0) {
  3348. dev_err(&mgp->pdev->dev, "failed MXGEFW_CMD_SET_INTRQ_SIZE\n");
  3349. goto abort_with_fw;
  3350. }
  3351. /* ask the maximum number of slices it supports */
  3352. status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_MAX_RSS_QUEUES, &cmd, 0);
  3353. if (status != 0)
  3354. goto abort_with_fw;
  3355. else
  3356. mgp->num_slices = cmd.data0;
  3357. /* Only allow multiple slices if MSI-X is usable */
  3358. if (!myri10ge_msi) {
  3359. goto abort_with_fw;
  3360. }
  3361. /* if the admin did not specify a limit to how many
  3362. * slices we should use, cap it automatically to the
  3363. * number of CPUs currently online */
  3364. if (myri10ge_max_slices == -1)
  3365. myri10ge_max_slices = ncpus;
  3366. if (mgp->num_slices > myri10ge_max_slices)
  3367. mgp->num_slices = myri10ge_max_slices;
  3368. /* Now try to allocate as many MSI-X vectors as we have
  3369. * slices. We give up on MSI-X if we can only get a single
  3370. * vector. */
  3371. mgp->msix_vectors = kcalloc(mgp->num_slices, sizeof(*mgp->msix_vectors),
  3372. GFP_KERNEL);
  3373. if (mgp->msix_vectors == NULL)
  3374. goto disable_msix;
  3375. for (i = 0; i < mgp->num_slices; i++) {
  3376. mgp->msix_vectors[i].entry = i;
  3377. }
  3378. while (mgp->num_slices > 1) {
  3379. /* make sure it is a power of two */
  3380. while (!is_power_of_2(mgp->num_slices))
  3381. mgp->num_slices--;
  3382. if (mgp->num_slices == 1)
  3383. goto disable_msix;
  3384. status = pci_enable_msix(pdev, mgp->msix_vectors,
  3385. mgp->num_slices);
  3386. if (status == 0) {
  3387. pci_disable_msix(pdev);
  3388. if (old_allocated)
  3389. kfree(old_fw);
  3390. return;
  3391. }
  3392. if (status > 0)
  3393. mgp->num_slices = status;
  3394. else
  3395. goto disable_msix;
  3396. }
  3397. disable_msix:
  3398. if (mgp->msix_vectors != NULL) {
  3399. kfree(mgp->msix_vectors);
  3400. mgp->msix_vectors = NULL;
  3401. }
  3402. abort_with_fw:
  3403. mgp->num_slices = 1;
  3404. set_fw_name(mgp, old_fw, old_allocated);
  3405. myri10ge_load_firmware(mgp, 0);
  3406. }
  3407. static const struct net_device_ops myri10ge_netdev_ops = {
  3408. .ndo_open = myri10ge_open,
  3409. .ndo_stop = myri10ge_close,
  3410. .ndo_start_xmit = myri10ge_xmit,
  3411. .ndo_get_stats64 = myri10ge_get_stats,
  3412. .ndo_validate_addr = eth_validate_addr,
  3413. .ndo_change_mtu = myri10ge_change_mtu,
  3414. .ndo_fix_features = myri10ge_fix_features,
  3415. .ndo_set_rx_mode = myri10ge_set_multicast_list,
  3416. .ndo_set_mac_address = myri10ge_set_mac_address,
  3417. };
  3418. static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
  3419. {
  3420. struct net_device *netdev;
  3421. struct myri10ge_priv *mgp;
  3422. struct device *dev = &pdev->dev;
  3423. int i;
  3424. int status = -ENXIO;
  3425. int dac_enabled;
  3426. unsigned hdr_offset, ss_offset;
  3427. static int board_number;
  3428. netdev = alloc_etherdev_mq(sizeof(*mgp), MYRI10GE_MAX_SLICES);
  3429. if (netdev == NULL)
  3430. return -ENOMEM;
  3431. SET_NETDEV_DEV(netdev, &pdev->dev);
  3432. mgp = netdev_priv(netdev);
  3433. mgp->dev = netdev;
  3434. mgp->pdev = pdev;
  3435. mgp->pause = myri10ge_flow_control;
  3436. mgp->intr_coal_delay = myri10ge_intr_coal_delay;
  3437. mgp->msg_enable = netif_msg_init(myri10ge_debug, MYRI10GE_MSG_DEFAULT);
  3438. mgp->board_number = board_number;
  3439. init_waitqueue_head(&mgp->down_wq);
  3440. if (pci_enable_device(pdev)) {
  3441. dev_err(&pdev->dev, "pci_enable_device call failed\n");
  3442. status = -ENODEV;
  3443. goto abort_with_netdev;
  3444. }
  3445. /* Find the vendor-specific cap so we can check
  3446. * the reboot register later on */
  3447. mgp->vendor_specific_offset
  3448. = pci_find_capability(pdev, PCI_CAP_ID_VNDR);
  3449. /* Set our max read request to 4KB */
  3450. status = pcie_set_readrq(pdev, 4096);
  3451. if (status != 0) {
  3452. dev_err(&pdev->dev, "Error %d writing PCI_EXP_DEVCTL\n",
  3453. status);
  3454. goto abort_with_enabled;
  3455. }
  3456. myri10ge_mask_surprise_down(pdev);
  3457. pci_set_master(pdev);
  3458. dac_enabled = 1;
  3459. status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
  3460. if (status != 0) {
  3461. dac_enabled = 0;
  3462. dev_err(&pdev->dev,
  3463. "64-bit pci address mask was refused, "
  3464. "trying 32-bit\n");
  3465. status = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
  3466. }
  3467. if (status != 0) {
  3468. dev_err(&pdev->dev, "Error %d setting DMA mask\n", status);
  3469. goto abort_with_enabled;
  3470. }
  3471. (void)pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
  3472. mgp->cmd = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->cmd),
  3473. &mgp->cmd_bus, GFP_KERNEL);
  3474. if (mgp->cmd == NULL)
  3475. goto abort_with_enabled;
  3476. mgp->board_span = pci_resource_len(pdev, 0);
  3477. mgp->iomem_base = pci_resource_start(pdev, 0);
  3478. mgp->mtrr = -1;
  3479. mgp->wc_enabled = 0;
  3480. #ifdef CONFIG_MTRR
  3481. mgp->mtrr = mtrr_add(mgp->iomem_base, mgp->board_span,
  3482. MTRR_TYPE_WRCOMB, 1);
  3483. if (mgp->mtrr >= 0)
  3484. mgp->wc_enabled = 1;
  3485. #endif
  3486. mgp->sram = ioremap_wc(mgp->iomem_base, mgp->board_span);
  3487. if (mgp->sram == NULL) {
  3488. dev_err(&pdev->dev, "ioremap failed for %ld bytes at 0x%lx\n",
  3489. mgp->board_span, mgp->iomem_base);
  3490. status = -ENXIO;
  3491. goto abort_with_mtrr;
  3492. }
  3493. hdr_offset =
  3494. ntohl(__raw_readl(mgp->sram + MCP_HEADER_PTR_OFFSET)) & 0xffffc;
  3495. ss_offset = hdr_offset + offsetof(struct mcp_gen_header, string_specs);
  3496. mgp->sram_size = ntohl(__raw_readl(mgp->sram + ss_offset));
  3497. if (mgp->sram_size > mgp->board_span ||
  3498. mgp->sram_size <= MYRI10GE_FW_OFFSET) {
  3499. dev_err(&pdev->dev,
  3500. "invalid sram_size %dB or board span %ldB\n",
  3501. mgp->sram_size, mgp->board_span);
  3502. goto abort_with_ioremap;
  3503. }
  3504. memcpy_fromio(mgp->eeprom_strings,
  3505. mgp->sram + mgp->sram_size, MYRI10GE_EEPROM_STRINGS_SIZE);
  3506. memset(mgp->eeprom_strings + MYRI10GE_EEPROM_STRINGS_SIZE - 2, 0, 2);
  3507. status = myri10ge_read_mac_addr(mgp);
  3508. if (status)
  3509. goto abort_with_ioremap;
  3510. for (i = 0; i < ETH_ALEN; i++)
  3511. netdev->dev_addr[i] = mgp->mac_addr[i];
  3512. myri10ge_select_firmware(mgp);
  3513. status = myri10ge_load_firmware(mgp, 1);
  3514. if (status != 0) {
  3515. dev_err(&pdev->dev, "failed to load firmware\n");
  3516. goto abort_with_ioremap;
  3517. }
  3518. myri10ge_probe_slices(mgp);
  3519. status = myri10ge_alloc_slices(mgp);
  3520. if (status != 0) {
  3521. dev_err(&pdev->dev, "failed to alloc slice state\n");
  3522. goto abort_with_firmware;
  3523. }
  3524. netif_set_real_num_tx_queues(netdev, mgp->num_slices);
  3525. netif_set_real_num_rx_queues(netdev, mgp->num_slices);
  3526. status = myri10ge_reset(mgp);
  3527. if (status != 0) {
  3528. dev_err(&pdev->dev, "failed reset\n");
  3529. goto abort_with_slices;
  3530. }
  3531. #ifdef CONFIG_MYRI10GE_DCA
  3532. myri10ge_setup_dca(mgp);
  3533. #endif
  3534. pci_set_drvdata(pdev, mgp);
  3535. if ((myri10ge_initial_mtu + ETH_HLEN) > MYRI10GE_MAX_ETHER_MTU)
  3536. myri10ge_initial_mtu = MYRI10GE_MAX_ETHER_MTU - ETH_HLEN;
  3537. if ((myri10ge_initial_mtu + ETH_HLEN) < 68)
  3538. myri10ge_initial_mtu = 68;
  3539. netdev->netdev_ops = &myri10ge_netdev_ops;
  3540. netdev->mtu = myri10ge_initial_mtu;
  3541. netdev->base_addr = mgp->iomem_base;
  3542. netdev->hw_features = mgp->features | NETIF_F_LRO | NETIF_F_RXCSUM;
  3543. netdev->features = netdev->hw_features;
  3544. if (dac_enabled)
  3545. netdev->features |= NETIF_F_HIGHDMA;
  3546. netdev->vlan_features |= mgp->features;
  3547. if (mgp->fw_ver_tiny < 37)
  3548. netdev->vlan_features &= ~NETIF_F_TSO6;
  3549. if (mgp->fw_ver_tiny < 32)
  3550. netdev->vlan_features &= ~NETIF_F_TSO;
  3551. /* make sure we can get an irq, and that MSI can be
  3552. * setup (if available). Also ensure netdev->irq
  3553. * is set to correct value if MSI is enabled */
  3554. status = myri10ge_request_irq(mgp);
  3555. if (status != 0)
  3556. goto abort_with_firmware;
  3557. netdev->irq = pdev->irq;
  3558. myri10ge_free_irq(mgp);
  3559. /* Save configuration space to be restored if the
  3560. * nic resets due to a parity error */
  3561. pci_save_state(pdev);
  3562. /* Setup the watchdog timer */
  3563. setup_timer(&mgp->watchdog_timer, myri10ge_watchdog_timer,
  3564. (unsigned long)mgp);
  3565. SET_ETHTOOL_OPS(netdev, &myri10ge_ethtool_ops);
  3566. INIT_WORK(&mgp->watchdog_work, myri10ge_watchdog);
  3567. status = register_netdev(netdev);
  3568. if (status != 0) {
  3569. dev_err(&pdev->dev, "register_netdev failed: %d\n", status);
  3570. goto abort_with_state;
  3571. }
  3572. if (mgp->msix_enabled)
  3573. dev_info(dev, "%d MSI-X IRQs, tx bndry %d, fw %s, WC %s\n",
  3574. mgp->num_slices, mgp->tx_boundary, mgp->fw_name,
  3575. (mgp->wc_enabled ? "Enabled" : "Disabled"));
  3576. else
  3577. dev_info(dev, "%s IRQ %d, tx bndry %d, fw %s, WC %s\n",
  3578. mgp->msi_enabled ? "MSI" : "xPIC",
  3579. netdev->irq, mgp->tx_boundary, mgp->fw_name,
  3580. (mgp->wc_enabled ? "Enabled" : "Disabled"));
  3581. board_number++;
  3582. return 0;
  3583. abort_with_state:
  3584. pci_restore_state(pdev);
  3585. abort_with_slices:
  3586. myri10ge_free_slices(mgp);
  3587. abort_with_firmware:
  3588. myri10ge_dummy_rdma(mgp, 0);
  3589. abort_with_ioremap:
  3590. if (mgp->mac_addr_string != NULL)
  3591. dev_err(&pdev->dev,
  3592. "myri10ge_probe() failed: MAC=%s, SN=%ld\n",
  3593. mgp->mac_addr_string, mgp->serial_number);
  3594. iounmap(mgp->sram);
  3595. abort_with_mtrr:
  3596. #ifdef CONFIG_MTRR
  3597. if (mgp->mtrr >= 0)
  3598. mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span);
  3599. #endif
  3600. dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd),
  3601. mgp->cmd, mgp->cmd_bus);
  3602. abort_with_enabled:
  3603. pci_disable_device(pdev);
  3604. abort_with_netdev:
  3605. set_fw_name(mgp, NULL, false);
  3606. free_netdev(netdev);
  3607. return status;
  3608. }
  3609. /*
  3610. * myri10ge_remove
  3611. *
  3612. * Does what is necessary to shutdown one Myrinet device. Called
  3613. * once for each Myrinet card by the kernel when a module is
  3614. * unloaded.
  3615. */
  3616. static void myri10ge_remove(struct pci_dev *pdev)
  3617. {
  3618. struct myri10ge_priv *mgp;
  3619. struct net_device *netdev;
  3620. mgp = pci_get_drvdata(pdev);
  3621. if (mgp == NULL)
  3622. return;
  3623. cancel_work_sync(&mgp->watchdog_work);
  3624. netdev = mgp->dev;
  3625. unregister_netdev(netdev);
  3626. #ifdef CONFIG_MYRI10GE_DCA
  3627. myri10ge_teardown_dca(mgp);
  3628. #endif
  3629. myri10ge_dummy_rdma(mgp, 0);
  3630. /* avoid a memory leak */
  3631. pci_restore_state(pdev);
  3632. iounmap(mgp->sram);
  3633. #ifdef CONFIG_MTRR
  3634. if (mgp->mtrr >= 0)
  3635. mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span);
  3636. #endif
  3637. myri10ge_free_slices(mgp);
  3638. if (mgp->msix_vectors != NULL)
  3639. kfree(mgp->msix_vectors);
  3640. dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd),
  3641. mgp->cmd, mgp->cmd_bus);
  3642. set_fw_name(mgp, NULL, false);
  3643. free_netdev(netdev);
  3644. pci_disable_device(pdev);
  3645. pci_set_drvdata(pdev, NULL);
  3646. }
  3647. #define PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E 0x0008
  3648. #define PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E_9 0x0009
  3649. static DEFINE_PCI_DEVICE_TABLE(myri10ge_pci_tbl) = {
  3650. {PCI_DEVICE(PCI_VENDOR_ID_MYRICOM, PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E)},
  3651. {PCI_DEVICE
  3652. (PCI_VENDOR_ID_MYRICOM, PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E_9)},
  3653. {0},
  3654. };
  3655. MODULE_DEVICE_TABLE(pci, myri10ge_pci_tbl);
  3656. static struct pci_driver myri10ge_driver = {
  3657. .name = "myri10ge",
  3658. .probe = myri10ge_probe,
  3659. .remove = myri10ge_remove,
  3660. .id_table = myri10ge_pci_tbl,
  3661. #ifdef CONFIG_PM
  3662. .suspend = myri10ge_suspend,
  3663. .resume = myri10ge_resume,
  3664. #endif
  3665. };
  3666. #ifdef CONFIG_MYRI10GE_DCA
  3667. static int
  3668. myri10ge_notify_dca(struct notifier_block *nb, unsigned long event, void *p)
  3669. {
  3670. int err = driver_for_each_device(&myri10ge_driver.driver,
  3671. NULL, &event,
  3672. myri10ge_notify_dca_device);
  3673. if (err)
  3674. return NOTIFY_BAD;
  3675. return NOTIFY_DONE;
  3676. }
  3677. static struct notifier_block myri10ge_dca_notifier = {
  3678. .notifier_call = myri10ge_notify_dca,
  3679. .next = NULL,
  3680. .priority = 0,
  3681. };
  3682. #endif /* CONFIG_MYRI10GE_DCA */
  3683. static __init int myri10ge_init_module(void)
  3684. {
  3685. pr_info("Version %s\n", MYRI10GE_VERSION_STR);
  3686. if (myri10ge_rss_hash > MXGEFW_RSS_HASH_TYPE_MAX) {
  3687. pr_err("Illegal rssh hash type %d, defaulting to source port\n",
  3688. myri10ge_rss_hash);
  3689. myri10ge_rss_hash = MXGEFW_RSS_HASH_TYPE_SRC_PORT;
  3690. }
  3691. #ifdef CONFIG_MYRI10GE_DCA
  3692. dca_register_notify(&myri10ge_dca_notifier);
  3693. #endif
  3694. if (myri10ge_max_slices > MYRI10GE_MAX_SLICES)
  3695. myri10ge_max_slices = MYRI10GE_MAX_SLICES;
  3696. return pci_register_driver(&myri10ge_driver);
  3697. }
  3698. module_init(myri10ge_init_module);
  3699. static __exit void myri10ge_cleanup_module(void)
  3700. {
  3701. #ifdef CONFIG_MYRI10GE_DCA
  3702. dca_unregister_notify(&myri10ge_dca_notifier);
  3703. #endif
  3704. pci_unregister_driver(&myri10ge_driver);
  3705. }
  3706. module_exit(myri10ge_cleanup_module);