lio_main.c 120 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511
661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542
055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094209520962097209820992100210121022103210421052106210721082109211021112112211321142115211621172118211921202121212221232124212521262127212821292130213121322133213421352136213721382139214021412142214321442145214621472148214921502151215221532154215521562157215821592160216121622163216421652166216721682169217021712172217321742175217621772178217921802181218221832184218521862187218821892190219121922193219421952196219721982199220022012202220322042205220622072208220922102211221222132214221522162217221822192220222122222223222422252226222722282229223022312232223322342235223622372238223922402241224222432244224522462247224822492250225122522253225422552256225722582259226022612262226322642265226622672268226922702271227222732274227522762277227822792280228122822283228422852286228722882289229022912292229322942295229622972298229923002301230223032304230523062307230823092310231123122313231423152316231723182319232023212322232323242325232623272328232923302331233223332334233523362337233823392340234123422343234423452346234723482349235023512352235323542355235623572358235923602361236223632364236523662367236823692370237123722373237423752376237723782379238023812382238323842385238623872388238923902391239223932394239523962397239823992400240124022403240424052406240724082409241024112412241324142415241624172418241924202421242224232424242524262427242824292430243124322433243424352436243724382439244024412442244324442445244624472448244924502451245224532454245524562457245824592460246124622463246424652466246724682469247024712472247324742475247624772478247924802481248224832484248524862487248824892490249124922493249424952496249724982499250025012502250325042505250625072508250925102511251225132514251525162517251825192520252125222523252425252526252725282529253025312532253325342535253625372538253925402541254225432544254525462547254825492550255125522553255425552556255725582559256025612562256325642565256625672568256925702571257225732574257525762577257825792580258125822583258425852586258725882589259025912592259325942595259625972598259926002601260226032604260526062607260826092610261126122613261426152616261726182619262026212622262326242625262626272628262926302631263226332634263526362637263826392640264126422643264426452646264726482649265026512652265326542655265626572658265926602661266226632664266526662667266826692670267126722673267426752676267726782679268026812682268326842685268626872688268926902691269226932694269526962697269826992700270127022703270427052706270727082709271027112712271327142715271627172718271927202721272227232724272527262727272827292730273127322733273427352736273727382739274027412742274327442745274627472748274927502751275227532754275527562757275827592760276127622763276427652766276727682769277027712772277327742775277627772778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943
294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327732783279328032813282328332843285328632873288328932903291329232933294329532963297329832993300330133023303330433053306330733083309331033113312331333143315331633173318331933203321332233233324332533263327332833293330333133323333333433353336333733383339334033413342334333443345334633473348334933503351335233533354335533563357335833593360336133623363336433653366336733683369337033713372337333743375337633773378337933803381338233833384338533863387338833893390339133923393339433953396339733983399340034013402340334043405340634073408340934103411341234133414341534163417341834193420342134223423342434253426342734283429343034313432343334343435343634373438343934403441344234433444344534463447344834493450345134523453345434553456345734583459346034613462346334643465346634673468346934703471347234733474347534763477347834793480348134823483348434853486348734883489349034913492349334943495349634973498349935003501350235033504350535063507350835093510351135123513351435153516351735183519352035213522352335243525352635273528352935303531353235333534353535363537353835393540354135423543354435453546354735483549355035513552355335543555355635573558355935603561356235633564356535663567356835693570357135723573357435753576357735783579358035813582358335843585358635873588358935903591359235933594359535963597359835993600360136023603360436053606360736083609361036113612361336143615361636173618361936203621362236233624362536263627362836293630363136323633363436353636363736383639364036413642364336443645364636473648364936503651365236533654365536563657365836593660366136623663366436653666366736683669367036713672367336743675367636773678367936803681368236833684368536863687368836893690369136923693369436953696369736983699370037013702370337043705370637073708370937103711371237133714371537163717371837193720372137223723372437253726372737283729373037313732373337343735373637373738373937403741374237433744374537463747374837493750375137523753375437553756375737583759376037613762376337643765376637673768376937703771377237733774377537763777377837793780378137823783378437853786378737883789379037913792379337943795379637973798379938003801380238033804380538063807380838093810381138123813381438153816381738183819382038213822382338243825382638273828382938303831383
2383338343835383638373838383938403841384238433844384538463847384838493850385138523853385438553856385738583859386038613862386338643865386638673868386938703871387238733874387538763877387838793880388138823883388438853886388738883889389038913892389338943895389638973898389939003901390239033904390539063907390839093910391139123913391439153916391739183919392039213922392339243925392639273928392939303931393239333934393539363937393839393940394139423943394439453946394739483949395039513952395339543955395639573958395939603961396239633964396539663967396839693970397139723973397439753976397739783979398039813982398339843985398639873988398939903991399239933994399539963997399839994000400140024003400440054006400740084009401040114012401340144015401640174018401940204021402240234024402540264027402840294030403140324033403440354036403740384039404040414042404340444045404640474048404940504051405240534054405540564057405840594060406140624063406440654066406740684069407040714072407340744075407640774078407940804081408240834084408540864087408840894090409140924093409440954096409740984099410041014102410341044105410641074108410941104111411241134114411541164117411841194120412141224123412441254126412741284129413041314132413341344135413641374138413941404141414241434144414541464147414841494150415141524153415441554156415741584159416041614162416341644165416641674168416941704171417241734174417541764177417841794180418141824183418441854186418741884189419041914192419341944195419641974198419942004201420242034204420542064207420842094210421142124213421442154216421742184219422042214222422342244225422642274228422942304231423242334234423542364237423842394240424142424243424442454246424742484249425042514252425342544255425642574258425942604261426242634264426542664267426842694270427142724273427442754276427742784279428042814282428342844285428642874288428942904291429242934294429542964297429842994300430143024303430443054306430743084309431043114312431343144315431643174318431943204321432243234324432543264327432843294330433143324333433443354336433743384339434043414342434343444345434643474348434943504351435243534354435543564357435843594360436143624363436443654366436743684369437043714372437343744375437643774378437943804381438243834384438543864387438843894390439143924393439443954396439743984399440044014402440344044405440644074408440944104411441244134414441544164417441844194420442144224423442444254426442744284429443044314432443344344435443644374438443944404441444244434444444544464447444844494450445144524453445444554456445744584459446044614462446344644465446644674468446944704471447244734474447544764477447844794480448144824483448444854486448744884489449044914492449344944495449644974498449945004501450245034504450545064507450845094510451145124513451445154516451745184519452045214522452345244525452645274528452945304531453245334534453545364537453845394540454145424543454445454546454745484549455045514552455345544555455645574558455945604561456245634564456545664567456845694570457145724573457445754576457745784579458045814582458345844585458645874588458945904591459245934594459545964597459845994600460146024603460446054606460746084609461046114612461346144615461646174618461946204621462246234624462546264627462846294630463146324633463446354636463746384639464046414642464346444645464646474648464946504651465246534654465546564657465846594660466146624663466446654666466746684669467046714672
  1. /**********************************************************************
  2. * Author: Cavium, Inc.
  3. *
  4. * Contact: support@cavium.com
  5. * Please include "LiquidIO" in the subject.
  6. *
  7. * Copyright (c) 2003-2016 Cavium, Inc.
  8. *
  9. * This file is free software; you can redistribute it and/or modify
  10. * it under the terms of the GNU General Public License, Version 2, as
  11. * published by the Free Software Foundation.
  12. *
  13. * This file is distributed in the hope that it will be useful, but
  14. * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
  15. * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
  16. * NONINFRINGEMENT. See the GNU General Public License for more details.
  17. ***********************************************************************/
  18. #include <linux/pci.h>
  19. #include <linux/firmware.h>
  20. #include <net/vxlan.h>
  21. #include <linux/kthread.h>
  22. #include "liquidio_common.h"
  23. #include "octeon_droq.h"
  24. #include "octeon_iq.h"
  25. #include "response_manager.h"
  26. #include "octeon_device.h"
  27. #include "octeon_nic.h"
  28. #include "octeon_main.h"
  29. #include "octeon_network.h"
  30. #include "cn66xx_regs.h"
  31. #include "cn66xx_device.h"
  32. #include "cn68xx_device.h"
  33. #include "cn23xx_pf_device.h"
  34. #include "liquidio_image.h"
  35. MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
  36. MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Driver");
  37. MODULE_LICENSE("GPL");
  38. MODULE_VERSION(LIQUIDIO_VERSION);
  39. MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210SV_NAME LIO_FW_NAME_SUFFIX);
  40. MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210NV_NAME LIO_FW_NAME_SUFFIX);
  41. MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_410NV_NAME LIO_FW_NAME_SUFFIX);
  42. MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_23XX_NAME LIO_FW_NAME_SUFFIX);
  43. static int ddr_timeout = 10000;
  44. module_param(ddr_timeout, int, 0644);
  45. MODULE_PARM_DESC(ddr_timeout,
  46. "Number of milliseconds to wait for DDR initialization. 0 waits for ddr_timeout to be set to non-zero value before starting to check");
  47. #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
  48. static int debug = -1;
  49. module_param(debug, int, 0644);
  50. MODULE_PARM_DESC(debug, "NETIF_MSG debug bits");
  51. static char fw_type[LIO_MAX_FW_TYPE_LEN];
  52. module_param_string(fw_type, fw_type, sizeof(fw_type), 0000);
  53. MODULE_PARM_DESC(fw_type, "Type of firmware to be loaded. Default \"nic\"");
  54. static int ptp_enable = 1;
  55. /* Bit mask values for lio->ifstate */
  56. #define LIO_IFSTATE_DROQ_OPS 0x01
  57. #define LIO_IFSTATE_REGISTERED 0x02
  58. #define LIO_IFSTATE_RUNNING 0x04
  59. #define LIO_IFSTATE_RX_TIMESTAMP_ENABLED 0x08
  60. /* Polling interval for determining when NIC application is alive */
  61. #define LIQUIDIO_STARTER_POLL_INTERVAL_MS 100
  62. /* runtime link query interval */
  63. #define LIQUIDIO_LINK_QUERY_INTERVAL_MS 1000
  64. struct liquidio_if_cfg_context {
  65. int octeon_id;
  66. wait_queue_head_t wc;
  67. int cond;
  68. };
  69. struct liquidio_if_cfg_resp {
  70. u64 rh;
  71. struct liquidio_if_cfg_info cfg_info;
  72. u64 status;
  73. };
  74. struct liquidio_rx_ctl_context {
  75. int octeon_id;
  76. wait_queue_head_t wc;
  77. int cond;
  78. };
  79. struct oct_link_status_resp {
  80. u64 rh;
  81. struct oct_link_info link_info;
  82. u64 status;
  83. };
  84. struct oct_timestamp_resp {
  85. u64 rh;
  86. u64 timestamp;
  87. u64 status;
  88. };
  89. #define OCT_TIMESTAMP_RESP_SIZE (sizeof(struct oct_timestamp_resp))
  90. union tx_info {
  91. u64 u64;
  92. struct {
  93. #ifdef __BIG_ENDIAN_BITFIELD
  94. u16 gso_size;
  95. u16 gso_segs;
  96. u32 reserved;
  97. #else
  98. u32 reserved;
  99. u16 gso_segs;
  100. u16 gso_size;
  101. #endif
  102. } s;
  103. };
  104. /** Octeon device properties to be used by the NIC module.
  105. * Each octeon device in the system will be represented
  106. * by this structure in the NIC module.
  107. */
  108. #define OCTNIC_MAX_SG (MAX_SKB_FRAGS)
  109. #define OCTNIC_GSO_MAX_HEADER_SIZE 128
  110. #define OCTNIC_GSO_MAX_SIZE \
  111. (CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE)
  112. /** Structure of a node in list of gather components maintained by
  113. * NIC driver for each network device.
  114. */
  115. struct octnic_gather {
  116. /** List manipulation. Next and prev pointers. */
  117. struct list_head list;
  118. /** Size of the gather component at sg in bytes. */
  119. int sg_size;
  120. /** Number of bytes that sg was adjusted to make it 8B-aligned. */
  121. int adjust;
  122. /** Gather component that can accommodate max sized fragment list
  123. * received from the IP layer.
  124. */
  125. struct octeon_sg_entry *sg;
  126. u64 sg_dma_ptr;
  127. };
  128. struct handshake {
  129. struct completion init;
  130. struct completion started;
  131. struct pci_dev *pci_dev;
  132. int init_ok;
  133. int started_ok;
  134. };
  135. struct octeon_device_priv {
  136. /** Tasklet structures for this device. */
  137. struct tasklet_struct droq_tasklet;
  138. unsigned long napi_mask;
  139. };
  140. #ifdef CONFIG_PCI_IOV
  141. static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs);
  142. #endif
  143. static int octeon_device_init(struct octeon_device *);
  144. static int liquidio_stop(struct net_device *netdev);
  145. static void liquidio_remove(struct pci_dev *pdev);
  146. static int liquidio_probe(struct pci_dev *pdev,
  147. const struct pci_device_id *ent);
  148. static struct handshake handshake[MAX_OCTEON_DEVICES];
  149. static struct completion first_stage;
  150. static void octeon_droq_bh(unsigned long pdev)
  151. {
  152. int q_no;
  153. int reschedule = 0;
  154. struct octeon_device *oct = (struct octeon_device *)pdev;
  155. struct octeon_device_priv *oct_priv =
  156. (struct octeon_device_priv *)oct->priv;
  157. for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES(oct); q_no++) {
  158. if (!(oct->io_qmask.oq & BIT_ULL(q_no)))
  159. continue;
  160. reschedule |= octeon_droq_process_packets(oct, oct->droq[q_no],
  161. MAX_PACKET_BUDGET);
  162. lio_enable_irq(oct->droq[q_no], NULL);
  163. if (OCTEON_CN23XX_PF(oct) && oct->msix_on) {
  164. /* set time and cnt interrupt thresholds for this DROQ
  165. * for NAPI
  166. */
  167. int adjusted_q_no = q_no + oct->sriov_info.pf_srn;
  168. octeon_write_csr64(
  169. oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(adjusted_q_no),
  170. 0x5700000040ULL);
  171. octeon_write_csr64(
  172. oct, CN23XX_SLI_OQ_PKTS_SENT(adjusted_q_no), 0);
  173. }
  174. }
  175. if (reschedule)
  176. tasklet_schedule(&oct_priv->droq_tasklet);
  177. }
  178. static int lio_wait_for_oq_pkts(struct octeon_device *oct)
  179. {
  180. struct octeon_device_priv *oct_priv =
  181. (struct octeon_device_priv *)oct->priv;
  182. int retry = 100, pkt_cnt = 0, pending_pkts = 0;
  183. int i;
  184. do {
  185. pending_pkts = 0;
  186. for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
  187. if (!(oct->io_qmask.oq & BIT_ULL(i)))
  188. continue;
  189. pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]);
  190. }
  191. if (pkt_cnt > 0) {
  192. pending_pkts += pkt_cnt;
  193. tasklet_schedule(&oct_priv->droq_tasklet);
  194. }
  195. pkt_cnt = 0;
  196. schedule_timeout_uninterruptible(1);
  197. } while (retry-- && pending_pkts);
  198. return pkt_cnt;
  199. }
  200. /**
  201. * \brief Forces all IO queues off on a given device
  202. * @param oct Pointer to Octeon device
  203. */
  204. static void force_io_queues_off(struct octeon_device *oct)
  205. {
  206. if ((oct->chip_id == OCTEON_CN66XX) ||
  207. (oct->chip_id == OCTEON_CN68XX)) {
  208. /* Reset the Enable bits for Input Queues. */
  209. octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, 0);
  210. /* Reset the Enable bits for Output Queues. */
  211. octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, 0);
  212. }
  213. }
  214. /**
  215. * \brief wait for all pending requests to complete
  216. * @param oct Pointer to Octeon device
  217. *
  218. * Called during shutdown sequence
  219. */
  220. static int wait_for_pending_requests(struct octeon_device *oct)
  221. {
  222. int i, pcount = 0;
  223. for (i = 0; i < 100; i++) {
  224. pcount =
  225. atomic_read(&oct->response_list
  226. [OCTEON_ORDERED_SC_LIST].pending_req_count);
  227. if (pcount)
  228. schedule_timeout_uninterruptible(HZ / 10);
  229. else
  230. break;
  231. }
  232. if (pcount)
  233. return 1;
  234. return 0;
  235. }
  236. /**
  237. * \brief Cause device to go quiet so it can be safely removed/reset/etc
  238. * @param oct Pointer to Octeon device
  239. */
  240. static inline void pcierror_quiesce_device(struct octeon_device *oct)
  241. {
  242. int i;
  243. /* Disable the input and output queues now. No more packets will
  244. * arrive from Octeon, but we should wait for all packet processing
  245. * to finish.
  246. */
  247. force_io_queues_off(oct);
  248. /* To allow for in-flight requests */
  249. schedule_timeout_uninterruptible(100);
  250. if (wait_for_pending_requests(oct))
  251. dev_err(&oct->pci_dev->dev, "There were pending requests\n");
  252. /* Force all requests waiting to be fetched by OCTEON to complete. */
  253. for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
  254. struct octeon_instr_queue *iq;
  255. if (!(oct->io_qmask.iq & BIT_ULL(i)))
  256. continue;
  257. iq = oct->instr_queue[i];
  258. if (atomic_read(&iq->instr_pending)) {
  259. spin_lock_bh(&iq->lock);
  260. iq->fill_cnt = 0;
  261. iq->octeon_read_index = iq->host_write_index;
  262. iq->stats.instr_processed +=
  263. atomic_read(&iq->instr_pending);
  264. lio_process_iq_request_list(oct, iq, 0);
  265. spin_unlock_bh(&iq->lock);
  266. }
  267. }
  268. /* Force all pending ordered list requests to time out. */
  269. lio_process_ordered_list(oct, 1);
  270. /* We do not need to wait for output queue packets to be processed. */
  271. }
  272. /**
  273. * \brief Cleanup PCI AER uncorrectable error status
  274. * @param dev Pointer to PCI device
  275. */
  276. static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
  277. {
  278. int pos = 0x100;
  279. u32 status, mask;
  280. pr_info("%s :\n", __func__);
  281. pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
  282. pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
  283. if (dev->error_state == pci_channel_io_normal)
  284. status &= ~mask; /* Clear corresponding nonfatal bits */
  285. else
  286. status &= mask; /* Clear corresponding fatal bits */
  287. pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
  288. }
  289. /**
  290. * \brief Stop all PCI IO to a given device
  291. * @param dev Pointer to Octeon device
  292. */
  293. static void stop_pci_io(struct octeon_device *oct)
  294. {
  295. /* No more instructions will be forwarded. */
  296. atomic_set(&oct->status, OCT_DEV_IN_RESET);
  297. pci_disable_device(oct->pci_dev);
  298. /* Disable interrupts */
  299. oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
  300. pcierror_quiesce_device(oct);
  301. /* Release the interrupt line */
  302. free_irq(oct->pci_dev->irq, oct);
  303. if (oct->flags & LIO_FLAG_MSI_ENABLED)
  304. pci_disable_msi(oct->pci_dev);
  305. dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
  306. lio_get_state_string(&oct->status));
  307. /* making it a common function for all OCTEON models */
  308. cleanup_aer_uncorrect_error_status(oct->pci_dev);
  309. }
  310. /**
  311. * \brief called when PCI error is detected
  312. * @param pdev Pointer to PCI device
  313. * @param state The current pci connection state
  314. *
  315. * This function is called after a PCI bus error affecting
  316. * this device has been detected.
  317. */
  318. static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev,
  319. pci_channel_state_t state)
  320. {
  321. struct octeon_device *oct = pci_get_drvdata(pdev);
  322. /* Non-correctable Non-fatal errors */
  323. if (state == pci_channel_io_normal) {
  324. dev_err(&oct->pci_dev->dev, "Non-correctable non-fatal error reported:\n");
  325. cleanup_aer_uncorrect_error_status(oct->pci_dev);
  326. return PCI_ERS_RESULT_CAN_RECOVER;
  327. }
  328. /* Non-correctable Fatal errors */
  329. dev_err(&oct->pci_dev->dev, "Non-correctable FATAL reported by PCI AER driver\n");
  330. stop_pci_io(oct);
  331. /* Always return a DISCONNECT. There is no support for recovery but only
  332. * for a clean shutdown.
  333. */
  334. return PCI_ERS_RESULT_DISCONNECT;
  335. }
  336. /**
  337. * \brief mmio handler
  338. * @param pdev Pointer to PCI device
  339. */
  340. static pci_ers_result_t liquidio_pcie_mmio_enabled(
  341. struct pci_dev *pdev __attribute__((unused)))
  342. {
  343. /* We should never hit this since we never ask for a reset for a Fatal
  344. * Error. We always return DISCONNECT in io_error above.
  345. * But play safe and return RECOVERED for now.
  346. */
  347. return PCI_ERS_RESULT_RECOVERED;
  348. }
  349. /**
  350. * \brief called after the pci bus has been reset.
  351. * @param pdev Pointer to PCI device
  352. *
  353. * Restart the card from scratch, as if from a cold-boot. Implementation
  354. * resembles the first-half of the octeon_resume routine.
  355. */
  356. static pci_ers_result_t liquidio_pcie_slot_reset(
  357. struct pci_dev *pdev __attribute__((unused)))
  358. {
  359. /* We should never hit this since we never ask for a reset for a Fatal
  360. * Error. We always return DISCONNECT in io_error above.
  361. * But play safe and return RECOVERED for now.
  362. */
  363. return PCI_ERS_RESULT_RECOVERED;
  364. }
  365. /**
  366. * \brief called when traffic can start flowing again.
  367. * @param pdev Pointer to PCI device
  368. *
  369. * This callback is called when the error recovery driver tells us that
  370. * its OK to resume normal operation. Implementation resembles the
  371. * second-half of the octeon_resume routine.
  372. */
  373. static void liquidio_pcie_resume(struct pci_dev *pdev __attribute__((unused)))
  374. {
  375. /* Nothing to be done here. */
  376. }
  377. #ifdef CONFIG_PM
  378. /**
  379. * \brief called when suspending
  380. * @param pdev Pointer to PCI device
  381. * @param state state to suspend to
  382. */
  383. static int liquidio_suspend(struct pci_dev *pdev __attribute__((unused)),
  384. pm_message_t state __attribute__((unused)))
  385. {
  386. return 0;
  387. }
  388. /**
  389. * \brief called when resuming
  390. * @param pdev Pointer to PCI device
  391. */
  392. static int liquidio_resume(struct pci_dev *pdev __attribute__((unused)))
  393. {
  394. return 0;
  395. }
  396. #endif
  397. /* For PCI-E Advanced Error Recovery (AER) Interface */
  398. static const struct pci_error_handlers liquidio_err_handler = {
  399. .error_detected = liquidio_pcie_error_detected,
  400. .mmio_enabled = liquidio_pcie_mmio_enabled,
  401. .slot_reset = liquidio_pcie_slot_reset,
  402. .resume = liquidio_pcie_resume,
  403. };
  404. static const struct pci_device_id liquidio_pci_tbl[] = {
  405. { /* 68xx */
  406. PCI_VENDOR_ID_CAVIUM, 0x91, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
  407. },
  408. { /* 66xx */
  409. PCI_VENDOR_ID_CAVIUM, 0x92, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
  410. },
  411. { /* 23xx pf */
  412. PCI_VENDOR_ID_CAVIUM, 0x9702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
  413. },
  414. {
  415. 0, 0, 0, 0, 0, 0, 0
  416. }
  417. };
  418. MODULE_DEVICE_TABLE(pci, liquidio_pci_tbl);
  419. static struct pci_driver liquidio_pci_driver = {
  420. .name = "LiquidIO",
  421. .id_table = liquidio_pci_tbl,
  422. .probe = liquidio_probe,
  423. .remove = liquidio_remove,
  424. .err_handler = &liquidio_err_handler, /* For AER */
  425. #ifdef CONFIG_PM
  426. .suspend = liquidio_suspend,
  427. .resume = liquidio_resume,
  428. #endif
  429. #ifdef CONFIG_PCI_IOV
  430. .sriov_configure = liquidio_enable_sriov,
  431. #endif
  432. };
  433. /**
  434. * \brief register PCI driver
  435. */
  436. static int liquidio_init_pci(void)
  437. {
  438. return pci_register_driver(&liquidio_pci_driver);
  439. }
  440. /**
  441. * \brief unregister PCI driver
  442. */
  443. static void liquidio_deinit_pci(void)
  444. {
  445. pci_unregister_driver(&liquidio_pci_driver);
  446. }
  447. /**
  448. * \brief check interface state
  449. * @param lio per-network private data
  450. * @param state_flag flag state to check
  451. */
  452. static inline int ifstate_check(struct lio *lio, int state_flag)
  453. {
  454. return atomic_read(&lio->ifstate) & state_flag;
  455. }
  456. /**
  457. * \brief set interface state
  458. * @param lio per-network private data
  459. * @param state_flag flag state to set
  460. */
  461. static inline void ifstate_set(struct lio *lio, int state_flag)
  462. {
  463. atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) | state_flag));
  464. }
  465. /**
  466. * \brief clear interface state
  467. * @param lio per-network private data
  468. * @param state_flag flag state to clear
  469. */
  470. static inline void ifstate_reset(struct lio *lio, int state_flag)
  471. {
  472. atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) & ~(state_flag)));
  473. }
  474. /**
  475. * \brief Stop Tx queues
  476. * @param netdev network device
  477. */
  478. static inline void txqs_stop(struct net_device *netdev)
  479. {
  480. if (netif_is_multiqueue(netdev)) {
  481. int i;
  482. for (i = 0; i < netdev->num_tx_queues; i++)
  483. netif_stop_subqueue(netdev, i);
  484. } else {
  485. netif_stop_queue(netdev);
  486. }
  487. }
  488. /**
  489. * \brief Start Tx queues
  490. * @param netdev network device
  491. */
  492. static inline void txqs_start(struct net_device *netdev)
  493. {
  494. if (netif_is_multiqueue(netdev)) {
  495. int i;
  496. for (i = 0; i < netdev->num_tx_queues; i++)
  497. netif_start_subqueue(netdev, i);
  498. } else {
  499. netif_start_queue(netdev);
  500. }
  501. }
  502. /**
  503. * \brief Wake Tx queues
  504. * @param netdev network device
  505. */
  506. static inline void txqs_wake(struct net_device *netdev)
  507. {
  508. struct lio *lio = GET_LIO(netdev);
  509. if (netif_is_multiqueue(netdev)) {
  510. int i;
  511. for (i = 0; i < netdev->num_tx_queues; i++) {
  512. int qno = lio->linfo.txpciq[i %
  513. (lio->linfo.num_txpciq)].s.q_no;
  514. if (__netif_subqueue_stopped(netdev, i)) {
  515. INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, qno,
  516. tx_restart, 1);
  517. netif_wake_subqueue(netdev, i);
  518. }
  519. }
  520. } else {
  521. INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq,
  522. tx_restart, 1);
  523. netif_wake_queue(netdev);
  524. }
  525. }
  526. /**
  527. * \brief Stop Tx queue
  528. * @param netdev network device
  529. */
  530. static void stop_txq(struct net_device *netdev)
  531. {
  532. txqs_stop(netdev);
  533. }
  534. /**
  535. * \brief Start Tx queue
  536. * @param netdev network device
  537. */
  538. static void start_txq(struct net_device *netdev)
  539. {
  540. struct lio *lio = GET_LIO(netdev);
  541. if (lio->linfo.link.s.link_up) {
  542. txqs_start(netdev);
  543. return;
  544. }
  545. }
  546. /**
  547. * \brief Wake a queue
  548. * @param netdev network device
  549. * @param q which queue to wake
  550. */
  551. static inline void wake_q(struct net_device *netdev, int q)
  552. {
  553. if (netif_is_multiqueue(netdev))
  554. netif_wake_subqueue(netdev, q);
  555. else
  556. netif_wake_queue(netdev);
  557. }
  558. /**
  559. * \brief Stop a queue
  560. * @param netdev network device
  561. * @param q which queue to stop
  562. */
  563. static inline void stop_q(struct net_device *netdev, int q)
  564. {
  565. if (netif_is_multiqueue(netdev))
  566. netif_stop_subqueue(netdev, q);
  567. else
  568. netif_stop_queue(netdev);
  569. }
  570. /**
  571. * \brief Check Tx queue status, and take appropriate action
  572. * @param lio per-network private data
  573. * @returns 0 if full, number of queues woken up otherwise
  574. */
  575. static inline int check_txq_status(struct lio *lio)
  576. {
  577. int ret_val = 0;
  578. if (netif_is_multiqueue(lio->netdev)) {
  579. int numqs = lio->netdev->num_tx_queues;
  580. int q, iq = 0;
  581. /* check each sub-queue state */
  582. for (q = 0; q < numqs; q++) {
  583. iq = lio->linfo.txpciq[q %
  584. (lio->linfo.num_txpciq)].s.q_no;
  585. if (octnet_iq_is_full(lio->oct_dev, iq))
  586. continue;
  587. if (__netif_subqueue_stopped(lio->netdev, q)) {
  588. wake_q(lio->netdev, q);
  589. INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq,
  590. tx_restart, 1);
  591. ret_val++;
  592. }
  593. }
  594. } else {
  595. if (octnet_iq_is_full(lio->oct_dev, lio->txq))
  596. return 0;
  597. wake_q(lio->netdev, lio->txq);
  598. INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq,
  599. tx_restart, 1);
  600. ret_val = 1;
  601. }
  602. return ret_val;
  603. }
  604. /**
  605. * Remove the node at the head of the list. The list would be empty at
  606. * the end of this call if there are no more nodes in the list.
  607. */
  608. static inline struct list_head *list_delete_head(struct list_head *root)
  609. {
  610. struct list_head *node;
  611. if ((root->prev == root) && (root->next == root))
  612. node = NULL;
  613. else
  614. node = root->next;
  615. if (node)
  616. list_del(node);
  617. return node;
  618. }
  619. /**
  620. * \brief Delete gather lists
  621. * @param lio per-network private data
  622. */
  623. static void delete_glists(struct lio *lio)
  624. {
  625. struct octnic_gather *g;
  626. int i;
  627. if (!lio->glist)
  628. return;
  629. for (i = 0; i < lio->linfo.num_txpciq; i++) {
  630. do {
  631. g = (struct octnic_gather *)
  632. list_delete_head(&lio->glist[i]);
  633. if (g) {
  634. if (g->sg) {
  635. dma_unmap_single(&lio->oct_dev->
  636. pci_dev->dev,
  637. g->sg_dma_ptr,
  638. g->sg_size,
  639. DMA_TO_DEVICE);
  640. kfree((void *)((unsigned long)g->sg -
  641. g->adjust));
  642. }
  643. kfree(g);
  644. }
  645. } while (g);
  646. }
  647. kfree((void *)lio->glist);
  648. kfree((void *)lio->glist_lock);
  649. }
  650. /**
  651. * \brief Setup gather lists
  652. * @param lio per-network private data
  653. */
  654. static int setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
  655. {
  656. int i, j;
  657. struct octnic_gather *g;
  658. lio->glist_lock = kcalloc(num_iqs, sizeof(*lio->glist_lock),
  659. GFP_KERNEL);
  660. if (!lio->glist_lock)
  661. return 1;
  662. lio->glist = kcalloc(num_iqs, sizeof(*lio->glist),
  663. GFP_KERNEL);
  664. if (!lio->glist) {
  665. kfree((void *)lio->glist_lock);
  666. return 1;
  667. }
  668. for (i = 0; i < num_iqs; i++) {
  669. int numa_node = cpu_to_node(i % num_online_cpus());
  670. spin_lock_init(&lio->glist_lock[i]);
  671. INIT_LIST_HEAD(&lio->glist[i]);
  672. for (j = 0; j < lio->tx_qsize; j++) {
  673. g = kzalloc_node(sizeof(*g), GFP_KERNEL,
  674. numa_node);
  675. if (!g)
  676. g = kzalloc(sizeof(*g), GFP_KERNEL);
  677. if (!g)
  678. break;
  679. g->sg_size = ((ROUNDUP4(OCTNIC_MAX_SG) >> 2) *
  680. OCT_SG_ENTRY_SIZE);
  681. g->sg = kmalloc_node(g->sg_size + 8,
  682. GFP_KERNEL, numa_node);
  683. if (!g->sg)
  684. g->sg = kmalloc(g->sg_size + 8, GFP_KERNEL);
  685. if (!g->sg) {
  686. kfree(g);
  687. break;
  688. }
  689. /* The gather component should be aligned on 64-bit
  690. * boundary
  691. */
  692. if (((unsigned long)g->sg) & 7) {
  693. g->adjust = 8 - (((unsigned long)g->sg) & 7);
  694. g->sg = (struct octeon_sg_entry *)
  695. ((unsigned long)g->sg + g->adjust);
  696. }
  697. g->sg_dma_ptr = dma_map_single(&oct->pci_dev->dev,
  698. g->sg, g->sg_size,
  699. DMA_TO_DEVICE);
  700. if (dma_mapping_error(&oct->pci_dev->dev,
  701. g->sg_dma_ptr)) {
  702. kfree((void *)((unsigned long)g->sg -
  703. g->adjust));
  704. kfree(g);
  705. break;
  706. }
  707. list_add_tail(&g->list, &lio->glist[i]);
  708. }
  709. if (j != lio->tx_qsize) {
  710. delete_glists(lio);
  711. return 1;
  712. }
  713. }
  714. return 0;
  715. }
  716. /**
  717. * \brief Print link information
  718. * @param netdev network device
  719. */
  720. static void print_link_info(struct net_device *netdev)
  721. {
  722. struct lio *lio = GET_LIO(netdev);
  723. if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED) {
  724. struct oct_link_info *linfo = &lio->linfo;
  725. if (linfo->link.s.link_up) {
  726. netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n",
  727. linfo->link.s.speed,
  728. (linfo->link.s.duplex) ? "Full" : "Half");
  729. } else {
  730. netif_info(lio, link, lio->netdev, "Link Down\n");
  731. }
  732. }
  733. }
  734. /**
  735. * \brief Routine to notify MTU change
  736. * @param work work_struct data structure
  737. */
  738. static void octnet_link_status_change(struct work_struct *work)
  739. {
  740. struct cavium_wk *wk = (struct cavium_wk *)work;
  741. struct lio *lio = (struct lio *)wk->ctxptr;
  742. rtnl_lock();
  743. call_netdevice_notifiers(NETDEV_CHANGEMTU, lio->netdev);
  744. rtnl_unlock();
  745. }
  746. /**
  747. * \brief Sets up the mtu status change work
  748. * @param netdev network device
  749. */
  750. static inline int setup_link_status_change_wq(struct net_device *netdev)
  751. {
  752. struct lio *lio = GET_LIO(netdev);
  753. struct octeon_device *oct = lio->oct_dev;
  754. lio->link_status_wq.wq = alloc_workqueue("link-status",
  755. WQ_MEM_RECLAIM, 0);
  756. if (!lio->link_status_wq.wq) {
  757. dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n");
  758. return -1;
  759. }
  760. INIT_DELAYED_WORK(&lio->link_status_wq.wk.work,
  761. octnet_link_status_change);
  762. lio->link_status_wq.wk.ctxptr = lio;
  763. return 0;
  764. }
  765. static inline void cleanup_link_status_change_wq(struct net_device *netdev)
  766. {
  767. struct lio *lio = GET_LIO(netdev);
  768. if (lio->link_status_wq.wq) {
  769. cancel_delayed_work_sync(&lio->link_status_wq.wk.work);
  770. destroy_workqueue(lio->link_status_wq.wq);
  771. }
  772. }
  773. /**
  774. * \brief Update link status
  775. * @param netdev network device
  776. * @param ls link status structure
  777. *
  778. * Called on receipt of a link status response from the core application to
  779. * update each interface's link status.
  780. */
  781. static inline void update_link_status(struct net_device *netdev,
  782. union oct_link_status *ls)
  783. {
  784. struct lio *lio = GET_LIO(netdev);
  785. int changed = (lio->linfo.link.u64 != ls->u64);
  786. lio->linfo.link.u64 = ls->u64;
  787. if ((lio->intf_open) && (changed)) {
  788. print_link_info(netdev);
  789. lio->link_changes++;
  790. if (lio->linfo.link.s.link_up) {
  791. netif_carrier_on(netdev);
  792. txqs_wake(netdev);
  793. } else {
  794. netif_carrier_off(netdev);
  795. stop_txq(netdev);
  796. }
  797. }
  798. }
  799. /* Runs in interrupt context. */
  800. static void update_txq_status(struct octeon_device *oct, int iq_num)
  801. {
  802. struct net_device *netdev;
  803. struct lio *lio;
  804. struct octeon_instr_queue *iq = oct->instr_queue[iq_num];
  805. netdev = oct->props[iq->ifidx].netdev;
  806. /* This is needed because the first IQ does not have
  807. * a netdev associated with it.
  808. */
  809. if (!netdev)
  810. return;
  811. lio = GET_LIO(netdev);
  812. if (netif_is_multiqueue(netdev)) {
  813. if (__netif_subqueue_stopped(netdev, iq->q_index) &&
  814. lio->linfo.link.s.link_up &&
  815. (!octnet_iq_is_full(oct, iq_num))) {
  816. INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num,
  817. tx_restart, 1);
  818. netif_wake_subqueue(netdev, iq->q_index);
  819. } else {
  820. if (!octnet_iq_is_full(oct, lio->txq)) {
  821. INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev,
  822. lio->txq,
  823. tx_restart, 1);
  824. wake_q(netdev, lio->txq);
  825. }
  826. }
  827. }
  828. }
  829. static
  830. int liquidio_schedule_msix_droq_pkt_handler(struct octeon_droq *droq, u64 ret)
  831. {
  832. struct octeon_device *oct = droq->oct_dev;
  833. struct octeon_device_priv *oct_priv =
  834. (struct octeon_device_priv *)oct->priv;
  835. if (droq->ops.poll_mode) {
  836. droq->ops.napi_fn(droq);
  837. } else {
  838. if (ret & MSIX_PO_INT) {
  839. tasklet_schedule(&oct_priv->droq_tasklet);
  840. return 1;
  841. }
  842. /* this will be flushed periodically by check iq db */
  843. if (ret & MSIX_PI_INT)
  844. return 0;
  845. }
  846. return 0;
  847. }
  848. /**
  849. * \brief Droq packet processor sceduler
  850. * @param oct octeon device
  851. */
  852. static void liquidio_schedule_droq_pkt_handlers(struct octeon_device *oct)
  853. {
  854. struct octeon_device_priv *oct_priv =
  855. (struct octeon_device_priv *)oct->priv;
  856. u64 oq_no;
  857. struct octeon_droq *droq;
  858. if (oct->int_status & OCT_DEV_INTR_PKT_DATA) {
  859. for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES(oct);
  860. oq_no++) {
  861. if (!(oct->droq_intr & BIT_ULL(oq_no)))
  862. continue;
  863. droq = oct->droq[oq_no];
  864. if (droq->ops.poll_mode) {
  865. droq->ops.napi_fn(droq);
  866. oct_priv->napi_mask |= (1 << oq_no);
  867. } else {
  868. tasklet_schedule(&oct_priv->droq_tasklet);
  869. }
  870. }
  871. }
  872. }
  873. static irqreturn_t
  874. liquidio_msix_intr_handler(int irq __attribute__((unused)), void *dev)
  875. {
  876. u64 ret;
  877. struct octeon_ioq_vector *ioq_vector = (struct octeon_ioq_vector *)dev;
  878. struct octeon_device *oct = ioq_vector->oct_dev;
  879. struct octeon_droq *droq = oct->droq[ioq_vector->droq_index];
  880. ret = oct->fn_list.msix_interrupt_handler(ioq_vector);
  881. if ((ret & MSIX_PO_INT) || (ret & MSIX_PI_INT))
  882. liquidio_schedule_msix_droq_pkt_handler(droq, ret);
  883. return IRQ_HANDLED;
  884. }
  885. /**
  886. * \brief Interrupt handler for octeon
  887. * @param irq unused
  888. * @param dev octeon device
  889. */
  890. static
  891. irqreturn_t liquidio_legacy_intr_handler(int irq __attribute__((unused)),
  892. void *dev)
  893. {
  894. struct octeon_device *oct = (struct octeon_device *)dev;
  895. irqreturn_t ret;
  896. /* Disable our interrupts for the duration of ISR */
  897. oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
  898. ret = oct->fn_list.process_interrupt_regs(oct);
  899. if (ret == IRQ_HANDLED)
  900. liquidio_schedule_droq_pkt_handlers(oct);
  901. /* Re-enable our interrupts */
  902. if (!(atomic_read(&oct->status) == OCT_DEV_IN_RESET))
  903. oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);
  904. return ret;
  905. }
  906. /**
  907. * \brief Setup interrupt for octeon device
  908. * @param oct octeon device
  909. *
  910. * Enable interrupt in Octeon device as given in the PCI interrupt mask.
  911. */
  912. static int octeon_setup_interrupt(struct octeon_device *oct)
  913. {
  914. int irqret, err;
  915. struct msix_entry *msix_entries;
  916. int i;
  917. int num_ioq_vectors;
  918. int num_alloc_ioq_vectors;
  919. if (OCTEON_CN23XX_PF(oct) && oct->msix_on) {
  920. oct->num_msix_irqs = oct->sriov_info.num_pf_rings;
  921. /* one non ioq interrupt for handling sli_mac_pf_int_sum */
  922. oct->num_msix_irqs += 1;
  923. oct->msix_entries = kcalloc(
  924. oct->num_msix_irqs, sizeof(struct msix_entry), GFP_KERNEL);
  925. if (!oct->msix_entries)
  926. return 1;
  927. msix_entries = (struct msix_entry *)oct->msix_entries;
  928. /*Assumption is that pf msix vectors start from pf srn to pf to
  929. * trs and not from 0. if not change this code
  930. */
  931. for (i = 0; i < oct->num_msix_irqs - 1; i++)
  932. msix_entries[i].entry = oct->sriov_info.pf_srn + i;
  933. msix_entries[oct->num_msix_irqs - 1].entry =
  934. oct->sriov_info.trs;
  935. num_alloc_ioq_vectors = pci_enable_msix_range(
  936. oct->pci_dev, msix_entries,
  937. oct->num_msix_irqs,
  938. oct->num_msix_irqs);
  939. if (num_alloc_ioq_vectors < 0) {
  940. dev_err(&oct->pci_dev->dev, "unable to Allocate MSI-X interrupts\n");
  941. kfree(oct->msix_entries);
  942. oct->msix_entries = NULL;
  943. return 1;
  944. }
  945. dev_dbg(&oct->pci_dev->dev, "OCTEON: Enough MSI-X interrupts are allocated...\n");
  946. num_ioq_vectors = oct->num_msix_irqs;
  947. /** For PF, there is one non-ioq interrupt handler */
  948. num_ioq_vectors -= 1;
  949. irqret = request_irq(msix_entries[num_ioq_vectors].vector,
  950. liquidio_legacy_intr_handler, 0, "octeon",
  951. oct);
  952. if (irqret) {
  953. dev_err(&oct->pci_dev->dev,
  954. "OCTEON: Request_irq failed for MSIX interrupt Error: %d\n",
  955. irqret);
  956. pci_disable_msix(oct->pci_dev);
  957. kfree(oct->msix_entries);
  958. oct->msix_entries = NULL;
  959. return 1;
  960. }
  961. for (i = 0; i < num_ioq_vectors; i++) {
  962. irqret = request_irq(msix_entries[i].vector,
  963. liquidio_msix_intr_handler, 0,
  964. "octeon", &oct->ioq_vector[i]);
  965. if (irqret) {
  966. dev_err(&oct->pci_dev->dev,
  967. "OCTEON: Request_irq failed for MSIX interrupt Error: %d\n",
  968. irqret);
  969. /** Freeing the non-ioq irq vector here . */
  970. free_irq(msix_entries[num_ioq_vectors].vector,
  971. oct);
  972. while (i) {
  973. i--;
  974. /** clearing affinity mask. */
  975. irq_set_affinity_hint(
  976. msix_entries[i].vector, NULL);
  977. free_irq(msix_entries[i].vector,
  978. &oct->ioq_vector[i]);
  979. }
  980. pci_disable_msix(oct->pci_dev);
  981. kfree(oct->msix_entries);
  982. oct->msix_entries = NULL;
  983. return 1;
  984. }
  985. oct->ioq_vector[i].vector = msix_entries[i].vector;
  986. /* assign the cpu mask for this msix interrupt vector */
  987. irq_set_affinity_hint(
  988. msix_entries[i].vector,
  989. (&oct->ioq_vector[i].affinity_mask));
  990. }
  991. dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: MSI-X enabled\n",
  992. oct->octeon_id);
  993. } else {
  994. err = pci_enable_msi(oct->pci_dev);
  995. if (err)
  996. dev_warn(&oct->pci_dev->dev, "Reverting to legacy interrupts. Error: %d\n",
  997. err);
  998. else
  999. oct->flags |= LIO_FLAG_MSI_ENABLED;
  1000. irqret = request_irq(oct->pci_dev->irq,
  1001. liquidio_legacy_intr_handler, IRQF_SHARED,
  1002. "octeon", oct);
  1003. if (irqret) {
  1004. if (oct->flags & LIO_FLAG_MSI_ENABLED)
  1005. pci_disable_msi(oct->pci_dev);
  1006. dev_err(&oct->pci_dev->dev, "Request IRQ failed with code: %d\n",
  1007. irqret);
  1008. return 1;
  1009. }
  1010. }
  1011. return 0;
  1012. }
  1013. static int liquidio_watchdog(void *param)
  1014. {
  1015. u64 wdog;
  1016. u16 mask_of_stuck_cores = 0;
  1017. u16 mask_of_crashed_cores = 0;
  1018. int core_num;
  1019. u8 core_is_stuck[LIO_MAX_CORES];
  1020. u8 core_crashed[LIO_MAX_CORES];
  1021. struct octeon_device *oct = param;
  1022. memset(core_is_stuck, 0, sizeof(core_is_stuck));
  1023. memset(core_crashed, 0, sizeof(core_crashed));
  1024. while (!kthread_should_stop()) {
  1025. mask_of_crashed_cores =
  1026. (u16)octeon_read_csr64(oct, CN23XX_SLI_SCRATCH2);
  1027. for (core_num = 0; core_num < LIO_MAX_CORES; core_num++) {
  1028. if (!core_is_stuck[core_num]) {
  1029. wdog = lio_pci_readq(oct, CIU3_WDOG(core_num));
  1030. /* look at watchdog state field */
  1031. wdog &= CIU3_WDOG_MASK;
  1032. if (wdog) {
  1033. /* this watchdog timer has expired */
  1034. core_is_stuck[core_num] =
  1035. LIO_MONITOR_WDOG_EXPIRE;
  1036. mask_of_stuck_cores |= (1 << core_num);
  1037. }
  1038. }
  1039. if (!core_crashed[core_num])
  1040. core_crashed[core_num] =
  1041. (mask_of_crashed_cores >> core_num) & 1;
  1042. }
  1043. if (mask_of_stuck_cores) {
  1044. for (core_num = 0; core_num < LIO_MAX_CORES;
  1045. core_num++) {
  1046. if (core_is_stuck[core_num] == 1) {
  1047. dev_err(&oct->pci_dev->dev,
  1048. "ERROR: Octeon core %d is stuck!\n",
  1049. core_num);
  1050. /* 2 means we have printk'd an error
  1051. * so no need to repeat the same printk
  1052. */
  1053. core_is_stuck[core_num] =
  1054. LIO_MONITOR_CORE_STUCK_MSGD;
  1055. }
  1056. }
  1057. }
  1058. if (mask_of_crashed_cores) {
  1059. for (core_num = 0; core_num < LIO_MAX_CORES;
  1060. core_num++) {
  1061. if (core_crashed[core_num] == 1) {
  1062. dev_err(&oct->pci_dev->dev,
  1063. "ERROR: Octeon core %d crashed! See oct-fwdump for details.\n",
  1064. core_num);
  1065. /* 2 means we have printk'd an error
  1066. * so no need to repeat the same printk
  1067. */
  1068. core_crashed[core_num] =
  1069. LIO_MONITOR_CORE_STUCK_MSGD;
  1070. }
  1071. }
  1072. }
  1073. #ifdef CONFIG_MODULE_UNLOAD
  1074. if (mask_of_stuck_cores || mask_of_crashed_cores) {
  1075. /* make module refcount=0 so that rmmod will work */
  1076. long refcount;
  1077. refcount = module_refcount(THIS_MODULE);
  1078. while (refcount > 0) {
  1079. module_put(THIS_MODULE);
  1080. refcount = module_refcount(THIS_MODULE);
  1081. }
  1082. /* compensate for and withstand an unlikely (but still
  1083. * possible) race condition
  1084. */
  1085. while (refcount < 0) {
  1086. try_module_get(THIS_MODULE);
  1087. refcount = module_refcount(THIS_MODULE);
  1088. }
  1089. }
  1090. #endif
  1091. /* sleep for two seconds */
  1092. set_current_state(TASK_INTERRUPTIBLE);
  1093. schedule_timeout(2 * HZ);
  1094. }
  1095. return 0;
  1096. }
  1097. /**
  1098. * \brief PCI probe handler
  1099. * @param pdev PCI device structure
  1100. * @param ent unused
  1101. */
  1102. static int
  1103. liquidio_probe(struct pci_dev *pdev,
  1104. const struct pci_device_id *ent __attribute__((unused)))
  1105. {
  1106. struct octeon_device *oct_dev = NULL;
  1107. struct handshake *hs;
  1108. oct_dev = octeon_allocate_device(pdev->device,
  1109. sizeof(struct octeon_device_priv));
  1110. if (!oct_dev) {
  1111. dev_err(&pdev->dev, "Unable to allocate device\n");
  1112. return -ENOMEM;
  1113. }
  1114. if (pdev->device == OCTEON_CN23XX_PF_VID)
  1115. oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED;
  1116. dev_info(&pdev->dev, "Initializing device %x:%x.\n",
  1117. (u32)pdev->vendor, (u32)pdev->device);
  1118. /* Assign octeon_device for this device to the private data area. */
  1119. pci_set_drvdata(pdev, oct_dev);
  1120. /* set linux specific device pointer */
  1121. oct_dev->pci_dev = (void *)pdev;
  1122. hs = &handshake[oct_dev->octeon_id];
  1123. init_completion(&hs->init);
  1124. init_completion(&hs->started);
  1125. hs->pci_dev = pdev;
  1126. if (oct_dev->octeon_id == 0)
  1127. /* first LiquidIO NIC is detected */
  1128. complete(&first_stage);
  1129. if (octeon_device_init(oct_dev)) {
  1130. complete(&hs->init);
  1131. liquidio_remove(pdev);
  1132. return -ENOMEM;
  1133. }
  1134. if (OCTEON_CN23XX_PF(oct_dev)) {
  1135. u64 scratch1;
  1136. u8 bus, device, function;
  1137. scratch1 = octeon_read_csr64(oct_dev, CN23XX_SLI_SCRATCH1);
  1138. if (!(scratch1 & 4ULL)) {
  1139. /* Bit 2 of SLI_SCRATCH_1 is a flag that indicates that
  1140. * the lio watchdog kernel thread is running for this
  1141. * NIC. Each NIC gets one watchdog kernel thread.
  1142. */
  1143. scratch1 |= 4ULL;
  1144. octeon_write_csr64(oct_dev, CN23XX_SLI_SCRATCH1,
  1145. scratch1);
  1146. bus = pdev->bus->number;
  1147. device = PCI_SLOT(pdev->devfn);
  1148. function = PCI_FUNC(pdev->devfn);
  1149. oct_dev->watchdog_task = kthread_create(
  1150. liquidio_watchdog, oct_dev,
  1151. "liowd/%02hhx:%02hhx.%hhx", bus, device, function);
  1152. if (!IS_ERR(oct_dev->watchdog_task)) {
  1153. wake_up_process(oct_dev->watchdog_task);
  1154. } else {
  1155. oct_dev->watchdog_task = NULL;
  1156. dev_err(&oct_dev->pci_dev->dev,
  1157. "failed to create kernel_thread\n");
  1158. liquidio_remove(pdev);
  1159. return -1;
  1160. }
  1161. }
  1162. }
  1163. oct_dev->rx_pause = 1;
  1164. oct_dev->tx_pause = 1;
  1165. dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n");
  1166. return 0;
  1167. }
  1168. /**
1169. * \brief Destroy resources associated with octeon device
1170. * @param oct octeon device
  1172. */
  1173. static void octeon_destroy_resources(struct octeon_device *oct)
  1174. {
  1175. int i;
  1176. struct msix_entry *msix_entries;
  1177. struct octeon_device_priv *oct_priv =
  1178. (struct octeon_device_priv *)oct->priv;
  1179. struct handshake *hs;
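/* Tear-down mirrors the init sequence: each case undoes one step and then
 * falls through to the next, so cleanup starts from whatever state the
 * device actually reached.
 */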
  1180. switch (atomic_read(&oct->status)) {
  1181. case OCT_DEV_RUNNING:
  1182. case OCT_DEV_CORE_OK:
  1183. /* No more instructions will be forwarded. */
  1184. atomic_set(&oct->status, OCT_DEV_IN_RESET);
  1185. oct->app_mode = CVM_DRV_INVALID_APP;
  1186. dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
  1187. lio_get_state_string(&oct->status));
  1188. schedule_timeout_uninterruptible(HZ / 10);
  1189. /* fallthrough */
  1190. case OCT_DEV_HOST_OK:
  1191. /* fallthrough */
  1192. case OCT_DEV_CONSOLE_INIT_DONE:
  1193. /* Remove any consoles */
  1194. octeon_remove_consoles(oct);
  1195. /* fallthrough */
  1196. case OCT_DEV_IO_QUEUES_DONE:
  1197. if (wait_for_pending_requests(oct))
  1198. dev_err(&oct->pci_dev->dev, "There were pending requests\n");
  1199. if (lio_wait_for_instr_fetch(oct))
  1200. dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");
  1201. /* Disable the input and output queues now. No more packets will
  1202. * arrive from Octeon, but we should wait for all packet
  1203. * processing to finish.
  1204. */
  1205. oct->fn_list.disable_io_queues(oct);
  1206. if (lio_wait_for_oq_pkts(oct))
  1207. dev_err(&oct->pci_dev->dev, "OQ had pending packets\n");
  1208. /* fallthrough */
  1209. case OCT_DEV_INTR_SET_DONE:
  1210. /* Disable interrupts */
  1211. oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
  1212. if (oct->msix_on) {
  1213. msix_entries = (struct msix_entry *)oct->msix_entries;
  1214. for (i = 0; i < oct->num_msix_irqs - 1; i++) {
  1215. /* clear the affinity_cpumask */
  1216. irq_set_affinity_hint(msix_entries[i].vector,
  1217. NULL);
  1218. free_irq(msix_entries[i].vector,
  1219. &oct->ioq_vector[i]);
  1220. }
  1221. /* non-iov vector's argument is oct struct */
  1222. free_irq(msix_entries[i].vector, oct);
  1223. pci_disable_msix(oct->pci_dev);
  1224. kfree(oct->msix_entries);
  1225. oct->msix_entries = NULL;
  1226. } else {
  1227. /* Release the interrupt line */
  1228. free_irq(oct->pci_dev->irq, oct);
  1229. if (oct->flags & LIO_FLAG_MSI_ENABLED)
  1230. pci_disable_msi(oct->pci_dev);
  1231. }
  1232. /* fallthrough */
  1233. case OCT_DEV_MSIX_ALLOC_VECTOR_DONE:
  1234. if (OCTEON_CN23XX_PF(oct))
  1235. octeon_free_ioq_vector(oct);
  1236. /* fallthrough */
  1237. case OCT_DEV_MBOX_SETUP_DONE:
  1238. if (OCTEON_CN23XX_PF(oct))
  1239. oct->fn_list.free_mbox(oct);
  1240. /* fallthrough */
  1241. case OCT_DEV_IN_RESET:
  1242. case OCT_DEV_DROQ_INIT_DONE:
  1243. /* Wait for any pending operations */
  1244. mdelay(100);
  1245. for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
  1246. if (!(oct->io_qmask.oq & BIT_ULL(i)))
  1247. continue;
  1248. octeon_delete_droq(oct, i);
  1249. }
  1250. /* Force any pending handshakes to complete */
  1251. for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
  1252. hs = &handshake[i];
  1253. if (hs->pci_dev) {
  1254. handshake[oct->octeon_id].init_ok = 0;
  1255. complete(&handshake[oct->octeon_id].init);
  1256. handshake[oct->octeon_id].started_ok = 0;
  1257. complete(&handshake[oct->octeon_id].started);
  1258. }
  1259. }
  1260. /* fallthrough */
  1261. case OCT_DEV_RESP_LIST_INIT_DONE:
  1262. octeon_delete_response_list(oct);
  1263. /* fallthrough */
  1264. case OCT_DEV_INSTR_QUEUE_INIT_DONE:
  1265. for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
  1266. if (!(oct->io_qmask.iq & BIT_ULL(i)))
  1267. continue;
  1268. octeon_delete_instr_queue(oct, i);
  1269. }
  1270. #ifdef CONFIG_PCI_IOV
  1271. if (oct->sriov_info.sriov_enabled)
  1272. pci_disable_sriov(oct->pci_dev);
  1273. #endif
  1274. /* fallthrough */
  1275. case OCT_DEV_SC_BUFF_POOL_INIT_DONE:
  1276. octeon_free_sc_buffer_pool(oct);
  1277. /* fallthrough */
  1278. case OCT_DEV_DISPATCH_INIT_DONE:
  1279. octeon_delete_dispatch_list(oct);
  1280. cancel_delayed_work_sync(&oct->nic_poll_work.work);
  1281. /* fallthrough */
  1282. case OCT_DEV_PCI_MAP_DONE:
  1283. /* Soft reset the octeon device before exiting */
  1284. if ((!OCTEON_CN23XX_PF(oct)) || !oct->octeon_id)
  1285. oct->fn_list.soft_reset(oct);
  1286. octeon_unmap_pci_barx(oct, 0);
  1287. octeon_unmap_pci_barx(oct, 1);
  1288. /* fallthrough */
  1289. case OCT_DEV_PCI_ENABLE_DONE:
  1290. pci_clear_master(oct->pci_dev);
  1291. /* Disable the device, releasing the PCI INT */
  1292. pci_disable_device(oct->pci_dev);
  1293. /* fallthrough */
  1294. case OCT_DEV_BEGIN_STATE:
  1295. /* Nothing to be done here either */
  1296. break;
  1297. } /* end switch (oct->status) */
  1298. tasklet_kill(&oct_priv->droq_tasklet);
  1299. }
  1300. /**
  1301. * \brief Callback for rx ctrl
  1302. * @param status status of request
  1303. * @param buf pointer to resp structure
  1304. */
  1305. static void rx_ctl_callback(struct octeon_device *oct,
  1306. u32 status,
  1307. void *buf)
  1308. {
  1309. struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
  1310. struct liquidio_rx_ctl_context *ctx;
  1311. ctx = (struct liquidio_rx_ctl_context *)sc->ctxptr;
  1312. oct = lio_get_device(ctx->octeon_id);
  1313. if (status)
  1314. dev_err(&oct->pci_dev->dev, "rx ctl instruction failed. Status: %llx\n",
  1315. CVM_CAST64(status));
  1316. WRITE_ONCE(ctx->cond, 1);
  1317. /* This barrier is required to be sure that the response has been
  1318. * written fully before waking up the handler
  1319. */
  1320. wmb();
  1321. wake_up_interruptible(&ctx->wc);
  1322. }
  1323. /**
  1324. * \brief Send Rx control command
  1325. * @param lio per-network private data
  1326. * @param start_stop whether to start or stop
  1327. */
  1328. static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
  1329. {
  1330. struct octeon_soft_command *sc;
  1331. struct liquidio_rx_ctl_context *ctx;
  1332. union octnet_cmd *ncmd;
  1333. int ctx_size = sizeof(struct liquidio_rx_ctl_context);
  1334. struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
  1335. int retval;
  1336. if (oct->props[lio->ifidx].rx_on == start_stop)
  1337. return;
  1338. sc = (struct octeon_soft_command *)
  1339. octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
  1340. 16, ctx_size);
  1341. ncmd = (union octnet_cmd *)sc->virtdptr;
  1342. ctx = (struct liquidio_rx_ctl_context *)sc->ctxptr;
  1343. WRITE_ONCE(ctx->cond, 0);
  1344. ctx->octeon_id = lio_get_device_id(oct);
  1345. init_waitqueue_head(&ctx->wc);
  1346. ncmd->u64 = 0;
  1347. ncmd->s.cmd = OCTNET_CMD_RX_CTL;
  1348. ncmd->s.param1 = start_stop;
  1349. octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));
  1350. sc->iq_no = lio->linfo.txpciq[0].s.q_no;
  1351. octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
  1352. OPCODE_NIC_CMD, 0, 0, 0);
  1353. sc->callback = rx_ctl_callback;
  1354. sc->callback_arg = sc;
  1355. sc->wait_time = 5000;
  1356. retval = octeon_send_soft_command(oct, sc);
  1357. if (retval == IQ_SEND_FAILED) {
  1358. netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n");
  1359. } else {
  1360. /* Sleep on a wait queue till the cond flag indicates that the
  1361. * response arrived or timed-out.
  1362. */
  1363. if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR)
  1364. return;
  1365. oct->props[lio->ifidx].rx_on = start_stop;
  1366. }
  1367. octeon_free_soft_command(oct, sc);
  1368. }
  1369. /**
  1370. * \brief Destroy NIC device interface
  1371. * @param oct octeon device
  1372. * @param ifidx which interface to destroy
  1373. *
  1374. * Cleanup associated with each interface for an Octeon device when NIC
  1375. * module is being unloaded or if initialization fails during load.
  1376. */
  1377. static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
  1378. {
  1379. struct net_device *netdev = oct->props[ifidx].netdev;
  1380. struct lio *lio;
  1381. struct napi_struct *napi, *n;
  1382. if (!netdev) {
  1383. dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n",
  1384. __func__, ifidx);
  1385. return;
  1386. }
  1387. lio = GET_LIO(netdev);
  1388. dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n");
  1389. if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING)
  1390. liquidio_stop(netdev);
  1391. if (oct->props[lio->ifidx].napi_enabled == 1) {
  1392. list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
  1393. napi_disable(napi);
  1394. oct->props[lio->ifidx].napi_enabled = 0;
  1395. if (OCTEON_CN23XX_PF(oct))
  1396. oct->droq[0]->ops.poll_mode = 0;
  1397. }
  1398. if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
  1399. unregister_netdev(netdev);
  1400. cleanup_link_status_change_wq(netdev);
  1401. delete_glists(lio);
  1402. free_netdev(netdev);
  1403. oct->props[ifidx].gmxport = -1;
  1404. oct->props[ifidx].netdev = NULL;
  1405. }
  1406. /**
  1407. * \brief Stop complete NIC functionality
  1408. * @param oct octeon device
  1409. */
  1410. static int liquidio_stop_nic_module(struct octeon_device *oct)
  1411. {
  1412. int i, j;
  1413. struct lio *lio;
  1414. dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n");
  1415. if (!oct->ifcount) {
  1416. dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n");
  1417. return 1;
  1418. }
  1419. spin_lock_bh(&oct->cmd_resp_wqlock);
  1420. oct->cmd_resp_state = OCT_DRV_OFFLINE;
  1421. spin_unlock_bh(&oct->cmd_resp_wqlock);
  1422. for (i = 0; i < oct->ifcount; i++) {
  1423. lio = GET_LIO(oct->props[i].netdev);
  1424. for (j = 0; j < lio->linfo.num_rxpciq; j++)
  1425. octeon_unregister_droq_ops(oct,
  1426. lio->linfo.rxpciq[j].s.q_no);
  1427. }
  1428. for (i = 0; i < oct->ifcount; i++)
  1429. liquidio_destroy_nic_device(oct, i);
  1430. dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n");
  1431. return 0;
  1432. }
  1433. /**
  1434. * \brief Cleans up resources at unload time
  1435. * @param pdev PCI device structure
  1436. */
  1437. static void liquidio_remove(struct pci_dev *pdev)
  1438. {
  1439. struct octeon_device *oct_dev = pci_get_drvdata(pdev);
  1440. dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n");
  1441. if (oct_dev->watchdog_task)
  1442. kthread_stop(oct_dev->watchdog_task);
  1443. if (oct_dev->app_mode && (oct_dev->app_mode == CVM_DRV_NIC_APP))
  1444. liquidio_stop_nic_module(oct_dev);
  1445. /* Reset the octeon device and cleanup all memory allocated for
  1446. * the octeon device by driver.
  1447. */
  1448. octeon_destroy_resources(oct_dev);
  1449. dev_info(&oct_dev->pci_dev->dev, "Device removed\n");
  1450. /* This octeon device has been removed. Update the global
  1451. * data structure to reflect this. Free the device structure.
  1452. */
  1453. octeon_free_device_mem(oct_dev);
  1454. }
  1455. /**
1456. * \brief Identify the Octeon device and map the BAR address space
  1457. * @param oct octeon device
  1458. */
  1459. static int octeon_chip_specific_setup(struct octeon_device *oct)
  1460. {
  1461. u32 dev_id, rev_id;
  1462. int ret = 1;
  1463. char *s;
  1464. pci_read_config_dword(oct->pci_dev, 0, &dev_id);
  1465. pci_read_config_dword(oct->pci_dev, 8, &rev_id);
  1466. oct->rev_id = rev_id & 0xff;
  1467. switch (dev_id) {
  1468. case OCTEON_CN68XX_PCIID:
  1469. oct->chip_id = OCTEON_CN68XX;
  1470. ret = lio_setup_cn68xx_octeon_device(oct);
  1471. s = "CN68XX";
  1472. break;
  1473. case OCTEON_CN66XX_PCIID:
  1474. oct->chip_id = OCTEON_CN66XX;
  1475. ret = lio_setup_cn66xx_octeon_device(oct);
  1476. s = "CN66XX";
  1477. break;
  1478. case OCTEON_CN23XX_PCIID_PF:
  1479. oct->chip_id = OCTEON_CN23XX_PF_VID;
  1480. ret = setup_cn23xx_octeon_pf_device(oct);
  1481. s = "CN23XX";
  1482. break;
  1483. default:
  1484. s = "?";
  1485. dev_err(&oct->pci_dev->dev, "Unknown device found (dev_id: %x)\n",
  1486. dev_id);
  1487. }
  1488. if (!ret)
  1489. dev_info(&oct->pci_dev->dev, "%s PASS%d.%d %s Version: %s\n", s,
  1490. OCTEON_MAJOR_REV(oct),
  1491. OCTEON_MINOR_REV(oct),
  1492. octeon_get_conf(oct)->card_name,
  1493. LIQUIDIO_VERSION);
  1494. return ret;
  1495. }
  1496. /**
  1497. * \brief PCI initialization for each Octeon device.
  1498. * @param oct octeon device
  1499. */
  1500. static int octeon_pci_os_setup(struct octeon_device *oct)
  1501. {
  1502. /* setup PCI stuff first */
  1503. if (pci_enable_device(oct->pci_dev)) {
  1504. dev_err(&oct->pci_dev->dev, "pci_enable_device failed\n");
  1505. return 1;
  1506. }
  1507. if (dma_set_mask_and_coherent(&oct->pci_dev->dev, DMA_BIT_MASK(64))) {
  1508. dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n");
  1509. pci_disable_device(oct->pci_dev);
  1510. return 1;
  1511. }
  1512. /* Enable PCI DMA Master. */
  1513. pci_set_master(oct->pci_dev);
  1514. return 0;
  1515. }
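/** \brief Map an skb's queue mapping to the corresponding input queue index */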
  1516. static inline int skb_iq(struct lio *lio, struct sk_buff *skb)
  1517. {
  1518. int q = 0;
  1519. if (netif_is_multiqueue(lio->netdev))
  1520. q = skb->queue_mapping % lio->linfo.num_txpciq;
  1521. return q;
  1522. }
  1523. /**
  1524. * \brief Check Tx queue state for a given network buffer
  1525. * @param lio per-network private data
  1526. * @param skb network buffer
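* @returns 1 if the queue can accept more packets, 0 if it is still full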
  1527. */
  1528. static inline int check_txq_state(struct lio *lio, struct sk_buff *skb)
  1529. {
  1530. int q = 0, iq = 0;
  1531. if (netif_is_multiqueue(lio->netdev)) {
  1532. q = skb->queue_mapping;
  1533. iq = lio->linfo.txpciq[(q % (lio->linfo.num_txpciq))].s.q_no;
  1534. } else {
  1535. iq = lio->txq;
  1536. q = iq;
  1537. }
  1538. if (octnet_iq_is_full(lio->oct_dev, iq))
  1539. return 0;
  1540. if (__netif_subqueue_stopped(lio->netdev, q)) {
  1541. INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq, tx_restart, 1);
  1542. wake_q(lio->netdev, q);
  1543. }
  1544. return 1;
  1545. }
  1546. /**
  1547. * \brief Unmap and free network buffer
  1548. * @param buf buffer
  1549. */
  1550. static void free_netbuf(void *buf)
  1551. {
  1552. struct sk_buff *skb;
  1553. struct octnet_buf_free_info *finfo;
  1554. struct lio *lio;
  1555. finfo = (struct octnet_buf_free_info *)buf;
  1556. skb = finfo->skb;
  1557. lio = finfo->lio;
  1558. dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len,
  1559. DMA_TO_DEVICE);
  1560. check_txq_state(lio, skb);
  1561. tx_buffer_free(skb);
  1562. }
  1563. /**
  1564. * \brief Unmap and free gather buffer
  1565. * @param buf buffer
  1566. */
  1567. static void free_netsgbuf(void *buf)
  1568. {
  1569. struct octnet_buf_free_info *finfo;
  1570. struct sk_buff *skb;
  1571. struct lio *lio;
  1572. struct octnic_gather *g;
  1573. int i, frags, iq;
  1574. finfo = (struct octnet_buf_free_info *)buf;
  1575. skb = finfo->skb;
  1576. lio = finfo->lio;
  1577. g = finfo->g;
  1578. frags = skb_shinfo(skb)->nr_frags;
  1579. dma_unmap_single(&lio->oct_dev->pci_dev->dev,
  1580. g->sg[0].ptr[0], (skb->len - skb->data_len),
  1581. DMA_TO_DEVICE);
  1582. i = 1;
  1583. while (frags--) {
  1584. struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
  1585. pci_unmap_page((lio->oct_dev)->pci_dev,
  1586. g->sg[(i >> 2)].ptr[(i & 3)],
  1587. frag->size, DMA_TO_DEVICE);
  1588. i++;
  1589. }
  1590. dma_sync_single_for_cpu(&lio->oct_dev->pci_dev->dev,
  1591. g->sg_dma_ptr, g->sg_size, DMA_TO_DEVICE);
  1592. iq = skb_iq(lio, skb);
  1593. spin_lock(&lio->glist_lock[iq]);
  1594. list_add_tail(&g->list, &lio->glist[iq]);
  1595. spin_unlock(&lio->glist_lock[iq]);
  1596. check_txq_state(lio, skb); /* mq support: sub-queue state check */
  1597. tx_buffer_free(skb);
  1598. }
  1599. /**
  1600. * \brief Unmap and free gather buffer with response
  1601. * @param buf buffer
  1602. */
  1603. static void free_netsgbuf_with_resp(void *buf)
  1604. {
  1605. struct octeon_soft_command *sc;
  1606. struct octnet_buf_free_info *finfo;
  1607. struct sk_buff *skb;
  1608. struct lio *lio;
  1609. struct octnic_gather *g;
  1610. int i, frags, iq;
  1611. sc = (struct octeon_soft_command *)buf;
  1612. skb = (struct sk_buff *)sc->callback_arg;
  1613. finfo = (struct octnet_buf_free_info *)&skb->cb;
  1614. lio = finfo->lio;
  1615. g = finfo->g;
  1616. frags = skb_shinfo(skb)->nr_frags;
  1617. dma_unmap_single(&lio->oct_dev->pci_dev->dev,
  1618. g->sg[0].ptr[0], (skb->len - skb->data_len),
  1619. DMA_TO_DEVICE);
  1620. i = 1;
  1621. while (frags--) {
  1622. struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
  1623. pci_unmap_page((lio->oct_dev)->pci_dev,
  1624. g->sg[(i >> 2)].ptr[(i & 3)],
  1625. frag->size, DMA_TO_DEVICE);
  1626. i++;
  1627. }
  1628. dma_sync_single_for_cpu(&lio->oct_dev->pci_dev->dev,
  1629. g->sg_dma_ptr, g->sg_size, DMA_TO_DEVICE);
  1630. iq = skb_iq(lio, skb);
  1631. spin_lock(&lio->glist_lock[iq]);
  1632. list_add_tail(&g->list, &lio->glist[iq]);
  1633. spin_unlock(&lio->glist_lock[iq]);
  1634. /* Don't free the skb yet */
  1635. check_txq_state(lio, skb);
  1636. }
  1637. /**
  1638. * \brief Adjust ptp frequency
  1639. * @param ptp PTP clock info
  1640. * @param ppb how much to adjust by, in parts-per-billion
  1641. */
  1642. static int liquidio_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
  1643. {
  1644. struct lio *lio = container_of(ptp, struct lio, ptp_info);
  1645. struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
  1646. u64 comp, delta;
  1647. unsigned long flags;
  1648. bool neg_adj = false;
  1649. if (ppb < 0) {
  1650. neg_adj = true;
  1651. ppb = -ppb;
  1652. }
  1653. /* The hardware adds the clock compensation value to the
  1654. * PTP clock on every coprocessor clock cycle, so we
  1655. * compute the delta in terms of coprocessor clocks.
  1656. */
  1657. delta = (u64)ppb << 32;
  1658. do_div(delta, oct->coproc_clock_rate);
  1659. spin_lock_irqsave(&lio->ptp_lock, flags);
  1660. comp = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_COMP);
  1661. if (neg_adj)
  1662. comp -= delta;
  1663. else
  1664. comp += delta;
  1665. lio_pci_writeq(oct, comp, CN6XXX_MIO_PTP_CLOCK_COMP);
  1666. spin_unlock_irqrestore(&lio->ptp_lock, flags);
  1667. return 0;
  1668. }
  1669. /**
  1670. * \brief Adjust ptp time
  1671. * @param ptp PTP clock info
  1672. * @param delta how much to adjust by, in nanosecs
  1673. */
  1674. static int liquidio_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
  1675. {
  1676. unsigned long flags;
  1677. struct lio *lio = container_of(ptp, struct lio, ptp_info);
  1678. spin_lock_irqsave(&lio->ptp_lock, flags);
  1679. lio->ptp_adjust += delta;
  1680. spin_unlock_irqrestore(&lio->ptp_lock, flags);
  1681. return 0;
  1682. }
  1683. /**
  1684. * \brief Get hardware clock time, including any adjustment
  1685. * @param ptp PTP clock info
  1686. * @param ts timespec
  1687. */
  1688. static int liquidio_ptp_gettime(struct ptp_clock_info *ptp,
  1689. struct timespec64 *ts)
  1690. {
  1691. u64 ns;
  1692. unsigned long flags;
  1693. struct lio *lio = container_of(ptp, struct lio, ptp_info);
  1694. struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
  1695. spin_lock_irqsave(&lio->ptp_lock, flags);
  1696. ns = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_HI);
  1697. ns += lio->ptp_adjust;
  1698. spin_unlock_irqrestore(&lio->ptp_lock, flags);
  1699. *ts = ns_to_timespec64(ns);
  1700. return 0;
  1701. }
  1702. /**
  1703. * \brief Set hardware clock time. Reset adjustment
  1704. * @param ptp PTP clock info
  1705. * @param ts timespec
  1706. */
  1707. static int liquidio_ptp_settime(struct ptp_clock_info *ptp,
  1708. const struct timespec64 *ts)
  1709. {
  1710. u64 ns;
  1711. unsigned long flags;
  1712. struct lio *lio = container_of(ptp, struct lio, ptp_info);
  1713. struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
  1714. ns = timespec_to_ns(ts);
  1715. spin_lock_irqsave(&lio->ptp_lock, flags);
  1716. lio_pci_writeq(oct, ns, CN6XXX_MIO_PTP_CLOCK_HI);
  1717. lio->ptp_adjust = 0;
  1718. spin_unlock_irqrestore(&lio->ptp_lock, flags);
  1719. return 0;
  1720. }
  1721. /**
  1722. * \brief Check if PTP is enabled
  1723. * @param ptp PTP clock info
  1724. * @param rq request
  1725. * @param on is it on
  1726. */
  1727. static int
  1728. liquidio_ptp_enable(struct ptp_clock_info *ptp __attribute__((unused)),
  1729. struct ptp_clock_request *rq __attribute__((unused)),
  1730. int on __attribute__((unused)))
  1731. {
  1732. return -EOPNOTSUPP;
  1733. }
  1734. /**
  1735. * \brief Open PTP clock source
  1736. * @param netdev network device
  1737. */
  1738. static void oct_ptp_open(struct net_device *netdev)
  1739. {
  1740. struct lio *lio = GET_LIO(netdev);
  1741. struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
  1742. spin_lock_init(&lio->ptp_lock);
  1743. snprintf(lio->ptp_info.name, 16, "%s", netdev->name);
  1744. lio->ptp_info.owner = THIS_MODULE;
  1745. lio->ptp_info.max_adj = 250000000;
  1746. lio->ptp_info.n_alarm = 0;
  1747. lio->ptp_info.n_ext_ts = 0;
  1748. lio->ptp_info.n_per_out = 0;
  1749. lio->ptp_info.pps = 0;
  1750. lio->ptp_info.adjfreq = liquidio_ptp_adjfreq;
  1751. lio->ptp_info.adjtime = liquidio_ptp_adjtime;
  1752. lio->ptp_info.gettime64 = liquidio_ptp_gettime;
  1753. lio->ptp_info.settime64 = liquidio_ptp_settime;
  1754. lio->ptp_info.enable = liquidio_ptp_enable;
  1755. lio->ptp_adjust = 0;
  1756. lio->ptp_clock = ptp_clock_register(&lio->ptp_info,
  1757. &oct->pci_dev->dev);
  1758. if (IS_ERR(lio->ptp_clock))
  1759. lio->ptp_clock = NULL;
  1760. }
  1761. /**
  1762. * \brief Init PTP clock
  1763. * @param oct octeon device
  1764. */
  1765. static void liquidio_ptp_init(struct octeon_device *oct)
  1766. {
  1767. u64 clock_comp, cfg;
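/* CLOCK_COMP is a 32.32 fixed-point count of nanoseconds added to the PTP
 * clock on every coprocessor clock cycle, so programming it with
 * NSEC_PER_SEC / coproc_clock_rate makes the clock advance in real time.
 */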
  1768. clock_comp = (u64)NSEC_PER_SEC << 32;
  1769. do_div(clock_comp, oct->coproc_clock_rate);
  1770. lio_pci_writeq(oct, clock_comp, CN6XXX_MIO_PTP_CLOCK_COMP);
  1771. /* Enable */
  1772. cfg = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_CFG);
  1773. lio_pci_writeq(oct, cfg | 0x01, CN6XXX_MIO_PTP_CLOCK_CFG);
  1774. }
  1775. /**
  1776. * \brief Load firmware to device
  1777. * @param oct octeon device
  1778. *
  1779. * Maps device to firmware filename, requests firmware, and downloads it
  1780. */
  1781. static int load_firmware(struct octeon_device *oct)
  1782. {
  1783. int ret = 0;
  1784. const struct firmware *fw;
  1785. char fw_name[LIO_MAX_FW_FILENAME_LEN];
  1786. char *tmp_fw_type;
  1787. if (strncmp(fw_type, LIO_FW_NAME_TYPE_NONE,
  1788. sizeof(LIO_FW_NAME_TYPE_NONE)) == 0) {
  1789. dev_info(&oct->pci_dev->dev, "Skipping firmware load\n");
  1790. return ret;
  1791. }
  1792. if (fw_type[0] == '\0')
  1793. tmp_fw_type = LIO_FW_NAME_TYPE_NIC;
  1794. else
  1795. tmp_fw_type = fw_type;
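/* The firmware file name is <dir><base><card>_<type><suffix>; for a CN23XX
 * NIC this would be something like liquidio/lio_23xx_nic.bin (illustrative
 * name, assuming the usual LIO_FW_* definitions).
 */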
  1796. sprintf(fw_name, "%s%s%s_%s%s", LIO_FW_DIR, LIO_FW_BASE_NAME,
  1797. octeon_get_conf(oct)->card_name, tmp_fw_type,
  1798. LIO_FW_NAME_SUFFIX);
  1799. ret = request_firmware(&fw, fw_name, &oct->pci_dev->dev);
  1800. if (ret) {
1801. dev_err(&oct->pci_dev->dev, "Request firmware failed. Could not find file %s.\n",
  1802. fw_name);
  1803. release_firmware(fw);
  1804. return ret;
  1805. }
  1806. ret = octeon_download_firmware(oct, fw->data, fw->size);
  1807. release_firmware(fw);
  1808. return ret;
  1809. }
  1810. /**
  1811. * \brief Setup output queue
  1812. * @param oct octeon device
  1813. * @param q_no which queue
  1814. * @param num_descs how many descriptors
  1815. * @param desc_size size of each descriptor
  1816. * @param app_ctx application context
  1817. */
  1818. static int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs,
  1819. int desc_size, void *app_ctx)
  1820. {
  1821. int ret_val = 0;
  1822. dev_dbg(&oct->pci_dev->dev, "Creating Droq: %d\n", q_no);
  1823. /* droq creation and local register settings. */
  1824. ret_val = octeon_create_droq(oct, q_no, num_descs, desc_size, app_ctx);
  1825. if (ret_val < 0)
  1826. return ret_val;
  1827. if (ret_val == 1) {
  1828. dev_dbg(&oct->pci_dev->dev, "Using default droq %d\n", q_no);
  1829. return 0;
  1830. }
  1831. /* tasklet creation for the droq */
  1832. /* Enable the droq queues */
  1833. octeon_set_droq_pkt_op(oct, q_no, 1);
  1834. /* Send Credit for Octeon Output queues. Credits are always
  1835. * sent after the output queue is enabled.
  1836. */
  1837. writel(oct->droq[q_no]->max_count,
  1838. oct->droq[q_no]->pkts_credit_reg);
  1839. return ret_val;
  1840. }
  1841. /**
  1842. * \brief Callback for getting interface configuration
  1843. * @param status status of request
  1844. * @param buf pointer to resp structure
  1845. */
  1846. static void if_cfg_callback(struct octeon_device *oct,
  1847. u32 status __attribute__((unused)),
  1848. void *buf)
  1849. {
  1850. struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
  1851. struct liquidio_if_cfg_resp *resp;
  1852. struct liquidio_if_cfg_context *ctx;
  1853. resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
  1854. ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;
  1855. oct = lio_get_device(ctx->octeon_id);
  1856. if (resp->status)
  1857. dev_err(&oct->pci_dev->dev, "nic if cfg instruction failed. Status: %llx\n",
  1858. CVM_CAST64(resp->status));
  1859. WRITE_ONCE(ctx->cond, 1);
  1860. snprintf(oct->fw_info.liquidio_firmware_version, 32, "%s",
  1861. resp->cfg_info.liquidio_firmware_version);
  1862. /* This barrier is required to be sure that the response has been
  1863. * written fully before waking up the handler
  1864. */
  1865. wmb();
  1866. wake_up_interruptible(&ctx->wc);
  1867. }
  1868. /**
  1869. * \brief Select queue based on hash
  1870. * @param dev Net device
  1871. * @param skb sk_buff structure
  1872. * @returns selected queue number
  1873. */
  1874. static u16 select_q(struct net_device *dev, struct sk_buff *skb,
  1875. void *accel_priv __attribute__((unused)),
  1876. select_queue_fallback_t fallback __attribute__((unused)))
  1877. {
  1878. u32 qindex = 0;
  1879. struct lio *lio;
  1880. lio = GET_LIO(dev);
  1881. qindex = skb_tx_hash(dev, skb);
  1882. return (u16)(qindex % (lio->linfo.num_txpciq));
  1883. }
1884. /** Routine to push packets arriving on Octeon interface up to network layer.
1885. * @param octeon_id - octeon device id.
  1886. * @param skbuff - skbuff struct to be passed to network layer.
  1887. * @param len - size of total data received.
  1888. * @param rh - Control header associated with the packet
  1889. * @param param - additional control data with the packet
  1890. * @param arg - farg registered in droq_ops
  1891. */
  1892. static void
  1893. liquidio_push_packet(u32 octeon_id __attribute__((unused)),
  1894. void *skbuff,
  1895. u32 len,
  1896. union octeon_rh *rh,
  1897. void *param,
  1898. void *arg)
  1899. {
  1900. struct napi_struct *napi = param;
  1901. struct sk_buff *skb = (struct sk_buff *)skbuff;
  1902. struct skb_shared_hwtstamps *shhwtstamps;
  1903. u64 ns;
  1904. u16 vtag = 0;
  1905. struct net_device *netdev = (struct net_device *)arg;
  1906. struct octeon_droq *droq = container_of(param, struct octeon_droq,
  1907. napi);
  1908. if (netdev) {
  1909. int packet_was_received;
  1910. struct lio *lio = GET_LIO(netdev);
  1911. struct octeon_device *oct = lio->oct_dev;
  1912. /* Do not proceed if the interface is not in RUNNING state. */
  1913. if (!ifstate_check(lio, LIO_IFSTATE_RUNNING)) {
  1914. recv_buffer_free(skb);
  1915. droq->stats.rx_dropped++;
  1916. return;
  1917. }
  1918. skb->dev = netdev;
  1919. skb_record_rx_queue(skb, droq->q_no);
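/* Large packets keep MIN_SKB_SIZE bytes in the skb linear area and attach
 * the rest of the receive page as a frag; small packets are copied entirely
 * into the linear area and the page is released.
 */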
  1920. if (likely(len > MIN_SKB_SIZE)) {
  1921. struct octeon_skb_page_info *pg_info;
  1922. unsigned char *va;
  1923. pg_info = ((struct octeon_skb_page_info *)(skb->cb));
  1924. if (pg_info->page) {
  1925. /* For Paged allocation use the frags */
  1926. va = page_address(pg_info->page) +
  1927. pg_info->page_offset;
  1928. memcpy(skb->data, va, MIN_SKB_SIZE);
  1929. skb_put(skb, MIN_SKB_SIZE);
  1930. skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
  1931. pg_info->page,
  1932. pg_info->page_offset +
  1933. MIN_SKB_SIZE,
  1934. len - MIN_SKB_SIZE,
  1935. LIO_RXBUFFER_SZ);
  1936. }
  1937. } else {
  1938. struct octeon_skb_page_info *pg_info =
  1939. ((struct octeon_skb_page_info *)(skb->cb));
  1940. skb_copy_to_linear_data(skb, page_address(pg_info->page)
  1941. + pg_info->page_offset, len);
  1942. skb_put(skb, len);
  1943. put_page(pg_info->page);
  1944. }
  1945. if (((oct->chip_id == OCTEON_CN66XX) ||
  1946. (oct->chip_id == OCTEON_CN68XX)) &&
  1947. ptp_enable) {
  1948. if (rh->r_dh.has_hwtstamp) {
  1949. /* timestamp is included from the hardware at
  1950. * the beginning of the packet.
  1951. */
  1952. if (ifstate_check
  1953. (lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED)) {
  1954. /* Nanoseconds are in the first 64-bits
  1955. * of the packet.
  1956. */
  1957. memcpy(&ns, (skb->data), sizeof(ns));
  1958. shhwtstamps = skb_hwtstamps(skb);
  1959. shhwtstamps->hwtstamp =
  1960. ns_to_ktime(ns +
  1961. lio->ptp_adjust);
  1962. }
  1963. skb_pull(skb, sizeof(ns));
  1964. }
  1965. }
  1966. skb->protocol = eth_type_trans(skb, skb->dev);
  1967. if ((netdev->features & NETIF_F_RXCSUM) &&
  1968. (((rh->r_dh.encap_on) &&
  1969. (rh->r_dh.csum_verified & CNNIC_TUN_CSUM_VERIFIED)) ||
  1970. (!(rh->r_dh.encap_on) &&
  1971. (rh->r_dh.csum_verified & CNNIC_CSUM_VERIFIED))))
  1972. /* checksum has already been verified */
  1973. skb->ip_summed = CHECKSUM_UNNECESSARY;
  1974. else
  1975. skb->ip_summed = CHECKSUM_NONE;
1976. /* Set the encapsulation field on the basis of status received
  1977. * from the firmware
  1978. */
  1979. if (rh->r_dh.encap_on) {
  1980. skb->encapsulation = 1;
  1981. skb->csum_level = 1;
  1982. droq->stats.rx_vxlan++;
  1983. }
  1984. /* inbound VLAN tag */
  1985. if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
  1986. (rh->r_dh.vlan != 0)) {
  1987. u16 vid = rh->r_dh.vlan;
  1988. u16 priority = rh->r_dh.priority;
  1989. vtag = priority << 13 | vid;
  1990. __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag);
  1991. }
  1992. packet_was_received = napi_gro_receive(napi, skb) != GRO_DROP;
  1993. if (packet_was_received) {
  1994. droq->stats.rx_bytes_received += len;
  1995. droq->stats.rx_pkts_received++;
  1996. netdev->last_rx = jiffies;
  1997. } else {
  1998. droq->stats.rx_dropped++;
  1999. netif_info(lio, rx_err, lio->netdev,
  2000. "droq:%d error rx_dropped:%llu\n",
  2001. droq->q_no, droq->stats.rx_dropped);
  2002. }
  2003. } else {
  2004. recv_buffer_free(skb);
  2005. }
  2006. }
  2007. /**
  2008. * \brief wrapper for calling napi_schedule
  2009. * @param param parameters to pass to napi_schedule
  2010. *
  2011. * Used when scheduling on different CPUs
  2012. */
  2013. static void napi_schedule_wrapper(void *param)
  2014. {
  2015. struct napi_struct *napi = param;
  2016. napi_schedule(napi);
  2017. }
  2018. /**
  2019. * \brief callback when receive interrupt occurs and we are in NAPI mode
  2020. * @param arg pointer to octeon output queue
  2021. */
  2022. static void liquidio_napi_drv_callback(void *arg)
  2023. {
  2024. struct octeon_device *oct;
  2025. struct octeon_droq *droq = arg;
  2026. int this_cpu = smp_processor_id();
  2027. oct = droq->oct_dev;
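/* Schedule NAPI directly on a CN23XX PF, or when the interrupt already
 * landed on the droq's designated CPU; otherwise bounce the schedule to
 * that CPU with an async single-function call.
 */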
  2028. if (OCTEON_CN23XX_PF(oct) || droq->cpu_id == this_cpu) {
  2029. napi_schedule_irqoff(&droq->napi);
  2030. } else {
  2031. struct call_single_data *csd = &droq->csd;
  2032. csd->func = napi_schedule_wrapper;
  2033. csd->info = &droq->napi;
  2034. csd->flags = 0;
  2035. smp_call_function_single_async(droq->cpu_id, csd);
  2036. }
  2037. }
  2038. /**
  2039. * \brief Entry point for NAPI polling
  2040. * @param napi NAPI structure
  2041. * @param budget maximum number of items to process
  2042. */
  2043. static int liquidio_napi_poll(struct napi_struct *napi, int budget)
  2044. {
  2045. struct octeon_droq *droq;
  2046. int work_done;
  2047. int tx_done = 0, iq_no;
  2048. struct octeon_instr_queue *iq;
  2049. struct octeon_device *oct;
  2050. droq = container_of(napi, struct octeon_droq, napi);
  2051. oct = droq->oct_dev;
  2052. iq_no = droq->q_no;
  2053. /* Handle Droq descriptors */
  2054. work_done = octeon_process_droq_poll_cmd(oct, droq->q_no,
  2055. POLL_EVENT_PROCESS_PKTS,
  2056. budget);
  2057. /* Flush the instruction queue */
  2058. iq = oct->instr_queue[iq_no];
  2059. if (iq) {
  2060. /* Process iq buffers with in the budget limits */
  2061. tx_done = octeon_flush_iq(oct, iq, 1, budget);
  2062. /* Update iq read-index rather than waiting for next interrupt.
  2063. * Return back if tx_done is false.
  2064. */
  2065. update_txq_status(oct, iq_no);
  2066. } else {
  2067. dev_err(&oct->pci_dev->dev, "%s: iq (%d) num invalid\n",
  2068. __func__, iq_no);
  2069. }
  2070. if ((work_done < budget) && (tx_done)) {
  2071. napi_complete(napi);
  2072. octeon_process_droq_poll_cmd(droq->oct_dev, droq->q_no,
  2073. POLL_EVENT_ENABLE_INTR, 0);
  2074. return 0;
  2075. }
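/* If the instruction queue still has pending work, return the full budget
 * so NAPI keeps polling; otherwise report the RX work actually done.
 */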
  2076. return (!tx_done) ? (budget) : (work_done);
  2077. }
  2078. /**
  2079. * \brief Setup input and output queues
  2080. * @param octeon_dev octeon device
  2081. * @param ifidx Interface Index
  2082. *
  2083. * Note: Queues are with respect to the octeon device. Thus
  2084. * an input queue is for egress packets, and output queues
  2085. * are for ingress packets.
  2086. */
  2087. static inline int setup_io_queues(struct octeon_device *octeon_dev,
  2088. int ifidx)
  2089. {
  2090. struct octeon_droq_ops droq_ops;
  2091. struct net_device *netdev;
  2092. static int cpu_id;
  2093. static int cpu_id_modulus;
  2094. struct octeon_droq *droq;
  2095. struct napi_struct *napi;
  2096. int q, q_no, retval = 0;
  2097. struct lio *lio;
  2098. int num_tx_descs;
  2099. netdev = octeon_dev->props[ifidx].netdev;
  2100. lio = GET_LIO(netdev);
  2101. memset(&droq_ops, 0, sizeof(struct octeon_droq_ops));
  2102. droq_ops.fptr = liquidio_push_packet;
  2103. droq_ops.farg = (void *)netdev;
  2104. droq_ops.poll_mode = 1;
  2105. droq_ops.napi_fn = liquidio_napi_drv_callback;
  2106. cpu_id = 0;
  2107. cpu_id_modulus = num_present_cpus();
  2108. /* set up DROQs. */
  2109. for (q = 0; q < lio->linfo.num_rxpciq; q++) {
  2110. q_no = lio->linfo.rxpciq[q].s.q_no;
  2111. dev_dbg(&octeon_dev->pci_dev->dev,
  2112. "setup_io_queues index:%d linfo.rxpciq.s.q_no:%d\n",
  2113. q, q_no);
  2114. retval = octeon_setup_droq(octeon_dev, q_no,
  2115. CFG_GET_NUM_RX_DESCS_NIC_IF
  2116. (octeon_get_conf(octeon_dev),
  2117. lio->ifidx),
  2118. CFG_GET_NUM_RX_BUF_SIZE_NIC_IF
  2119. (octeon_get_conf(octeon_dev),
  2120. lio->ifidx), NULL);
  2121. if (retval) {
  2122. dev_err(&octeon_dev->pci_dev->dev,
  2123. "%s : Runtime DROQ(RxQ) creation failed.\n",
  2124. __func__);
  2125. return 1;
  2126. }
  2127. droq = octeon_dev->droq[q_no];
  2128. napi = &droq->napi;
  2129. dev_dbg(&octeon_dev->pci_dev->dev, "netif_napi_add netdev:%llx oct:%llx pf_num:%d\n",
  2130. (u64)netdev, (u64)octeon_dev, octeon_dev->pf_num);
  2131. netif_napi_add(netdev, napi, liquidio_napi_poll, 64);
  2132. /* designate a CPU for this droq */
  2133. droq->cpu_id = cpu_id;
  2134. cpu_id++;
  2135. if (cpu_id >= cpu_id_modulus)
  2136. cpu_id = 0;
  2137. octeon_register_droq_ops(octeon_dev, q_no, &droq_ops);
  2138. }
  2139. if (OCTEON_CN23XX_PF(octeon_dev)) {
  2140. /* 23XX PF can receive control messages (via the first PF-owned
  2141. * droq) from the firmware even if the ethX interface is down,
  2142. * so that's why poll_mode must be off for the first droq.
  2143. */
  2144. octeon_dev->droq[0]->ops.poll_mode = 0;
  2145. }
  2146. /* set up IQs. */
  2147. for (q = 0; q < lio->linfo.num_txpciq; q++) {
  2148. num_tx_descs = CFG_GET_NUM_TX_DESCS_NIC_IF(octeon_get_conf
  2149. (octeon_dev),
  2150. lio->ifidx);
  2151. retval = octeon_setup_iq(octeon_dev, ifidx, q,
  2152. lio->linfo.txpciq[q], num_tx_descs,
  2153. netdev_get_tx_queue(netdev, q));
  2154. if (retval) {
  2155. dev_err(&octeon_dev->pci_dev->dev,
  2156. " %s : Runtime IQ(TxQ) creation failed.\n",
  2157. __func__);
  2158. return 1;
  2159. }
  2160. }
  2161. return 0;
  2162. }
  2163. /**
  2164. * \brief Poll routine for checking transmit queue status
  2165. * @param work work_struct data structure
  2166. */
  2167. static void octnet_poll_check_txq_status(struct work_struct *work)
  2168. {
  2169. struct cavium_wk *wk = (struct cavium_wk *)work;
  2170. struct lio *lio = (struct lio *)wk->ctxptr;
  2171. if (!ifstate_check(lio, LIO_IFSTATE_RUNNING))
  2172. return;
  2173. check_txq_status(lio);
  2174. queue_delayed_work(lio->txq_status_wq.wq,
  2175. &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
  2176. }
  2177. /**
  2178. * \brief Sets up the txq poll check
  2179. * @param netdev network device
  2180. */
  2181. static inline int setup_tx_poll_fn(struct net_device *netdev)
  2182. {
  2183. struct lio *lio = GET_LIO(netdev);
  2184. struct octeon_device *oct = lio->oct_dev;
  2185. lio->txq_status_wq.wq = alloc_workqueue("txq-status",
  2186. WQ_MEM_RECLAIM, 0);
  2187. if (!lio->txq_status_wq.wq) {
  2188. dev_err(&oct->pci_dev->dev, "unable to create cavium txq status wq\n");
  2189. return -1;
  2190. }
  2191. INIT_DELAYED_WORK(&lio->txq_status_wq.wk.work,
  2192. octnet_poll_check_txq_status);
  2193. lio->txq_status_wq.wk.ctxptr = lio;
  2194. queue_delayed_work(lio->txq_status_wq.wq,
  2195. &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
  2196. return 0;
  2197. }
  2198. static inline void cleanup_tx_poll_fn(struct net_device *netdev)
  2199. {
  2200. struct lio *lio = GET_LIO(netdev);
  2201. if (lio->txq_status_wq.wq) {
  2202. cancel_delayed_work_sync(&lio->txq_status_wq.wk.work);
  2203. destroy_workqueue(lio->txq_status_wq.wq);
  2204. }
  2205. }
  2206. /**
  2207. * \brief Net device open for LiquidIO
  2208. * @param netdev network device
  2209. */
  2210. static int liquidio_open(struct net_device *netdev)
  2211. {
  2212. struct lio *lio = GET_LIO(netdev);
  2213. struct octeon_device *oct = lio->oct_dev;
  2214. struct napi_struct *napi, *n;
  2215. if (oct->props[lio->ifidx].napi_enabled == 0) {
  2216. list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
  2217. napi_enable(napi);
  2218. oct->props[lio->ifidx].napi_enabled = 1;
  2219. if (OCTEON_CN23XX_PF(oct))
  2220. oct->droq[0]->ops.poll_mode = 1;
  2221. }
  2222. oct_ptp_open(netdev);
  2223. ifstate_set(lio, LIO_IFSTATE_RUNNING);
  2224. /* Ready for link status updates */
  2225. lio->intf_open = 1;
  2226. netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n");
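/* The periodic Tx-queue status poll is needed unless this is a CN23XX PF
 * running with MSI-X.
 */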
  2227. if (OCTEON_CN23XX_PF(oct)) {
  2228. if (!oct->msix_on)
  2229. if (setup_tx_poll_fn(netdev))
  2230. return -1;
  2231. } else {
  2232. if (setup_tx_poll_fn(netdev))
  2233. return -1;
  2234. }
  2235. start_txq(netdev);
  2236. /* tell Octeon to start forwarding packets to host */
  2237. send_rx_ctrl_cmd(lio, 1);
  2238. dev_info(&oct->pci_dev->dev, "%s interface is opened\n",
  2239. netdev->name);
  2240. return 0;
  2241. }
  2242. /**
  2243. * \brief Net device stop for LiquidIO
  2244. * @param netdev network device
  2245. */
  2246. static int liquidio_stop(struct net_device *netdev)
  2247. {
  2248. struct lio *lio = GET_LIO(netdev);
  2249. struct octeon_device *oct = lio->oct_dev;
  2250. ifstate_reset(lio, LIO_IFSTATE_RUNNING);
  2251. netif_tx_disable(netdev);
  2252. /* Inform that netif carrier is down */
  2253. netif_carrier_off(netdev);
  2254. lio->intf_open = 0;
  2255. lio->linfo.link.s.link_up = 0;
  2256. lio->link_changes++;
  2257. /* Pause for a moment and wait for Octeon to flush out (to the wire) any
  2258. * egress packets that are in-flight.
  2259. */
  2260. set_current_state(TASK_INTERRUPTIBLE);
  2261. schedule_timeout(msecs_to_jiffies(100));
  2262. /* Now it should be safe to tell Octeon that nic interface is down. */
  2263. send_rx_ctrl_cmd(lio, 0);
  2264. if (OCTEON_CN23XX_PF(oct)) {
  2265. if (!oct->msix_on)
  2266. cleanup_tx_poll_fn(netdev);
  2267. } else {
  2268. cleanup_tx_poll_fn(netdev);
  2269. }
  2270. if (lio->ptp_clock) {
  2271. ptp_clock_unregister(lio->ptp_clock);
  2272. lio->ptp_clock = NULL;
  2273. }
  2274. dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);
  2275. return 0;
  2276. }
  2277. /**
2278. * \brief Convert net device flags to an octnet_ifflags mask
  2279. * @param netdev network device
  2280. *
2281. * This routine generates an octnet_ifflags mask from the net device flags
  2282. * received from the OS.
  2283. */
  2284. static inline enum octnet_ifflags get_new_flags(struct net_device *netdev)
  2285. {
  2286. enum octnet_ifflags f = OCTNET_IFFLAG_UNICAST;
  2287. if (netdev->flags & IFF_PROMISC)
  2288. f |= OCTNET_IFFLAG_PROMISC;
  2289. if (netdev->flags & IFF_ALLMULTI)
  2290. f |= OCTNET_IFFLAG_ALLMULTI;
  2291. if (netdev->flags & IFF_MULTICAST) {
  2292. f |= OCTNET_IFFLAG_MULTICAST;
  2293. /* Accept all multicast addresses if there are more than we
  2294. * can handle
  2295. */
  2296. if (netdev_mc_count(netdev) > MAX_OCTEON_MULTICAST_ADDR)
  2297. f |= OCTNET_IFFLAG_ALLMULTI;
  2298. }
  2299. if (netdev->flags & IFF_BROADCAST)
  2300. f |= OCTNET_IFFLAG_BROADCAST;
  2301. return f;
  2302. }
  2303. /**
  2304. * \brief Net device set_multicast_list
  2305. * @param netdev network device
  2306. */
  2307. static void liquidio_set_mcast_list(struct net_device *netdev)
  2308. {
  2309. struct lio *lio = GET_LIO(netdev);
  2310. struct octeon_device *oct = lio->oct_dev;
  2311. struct octnic_ctrl_pkt nctrl;
  2312. struct netdev_hw_addr *ha;
  2313. u64 *mc;
  2314. int ret;
  2315. int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR);
  2316. memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
  2317. /* Create a ctrl pkt command to be sent to core app. */
  2318. nctrl.ncmd.u64 = 0;
  2319. nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST;
  2320. nctrl.ncmd.s.param1 = get_new_flags(netdev);
  2321. nctrl.ncmd.s.param2 = mc_count;
  2322. nctrl.ncmd.s.more = mc_count;
  2323. nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
  2324. nctrl.netpndev = (u64)netdev;
  2325. nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
  2326. /* copy all the addresses into the udd */
  2327. mc = &nctrl.udd[0];
  2328. netdev_for_each_mc_addr(ha, netdev) {
  2329. *mc = 0;
  2330. memcpy(((u8 *)mc) + 2, ha->addr, ETH_ALEN);
  2331. /* no need to swap bytes */
  2332. if (++mc > &nctrl.udd[mc_count])
  2333. break;
  2334. }
  2335. /* Apparently, any activity in this call from the kernel has to
  2336. * be atomic. So we won't wait for response.
  2337. */
  2338. nctrl.wait_time = 0;
  2339. ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
  2340. if (ret < 0) {
  2341. dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n",
  2342. ret);
  2343. }
  2344. }
  2345. /**
  2346. * \brief Net device set_mac_address
  2347. * @param netdev network device
  2348. */
  2349. static int liquidio_set_mac(struct net_device *netdev, void *p)
  2350. {
  2351. int ret = 0;
  2352. struct lio *lio = GET_LIO(netdev);
  2353. struct octeon_device *oct = lio->oct_dev;
  2354. struct sockaddr *addr = (struct sockaddr *)p;
  2355. struct octnic_ctrl_pkt nctrl;
  2356. if (!is_valid_ether_addr(addr->sa_data))
  2357. return -EADDRNOTAVAIL;
  2358. memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
  2359. nctrl.ncmd.u64 = 0;
  2360. nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
  2361. nctrl.ncmd.s.param1 = 0;
  2362. nctrl.ncmd.s.more = 1;
  2363. nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
  2364. nctrl.netpndev = (u64)netdev;
  2365. nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
  2366. nctrl.wait_time = 100;
  2367. nctrl.udd[0] = 0;
  2368. /* The MAC Address is presented in network byte order. */
  2369. memcpy((u8 *)&nctrl.udd[0] + 2, addr->sa_data, ETH_ALEN);
  2370. ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
  2371. if (ret < 0) {
  2372. dev_err(&oct->pci_dev->dev, "MAC Address change failed\n");
  2373. return -ENOMEM;
  2374. }
  2375. memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
  2376. memcpy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data, ETH_ALEN);
  2377. return 0;
  2378. }
  2379. /**
  2380. * \brief Net device get_stats
  2381. * @param netdev network device
  2382. */
  2383. static struct net_device_stats *liquidio_get_stats(struct net_device *netdev)
  2384. {
  2385. struct lio *lio = GET_LIO(netdev);
  2386. struct net_device_stats *stats = &netdev->stats;
  2387. struct octeon_device *oct;
  2388. u64 pkts = 0, drop = 0, bytes = 0;
  2389. struct oct_droq_stats *oq_stats;
  2390. struct oct_iq_stats *iq_stats;
  2391. int i, iq_no, oq_no;
  2392. oct = lio->oct_dev;
  2393. for (i = 0; i < lio->linfo.num_txpciq; i++) {
  2394. iq_no = lio->linfo.txpciq[i].s.q_no;
  2395. iq_stats = &oct->instr_queue[iq_no]->stats;
  2396. pkts += iq_stats->tx_done;
  2397. drop += iq_stats->tx_dropped;
  2398. bytes += iq_stats->tx_tot_bytes;
  2399. }
  2400. stats->tx_packets = pkts;
  2401. stats->tx_bytes = bytes;
  2402. stats->tx_dropped = drop;
  2403. pkts = 0;
  2404. drop = 0;
  2405. bytes = 0;
  2406. for (i = 0; i < lio->linfo.num_rxpciq; i++) {
  2407. oq_no = lio->linfo.rxpciq[i].s.q_no;
  2408. oq_stats = &oct->droq[oq_no]->stats;
  2409. pkts += oq_stats->rx_pkts_received;
  2410. drop += (oq_stats->rx_dropped +
  2411. oq_stats->dropped_nodispatch +
  2412. oq_stats->dropped_toomany +
  2413. oq_stats->dropped_nomem);
  2414. bytes += oq_stats->rx_bytes_received;
  2415. }
  2416. stats->rx_bytes = bytes;
  2417. stats->rx_packets = pkts;
  2418. stats->rx_dropped = drop;
  2419. return stats;
  2420. }
  2421. /**
  2422. * \brief Net device change_mtu
  2423. * @param netdev network device
  2424. */
  2425. static int liquidio_change_mtu(struct net_device *netdev, int new_mtu)
  2426. {
  2427. struct lio *lio = GET_LIO(netdev);
  2428. struct octeon_device *oct = lio->oct_dev;
  2429. struct octnic_ctrl_pkt nctrl;
  2430. int ret = 0;
  2431. memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
  2432. nctrl.ncmd.u64 = 0;
  2433. nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MTU;
  2434. nctrl.ncmd.s.param1 = new_mtu;
  2435. nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
  2436. nctrl.wait_time = 100;
  2437. nctrl.netpndev = (u64)netdev;
  2438. nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
  2439. ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
  2440. if (ret < 0) {
  2441. dev_err(&oct->pci_dev->dev, "Failed to set MTU\n");
  2442. return -1;
  2443. }
  2444. lio->mtu = new_mtu;
  2445. return 0;
  2446. }
  2447. /**
  2448. * \brief Handler for SIOCSHWTSTAMP ioctl
  2449. * @param netdev network device
  2450. * @param ifr interface request
  2452. */
  2453. static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
  2454. {
  2455. struct hwtstamp_config conf;
  2456. struct lio *lio = GET_LIO(netdev);
  2457. if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf)))
  2458. return -EFAULT;
  2459. if (conf.flags)
  2460. return -EINVAL;
  2461. switch (conf.tx_type) {
  2462. case HWTSTAMP_TX_ON:
  2463. case HWTSTAMP_TX_OFF:
  2464. break;
  2465. default:
  2466. return -ERANGE;
  2467. }
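/* All supported RX filter modes other than NONE are coerced to
 * HWTSTAMP_FILTER_ALL below, and that is what gets reported back to
 * user space.
 */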
  2468. switch (conf.rx_filter) {
  2469. case HWTSTAMP_FILTER_NONE:
  2470. break;
  2471. case HWTSTAMP_FILTER_ALL:
  2472. case HWTSTAMP_FILTER_SOME:
  2473. case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
  2474. case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
  2475. case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
  2476. case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
  2477. case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
  2478. case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
  2479. case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
  2480. case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
  2481. case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
  2482. case HWTSTAMP_FILTER_PTP_V2_EVENT:
  2483. case HWTSTAMP_FILTER_PTP_V2_SYNC:
  2484. case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
  2485. conf.rx_filter = HWTSTAMP_FILTER_ALL;
  2486. break;
  2487. default:
  2488. return -ERANGE;
  2489. }
  2490. if (conf.rx_filter == HWTSTAMP_FILTER_ALL)
  2491. ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
  2492. else
  2493. ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
  2494. return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0;
  2495. }
  2496. /**
  2497. * \brief ioctl handler
  2498. * @param netdev network device
  2499. * @param ifr interface request
  2500. * @param cmd command
  2501. */
  2502. static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
  2503. {
  2504. switch (cmd) {
  2505. case SIOCSHWTSTAMP:
  2506. return hwtstamp_ioctl(netdev, ifr);
  2507. default:
  2508. return -EOPNOTSUPP;
  2509. }
  2510. }
  2511. /**
  2512. * \brief handle a Tx timestamp response
  2513. * @param status response status
  2514. * @param buf pointer to skb
  2515. */
  2516. static void handle_timestamp(struct octeon_device *oct,
  2517. u32 status,
  2518. void *buf)
  2519. {
  2520. struct octnet_buf_free_info *finfo;
  2521. struct octeon_soft_command *sc;
  2522. struct oct_timestamp_resp *resp;
  2523. struct lio *lio;
  2524. struct sk_buff *skb = (struct sk_buff *)buf;
  2525. finfo = (struct octnet_buf_free_info *)skb->cb;
  2526. lio = finfo->lio;
  2527. sc = finfo->sc;
  2528. oct = lio->oct_dev;
  2529. resp = (struct oct_timestamp_resp *)sc->virtrptr;
  2530. if (status != OCTEON_REQUEST_DONE) {
  2531. dev_err(&oct->pci_dev->dev, "Tx timestamp instruction failed. Status: %llx\n",
  2532. CVM_CAST64(status));
  2533. resp->timestamp = 0;
  2534. }
  2535. octeon_swap_8B_data(&resp->timestamp, 1);
  2536. if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) != 0)) {
  2537. struct skb_shared_hwtstamps ts;
  2538. u64 ns = resp->timestamp;
  2539. netif_info(lio, tx_done, lio->netdev,
  2540. "Got resulting SKBTX_HW_TSTAMP skb=%p ns=%016llu\n",
  2541. skb, (unsigned long long)ns);
  2542. ts.hwtstamp = ns_to_ktime(ns + lio->ptp_adjust);
  2543. skb_tstamp_tx(skb, &ts);
  2544. }
  2545. octeon_free_soft_command(oct, sc);
  2546. tx_buffer_free(skb);
  2547. }
2548. /** \brief Send a data packet that will be timestamped
  2549. * @param oct octeon device
  2550. * @param ndata pointer to network data
  2551. * @param finfo pointer to private network data
  2552. */
  2553. static inline int send_nic_timestamp_pkt(struct octeon_device *oct,
  2554. struct octnic_data_pkt *ndata,
  2555. struct octnet_buf_free_info *finfo)
  2556. {
  2557. int retval;
  2558. struct octeon_soft_command *sc;
  2559. struct lio *lio;
  2560. int ring_doorbell;
  2561. u32 len;
  2562. lio = finfo->lio;
  2563. sc = octeon_alloc_soft_command_resp(oct, &ndata->cmd,
  2564. sizeof(struct oct_timestamp_resp));
  2565. finfo->sc = sc;
  2566. if (!sc) {
  2567. dev_err(&oct->pci_dev->dev, "No memory for timestamped data packet\n");
  2568. return IQ_SEND_FAILED;
  2569. }
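/* Promote the request to a response type so the returned timestamp is
 * delivered through handle_timestamp().
 */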
  2570. if (ndata->reqtype == REQTYPE_NORESP_NET)
  2571. ndata->reqtype = REQTYPE_RESP_NET;
  2572. else if (ndata->reqtype == REQTYPE_NORESP_NET_SG)
  2573. ndata->reqtype = REQTYPE_RESP_NET_SG;
  2574. sc->callback = handle_timestamp;
  2575. sc->callback_arg = finfo->skb;
  2576. sc->iq_no = ndata->q_no;
  2577. if (OCTEON_CN23XX_PF(oct))
  2578. len = (u32)((struct octeon_instr_ih3 *)
  2579. (&sc->cmd.cmd3.ih3))->dlengsz;
  2580. else
  2581. len = (u32)((struct octeon_instr_ih2 *)
  2582. (&sc->cmd.cmd2.ih2))->dlengsz;
  2583. ring_doorbell = 1;
  2584. retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd,
  2585. sc, len, ndata->reqtype);
  2586. if (retval == IQ_SEND_FAILED) {
  2587. dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n",
  2588. retval);
  2589. octeon_free_soft_command(oct, sc);
  2590. } else {
  2591. netif_info(lio, tx_queued, lio->netdev, "Queued timestamp packet\n");
  2592. }
  2593. return retval;
  2594. }
2595. /** \brief Transmit network packets to the Octeon interface
  2596. * @param skbuff skbuff struct to be passed to network layer.
  2597. * @param netdev pointer to network device
  2598. * @returns whether the packet was transmitted to the device okay or not
  2599. * (NETDEV_TX_OK or NETDEV_TX_BUSY)
  2600. */
  2601. static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
  2602. {
  2603. struct lio *lio;
  2604. struct octnet_buf_free_info *finfo;
  2605. union octnic_cmd_setup cmdsetup;
  2606. struct octnic_data_pkt ndata;
  2607. struct octeon_device *oct;
  2608. struct oct_iq_stats *stats;
  2609. struct octeon_instr_irh *irh;
  2610. union tx_info *tx_info;
  2611. int status = 0;
  2612. int q_idx = 0, iq_no = 0;
  2613. int j;
  2614. u64 dptr = 0;
  2615. u32 tag = 0;
  2616. lio = GET_LIO(netdev);
  2617. oct = lio->oct_dev;
  2618. if (netif_is_multiqueue(netdev)) {
  2619. q_idx = skb->queue_mapping;
  2620. q_idx = (q_idx % (lio->linfo.num_txpciq));
  2621. tag = q_idx;
  2622. iq_no = lio->linfo.txpciq[q_idx].s.q_no;
  2623. } else {
  2624. iq_no = lio->txq;
  2625. }
  2626. stats = &oct->instr_queue[iq_no]->stats;
  2627. /* Check for all conditions in which the current packet cannot be
  2628. * transmitted.
  2629. */
  2630. if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) ||
  2631. (!lio->linfo.link.s.link_up) ||
  2632. (skb->len <= 0)) {
  2633. netif_info(lio, tx_err, lio->netdev,
  2634. "Transmit failed link_status : %d\n",
  2635. lio->linfo.link.s.link_up);
  2636. goto lio_xmit_failed;
  2637. }
  2638. /* Use space in skb->cb to store info used to unmap and
  2639. * free the buffers.
  2640. */
  2641. finfo = (struct octnet_buf_free_info *)skb->cb;
  2642. finfo->lio = lio;
  2643. finfo->skb = skb;
  2644. finfo->sc = NULL;
  2645. /* Prepare the attributes for the data to be passed to OSI. */
  2646. memset(&ndata, 0, sizeof(struct octnic_data_pkt));
  2647. ndata.buf = (void *)finfo;
  2648. ndata.q_no = iq_no;
  2649. if (netif_is_multiqueue(netdev)) {
  2650. if (octnet_iq_is_full(oct, ndata.q_no)) {
  2651. /* defer sending if queue is full */
  2652. netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
  2653. ndata.q_no);
  2654. stats->tx_iq_busy++;
  2655. return NETDEV_TX_BUSY;
  2656. }
  2657. } else {
  2658. if (octnet_iq_is_full(oct, lio->txq)) {
  2659. /* defer sending if queue is full */
  2660. stats->tx_iq_busy++;
  2661. netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
  2662. lio->txq);
  2663. return NETDEV_TX_BUSY;
  2664. }
  2665. }
  2666. /* pr_info(" XMIT - valid Qs: %d, 1st Q no: %d, cpu: %d, q_no:%d\n",
  2667. * lio->linfo.num_txpciq, lio->txq, cpu, ndata.q_no);
  2668. */
  2669. ndata.datasize = skb->len;
  2670. cmdsetup.u64 = 0;
  2671. cmdsetup.s.iq_no = iq_no;
  2672. if (skb->ip_summed == CHECKSUM_PARTIAL) {
  2673. if (skb->encapsulation) {
  2674. cmdsetup.s.tnl_csum = 1;
  2675. stats->tx_vxlan++;
  2676. } else {
  2677. cmdsetup.s.transport_csum = 1;
  2678. }
  2679. }
  2680. if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
  2681. skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
  2682. cmdsetup.s.timestamp = 1;
  2683. }
  2684. if (skb_shinfo(skb)->nr_frags == 0) {
  2685. cmdsetup.s.u.datasize = skb->len;
  2686. octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
  2687. /* Offload checksum calculation for TCP/UDP packets */
  2688. dptr = dma_map_single(&oct->pci_dev->dev,
  2689. skb->data,
  2690. skb->len,
  2691. DMA_TO_DEVICE);
  2692. if (dma_mapping_error(&oct->pci_dev->dev, dptr)) {
  2693. dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n",
  2694. __func__);
  2695. return NETDEV_TX_BUSY;
  2696. }
  2697. if (OCTEON_CN23XX_PF(oct))
  2698. ndata.cmd.cmd3.dptr = dptr;
  2699. else
  2700. ndata.cmd.cmd2.dptr = dptr;
  2701. finfo->dptr = dptr;
  2702. ndata.reqtype = REQTYPE_NORESP_NET;
  2703. } else {
  2704. int i, frags;
  2705. struct skb_frag_struct *frag;
  2706. struct octnic_gather *g;
  2707. spin_lock(&lio->glist_lock[q_idx]);
  2708. g = (struct octnic_gather *)
  2709. list_delete_head(&lio->glist[q_idx]);
  2710. spin_unlock(&lio->glist_lock[q_idx]);
  2711. if (!g) {
  2712. netif_info(lio, tx_err, lio->netdev,
  2713. "Transmit scatter gather: glist null!\n");
  2714. goto lio_xmit_failed;
  2715. }
  2716. cmdsetup.s.gather = 1;
  2717. cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1);
  2718. octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
  2719. memset(g->sg, 0, g->sg_size);
  2720. g->sg[0].ptr[0] = dma_map_single(&oct->pci_dev->dev,
  2721. skb->data,
  2722. (skb->len - skb->data_len),
  2723. DMA_TO_DEVICE);
  2724. if (dma_mapping_error(&oct->pci_dev->dev, g->sg[0].ptr[0])) {
  2725. dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n",
  2726. __func__);
  2727. return NETDEV_TX_BUSY;
  2728. }
  2729. add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0);
  2730. frags = skb_shinfo(skb)->nr_frags;
  2731. i = 1;
  2732. while (frags--) {
  2733. frag = &skb_shinfo(skb)->frags[i - 1];
  2734. g->sg[(i >> 2)].ptr[(i & 3)] =
  2735. dma_map_page(&oct->pci_dev->dev,
  2736. frag->page.p,
  2737. frag->page_offset,
  2738. frag->size,
  2739. DMA_TO_DEVICE);
  2740. if (dma_mapping_error(&oct->pci_dev->dev,
  2741. g->sg[i >> 2].ptr[i & 3])) {
  2742. dma_unmap_single(&oct->pci_dev->dev,
  2743. g->sg[0].ptr[0],
  2744. skb->len - skb->data_len,
  2745. DMA_TO_DEVICE);
  2746. for (j = 1; j < i; j++) {
  2747. frag = &skb_shinfo(skb)->frags[j - 1];
  2748. dma_unmap_page(&oct->pci_dev->dev,
  2749. g->sg[j >> 2].ptr[j & 3],
  2750. frag->size,
  2751. DMA_TO_DEVICE);
  2752. }
  2753. dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n",
  2754. __func__);
  2755. return NETDEV_TX_BUSY;
  2756. }
  2757. add_sg_size(&g->sg[(i >> 2)], frag->size, (i & 3));
  2758. i++;
  2759. }
  2760. dma_sync_single_for_device(&oct->pci_dev->dev, g->sg_dma_ptr,
  2761. g->sg_size, DMA_TO_DEVICE);
  2762. dptr = g->sg_dma_ptr;
  2763. if (OCTEON_CN23XX_PF(oct))
  2764. ndata.cmd.cmd3.dptr = dptr;
  2765. else
  2766. ndata.cmd.cmd2.dptr = dptr;
  2767. finfo->dptr = dptr;
  2768. finfo->g = g;
  2769. ndata.reqtype = REQTYPE_NORESP_NET_SG;
  2770. }
  2771. if (OCTEON_CN23XX_PF(oct)) {
  2772. irh = (struct octeon_instr_irh *)&ndata.cmd.cmd3.irh;
  2773. tx_info = (union tx_info *)&ndata.cmd.cmd3.ossp[0];
  2774. } else {
  2775. irh = (struct octeon_instr_irh *)&ndata.cmd.cmd2.irh;
  2776. tx_info = (union tx_info *)&ndata.cmd.cmd2.ossp[0];
  2777. }
  2778. if (skb_shinfo(skb)->gso_size) {
  2779. tx_info->s.gso_size = skb_shinfo(skb)->gso_size;
  2780. tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs;
  2781. stats->tx_gso++;
  2782. }
  2783. /* HW insert VLAN tag */
  2784. if (skb_vlan_tag_present(skb)) {
  2785. irh->priority = skb_vlan_tag_get(skb) >> 13;
  2786. irh->vlan = skb_vlan_tag_get(skb) & 0xfff;
  2787. }
  2788. if (unlikely(cmdsetup.s.timestamp))
  2789. status = send_nic_timestamp_pkt(oct, &ndata, finfo);
  2790. else
  2791. status = octnet_send_nic_data_pkt(oct, &ndata);
  2792. if (status == IQ_SEND_FAILED)
  2793. goto lio_xmit_failed;
  2794. netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n");
  2795. if (status == IQ_SEND_STOP)
  2796. stop_q(lio->netdev, q_idx);
  2797. netif_trans_update(netdev);
  2798. if (skb_shinfo(skb)->gso_size)
  2799. stats->tx_done += skb_shinfo(skb)->gso_segs;
  2800. else
  2801. stats->tx_done++;
  2802. stats->tx_tot_bytes += skb->len;
  2803. return NETDEV_TX_OK;
  2804. lio_xmit_failed:
  2805. stats->tx_dropped++;
  2806. netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n",
  2807. iq_no, stats->tx_dropped);
  2808. if (dptr)
  2809. dma_unmap_single(&oct->pci_dev->dev, dptr,
  2810. ndata.datasize, DMA_TO_DEVICE);
  2811. tx_buffer_free(skb);
  2812. return NETDEV_TX_OK;
  2813. }
  2814. /** \brief Network device Tx timeout
  2815. * @param netdev pointer to network device
  2816. */
  2817. static void liquidio_tx_timeout(struct net_device *netdev)
  2818. {
  2819. struct lio *lio;
  2820. lio = GET_LIO(netdev);
  2821. netif_info(lio, tx_err, lio->netdev,
  2822. "Transmit timeout tx_dropped:%lu, waking up queues now!!\n",
  2823. netdev->stats.tx_dropped);
  2824. netif_trans_update(netdev);
  2825. txqs_wake(netdev);
  2826. }
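/** \brief Add a VLAN filter on the device
 * @param netdev pointer to network device
 * @param proto VLAN protocol (unused)
 * @param vid VLAN id to add
 *
 * Sends OCTNET_CMD_ADD_VLAN_FILTER to the firmware over the first
 * instruction queue of this interface.
 */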
  2827. static int liquidio_vlan_rx_add_vid(struct net_device *netdev,
  2828. __be16 proto __attribute__((unused)),
  2829. u16 vid)
  2830. {
  2831. struct lio *lio = GET_LIO(netdev);
  2832. struct octeon_device *oct = lio->oct_dev;
  2833. struct octnic_ctrl_pkt nctrl;
  2834. int ret = 0;
  2835. memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
  2836. nctrl.ncmd.u64 = 0;
  2837. nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
  2838. nctrl.ncmd.s.param1 = vid;
  2839. nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
  2840. nctrl.wait_time = 100;
  2841. nctrl.netpndev = (u64)netdev;
  2842. nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
  2843. ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
  2844. if (ret < 0) {
  2845. dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
  2846. ret);
  2847. }
  2848. return ret;
  2849. }
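/** \brief Remove a VLAN filter from the device
 * @param netdev pointer to network device
 * @param proto VLAN protocol (unused)
 * @param vid VLAN id to remove
 *
 * Sends OCTNET_CMD_DEL_VLAN_FILTER to the firmware.
 */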
  2850. static int liquidio_vlan_rx_kill_vid(struct net_device *netdev,
  2851. __be16 proto __attribute__((unused)),
  2852. u16 vid)
  2853. {
  2854. struct lio *lio = GET_LIO(netdev);
  2855. struct octeon_device *oct = lio->oct_dev;
  2856. struct octnic_ctrl_pkt nctrl;
  2857. int ret = 0;
  2858. memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
  2859. nctrl.ncmd.u64 = 0;
  2860. nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
  2861. nctrl.ncmd.s.param1 = vid;
  2862. nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
  2863. nctrl.wait_time = 100;
  2864. nctrl.netpndev = (u64)netdev;
  2865. nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
  2866. ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
  2867. if (ret < 0) {
  2868. dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n",
  2869. ret);
  2870. }
  2871. return ret;
  2872. }
  2873. /** \brief Send command to enable/disable RX checksum offload
  2874. * @param netdev pointer to network device
  2875. * @param command OCTNET_CMD_TNL_RX_CSUM_CTL
  2876. * @param rx_cmd OCTNET_CMD_RXCSUM_ENABLE or
  2877. * OCTNET_CMD_RXCSUM_DISABLE
  2878. * @returns SUCCESS or FAILURE
  2879. */
  2880. static int liquidio_set_rxcsum_command(struct net_device *netdev, int command,
  2881. u8 rx_cmd)
  2882. {
  2883. struct lio *lio = GET_LIO(netdev);
  2884. struct octeon_device *oct = lio->oct_dev;
  2885. struct octnic_ctrl_pkt nctrl;
  2886. int ret = 0;
  memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
  2887. nctrl.ncmd.u64 = 0;
  2888. nctrl.ncmd.s.cmd = command;
  2889. nctrl.ncmd.s.param1 = rx_cmd;
  2890. nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
  2891. nctrl.wait_time = 100;
  2892. nctrl.netpndev = (u64)netdev;
  2893. nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
  2894. ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
  2895. if (ret < 0) {
  2896. dev_err(&oct->pci_dev->dev,
  2897. "DEVFLAGS RXCSUM change failed in core(ret:0x%x)\n",
  2898. ret);
  2899. }
  2900. return ret;
  2901. }
  2902. /** \brief Send command to add/delete a VxLAN UDP port to the firmware
  2903. * @param netdev pointer to network device
  2904. * @param command OCTNET_CMD_VXLAN_PORT_CONFIG
  2905. * @param vxlan_port VxLAN port to be added or deleted
  2906. * @param vxlan_cmd_bit OCTNET_CMD_VXLAN_PORT_ADD or
  2907. * OCTNET_CMD_VXLAN_PORT_DEL
  2908. * @returns SUCCESS or FAILURE
  2909. */
  2910. static int liquidio_vxlan_port_command(struct net_device *netdev, int command,
  2911. u16 vxlan_port, u8 vxlan_cmd_bit)
  2912. {
  2913. struct lio *lio = GET_LIO(netdev);
  2914. struct octeon_device *oct = lio->oct_dev;
  2915. struct octnic_ctrl_pkt nctrl;
  2916. int ret = 0;
  memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
  2917. nctrl.ncmd.u64 = 0;
  2918. nctrl.ncmd.s.cmd = command;
  2919. nctrl.ncmd.s.more = vxlan_cmd_bit;
  2920. nctrl.ncmd.s.param1 = vxlan_port;
  2921. nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
  2922. nctrl.wait_time = 100;
  2923. nctrl.netpndev = (u64)netdev;
  2924. nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
  2925. ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
  2926. if (ret < 0) {
  2927. dev_err(&oct->pci_dev->dev,
  2928. "VxLAN port add/delete failed in core (ret:0x%x)\n",
  2929. ret);
  2930. }
  2931. return ret;
  2932. }
  2933. /** \brief Net device fix features
  2934. * @param netdev pointer to network device
  2935. * @param request features requested
  2936. * @returns updated features list
  2937. */
  2938. static netdev_features_t liquidio_fix_features(struct net_device *netdev,
  2939. netdev_features_t request)
  2940. {
  2941. struct lio *lio = netdev_priv(netdev);
  2942. if ((request & NETIF_F_RXCSUM) &&
  2943. !(lio->dev_capability & NETIF_F_RXCSUM))
  2944. request &= ~NETIF_F_RXCSUM;
  2945. if ((request & NETIF_F_HW_CSUM) &&
  2946. !(lio->dev_capability & NETIF_F_HW_CSUM))
  2947. request &= ~NETIF_F_HW_CSUM;
  2948. if ((request & NETIF_F_TSO) && !(lio->dev_capability & NETIF_F_TSO))
  2949. request &= ~NETIF_F_TSO;
  2950. if ((request & NETIF_F_TSO6) && !(lio->dev_capability & NETIF_F_TSO6))
  2951. request &= ~NETIF_F_TSO6;
  2952. if ((request & NETIF_F_LRO) && !(lio->dev_capability & NETIF_F_LRO))
  2953. request &= ~NETIF_F_LRO;
  2954. /* Disable LRO if RXCSUM is off */
  2955. if (!(request & NETIF_F_RXCSUM) && (netdev->features & NETIF_F_LRO) &&
  2956. (lio->dev_capability & NETIF_F_LRO))
  2957. request &= ~NETIF_F_LRO;
  2958. return request;
  2959. }
  2960. /** \brief Net device set features
  2961. * @param netdev pointer to network device
  2962. * @param features features to enable/disable
  2963. */
  2964. static int liquidio_set_features(struct net_device *netdev,
  2965. netdev_features_t features)
  2966. {
  2967. struct lio *lio = netdev_priv(netdev);
  2968. if (!((netdev->features ^ features) & NETIF_F_LRO))
  2969. return 0;
  2970. if ((features & NETIF_F_LRO) && (lio->dev_capability & NETIF_F_LRO))
  2971. liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
  2972. OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
  2973. else if (!(features & NETIF_F_LRO) &&
  2974. (lio->dev_capability & NETIF_F_LRO))
  2975. liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE,
  2976. OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
  2977. /* Sending command to firmware to enable/disable RX checksum
  2978. * offload settings using ethtool
  2979. */
  2980. if (!(netdev->features & NETIF_F_RXCSUM) &&
  2981. (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
  2982. (features & NETIF_F_RXCSUM))
  2983. liquidio_set_rxcsum_command(netdev,
  2984. OCTNET_CMD_TNL_RX_CSUM_CTL,
  2985. OCTNET_CMD_RXCSUM_ENABLE);
  2986. else if ((netdev->features & NETIF_F_RXCSUM) &&
  2987. (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
  2988. !(features & NETIF_F_RXCSUM))
  2989. liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
  2990. OCTNET_CMD_RXCSUM_DISABLE);
  2991. return 0;
  2992. }
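/** \brief Notify firmware of a new VxLAN destination UDP port
 * @param netdev pointer to network device
 * @param ti tunnel info from the UDP tunnel offload framework
 */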
  2993. static void liquidio_add_vxlan_port(struct net_device *netdev,
  2994. struct udp_tunnel_info *ti)
  2995. {
  2996. if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
  2997. return;
  2998. liquidio_vxlan_port_command(netdev,
  2999. OCTNET_CMD_VXLAN_PORT_CONFIG,
  3000. htons(ti->port),
  3001. OCTNET_CMD_VXLAN_PORT_ADD);
  3002. }
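/** \brief Notify firmware that a VxLAN destination UDP port was removed
 * @param netdev pointer to network device
 * @param ti tunnel info from the UDP tunnel offload framework
 */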
  3003. static void liquidio_del_vxlan_port(struct net_device *netdev,
  3004. struct udp_tunnel_info *ti)
  3005. {
  3006. if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
  3007. return;
  3008. liquidio_vxlan_port_command(netdev,
  3009. OCTNET_CMD_VXLAN_PORT_CONFIG,
  3010. htons(ti->port),
  3011. OCTNET_CMD_VXLAN_PORT_DEL);
  3012. }
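/** \brief Program a MAC address for a VF
 * @param netdev pointer to the PF's network device
 * @param vfidx zero-based VF index
 * @param mac MAC address to assign
 * @param is_admin_assigned true if the address was set by the administrator
 *
 * Sends OCTNET_CMD_CHANGE_MACADDR for the VF and caches the address in
 * sriov_info so it can later be reported by liquidio_get_vf_config.
 */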
  3013. static int __liquidio_set_vf_mac(struct net_device *netdev, int vfidx,
  3014. u8 *mac, bool is_admin_assigned)
  3015. {
  3016. struct lio *lio = GET_LIO(netdev);
  3017. struct octeon_device *oct = lio->oct_dev;
  3018. struct octnic_ctrl_pkt nctrl;
  3019. if (!is_valid_ether_addr(mac))
  3020. return -EINVAL;
  3021. if (vfidx < 0 || vfidx >= oct->sriov_info.max_vfs)
  3022. return -EINVAL;
  3023. memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
  3024. nctrl.ncmd.u64 = 0;
  3025. nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
  3026. /* vfidx is 0 based, but vf_num (param1) is 1 based */
  3027. nctrl.ncmd.s.param1 = vfidx + 1;
  3028. nctrl.ncmd.s.param2 = (is_admin_assigned ? 1 : 0);
  3029. nctrl.ncmd.s.more = 1;
  3030. nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
  3031. nctrl.cb_fn = NULL;
  3032. nctrl.wait_time = LIO_CMD_WAIT_TM;
  3033. nctrl.udd[0] = 0;
  3034. /* The MAC Address is presented in network byte order. */
  3035. ether_addr_copy((u8 *)&nctrl.udd[0] + 2, mac);
  3036. oct->sriov_info.vf_macaddr[vfidx] = nctrl.udd[0];
  3037. octnet_send_nic_ctrl_pkt(oct, &nctrl);
  3038. return 0;
  3039. }
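/** \brief Set an admin-assigned MAC address for a VF
 * @param netdev pointer to the PF's network device
 * @param vfidx zero-based VF index
 * @param mac MAC address to assign
 *
 * On success the VF is also notified of its new MAC address.
 */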
  3040. static int liquidio_set_vf_mac(struct net_device *netdev, int vfidx, u8 *mac)
  3041. {
  3042. struct lio *lio = GET_LIO(netdev);
  3043. struct octeon_device *oct = lio->oct_dev;
  3044. int retval;
  3045. retval = __liquidio_set_vf_mac(netdev, vfidx, mac, true);
  3046. if (!retval)
  3047. cn23xx_tell_vf_its_macaddr_changed(oct, vfidx, mac);
  3048. return retval;
  3049. }
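/** \brief Set a VLAN/QoS pair for a VF
 * @param netdev pointer to the PF's network device
 * @param vfidx zero-based VF index
 * @param vlan VLAN id; 0 removes the VF's VLAN filter
 * @param qos 802.1p priority (0-7)
 * @param vlan_proto must be ETH_P_8021Q
 *
 * Sends an add or delete VLAN filter command for the VF and caches the
 * resulting VLAN TCI in sriov_info.
 */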
  3050. static int liquidio_set_vf_vlan(struct net_device *netdev, int vfidx,
  3051. u16 vlan, u8 qos, __be16 vlan_proto)
  3052. {
  3053. struct lio *lio = GET_LIO(netdev);
  3054. struct octeon_device *oct = lio->oct_dev;
  3055. struct octnic_ctrl_pkt nctrl;
  3056. u16 vlantci;
  3057. if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
  3058. return -EINVAL;
  3059. if (vlan_proto != htons(ETH_P_8021Q))
  3060. return -EPROTONOSUPPORT;
  3061. if (vlan >= VLAN_N_VID || qos > 7)
  3062. return -EINVAL;
  3063. if (vlan)
  3064. vlantci = vlan | (u16)qos << VLAN_PRIO_SHIFT;
  3065. else
  3066. vlantci = 0;
  3067. if (oct->sriov_info.vf_vlantci[vfidx] == vlantci)
  3068. return 0;
  3069. memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
  3070. if (vlan)
  3071. nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
  3072. else
  3073. nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
  3074. nctrl.ncmd.s.param1 = vlantci;
  3075. nctrl.ncmd.s.param2 =
  3076. vfidx + 1; /* vfidx is 0 based, but vf_num (param2) is 1 based */
  3077. nctrl.ncmd.s.more = 0;
  3078. nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
  3079. nctrl.cb_fn = NULL;
  3080. nctrl.wait_time = LIO_CMD_WAIT_TM;
  3081. octnet_send_nic_ctrl_pkt(oct, &nctrl);
  3082. oct->sriov_info.vf_vlantci[vfidx] = vlantci;
  3083. return 0;
  3084. }
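/** \brief Report cached VF settings
 * @param netdev pointer to the PF's network device
 * @param vfidx zero-based VF index
 * @param ivi filled with the VF's MAC address, VLAN, QoS and link state
 */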
  3085. static int liquidio_get_vf_config(struct net_device *netdev, int vfidx,
  3086. struct ifla_vf_info *ivi)
  3087. {
  3088. struct lio *lio = GET_LIO(netdev);
  3089. struct octeon_device *oct = lio->oct_dev;
  3090. u8 *macaddr;
  3091. if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
  3092. return -EINVAL;
  3093. ivi->vf = vfidx;
  3094. macaddr = 2 + (u8 *)&oct->sriov_info.vf_macaddr[vfidx];
  3095. ether_addr_copy(&ivi->mac[0], macaddr);
  3096. ivi->vlan = oct->sriov_info.vf_vlantci[vfidx] & VLAN_VID_MASK;
  3097. ivi->qos = oct->sriov_info.vf_vlantci[vfidx] >> VLAN_PRIO_SHIFT;
  3098. ivi->linkstate = oct->sriov_info.vf_linkstate[vfidx];
  3099. return 0;
  3100. }
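/** \brief Set a VF's link state
 * @param netdev pointer to the PF's network device
 * @param vfidx zero-based VF index
 * @param linkstate link state requested by the administrator
 *
 * Sends OCTNET_CMD_SET_VF_LINKSTATE to the firmware and caches the
 * requested state in sriov_info.
 */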
  3101. static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx,
  3102. int linkstate)
  3103. {
  3104. struct lio *lio = GET_LIO(netdev);
  3105. struct octeon_device *oct = lio->oct_dev;
  3106. struct octnic_ctrl_pkt nctrl;
  3107. if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
  3108. return -EINVAL;
  3109. if (oct->sriov_info.vf_linkstate[vfidx] == linkstate)
  3110. return 0;
  3111. memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
  3112. nctrl.ncmd.s.cmd = OCTNET_CMD_SET_VF_LINKSTATE;
  3113. nctrl.ncmd.s.param1 =
  3114. vfidx + 1; /* vfidx is 0 based, but vf_num (param1) is 1 based */
  3115. nctrl.ncmd.s.param2 = linkstate;
  3116. nctrl.ncmd.s.more = 0;
  3117. nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
  3118. nctrl.cb_fn = NULL;
  3119. nctrl.wait_time = LIO_CMD_WAIT_TM;
  3120. octnet_send_nic_ctrl_pkt(oct, &nctrl);
  3121. oct->sriov_info.vf_linkstate[vfidx] = linkstate;
  3122. return 0;
  3123. }
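/* netdev callbacks implemented by this driver; liquidio_xmit above is
 * wired in as ndo_start_xmit.
 */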
  3124. static const struct net_device_ops lionetdevops = {
  3125. .ndo_open = liquidio_open,
  3126. .ndo_stop = liquidio_stop,
  3127. .ndo_start_xmit = liquidio_xmit,
  3128. .ndo_get_stats = liquidio_get_stats,
  3129. .ndo_set_mac_address = liquidio_set_mac,
  3130. .ndo_set_rx_mode = liquidio_set_mcast_list,
  3131. .ndo_tx_timeout = liquidio_tx_timeout,
  3132. .ndo_vlan_rx_add_vid = liquidio_vlan_rx_add_vid,
  3133. .ndo_vlan_rx_kill_vid = liquidio_vlan_rx_kill_vid,
  3134. .ndo_change_mtu = liquidio_change_mtu,
  3135. .ndo_do_ioctl = liquidio_ioctl,
  3136. .ndo_fix_features = liquidio_fix_features,
  3137. .ndo_set_features = liquidio_set_features,
  3138. .ndo_udp_tunnel_add = liquidio_add_vxlan_port,
  3139. .ndo_udp_tunnel_del = liquidio_del_vxlan_port,
  3140. .ndo_set_vf_mac = liquidio_set_vf_mac,
  3141. .ndo_set_vf_vlan = liquidio_set_vf_vlan,
  3142. .ndo_get_vf_config = liquidio_get_vf_config,
  3143. .ndo_set_vf_link_state = liquidio_set_vf_link_state,
  3144. .ndo_select_queue = select_q
  3145. };
  3146. /** \brief Entry point for the liquidio module
  3147. */
  3148. static int __init liquidio_init(void)
  3149. {
  3150. int i;
  3151. struct handshake *hs;
  3152. init_completion(&first_stage);
  3153. octeon_init_device_list(OCTEON_CONFIG_TYPE_DEFAULT);
  3154. if (liquidio_init_pci())
  3155. return -EINVAL;
  3156. wait_for_completion_timeout(&first_stage, msecs_to_jiffies(1000));
  3157. for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
  3158. hs = &handshake[i];
  3159. if (hs->pci_dev) {
  3160. wait_for_completion(&hs->init);
  3161. if (!hs->init_ok) {
  3162. /* init handshake failed */
  3163. dev_err(&hs->pci_dev->dev,
  3164. "Failed to init device\n");
  3165. liquidio_deinit_pci();
  3166. return -EIO;
  3167. }
  3168. }
  3169. }
  3170. for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
  3171. hs = &handshake[i];
  3172. if (hs->pci_dev) {
  3173. wait_for_completion_timeout(&hs->started,
  3174. msecs_to_jiffies(30000));
  3175. if (!hs->started_ok) {
  3176. /* starter handshake failed */
  3177. dev_err(&hs->pci_dev->dev,
  3178. "Firmware failed to start\n");
  3179. liquidio_deinit_pci();
  3180. return -EIO;
  3181. }
  3182. }
  3183. }
  3184. return 0;
  3185. }
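/** \brief Dispatch handler for NIC_INFO messages carrying link status
 * @param recv_info receive info holding the firmware message
 * @param buf octeon device that received the message
 *
 * Finds the interface matching the reported gmxport, updates its link
 * status, then frees the receive buffers.
 */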
  3186. static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
  3187. {
  3188. struct octeon_device *oct = (struct octeon_device *)buf;
  3189. struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
  3190. int gmxport = 0;
  3191. union oct_link_status *ls;
  3192. int i;
  3193. if (recv_pkt->buffer_size[0] != sizeof(*ls)) {
  3194. dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, gmxport=%d\n",
  3195. recv_pkt->buffer_size[0],
  3196. recv_pkt->rh.r_nic_info.gmxport);
  3197. goto nic_info_err;
  3198. }
  3199. gmxport = recv_pkt->rh.r_nic_info.gmxport;
  3200. ls = (union oct_link_status *)get_rbd(recv_pkt->buffer_ptr[0]);
  3201. octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3);
  3202. for (i = 0; i < oct->ifcount; i++) {
  3203. if (oct->props[i].gmxport == gmxport) {
  3204. update_link_status(oct->props[i].netdev, ls);
  3205. break;
  3206. }
  3207. }
  3208. nic_info_err:
  3209. for (i = 0; i < recv_pkt->buffer_count; i++)
  3210. recv_buffer_free(recv_pkt->buffer_ptr[i]);
  3211. octeon_free_recv_info(recv_info);
  3212. return 0;
  3213. }
  3214. /**
  3215. * \brief Setup network interfaces
  3216. * @param octeon_dev octeon device
  3217. *
  3218. * Called during init time for each device. It assumes the NIC
  3219. * is already up and running. The link information for each
  3220. * interface is passed in link_info.
  3221. */
  3222. static int setup_nic_devices(struct octeon_device *octeon_dev)
  3223. {
  3224. struct lio *lio = NULL;
  3225. struct net_device *netdev;
  3226. u8 mac[6], i, j;
  3227. struct octeon_soft_command *sc;
  3228. struct liquidio_if_cfg_context *ctx;
  3229. struct liquidio_if_cfg_resp *resp;
  3230. struct octdev_props *props;
  3231. int retval, num_iqueues, num_oqueues;
  3232. union oct_nic_if_cfg if_cfg;
  3233. unsigned int base_queue;
  3234. unsigned int gmx_port_id;
  3235. u32 resp_size, ctx_size, data_size;
  3236. u32 ifidx_or_pfnum;
  3237. struct lio_version *vdata;
  3238. /* This is to handle link status changes */
  3239. octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
  3240. OPCODE_NIC_INFO,
  3241. lio_nic_info, octeon_dev);
  3242. /* REQTYPE_RESP_NET and REQTYPE_SOFT_COMMAND do not have free functions.
  3243. * They are handled directly.
  3244. */
  3245. octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET,
  3246. free_netbuf);
  3247. octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET_SG,
  3248. free_netsgbuf);
  3249. octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_RESP_NET_SG,
  3250. free_netsgbuf_with_resp);
  3251. for (i = 0; i < octeon_dev->ifcount; i++) {
  3252. resp_size = sizeof(struct liquidio_if_cfg_resp);
  3253. ctx_size = sizeof(struct liquidio_if_cfg_context);
  3254. data_size = sizeof(struct lio_version);
  3255. sc = (struct octeon_soft_command *)
  3256. octeon_alloc_soft_command(octeon_dev, data_size,
  3257. resp_size, ctx_size);
  3258. resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
  3259. ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;
  3260. vdata = (struct lio_version *)sc->virtdptr;
  3261. *((u64 *)vdata) = 0;
  3262. vdata->major = cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION);
  3263. vdata->minor = cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION);
  3264. vdata->micro = cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION);
  3265. if (OCTEON_CN23XX_PF(octeon_dev)) {
  3266. num_iqueues = octeon_dev->sriov_info.num_pf_rings;
  3267. num_oqueues = octeon_dev->sriov_info.num_pf_rings;
  3268. base_queue = octeon_dev->sriov_info.pf_srn;
  3269. gmx_port_id = octeon_dev->pf_num;
  3270. ifidx_or_pfnum = octeon_dev->pf_num;
  3271. } else {
  3272. num_iqueues = CFG_GET_NUM_TXQS_NIC_IF(
  3273. octeon_get_conf(octeon_dev), i);
  3274. num_oqueues = CFG_GET_NUM_RXQS_NIC_IF(
  3275. octeon_get_conf(octeon_dev), i);
  3276. base_queue = CFG_GET_BASE_QUE_NIC_IF(
  3277. octeon_get_conf(octeon_dev), i);
  3278. gmx_port_id = CFG_GET_GMXID_NIC_IF(
  3279. octeon_get_conf(octeon_dev), i);
  3280. ifidx_or_pfnum = i;
  3281. }
  3282. dev_dbg(&octeon_dev->pci_dev->dev,
  3283. "requesting config for interface %d, iqs %d, oqs %d\n",
  3284. ifidx_or_pfnum, num_iqueues, num_oqueues);
  3285. WRITE_ONCE(ctx->cond, 0);
  3286. ctx->octeon_id = lio_get_device_id(octeon_dev);
  3287. init_waitqueue_head(&ctx->wc);
  3288. if_cfg.u64 = 0;
  3289. if_cfg.s.num_iqueues = num_iqueues;
  3290. if_cfg.s.num_oqueues = num_oqueues;
  3291. if_cfg.s.base_queue = base_queue;
  3292. if_cfg.s.gmx_port_id = gmx_port_id;
  3293. sc->iq_no = 0;
  3294. octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC,
  3295. OPCODE_NIC_IF_CFG, 0,
  3296. if_cfg.u64, 0);
  3297. sc->callback = if_cfg_callback;
  3298. sc->callback_arg = sc;
  3299. sc->wait_time = 3000;
  3300. retval = octeon_send_soft_command(octeon_dev, sc);
  3301. if (retval == IQ_SEND_FAILED) {
  3302. dev_err(&octeon_dev->pci_dev->dev,
  3303. "iq/oq config failed status: %x\n",
  3304. retval);
  3305. /* Soft instr is freed by driver in case of failure. */
  3306. goto setup_nic_dev_fail;
  3307. }
  3308. /* Sleep on a wait queue till the cond flag indicates that the
  3309. * response arrived or timed-out.
  3310. */
  3311. if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) {
  3312. dev_err(&octeon_dev->pci_dev->dev, "Wait interrupted\n");
  3313. goto setup_nic_wait_intr;
  3314. }
  3315. retval = resp->status;
  3316. if (retval) {
  3317. dev_err(&octeon_dev->pci_dev->dev, "iq/oq config failed\n");
  3318. goto setup_nic_dev_fail;
  3319. }
  3320. octeon_swap_8B_data((u64 *)(&resp->cfg_info),
  3321. (sizeof(struct liquidio_if_cfg_info)) >> 3);
  3322. num_iqueues = hweight64(resp->cfg_info.iqmask);
  3323. num_oqueues = hweight64(resp->cfg_info.oqmask);
  3324. if (!(num_iqueues) || !(num_oqueues)) {
  3325. dev_err(&octeon_dev->pci_dev->dev,
  3326. "Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n",
  3327. resp->cfg_info.iqmask,
  3328. resp->cfg_info.oqmask);
  3329. goto setup_nic_dev_fail;
  3330. }
  3331. dev_dbg(&octeon_dev->pci_dev->dev,
  3332. "interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d\n",
  3333. i, resp->cfg_info.iqmask, resp->cfg_info.oqmask,
  3334. num_iqueues, num_oqueues);
  3335. netdev = alloc_etherdev_mq(LIO_SIZE, num_iqueues);
  3336. if (!netdev) {
  3337. dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n");
  3338. goto setup_nic_dev_fail;
  3339. }
  3340. SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev);
  3341. /* Associate the routines that will handle different
  3342. * netdev tasks.
  3343. */
  3344. netdev->netdev_ops = &lionetdevops;
  3345. lio = GET_LIO(netdev);
  3346. memset(lio, 0, sizeof(struct lio));
  3347. lio->ifidx = ifidx_or_pfnum;
  3348. props = &octeon_dev->props[i];
  3349. props->gmxport = resp->cfg_info.linfo.gmxport;
  3350. props->netdev = netdev;
  3351. lio->linfo.num_rxpciq = num_oqueues;
  3352. lio->linfo.num_txpciq = num_iqueues;
  3353. for (j = 0; j < num_oqueues; j++) {
  3354. lio->linfo.rxpciq[j].u64 =
  3355. resp->cfg_info.linfo.rxpciq[j].u64;
  3356. }
  3357. for (j = 0; j < num_iqueues; j++) {
  3358. lio->linfo.txpciq[j].u64 =
  3359. resp->cfg_info.linfo.txpciq[j].u64;
  3360. }
  3361. lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
  3362. lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
  3363. lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64;
  3364. lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
  3365. if (OCTEON_CN23XX_PF(octeon_dev) ||
  3366. OCTEON_CN6XXX(octeon_dev)) {
  3367. lio->dev_capability = NETIF_F_HIGHDMA
  3368. | NETIF_F_IP_CSUM
  3369. | NETIF_F_IPV6_CSUM
  3370. | NETIF_F_SG | NETIF_F_RXCSUM
  3371. | NETIF_F_GRO
  3372. | NETIF_F_TSO | NETIF_F_TSO6
  3373. | NETIF_F_LRO;
  3374. }
  3375. netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE);
  3376. /* Copy of transmit encapsulation capabilities:
  3377. * TSO, TSO6, Checksums for this device
  3378. */
  3379. lio->enc_dev_capability = NETIF_F_IP_CSUM
  3380. | NETIF_F_IPV6_CSUM
  3381. | NETIF_F_GSO_UDP_TUNNEL
  3382. | NETIF_F_HW_CSUM | NETIF_F_SG
  3383. | NETIF_F_RXCSUM
  3384. | NETIF_F_TSO | NETIF_F_TSO6
  3385. | NETIF_F_LRO;
  3386. netdev->hw_enc_features = (lio->enc_dev_capability &
  3387. ~NETIF_F_LRO);
  3388. lio->dev_capability |= NETIF_F_GSO_UDP_TUNNEL;
  3389. netdev->vlan_features = lio->dev_capability;
  3390. /* Add any unchangeable hw features */
  3391. lio->dev_capability |= NETIF_F_HW_VLAN_CTAG_FILTER |
  3392. NETIF_F_HW_VLAN_CTAG_RX |
  3393. NETIF_F_HW_VLAN_CTAG_TX;
  3394. netdev->features = (lio->dev_capability & ~NETIF_F_LRO);
  3395. netdev->hw_features = lio->dev_capability;
  3396. /* HW_VLAN_RX and HW_VLAN_FILTER are always on */
  3397. netdev->hw_features = netdev->hw_features &
  3398. ~NETIF_F_HW_VLAN_CTAG_RX;
  3399. /* MTU range: 68 - 16000 */
  3400. netdev->min_mtu = LIO_MIN_MTU_SIZE;
  3401. netdev->max_mtu = LIO_MAX_MTU_SIZE;
  3402. /* Point to the properties for octeon device to which this
  3403. * interface belongs.
  3404. */
  3405. lio->oct_dev = octeon_dev;
  3406. lio->octprops = props;
  3407. lio->netdev = netdev;
  3408. dev_dbg(&octeon_dev->pci_dev->dev,
  3409. "if%d gmx: %d hw_addr: 0x%llx\n", i,
  3410. lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr));
  3411. for (j = 0; j < octeon_dev->sriov_info.max_vfs; j++) {
  3412. u8 vfmac[ETH_ALEN];
  3413. random_ether_addr(&vfmac[0]);
  3414. if (__liquidio_set_vf_mac(netdev, j,
  3415. &vfmac[0], false)) {
  3416. dev_err(&octeon_dev->pci_dev->dev,
  3417. "Error setting VF%d MAC address\n",
  3418. j);
  3419. goto setup_nic_dev_fail;
  3420. }
  3421. }
  3422. /* 64-bit swap required on LE machines */
  3423. octeon_swap_8B_data(&lio->linfo.hw_addr, 1);
  3424. for (j = 0; j < 6; j++)
  3425. mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j));
  3426. /* Copy MAC Address to OS network device structure */
  3427. ether_addr_copy(netdev->dev_addr, mac);
  3428. /* By default all interfaces on a single Octeon uses the same
  3429. * tx and rx queues
  3430. */
  3431. lio->txq = lio->linfo.txpciq[0].s.q_no;
  3432. lio->rxq = lio->linfo.rxpciq[0].s.q_no;
  3433. if (setup_io_queues(octeon_dev, i)) {
  3434. dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n");
  3435. goto setup_nic_dev_fail;
  3436. }
  3437. ifstate_set(lio, LIO_IFSTATE_DROQ_OPS);
  3438. lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq);
  3439. lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq);
  3440. if (setup_glists(octeon_dev, lio, num_iqueues)) {
  3441. dev_err(&octeon_dev->pci_dev->dev,
  3442. "Gather list allocation failed\n");
  3443. goto setup_nic_dev_fail;
  3444. }
  3445. /* Register ethtool support */
  3446. liquidio_set_ethtool_ops(netdev);
  3447. if (lio->oct_dev->chip_id == OCTEON_CN23XX_PF_VID)
  3448. octeon_dev->priv_flags = OCT_PRIV_FLAG_DEFAULT;
  3449. else
  3450. octeon_dev->priv_flags = 0x0;
  3451. if (netdev->features & NETIF_F_LRO)
  3452. liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
  3453. OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
  3454. liquidio_set_feature(netdev, OCTNET_CMD_ENABLE_VLAN_FILTER, 0);
  3455. if ((debug != -1) && (debug & NETIF_MSG_HW))
  3456. liquidio_set_feature(netdev,
  3457. OCTNET_CMD_VERBOSE_ENABLE, 0);
  3458. if (setup_link_status_change_wq(netdev))
  3459. goto setup_nic_dev_fail;
  3460. /* Register the network device with the OS */
  3461. if (register_netdev(netdev)) {
  3462. dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n");
  3463. goto setup_nic_dev_fail;
  3464. }
  3465. dev_dbg(&octeon_dev->pci_dev->dev,
  3466. "Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n",
  3467. i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
  3468. netif_carrier_off(netdev);
  3469. lio->link_changes++;
  3470. ifstate_set(lio, LIO_IFSTATE_REGISTERED);
  3471. /* Sending command to firmware to enable Rx checksum offload
  3472. * by default at the time of setup of Liquidio driver for
  3473. * this device
  3474. */
  3475. liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
  3476. OCTNET_CMD_RXCSUM_ENABLE);
  3477. liquidio_set_feature(netdev, OCTNET_CMD_TNL_TX_CSUM_CTL,
  3478. OCTNET_CMD_TXCSUM_ENABLE);
  3479. dev_dbg(&octeon_dev->pci_dev->dev,
  3480. "NIC ifidx:%d Setup successful\n", i);
  3481. octeon_free_soft_command(octeon_dev, sc);
  3482. }
  3483. return 0;
  3484. setup_nic_dev_fail:
  3485. octeon_free_soft_command(octeon_dev, sc);
  3486. setup_nic_wait_intr:
  3487. while (i--) {
  3488. dev_err(&octeon_dev->pci_dev->dev,
  3489. "NIC ifidx:%d Setup failed\n", i);
  3490. liquidio_destroy_nic_device(octeon_dev, i);
  3491. }
  3492. return -ENODEV;
  3493. }
  3494. #ifdef CONFIG_PCI_IOV
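/** \brief Enable PCI SR-IOV and build the DPI-ring-to-VF pci_dev lookup table
 * @param oct octeon device
 * @returns number of VFs allocated on success, negative error code on failure
 */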
  3495. static int octeon_enable_sriov(struct octeon_device *oct)
  3496. {
  3497. unsigned int num_vfs_alloced = oct->sriov_info.num_vfs_alloced;
  3498. struct pci_dev *vfdev;
  3499. int err;
  3500. u32 u;
  3501. if (OCTEON_CN23XX_PF(oct) && num_vfs_alloced) {
  3502. err = pci_enable_sriov(oct->pci_dev,
  3503. oct->sriov_info.num_vfs_alloced);
  3504. if (err) {
  3505. dev_err(&oct->pci_dev->dev,
  3506. "OCTEON: Failed to enable PCI sriov: %d\n",
  3507. err);
  3508. oct->sriov_info.num_vfs_alloced = 0;
  3509. return err;
  3510. }
  3511. oct->sriov_info.sriov_enabled = 1;
  3512. /* init lookup table that maps DPI ring number to VF pci_dev
  3513. * struct pointer
  3514. */
  3515. u = 0;
  3516. vfdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
  3517. OCTEON_CN23XX_VF_VID, NULL);
  3518. while (vfdev) {
  3519. if (vfdev->is_virtfn &&
  3520. (vfdev->physfn == oct->pci_dev)) {
  3521. oct->sriov_info.dpiring_to_vfpcidev_lut[u] =
  3522. vfdev;
  3523. u += oct->sriov_info.rings_per_vf;
  3524. }
  3525. vfdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
  3526. OCTEON_CN23XX_VF_VID, vfdev);
  3527. }
  3528. }
  3529. return num_vfs_alloced;
  3530. }
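/** \brief Disable PCI SR-IOV for this device
 * @param oct octeon device
 * @returns 0 on success, -EPERM if VFs are still assigned to VMs
 */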
  3531. static int lio_pci_sriov_disable(struct octeon_device *oct)
  3532. {
  3533. int u;
  3534. if (pci_vfs_assigned(oct->pci_dev)) {
  3535. dev_err(&oct->pci_dev->dev, "VFs are still assigned to VMs.\n");
  3536. return -EPERM;
  3537. }
  3538. pci_disable_sriov(oct->pci_dev);
  3539. u = 0;
  3540. while (u < MAX_POSSIBLE_VFS) {
  3541. oct->sriov_info.dpiring_to_vfpcidev_lut[u] = NULL;
  3542. u += oct->sriov_info.rings_per_vf;
  3543. }
  3544. oct->sriov_info.num_vfs_alloced = 0;
  3545. dev_info(&oct->pci_dev->dev, "oct->pf_num:%d disabled VFs\n",
  3546. oct->pf_num);
  3547. return 0;
  3548. }
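/** \brief SR-IOV configuration entry point (typically the driver's
 * sriov_configure callback)
 * @param dev PCI device of the PF
 * @param num_vfs number of VFs requested; 0 disables SR-IOV
 *
 * Rejects requests above sriov_info.max_vfs, otherwise enables the
 * requested number of VFs.
 */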
  3549. static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs)
  3550. {
  3551. struct octeon_device *oct = pci_get_drvdata(dev);
  3552. int ret = 0;
  3553. if ((num_vfs == oct->sriov_info.num_vfs_alloced) &&
  3554. (oct->sriov_info.sriov_enabled)) {
  3555. dev_info(&oct->pci_dev->dev, "oct->pf_num:%d already enabled num_vfs:%d\n",
  3556. oct->pf_num, num_vfs);
  3557. return 0;
  3558. }
  3559. if (!num_vfs) {
  3560. ret = lio_pci_sriov_disable(oct);
  3561. } else if (num_vfs > oct->sriov_info.max_vfs) {
  3562. dev_err(&oct->pci_dev->dev,
  3563. "OCTEON: Max allowed VFs:%d user requested:%d",
  3564. oct->sriov_info.max_vfs, num_vfs);
  3565. ret = -EPERM;
  3566. } else {
  3567. oct->sriov_info.num_vfs_alloced = num_vfs;
  3568. ret = octeon_enable_sriov(oct);
  3569. dev_info(&oct->pci_dev->dev, "oct->pf_num:%d num_vfs:%d\n",
  3570. oct->pf_num, num_vfs);
  3571. }
  3572. return ret;
  3573. }
  3574. #endif
  3575. /**
  3576. * \brief initialize the NIC
  3577. * @param oct octeon device
  3578. *
  3579. * This initialization routine is called once the Octeon device application is
  3580. * up and running
  3581. */
  3582. static int liquidio_init_nic_module(struct octeon_device *oct)
  3583. {
  3584. struct oct_intrmod_cfg *intrmod_cfg;
  3585. int i, retval = 0;
  3586. int num_nic_ports = CFG_GET_NUM_NIC_PORTS(octeon_get_conf(oct));
  3587. dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n");
  3588. /* Only the default iq and oq were initialized;
  3589. * initialize the rest as well.
  3590. */
  3591. /* run port_config command for each port */
  3592. oct->ifcount = num_nic_ports;
  3593. memset(oct->props, 0, sizeof(struct octdev_props) * num_nic_ports);
  3594. for (i = 0; i < MAX_OCTEON_LINKS; i++)
  3595. oct->props[i].gmxport = -1;
  3596. retval = setup_nic_devices(oct);
  3597. if (retval) {
  3598. dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n");
  3599. goto octnet_init_failure;
  3600. }
  3601. liquidio_ptp_init(oct);
  3602. /* Initialize interrupt moderation params */
  3603. intrmod_cfg = &((struct octeon_device *)oct)->intrmod;
  3604. intrmod_cfg->rx_enable = 1;
  3605. intrmod_cfg->check_intrvl = LIO_INTRMOD_CHECK_INTERVAL;
  3606. intrmod_cfg->maxpkt_ratethr = LIO_INTRMOD_MAXPKT_RATETHR;
  3607. intrmod_cfg->minpkt_ratethr = LIO_INTRMOD_MINPKT_RATETHR;
  3608. intrmod_cfg->rx_maxcnt_trigger = LIO_INTRMOD_RXMAXCNT_TRIGGER;
  3609. intrmod_cfg->rx_maxtmr_trigger = LIO_INTRMOD_RXMAXTMR_TRIGGER;
  3610. intrmod_cfg->rx_mintmr_trigger = LIO_INTRMOD_RXMINTMR_TRIGGER;
  3611. intrmod_cfg->rx_mincnt_trigger = LIO_INTRMOD_RXMINCNT_TRIGGER;
  3612. intrmod_cfg->tx_enable = 1;
  3613. intrmod_cfg->tx_maxcnt_trigger = LIO_INTRMOD_TXMAXCNT_TRIGGER;
  3614. intrmod_cfg->tx_mincnt_trigger = LIO_INTRMOD_TXMINCNT_TRIGGER;
  3615. intrmod_cfg->rx_frames = CFG_GET_OQ_INTR_PKT(octeon_get_conf(oct));
  3616. intrmod_cfg->rx_usecs = CFG_GET_OQ_INTR_TIME(octeon_get_conf(oct));
  3617. intrmod_cfg->tx_frames = CFG_GET_IQ_INTR_PKT(octeon_get_conf(oct));
  3618. dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n");
  3619. return retval;
  3620. octnet_init_failure:
  3621. oct->ifcount = 0;
  3622. return retval;
  3623. }
  3624. /**
  3625. * \brief starter callback that invokes the remaining initialization work after
  3626. * the NIC is up and running.
  3627. * @param work work_struct from which the octeon device context is derived
  3628. */
  3629. static void nic_starter(struct work_struct *work)
  3630. {
  3631. struct octeon_device *oct;
  3632. struct cavium_wk *wk = (struct cavium_wk *)work;
  3633. oct = (struct octeon_device *)wk->ctxptr;
  3634. if (atomic_read(&oct->status) == OCT_DEV_RUNNING)
  3635. return;
  3636. /* If the status of the device is CORE_OK, the core
  3637. * application has reported its application type. Call
  3638. * any registered handlers now and move to the RUNNING
  3639. * state.
  3640. */
  3641. if (atomic_read(&oct->status) != OCT_DEV_CORE_OK) {
  3642. schedule_delayed_work(&oct->nic_poll_work.work,
  3643. LIQUIDIO_STARTER_POLL_INTERVAL_MS);
  3644. return;
  3645. }
  3646. atomic_set(&oct->status, OCT_DEV_RUNNING);
  3647. if (oct->app_mode && oct->app_mode == CVM_DRV_NIC_APP) {
  3648. dev_dbg(&oct->pci_dev->dev, "Starting NIC module\n");
  3649. if (liquidio_init_nic_module(oct))
  3650. dev_err(&oct->pci_dev->dev, "NIC initialization failed\n");
  3651. else
  3652. handshake[oct->octeon_id].started_ok = 1;
  3653. } else {
  3654. dev_err(&oct->pci_dev->dev,
  3655. "Unexpected application running on NIC (%d). Check firmware.\n",
  3656. oct->app_mode);
  3657. }
  3658. complete(&handshake[oct->octeon_id].started);
  3659. }
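/** \brief Dispatch handler for VF driver notices from the firmware
 * @param recv_info receive info holding the notice
 * @param buf octeon device that received the notice
 *
 * Tracks VF driver load/unload (taking or dropping a module reference)
 * and records MAC address changes made by VF drivers.
 */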
  3660. static int
  3661. octeon_recv_vf_drv_notice(struct octeon_recv_info *recv_info, void *buf)
  3662. {
  3663. struct octeon_device *oct = (struct octeon_device *)buf;
  3664. struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
  3665. int i, notice, vf_idx;
  3666. u64 *data, vf_num;
  3667. notice = recv_pkt->rh.r.ossp;
  3668. data = (u64 *)get_rbd(recv_pkt->buffer_ptr[0]);
  3669. /* the first 64-bit word of data is the vf_num */
  3670. vf_num = data[0];
  3671. octeon_swap_8B_data(&vf_num, 1);
  3672. vf_idx = (int)vf_num - 1;
  3673. if (notice == VF_DRV_LOADED) {
  3674. if (!(oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx))) {
  3675. oct->sriov_info.vf_drv_loaded_mask |= BIT_ULL(vf_idx);
  3676. dev_info(&oct->pci_dev->dev,
  3677. "driver for VF%d was loaded\n", vf_idx);
  3678. try_module_get(THIS_MODULE);
  3679. }
  3680. } else if (notice == VF_DRV_REMOVED) {
  3681. if (oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx)) {
  3682. oct->sriov_info.vf_drv_loaded_mask &= ~BIT_ULL(vf_idx);
  3683. dev_info(&oct->pci_dev->dev,
  3684. "driver for VF%d was removed\n", vf_idx);
  3685. module_put(THIS_MODULE);
  3686. }
  3687. } else if (notice == VF_DRV_MACADDR_CHANGED) {
  3688. u8 *b = (u8 *)&data[1];
  3689. oct->sriov_info.vf_macaddr[vf_idx] = data[1];
  3690. dev_info(&oct->pci_dev->dev,
  3691. "VF driver changed VF%d's MAC address to %pM\n",
  3692. vf_idx, b + 2);
  3693. }
  3694. for (i = 0; i < recv_pkt->buffer_count; i++)
  3695. recv_buffer_free(recv_pkt->buffer_ptr[i]);
  3696. octeon_free_recv_info(recv_info);
  3697. return 0;
  3698. }
  3699. /**
  3700. * \brief Device initialization for each Octeon device that is probed
  3701. * @param octeon_dev octeon device
  3702. */
  3703. static int octeon_device_init(struct octeon_device *octeon_dev)
  3704. {
  3705. int j, ret;
  3706. int fw_loaded = 0;
  3707. char bootcmd[] = "\n";
  3708. struct octeon_device_priv *oct_priv =
  3709. (struct octeon_device_priv *)octeon_dev->priv;
  3710. atomic_set(&octeon_dev->status, OCT_DEV_BEGIN_STATE);
  3711. /* Enable access to the octeon device and make its DMA capability
  3712. * known to the OS.
  3713. */
  3714. if (octeon_pci_os_setup(octeon_dev))
  3715. return 1;
  3716. atomic_set(&octeon_dev->status, OCT_DEV_PCI_ENABLE_DONE);
  3717. /* Identify the Octeon type and map the BAR address space. */
  3718. if (octeon_chip_specific_setup(octeon_dev)) {
  3719. dev_err(&octeon_dev->pci_dev->dev, "Chip specific setup failed\n");
  3720. return 1;
  3721. }
  3722. atomic_set(&octeon_dev->status, OCT_DEV_PCI_MAP_DONE);
  3723. octeon_dev->app_mode = CVM_DRV_INVALID_APP;
  3724. if (OCTEON_CN23XX_PF(octeon_dev)) {
  3725. if (!cn23xx_fw_loaded(octeon_dev)) {
  3726. fw_loaded = 0;
  3727. /* Do a soft reset of the Octeon device. */
  3728. if (octeon_dev->fn_list.soft_reset(octeon_dev))
  3729. return 1;
  3730. /* things might have changed */
  3731. if (!cn23xx_fw_loaded(octeon_dev))
  3732. fw_loaded = 0;
  3733. else
  3734. fw_loaded = 1;
  3735. } else {
  3736. fw_loaded = 1;
  3737. }
  3738. } else if (octeon_dev->fn_list.soft_reset(octeon_dev)) {
  3739. return 1;
  3740. }
  3741. /* Initialize the dispatch mechanism used to push packets arriving on
  3742. * Octeon Output queues.
  3743. */
  3744. if (octeon_init_dispatch_list(octeon_dev))
  3745. return 1;
  3746. octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
  3747. OPCODE_NIC_CORE_DRV_ACTIVE,
  3748. octeon_core_drv_init,
  3749. octeon_dev);
  3750. octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
  3751. OPCODE_NIC_VF_DRV_NOTICE,
  3752. octeon_recv_vf_drv_notice, octeon_dev);
  3753. INIT_DELAYED_WORK(&octeon_dev->nic_poll_work.work, nic_starter);
  3754. octeon_dev->nic_poll_work.ctxptr = (void *)octeon_dev;
  3755. schedule_delayed_work(&octeon_dev->nic_poll_work.work,
  3756. LIQUIDIO_STARTER_POLL_INTERVAL_MS);
  3757. atomic_set(&octeon_dev->status, OCT_DEV_DISPATCH_INIT_DONE);
  3758. if (octeon_set_io_queues_off(octeon_dev)) {
  3759. dev_err(&octeon_dev->pci_dev->dev, "setting io queues off failed\n");
  3760. return 1;
  3761. }
  3762. if (OCTEON_CN23XX_PF(octeon_dev)) {
  3763. ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
  3764. if (ret) {
  3765. dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Failed to configure device registers\n");
  3766. return ret;
  3767. }
  3768. }
  3769. /* Initialize soft command buffer pool
  3770. */
  3771. if (octeon_setup_sc_buffer_pool(octeon_dev)) {
  3772. dev_err(&octeon_dev->pci_dev->dev, "sc buffer pool allocation failed\n");
  3773. return 1;
  3774. }
  3775. atomic_set(&octeon_dev->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE);
  3776. /* Setup the data structures that manage this Octeon's Input queues. */
  3777. if (octeon_setup_instr_queues(octeon_dev)) {
  3778. dev_err(&octeon_dev->pci_dev->dev,
  3779. "instruction queue initialization failed\n");
  3780. return 1;
  3781. }
  3782. atomic_set(&octeon_dev->status, OCT_DEV_INSTR_QUEUE_INIT_DONE);
  3783. /* Initialize lists to manage the requests of different types that
  3784. * arrive from user & kernel applications for this octeon device.
  3785. */
  3786. if (octeon_setup_response_list(octeon_dev)) {
  3787. dev_err(&octeon_dev->pci_dev->dev, "Response list allocation failed\n");
  3788. return 1;
  3789. }
  3790. atomic_set(&octeon_dev->status, OCT_DEV_RESP_LIST_INIT_DONE);
  3791. if (octeon_setup_output_queues(octeon_dev)) {
  3792. dev_err(&octeon_dev->pci_dev->dev, "Output queue initialization failed\n");
  3793. return 1;
  3794. }
  3795. atomic_set(&octeon_dev->status, OCT_DEV_DROQ_INIT_DONE);
  3796. if (OCTEON_CN23XX_PF(octeon_dev)) {
  3797. if (octeon_dev->fn_list.setup_mbox(octeon_dev)) {
  3798. dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Mailbox setup failed\n");
  3799. return 1;
  3800. }
  3801. atomic_set(&octeon_dev->status, OCT_DEV_MBOX_SETUP_DONE);
  3802. if (octeon_allocate_ioq_vector(octeon_dev)) {
  3803. dev_err(&octeon_dev->pci_dev->dev, "OCTEON: ioq vector allocation failed\n");
  3804. return 1;
  3805. }
  3806. atomic_set(&octeon_dev->status, OCT_DEV_MSIX_ALLOC_VECTOR_DONE);
  3807. } else {
  3808. /* The input and output queue registers were setup earlier (the
  3809. * queues were not enabled). Any additional registers
  3810. * that need to be programmed should be done now.
  3811. */
  3812. ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
  3813. if (ret) {
  3814. dev_err(&octeon_dev->pci_dev->dev,
  3815. "Failed to configure device registers\n");
  3816. return ret;
  3817. }
  3818. }
  3819. /* Initialize the tasklet that handles output queue packet processing.*/
  3820. dev_dbg(&octeon_dev->pci_dev->dev, "Initializing droq tasklet\n");
  3821. tasklet_init(&oct_priv->droq_tasklet, octeon_droq_bh,
  3822. (unsigned long)octeon_dev);
  3823. /* Setup the interrupt handler and record the INT SUM register address
  3824. */
  3825. if (octeon_setup_interrupt(octeon_dev))
  3826. return 1;
  3827. /* Enable Octeon device interrupts */
  3828. octeon_dev->fn_list.enable_interrupt(octeon_dev, OCTEON_ALL_INTR);
  3829. atomic_set(&octeon_dev->status, OCT_DEV_INTR_SET_DONE);
  3830. /* Enable the input and output queues for this Octeon device */
  3831. ret = octeon_dev->fn_list.enable_io_queues(octeon_dev);
  3832. if (ret) {
  3833. dev_err(&octeon_dev->pci_dev->dev, "Failed to enable input/output queues");
  3834. return ret;
  3835. }
  3836. atomic_set(&octeon_dev->status, OCT_DEV_IO_QUEUES_DONE);
  3837. if ((!OCTEON_CN23XX_PF(octeon_dev)) || !fw_loaded) {
  3838. dev_dbg(&octeon_dev->pci_dev->dev, "Waiting for DDR initialization...\n");
  3839. if (!ddr_timeout) {
  3840. dev_info(&octeon_dev->pci_dev->dev,
  3841. "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n");
  3842. }
  3843. schedule_timeout_uninterruptible(HZ * LIO_RESET_SECS);
  3844. /* Wait for the octeon to initialize DDR after the soft-reset.*/
  3845. while (!ddr_timeout) {
  3846. set_current_state(TASK_INTERRUPTIBLE);
  3847. if (schedule_timeout(HZ / 10)) {
  3848. /* user probably pressed Control-C */
  3849. return 1;
  3850. }
  3851. }
  3852. ret = octeon_wait_for_ddr_init(octeon_dev, &ddr_timeout);
  3853. if (ret) {
  3854. dev_err(&octeon_dev->pci_dev->dev,
  3855. "DDR not initialized. Please confirm that board is configured to boot from Flash, ret: %d\n",
  3856. ret);
  3857. return 1;
  3858. }
  3859. if (octeon_wait_for_bootloader(octeon_dev, 1000)) {
  3860. dev_err(&octeon_dev->pci_dev->dev, "Board not responding\n");
  3861. return 1;
  3862. }
  3863. /* Divert uboot to take commands from host instead. */
  3864. ret = octeon_console_send_cmd(octeon_dev, bootcmd, 50);
  3865. dev_dbg(&octeon_dev->pci_dev->dev, "Initializing consoles\n");
  3866. ret = octeon_init_consoles(octeon_dev);
  3867. if (ret) {
  3868. dev_err(&octeon_dev->pci_dev->dev, "Could not access board consoles\n");
  3869. return 1;
  3870. }
  3871. ret = octeon_add_console(octeon_dev, 0);
  3872. if (ret) {
  3873. dev_err(&octeon_dev->pci_dev->dev, "Could not access board console\n");
  3874. return 1;
  3875. }
  3876. atomic_set(&octeon_dev->status, OCT_DEV_CONSOLE_INIT_DONE);
  3877. dev_dbg(&octeon_dev->pci_dev->dev, "Loading firmware\n");
  3878. ret = load_firmware(octeon_dev);
  3879. if (ret) {
  3880. dev_err(&octeon_dev->pci_dev->dev, "Could not load firmware to board\n");
  3881. return 1;
  3882. }
  3883. /* set bit 1 of SLI_SCRATCH_1 to indicate that firmware is
  3884. * loaded
  3885. */
  3886. if (OCTEON_CN23XX_PF(octeon_dev))
  3887. octeon_write_csr64(octeon_dev, CN23XX_SLI_SCRATCH1,
  3888. 2ULL);
  3889. }
  3890. handshake[octeon_dev->octeon_id].init_ok = 1;
  3891. complete(&handshake[octeon_dev->octeon_id].init);
  3892. atomic_set(&octeon_dev->status, OCT_DEV_HOST_OK);
  3893. /* Send Credit for Octeon Output queues. Credits are always sent after
  3894. * the output queue is enabled.
  3895. */
  3896. for (j = 0; j < octeon_dev->num_oqs; j++)
  3897. writel(octeon_dev->droq[j]->max_count,
  3898. octeon_dev->droq[j]->pkts_credit_reg);
  3899. /* Packets can start arriving on the output queues from this point. */
  3900. return 0;
  3901. }
  3902. /**
  3903. * \brief Exits the module
  3904. */
  3905. static void __exit liquidio_exit(void)
  3906. {
  3907. liquidio_deinit_pci();
  3908. pr_info("LiquidIO network module is now unloaded\n");
  3909. }
  3910. module_init(liquidio_init);
  3911. module_exit(liquidio_exit);