ehea_main.c
/*
 * linux/drivers/net/ethernet/ibm/ehea/ehea_main.c
 *
 * eHEA ethernet device driver for IBM eServer System p
 *
 * (C) Copyright IBM Corp. 2006
 *
 * Authors:
 *     Christoph Raisch <raisch@de.ibm.com>
 *     Jan-Bernd Themann <themann@de.ibm.com>
 *     Thomas Klein <tklein@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/device.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/if_ether.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/memory.h>
#include <asm/kexec.h>
#include <linux/mutex.h>
#include <linux/prefetch.h>
#include <net/ip.h>

#include "ehea.h"
#include "ehea_qmr.h"
#include "ehea_phyp.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
MODULE_DESCRIPTION("IBM eServer HEA Driver");
MODULE_VERSION(DRV_VERSION);

static int msg_level = -1;
static int rq1_entries = EHEA_DEF_ENTRIES_RQ1;
static int rq2_entries = EHEA_DEF_ENTRIES_RQ2;
static int rq3_entries = EHEA_DEF_ENTRIES_RQ3;
static int sq_entries = EHEA_DEF_ENTRIES_SQ;
static int use_mcs = 1;
static int prop_carrier_state;

module_param(msg_level, int, 0);
module_param(rq1_entries, int, 0);
module_param(rq2_entries, int, 0);
module_param(rq3_entries, int, 0);
module_param(sq_entries, int, 0);
module_param(prop_carrier_state, int, 0);
module_param(use_mcs, int, 0);

MODULE_PARM_DESC(msg_level, "msg_level");
MODULE_PARM_DESC(prop_carrier_state, "Propagate carrier state of physical "
                 "port to stack. 1:yes, 0:no. Default = 0");
MODULE_PARM_DESC(rq3_entries, "Number of entries for Receive Queue 3 "
                 "[2^x - 1], x = [7..14]. Default = "
                 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ3));
MODULE_PARM_DESC(rq2_entries, "Number of entries for Receive Queue 2 "
                 "[2^x - 1], x = [7..14]. Default = "
                 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ2));
MODULE_PARM_DESC(rq1_entries, "Number of entries for Receive Queue 1 "
                 "[2^x - 1], x = [7..14]. Default = "
                 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ1));
MODULE_PARM_DESC(sq_entries, "Number of entries for the Send Queue "
                 "[2^x - 1], x = [7..14]. Default = "
                 __MODULE_STRING(EHEA_DEF_ENTRIES_SQ));
MODULE_PARM_DESC(use_mcs, "Multiple receive queues, 1: enable, 0: disable, "
                 "Default = 1");

static int port_name_cnt;
static LIST_HEAD(adapter_list);
static unsigned long ehea_driver_flags;
static DEFINE_MUTEX(dlpar_mem_lock);
static struct ehea_fw_handle_array ehea_fw_handles;
static struct ehea_bcmc_reg_array ehea_bcmc_regs;

static int ehea_probe_adapter(struct platform_device *dev);
static int ehea_remove(struct platform_device *dev);

static const struct of_device_id ehea_module_device_table[] = {
    {
        .name = "lhea",
        .compatible = "IBM,lhea",
    },
    {
        .type = "network",
        .compatible = "IBM,lhea-ethernet",
    },
    {},
};
MODULE_DEVICE_TABLE(of, ehea_module_device_table);

static const struct of_device_id ehea_device_table[] = {
    {
        .name = "lhea",
        .compatible = "IBM,lhea",
    },
    {},
};

static struct platform_driver ehea_driver = {
    .driver = {
        .name = "ehea",
        .owner = THIS_MODULE,
        .of_match_table = ehea_device_table,
    },
    .probe = ehea_probe_adapter,
    .remove = ehea_remove,
};
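/* Hex-dump len bytes at adr in 16-byte rows, each row prefixed with msg. */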
void ehea_dump(void *adr, int len, char *msg)
{
    int x;
    unsigned char *deb = adr;

    for (x = 0; x < len; x += 16) {
        pr_info("%s adr=%p ofs=%04x %016llx %016llx\n",
                msg, deb, x, *((u64 *)&deb[0]), *((u64 *)&deb[8]));
        deb += 16;
    }
}
static void ehea_schedule_port_reset(struct ehea_port *port)
{
    if (!test_bit(__EHEA_DISABLE_PORT_RESET, &port->flags))
        schedule_work(&port->reset_task);
}
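/*
 * Rebuild the global snapshot of firmware handles (QP, CQ, EQ and MR
 * handles) for every adapter and active port. The counts are taken and
 * the array is filled under ehea_fw_handles.lock, with re-checks while
 * filling so a port that vanishes in between cannot overrun the array.
 * The snapshot is presumably consumed by the reboot/kexec path to
 * release firmware resources in one pass.
 */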
static void ehea_update_firmware_handles(void)
{
    struct ehea_fw_handle_entry *arr = NULL;
    struct ehea_adapter *adapter;
    int num_adapters = 0;
    int num_ports = 0;
    int num_portres = 0;
    int i = 0;
    int num_fw_handles, k, l;

    /* Determine number of handles */
    mutex_lock(&ehea_fw_handles.lock);

    list_for_each_entry(adapter, &adapter_list, list) {
        num_adapters++;

        for (k = 0; k < EHEA_MAX_PORTS; k++) {
            struct ehea_port *port = adapter->port[k];

            if (!port || (port->state != EHEA_PORT_UP))
                continue;

            num_ports++;
            num_portres += port->num_def_qps;
        }
    }

    num_fw_handles = num_adapters * EHEA_NUM_ADAPTER_FW_HANDLES +
                     num_ports * EHEA_NUM_PORT_FW_HANDLES +
                     num_portres * EHEA_NUM_PORTRES_FW_HANDLES;

    if (num_fw_handles) {
        arr = kcalloc(num_fw_handles, sizeof(*arr), GFP_KERNEL);
        if (!arr)
            goto out; /* Keep the existing array */
    } else
        goto out_update;

    list_for_each_entry(adapter, &adapter_list, list) {
        if (num_adapters == 0)
            break;

        for (k = 0; k < EHEA_MAX_PORTS; k++) {
            struct ehea_port *port = adapter->port[k];

            if (!port || (port->state != EHEA_PORT_UP) ||
                (num_ports == 0))
                continue;

            for (l = 0; l < port->num_def_qps; l++) {
                struct ehea_port_res *pr = &port->port_res[l];

                arr[i].adh = adapter->handle;
                arr[i++].fwh = pr->qp->fw_handle;
                arr[i].adh = adapter->handle;
                arr[i++].fwh = pr->send_cq->fw_handle;
                arr[i].adh = adapter->handle;
                arr[i++].fwh = pr->recv_cq->fw_handle;
                arr[i].adh = adapter->handle;
                arr[i++].fwh = pr->eq->fw_handle;
                arr[i].adh = adapter->handle;
                arr[i++].fwh = pr->send_mr.handle;
                arr[i].adh = adapter->handle;
                arr[i++].fwh = pr->recv_mr.handle;
            }
            arr[i].adh = adapter->handle;
            arr[i++].fwh = port->qp_eq->fw_handle;
            num_ports--;
        }

        arr[i].adh = adapter->handle;
        arr[i++].fwh = adapter->neq->fw_handle;

        if (adapter->mr.handle) {
            arr[i].adh = adapter->handle;
            arr[i++].fwh = adapter->mr.handle;
        }
        num_adapters--;
    }

out_update:
    kfree(ehea_fw_handles.arr);
    ehea_fw_handles.arr = arr;
    ehea_fw_handles.num_entries = i;
out:
    mutex_unlock(&ehea_fw_handles.lock);
}
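/*
 * Rebuild the snapshot of broadcast/multicast registrations: two entries
 * per address, one untagged and one covering all VLAN IDs. Runs under a
 * spinlock, hence the GFP_ATOMIC allocation.
 */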
static void ehea_update_bcmc_registrations(void)
{
    unsigned long flags;
    struct ehea_bcmc_reg_entry *arr = NULL;
    struct ehea_adapter *adapter;
    struct ehea_mc_list *mc_entry;
    int num_registrations = 0;
    int i = 0;
    int k;

    spin_lock_irqsave(&ehea_bcmc_regs.lock, flags);

    /* Determine number of registrations */
    list_for_each_entry(adapter, &adapter_list, list)
        for (k = 0; k < EHEA_MAX_PORTS; k++) {
            struct ehea_port *port = adapter->port[k];

            if (!port || (port->state != EHEA_PORT_UP))
                continue;

            num_registrations += 2; /* Broadcast registrations */

            list_for_each_entry(mc_entry, &port->mc_list->list, list)
                num_registrations += 2;
        }

    if (num_registrations) {
        arr = kcalloc(num_registrations, sizeof(*arr), GFP_ATOMIC);
        if (!arr)
            goto out; /* Keep the existing array */
    } else
        goto out_update;

    list_for_each_entry(adapter, &adapter_list, list) {
        for (k = 0; k < EHEA_MAX_PORTS; k++) {
            struct ehea_port *port = adapter->port[k];

            if (!port || (port->state != EHEA_PORT_UP))
                continue;

            if (num_registrations == 0)
                goto out_update;

            arr[i].adh = adapter->handle;
            arr[i].port_id = port->logical_port_id;
            arr[i].reg_type = EHEA_BCMC_BROADCAST |
                              EHEA_BCMC_UNTAGGED;
            arr[i++].macaddr = port->mac_addr;

            arr[i].adh = adapter->handle;
            arr[i].port_id = port->logical_port_id;
            arr[i].reg_type = EHEA_BCMC_BROADCAST |
                              EHEA_BCMC_VLANID_ALL;
            arr[i++].macaddr = port->mac_addr;
            num_registrations -= 2;

            list_for_each_entry(mc_entry,
                                &port->mc_list->list, list) {
                if (num_registrations == 0)
                    goto out_update;

                arr[i].adh = adapter->handle;
                arr[i].port_id = port->logical_port_id;
                arr[i].reg_type = EHEA_BCMC_MULTICAST |
                                  EHEA_BCMC_UNTAGGED;
                if (mc_entry->macaddr == 0)
                    arr[i].reg_type |= EHEA_BCMC_SCOPE_ALL;
                arr[i++].macaddr = mc_entry->macaddr;

                arr[i].adh = adapter->handle;
                arr[i].port_id = port->logical_port_id;
                arr[i].reg_type = EHEA_BCMC_MULTICAST |
                                  EHEA_BCMC_VLANID_ALL;
                if (mc_entry->macaddr == 0)
                    arr[i].reg_type |= EHEA_BCMC_SCOPE_ALL;
                arr[i++].macaddr = mc_entry->macaddr;
                num_registrations -= 2;
            }
        }
    }

out_update:
    kfree(ehea_bcmc_regs.arr);
    ehea_bcmc_regs.arr = arr;
    ehea_bcmc_regs.num_entries = i;
out:
    spin_unlock_irqrestore(&ehea_bcmc_regs.lock, flags);
}
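/*
 * ndo_get_stats64: sum the per-queue software counters; multicast and
 * rx_errors are taken from the firmware values cached by the periodic
 * stats worker below.
 */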
static struct rtnl_link_stats64 *ehea_get_stats64(struct net_device *dev,
                                                  struct rtnl_link_stats64 *stats)
{
    struct ehea_port *port = netdev_priv(dev);
    u64 rx_packets = 0, tx_packets = 0, rx_bytes = 0, tx_bytes = 0;
    int i;

    for (i = 0; i < port->num_def_qps; i++) {
        rx_packets += port->port_res[i].rx_packets;
        rx_bytes += port->port_res[i].rx_bytes;
    }

    for (i = 0; i < port->num_def_qps; i++) {
        tx_packets += port->port_res[i].tx_packets;
        tx_bytes += port->port_res[i].tx_bytes;
    }

    stats->tx_packets = tx_packets;
    stats->rx_bytes = rx_bytes;
    stats->tx_bytes = tx_bytes;
    stats->rx_packets = rx_packets;

    stats->multicast = port->stats.multicast;
    stats->rx_errors = port->stats.rx_errors;
    return stats;
}

static void ehea_update_stats(struct work_struct *work)
{
    struct ehea_port *port =
        container_of(work, struct ehea_port, stats_work.work);
    struct net_device *dev = port->netdev;
    struct rtnl_link_stats64 *stats = &port->stats;
    struct hcp_ehea_port_cb2 *cb2;
    u64 hret;

    cb2 = (void *)get_zeroed_page(GFP_KERNEL);
    if (!cb2) {
        netdev_err(dev, "No mem for cb2. Some interface statistics were not updated\n");
        goto resched;
    }

    hret = ehea_h_query_ehea_port(port->adapter->handle,
                                  port->logical_port_id,
                                  H_PORT_CB2, H_PORT_CB2_ALL, cb2);
    if (hret != H_SUCCESS) {
        netdev_err(dev, "query_ehea_port failed\n");
        goto out_herr;
    }

    if (netif_msg_hw(port))
        ehea_dump(cb2, sizeof(*cb2), "net_device_stats");

    stats->multicast = cb2->rxmcp;
    stats->rx_errors = cb2->rxuerr;

out_herr:
    free_page((unsigned long)cb2);
resched:
    schedule_delayed_work(&port->stats_work,
                          round_jiffies_relative(msecs_to_jiffies(1000)));
}
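/*
 * Replenish receive queue 1. Allocation failures are remembered in
 * os_skbs ("outstanding skbs") and retried on the next refill; the
 * doorbell is rung only for the slots actually filled.
 */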
static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
{
    struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
    struct net_device *dev = pr->port->netdev;
    int max_index_mask = pr->rq1_skba.len - 1;
    int fill_wqes = pr->rq1_skba.os_skbs + nr_of_wqes;
    int adder = 0;
    int i;

    pr->rq1_skba.os_skbs = 0;

    if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
        if (nr_of_wqes > 0)
            pr->rq1_skba.index = index;
        pr->rq1_skba.os_skbs = fill_wqes;
        return;
    }

    for (i = 0; i < fill_wqes; i++) {
        if (!skb_arr_rq1[index]) {
            skb_arr_rq1[index] = netdev_alloc_skb(dev,
                                                  EHEA_L_PKT_SIZE);
            if (!skb_arr_rq1[index]) {
                pr->rq1_skba.os_skbs = fill_wqes - i;
                break;
            }
        }
        index--;
        index &= max_index_mask;
        adder++;
    }

    if (adder == 0)
        return;

    /* Ring doorbell */
    ehea_update_rq1a(pr->qp, adder);
}

static void ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a)
{
    struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
    struct net_device *dev = pr->port->netdev;
    int i;

    if (nr_rq1a > pr->rq1_skba.len) {
        netdev_err(dev, "NR_RQ1A bigger than skb array len\n");
        return;
    }

    for (i = 0; i < nr_rq1a; i++) {
        skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE);
        if (!skb_arr_rq1[i])
            break;
    }
    /* Ring doorbell */
    ehea_update_rq1a(pr->qp, i - 1);
}
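/*
 * Common refill path for RQ2/RQ3: allocate an skb per free slot, map it
 * and post a receive WQE pointing at it. Returns -ENOMEM only when the
 * queue has effectively run dry.
 */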
static int ehea_refill_rq_def(struct ehea_port_res *pr,
                              struct ehea_q_skb_arr *q_skba, int rq_nr,
                              int num_wqes, int wqe_type, int packet_size)
{
    struct net_device *dev = pr->port->netdev;
    struct ehea_qp *qp = pr->qp;
    struct sk_buff **skb_arr = q_skba->arr;
    struct ehea_rwqe *rwqe;
    int i, index, max_index_mask, fill_wqes;
    int adder = 0;
    int ret = 0;

    fill_wqes = q_skba->os_skbs + num_wqes;
    q_skba->os_skbs = 0;

    if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
        q_skba->os_skbs = fill_wqes;
        return ret;
    }

    index = q_skba->index;
    max_index_mask = q_skba->len - 1;
    for (i = 0; i < fill_wqes; i++) {
        u64 tmp_addr;
        struct sk_buff *skb;

        skb = netdev_alloc_skb_ip_align(dev, packet_size);
        if (!skb) {
            q_skba->os_skbs = fill_wqes - i;
            if (q_skba->os_skbs == q_skba->len - 2) {
                netdev_info(pr->port->netdev,
                            "rq%i ran dry - no mem for skb\n",
                            rq_nr);
                ret = -ENOMEM;
            }
            break;
        }

        skb_arr[index] = skb;
        tmp_addr = ehea_map_vaddr(skb->data);
        if (tmp_addr == -1) {
            dev_consume_skb_any(skb);
            q_skba->os_skbs = fill_wqes - i;
            ret = 0;
            break;
        }

        rwqe = ehea_get_next_rwqe(qp, rq_nr);
        rwqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, wqe_type)
                    | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index);
        rwqe->sg_list[0].l_key = pr->recv_mr.lkey;
        rwqe->sg_list[0].vaddr = tmp_addr;
        rwqe->sg_list[0].len = packet_size;
        rwqe->data_segments = 1;

        index++;
        index &= max_index_mask;
        adder++;
    }

    q_skba->index = index;
    if (adder == 0)
        goto out;

    /* Ring doorbell */
    iosync();
    if (rq_nr == 2)
        ehea_update_rq2a(pr->qp, adder);
    else
        ehea_update_rq3a(pr->qp, adder);
out:
    return ret;
}

static int ehea_refill_rq2(struct ehea_port_res *pr, int nr_of_wqes)
{
    return ehea_refill_rq_def(pr, &pr->rq2_skba, 2,
                              nr_of_wqes, EHEA_RWQE2_TYPE,
                              EHEA_RQ2_PKT_SIZE);
}

static int ehea_refill_rq3(struct ehea_port_res *pr, int nr_of_wqes)
{
    return ehea_refill_rq_def(pr, &pr->rq3_skba, 3,
                              nr_of_wqes, EHEA_RWQE3_TYPE,
                              EHEA_MAX_PACKET_SIZE);
}

static inline int ehea_check_cqe(struct ehea_cqe *cqe, int *rq_num)
{
    *rq_num = (cqe->type & EHEA_CQE_TYPE_RQ) >> 5;
    if ((cqe->status & EHEA_CQE_STAT_ERR_MASK) == 0)
        return 0;
    if (((cqe->status & EHEA_CQE_STAT_ERR_TCP) != 0) &&
        (cqe->header_length == 0))
        return 0;
    return -EINVAL;
}

static inline void ehea_fill_skb(struct net_device *dev,
                                 struct sk_buff *skb, struct ehea_cqe *cqe,
                                 struct ehea_port_res *pr)
{
    int length = cqe->num_bytes_transfered - 4; /* remove CRC */

    skb_put(skb, length);
    skb->protocol = eth_type_trans(skb, dev);

    /* The packet was not an IPV4 packet so a complemented checksum was
       calculated. The value is found in the Internet Checksum field. */
    if (cqe->status & EHEA_CQE_BLIND_CKSUM) {
        skb->ip_summed = CHECKSUM_COMPLETE;
        skb->csum = csum_unfold(~cqe->inet_checksum_value);
    } else
        skb->ip_summed = CHECKSUM_UNNECESSARY;

    skb_record_rx_queue(skb, pr - &pr->port->port_res[0]);
}

static inline struct sk_buff *get_skb_by_index(struct sk_buff **skb_array,
                                               int arr_len,
                                               struct ehea_cqe *cqe)
{
    int skb_index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
    struct sk_buff *skb;
    void *pref;
    int x;

    x = skb_index + 1;
    x &= (arr_len - 1);

    pref = skb_array[x];
    if (pref) {
        prefetchw(pref);
        prefetchw(pref + EHEA_CACHE_LINE);

        pref = (skb_array[x]->data);
        prefetch(pref);
        prefetch(pref + EHEA_CACHE_LINE);
        prefetch(pref + EHEA_CACHE_LINE * 2);
        prefetch(pref + EHEA_CACHE_LINE * 3);
    }

    skb = skb_array[skb_index];
    skb_array[skb_index] = NULL;
    return skb;
}

static inline struct sk_buff *get_skb_by_index_ll(struct sk_buff **skb_array,
                                                  int arr_len, int wqe_index)
{
    struct sk_buff *skb;
    void *pref;
    int x;

    x = wqe_index + 1;
    x &= (arr_len - 1);

    pref = skb_array[x];
    if (pref) {
        prefetchw(pref);
        prefetchw(pref + EHEA_CACHE_LINE);

        pref = (skb_array[x]->data);
        prefetchw(pref);
        prefetchw(pref + EHEA_CACHE_LINE);
    }

    skb = skb_array[wqe_index];
    skb_array[wqe_index] = NULL;
    return skb;
}

static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq,
                                 struct ehea_cqe *cqe, int *processed_rq2,
                                 int *processed_rq3)
{
    struct sk_buff *skb;

    if (cqe->status & EHEA_CQE_STAT_ERR_TCP)
        pr->p_stats.err_tcp_cksum++;
    if (cqe->status & EHEA_CQE_STAT_ERR_IP)
        pr->p_stats.err_ip_cksum++;
    if (cqe->status & EHEA_CQE_STAT_ERR_CRC)
        pr->p_stats.err_frame_crc++;

    if (rq == 2) {
        *processed_rq2 += 1;
        skb = get_skb_by_index(pr->rq2_skba.arr, pr->rq2_skba.len, cqe);
        dev_kfree_skb(skb);
    } else if (rq == 3) {
        *processed_rq3 += 1;
        skb = get_skb_by_index(pr->rq3_skba.arr, pr->rq3_skba.len, cqe);
        dev_kfree_skb(skb);
    }

    if (cqe->status & EHEA_CQE_STAT_FAT_ERR_MASK) {
        if (netif_msg_rx_err(pr->port)) {
            pr_err("Critical receive error for QP %d. Resetting port.\n",
                   pr->qp->init_attr.qp_nr);
            ehea_dump(cqe, sizeof(*cqe), "CQE");
        }
        ehea_schedule_port_reset(pr->port);
        return 1;
    }

    return 0;
}
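/*
 * NAPI receive processing: drain up to budget completions from the
 * receive queues. RQ1 completions carry the frame as immediate data in
 * the CQE (copied out at offset 64); RQ2/RQ3 completions reference
 * pre-posted skbs. Consumed slots are refilled at the end.
 */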
static int ehea_proc_rwqes(struct net_device *dev,
                           struct ehea_port_res *pr,
                           int budget)
{
    struct ehea_port *port = pr->port;
    struct ehea_qp *qp = pr->qp;
    struct ehea_cqe *cqe;
    struct sk_buff *skb;
    struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
    struct sk_buff **skb_arr_rq2 = pr->rq2_skba.arr;
    struct sk_buff **skb_arr_rq3 = pr->rq3_skba.arr;
    int skb_arr_rq1_len = pr->rq1_skba.len;
    int skb_arr_rq2_len = pr->rq2_skba.len;
    int skb_arr_rq3_len = pr->rq3_skba.len;
    int processed, processed_rq1, processed_rq2, processed_rq3;
    u64 processed_bytes = 0;
    int wqe_index, last_wqe_index, rq, port_reset;

    processed = processed_rq1 = processed_rq2 = processed_rq3 = 0;
    last_wqe_index = 0;

    cqe = ehea_poll_rq1(qp, &wqe_index);
    while ((processed < budget) && cqe) {
        ehea_inc_rq1(qp);
        processed_rq1++;
        processed++;
        if (netif_msg_rx_status(port))
            ehea_dump(cqe, sizeof(*cqe), "CQE");

        last_wqe_index = wqe_index;
        rmb();
        if (!ehea_check_cqe(cqe, &rq)) {
            if (rq == 1) {
                /* LL RQ1 */
                skb = get_skb_by_index_ll(skb_arr_rq1,
                                          skb_arr_rq1_len,
                                          wqe_index);
                if (unlikely(!skb)) {
                    netif_info(port, rx_err, dev,
                               "LL rq1: skb=NULL\n");

                    skb = netdev_alloc_skb(dev,
                                           EHEA_L_PKT_SIZE);
                    if (!skb)
                        break;
                }
                skb_copy_to_linear_data(skb, ((char *)cqe) + 64,
                                        cqe->num_bytes_transfered - 4);
                ehea_fill_skb(dev, skb, cqe, pr);
            } else if (rq == 2) {
                /* RQ2 */
                skb = get_skb_by_index(skb_arr_rq2,
                                       skb_arr_rq2_len, cqe);
                if (unlikely(!skb)) {
                    netif_err(port, rx_err, dev,
                              "rq2: skb=NULL\n");
                    break;
                }
                ehea_fill_skb(dev, skb, cqe, pr);
                processed_rq2++;
            } else {
                /* RQ3 */
                skb = get_skb_by_index(skb_arr_rq3,
                                       skb_arr_rq3_len, cqe);
                if (unlikely(!skb)) {
                    netif_err(port, rx_err, dev,
                              "rq3: skb=NULL\n");
                    break;
                }
                ehea_fill_skb(dev, skb, cqe, pr);
                processed_rq3++;
            }

            processed_bytes += skb->len;

            if (cqe->status & EHEA_CQE_VLAN_TAG_XTRACT)
                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
                                       cqe->vlan_tag);

            napi_gro_receive(&pr->napi, skb);
        } else {
            pr->p_stats.poll_receive_errors++;
            port_reset = ehea_treat_poll_error(pr, rq, cqe,
                                               &processed_rq2,
                                               &processed_rq3);
            if (port_reset)
                break;
        }
        cqe = ehea_poll_rq1(qp, &wqe_index);
    }

    pr->rx_packets += processed;
    pr->rx_bytes += processed_bytes;

    ehea_refill_rq1(pr, last_wqe_index, processed_rq1);
    ehea_refill_rq2(pr, processed_rq2);
    ehea_refill_rq3(pr, processed_rq3);

    return processed;
}

#define SWQE_RESTART_CHECK 0xdeadbeaff00d0000ull

static void reset_sq_restart_flag(struct ehea_port *port)
{
    int i;

    for (i = 0; i < port->num_def_qps; i++) {
        struct ehea_port_res *pr = &port->port_res[i];

        pr->sq_restart_flag = 0;
    }
    wake_up(&port->restart_wq);
}
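/*
 * Post a marker SWQE (wr_id == SWQE_RESTART_CHECK) on every send queue
 * and wait up to 100 ms for it to complete; a timeout means the HW and
 * SW queues have diverged, which triggers a port reset.
 */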
static void check_sqs(struct ehea_port *port)
{
    struct ehea_swqe *swqe;
    int swqe_index;
    int i, k;

    for (i = 0; i < port->num_def_qps; i++) {
        struct ehea_port_res *pr = &port->port_res[i];
        int ret;

        k = 0;
        swqe = ehea_get_swqe(pr->qp, &swqe_index);
        memset(swqe, 0, SWQE_HEADER_SIZE);
        atomic_dec(&pr->swqe_avail);

        swqe->tx_control |= EHEA_SWQE_PURGE;
        swqe->wr_id = SWQE_RESTART_CHECK;
        swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
        swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT;
        swqe->immediate_data_length = 80;

        ehea_post_swqe(pr->qp, swqe);

        ret = wait_event_timeout(port->restart_wq,
                                 pr->sq_restart_flag == 0,
                                 msecs_to_jiffies(100));
        if (!ret) {
            pr_err("HW/SW queues out of sync\n");
            ehea_schedule_port_reset(pr->port);
            return;
        }
    }
}
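/*
 * Reap send completions: release transmitted skbs, credit swqe_avail and
 * wake the TX queue once enough send WQEs are free again. Returns the
 * first unprocessed CQE (if the quota ran out) or NULL.
 */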
static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
{
    struct sk_buff *skb;
    struct ehea_cq *send_cq = pr->send_cq;
    struct ehea_cqe *cqe;
    int quota = my_quota;
    int cqe_counter = 0;
    int swqe_av = 0;
    int index;
    struct netdev_queue *txq = netdev_get_tx_queue(pr->port->netdev,
                                                   pr - &pr->port->port_res[0]);

    cqe = ehea_poll_cq(send_cq);
    while (cqe && (quota > 0)) {
        ehea_inc_cq(send_cq);
        cqe_counter++;
        rmb();

        if (cqe->wr_id == SWQE_RESTART_CHECK) {
            pr->sq_restart_flag = 1;
            swqe_av++;
            break;
        }

        if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
            pr_err("Bad send completion status=0x%04X\n",
                   cqe->status);

            if (netif_msg_tx_err(pr->port))
                ehea_dump(cqe, sizeof(*cqe), "Send CQE");

            if (cqe->status & EHEA_CQE_STAT_RESET_MASK) {
                pr_err("Resetting port\n");
                ehea_schedule_port_reset(pr->port);
                break;
            }
        }

        if (netif_msg_tx_done(pr->port))
            ehea_dump(cqe, sizeof(*cqe), "CQE");

        if (likely(EHEA_BMASK_GET(EHEA_WR_ID_TYPE, cqe->wr_id)
                   == EHEA_SWQE2_TYPE)) {
            index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
            skb = pr->sq_skba.arr[index];
            dev_consume_skb_any(skb);
            pr->sq_skba.arr[index] = NULL;
        }

        swqe_av += EHEA_BMASK_GET(EHEA_WR_ID_REFILL, cqe->wr_id);
        quota--;

        cqe = ehea_poll_cq(send_cq);
    }

    ehea_update_feca(send_cq, cqe_counter);
    atomic_add(swqe_av, &pr->swqe_avail);

    if (unlikely(netif_tx_queue_stopped(txq) &&
                 (atomic_read(&pr->swqe_avail) >= pr->swqe_refill_th))) {
        __netif_tx_lock(txq, smp_processor_id());
        if (netif_tx_queue_stopped(txq) &&
            (atomic_read(&pr->swqe_avail) >= pr->swqe_refill_th))
            netif_tx_wake_queue(txq);
        __netif_tx_unlock(txq);
    }

    wake_up(&pr->port->swqe_avail_wq);

    return cqe;
}

#define EHEA_POLL_MAX_CQES 65535
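/*
 * NAPI poll: process TX completions first, then RX up to budget. The
 * loop after napi_complete() re-arms the CQ event pointers and re-checks
 * both CQs, closing the race between enabling interrupts and completions
 * that arrived in the meantime.
 */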
static int ehea_poll(struct napi_struct *napi, int budget)
{
    struct ehea_port_res *pr = container_of(napi, struct ehea_port_res,
                                            napi);
    struct net_device *dev = pr->port->netdev;
    struct ehea_cqe *cqe;
    struct ehea_cqe *cqe_skb = NULL;
    int wqe_index;
    int rx = 0;

    cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);
    rx += ehea_proc_rwqes(dev, pr, budget - rx);

    while (rx != budget) {
        napi_complete(napi);
        ehea_reset_cq_ep(pr->recv_cq);
        ehea_reset_cq_ep(pr->send_cq);
        ehea_reset_cq_n1(pr->recv_cq);
        ehea_reset_cq_n1(pr->send_cq);
        rmb();
        cqe = ehea_poll_rq1(pr->qp, &wqe_index);
        cqe_skb = ehea_poll_cq(pr->send_cq);

        if (!cqe && !cqe_skb)
            return rx;

        if (!napi_reschedule(napi))
            return rx;

        cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);
        rx += ehea_proc_rwqes(dev, pr, budget - rx);
    }

    return rx;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void ehea_netpoll(struct net_device *dev)
{
    struct ehea_port *port = netdev_priv(dev);
    int i;

    for (i = 0; i < port->num_def_qps; i++)
        napi_schedule(&port->port_res[i].napi);
}
#endif

static irqreturn_t ehea_recv_irq_handler(int irq, void *param)
{
    struct ehea_port_res *pr = param;

    napi_schedule(&pr->napi);

    return IRQ_HANDLED;
}
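/*
 * Handler for the port's affiliated-error event queue: fetch the error
 * data of the affected QP and schedule a port reset for fatal QP, CQ or
 * EQ errors.
 */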
static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param)
{
    struct ehea_port *port = param;
    struct ehea_eqe *eqe;
    struct ehea_qp *qp;
    u32 qp_token;
    u64 resource_type, aer, aerr;
    int reset_port = 0;

    eqe = ehea_poll_eq(port->qp_eq);

    while (eqe) {
        qp_token = EHEA_BMASK_GET(EHEA_EQE_QP_TOKEN, eqe->entry);
        pr_err("QP aff_err: entry=0x%llx, token=0x%x\n",
               eqe->entry, qp_token);

        qp = port->port_res[qp_token].qp;

        resource_type = ehea_error_data(port->adapter, qp->fw_handle,
                                        &aer, &aerr);

        if (resource_type == EHEA_AER_RESTYPE_QP) {
            if ((aer & EHEA_AER_RESET_MASK) ||
                (aerr & EHEA_AERR_RESET_MASK))
                reset_port = 1;
        } else
            reset_port = 1; /* Reset in case of CQ or EQ error */

        eqe = ehea_poll_eq(port->qp_eq);
    }

    if (reset_port) {
        pr_err("Resetting port\n");
        ehea_schedule_port_reset(port);
    }

    return IRQ_HANDLED;
}

static struct ehea_port *ehea_get_port(struct ehea_adapter *adapter,
                                       int logical_port)
{
    int i;

    for (i = 0; i < EHEA_MAX_PORTS; i++)
        if (adapter->port[i])
            if (adapter->port[i]->logical_port_id == logical_port)
                return adapter->port[i];
    return NULL;
}
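/*
 * Query H_PORT_CB0 and cache the MAC address, link speed/duplex and the
 * number of default QPs. Uses GFP_ATOMIC because it can be reached from
 * the NEQ tasklet.
 */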
int ehea_sense_port_attr(struct ehea_port *port)
{
    int ret;
    u64 hret;
    struct hcp_ehea_port_cb0 *cb0;

    /* may be called via ehea_neq_tasklet() */
    cb0 = (void *)get_zeroed_page(GFP_ATOMIC);
    if (!cb0) {
        pr_err("no mem for cb0\n");
        ret = -ENOMEM;
        goto out;
    }

    hret = ehea_h_query_ehea_port(port->adapter->handle,
                                  port->logical_port_id, H_PORT_CB0,
                                  EHEA_BMASK_SET(H_PORT_CB0_ALL, 0xFFFF),
                                  cb0);
    if (hret != H_SUCCESS) {
        ret = -EIO;
        goto out_free;
    }

    /* MAC address */
    port->mac_addr = cb0->port_mac_addr << 16;

    if (!is_valid_ether_addr((u8 *)&port->mac_addr)) {
        ret = -EADDRNOTAVAIL;
        goto out_free;
    }

    /* Port speed */
    switch (cb0->port_speed) {
    case H_SPEED_10M_H:
        port->port_speed = EHEA_SPEED_10M;
        port->full_duplex = 0;
        break;
    case H_SPEED_10M_F:
        port->port_speed = EHEA_SPEED_10M;
        port->full_duplex = 1;
        break;
    case H_SPEED_100M_H:
        port->port_speed = EHEA_SPEED_100M;
        port->full_duplex = 0;
        break;
    case H_SPEED_100M_F:
        port->port_speed = EHEA_SPEED_100M;
        port->full_duplex = 1;
        break;
    case H_SPEED_1G_F:
        port->port_speed = EHEA_SPEED_1G;
        port->full_duplex = 1;
        break;
    case H_SPEED_10G_F:
        port->port_speed = EHEA_SPEED_10G;
        port->full_duplex = 1;
        break;
    default:
        port->port_speed = 0;
        port->full_duplex = 0;
        break;
    }

    port->autoneg = 1;
    port->num_mcs = cb0->num_default_qps;

    /* Number of default QPs */
    if (use_mcs)
        port->num_def_qps = cb0->num_default_qps;
    else
        port->num_def_qps = 1;

    if (!port->num_def_qps) {
        ret = -EINVAL;
        goto out_free;
    }

    ret = 0;
out_free:
    if (ret || netif_msg_probe(port))
        ehea_dump(cb0, sizeof(*cb0), "ehea_sense_port_attr");
    free_page((unsigned long)cb0);
out:
    return ret;
}

int ehea_set_portspeed(struct ehea_port *port, u32 port_speed)
{
    struct hcp_ehea_port_cb4 *cb4;
    u64 hret;
    int ret = 0;

    cb4 = (void *)get_zeroed_page(GFP_KERNEL);
    if (!cb4) {
        pr_err("no mem for cb4\n");
        ret = -ENOMEM;
        goto out;
    }

    cb4->port_speed = port_speed;

    netif_carrier_off(port->netdev);

    hret = ehea_h_modify_ehea_port(port->adapter->handle,
                                   port->logical_port_id,
                                   H_PORT_CB4, H_PORT_CB4_SPEED, cb4);
    if (hret == H_SUCCESS) {
        port->autoneg = port_speed == EHEA_SPEED_AUTONEG ? 1 : 0;

        hret = ehea_h_query_ehea_port(port->adapter->handle,
                                      port->logical_port_id,
                                      H_PORT_CB4, H_PORT_CB4_SPEED,
                                      cb4);
        if (hret == H_SUCCESS) {
            switch (cb4->port_speed) {
            case H_SPEED_10M_H:
                port->port_speed = EHEA_SPEED_10M;
                port->full_duplex = 0;
                break;
            case H_SPEED_10M_F:
                port->port_speed = EHEA_SPEED_10M;
                port->full_duplex = 1;
                break;
            case H_SPEED_100M_H:
                port->port_speed = EHEA_SPEED_100M;
                port->full_duplex = 0;
                break;
            case H_SPEED_100M_F:
                port->port_speed = EHEA_SPEED_100M;
                port->full_duplex = 1;
                break;
            case H_SPEED_1G_F:
                port->port_speed = EHEA_SPEED_1G;
                port->full_duplex = 1;
                break;
            case H_SPEED_10G_F:
                port->port_speed = EHEA_SPEED_10G;
                port->full_duplex = 1;
                break;
            default:
                port->port_speed = 0;
                port->full_duplex = 0;
                break;
            }
        } else {
            pr_err("Failed sensing port speed\n");
            ret = -EIO;
        }
    } else {
        if (hret == H_AUTHORITY) {
            pr_info("Hypervisor denied setting port speed\n");
            ret = -EPERM;
        } else {
            ret = -EIO;
            pr_err("Failed setting port speed\n");
        }
    }
    if (!prop_carrier_state || (port->phy_link == EHEA_PHY_LINK_UP))
        netif_carrier_on(port->netdev);

    free_page((unsigned long)cb4);
out:
    return ret;
}
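/*
 * Decode one event queue entry from the NEQ: propagate logical and
 * physical link-state changes to the stack and log adapter/port
 * malfunctions.
 */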
static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe)
{
    int ret;
    u8 ec;
    u8 portnum;
    struct ehea_port *port;
    struct net_device *dev;

    ec = EHEA_BMASK_GET(NEQE_EVENT_CODE, eqe);
    portnum = EHEA_BMASK_GET(NEQE_PORTNUM, eqe);
    port = ehea_get_port(adapter, portnum);
    if (!port) {
        netdev_err(NULL, "unknown portnum %x\n", portnum);
        return;
    }
    dev = port->netdev;

    switch (ec) {
    case EHEA_EC_PORTSTATE_CHG: /* port state change */
        if (EHEA_BMASK_GET(NEQE_PORT_UP, eqe)) {
            if (!netif_carrier_ok(dev)) {
                ret = ehea_sense_port_attr(port);
                if (ret) {
                    netdev_err(dev, "failed resensing port attributes\n");
                    break;
                }

                netif_info(port, link, dev,
                           "Logical port up: %dMbps %s Duplex\n",
                           port->port_speed,
                           port->full_duplex == 1 ?
                           "Full" : "Half");

                netif_carrier_on(dev);
                netif_wake_queue(dev);
            }
        } else
            if (netif_carrier_ok(dev)) {
                netif_info(port, link, dev,
                           "Logical port down\n");
                netif_carrier_off(dev);
                netif_tx_disable(dev);
            }

        if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PORT_UP, eqe)) {
            port->phy_link = EHEA_PHY_LINK_UP;
            netif_info(port, link, dev,
                       "Physical port up\n");
            if (prop_carrier_state)
                netif_carrier_on(dev);
        } else {
            port->phy_link = EHEA_PHY_LINK_DOWN;
            netif_info(port, link, dev,
                       "Physical port down\n");
            if (prop_carrier_state)
                netif_carrier_off(dev);
        }

        if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PRIMARY, eqe))
            netdev_info(dev,
                        "External switch port is primary port\n");
        else
            netdev_info(dev,
                        "External switch port is backup port\n");

        break;
    case EHEA_EC_ADAPTER_MALFUNC:
        netdev_err(dev, "Adapter malfunction\n");
        break;
    case EHEA_EC_PORT_MALFUNC:
        netdev_info(dev, "Port malfunction\n");
        netif_carrier_off(dev);
        netif_tx_disable(dev);
        break;
    default:
        netdev_err(dev, "unknown event code %x, eqe=0x%llX\n", ec, eqe);
        break;
    }
}

static void ehea_neq_tasklet(unsigned long data)
{
    struct ehea_adapter *adapter = (struct ehea_adapter *)data;
    struct ehea_eqe *eqe;
    u64 event_mask;

    eqe = ehea_poll_eq(adapter->neq);
    pr_debug("eqe=%p\n", eqe);

    while (eqe) {
        pr_debug("*eqe=%lx\n", (unsigned long) eqe->entry);
        ehea_parse_eqe(adapter, eqe->entry);
        eqe = ehea_poll_eq(adapter->neq);
        pr_debug("next eqe=%p\n", eqe);
    }

    event_mask = EHEA_BMASK_SET(NELR_PORTSTATE_CHG, 1)
               | EHEA_BMASK_SET(NELR_ADAPTER_MALFUNC, 1)
               | EHEA_BMASK_SET(NELR_PORT_MALFUNC, 1);

    ehea_h_reset_events(adapter->handle,
                        adapter->neq->fw_handle, event_mask);
}

static irqreturn_t ehea_interrupt_neq(int irq, void *param)
{
    struct ehea_adapter *adapter = param;

    tasklet_hi_schedule(&adapter->neq_tasklet);
    return IRQ_HANDLED;
}

static int ehea_fill_port_res(struct ehea_port_res *pr)
{
    int ret;
    struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr;

    ehea_init_fill_rq1(pr, pr->rq1_skba.len);

    ret = ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1);

    ret |= ehea_refill_rq3(pr, init_attr->act_nr_rwqes_rq3 - 1);

    return ret;
}
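/*
 * Register the affiliated-error IRQ plus one receive IRQ per default QP;
 * on failure, unwind whatever was registered so far.
 */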
static int ehea_reg_interrupts(struct net_device *dev)
{
    struct ehea_port *port = netdev_priv(dev);
    struct ehea_port_res *pr;
    int i, ret;

    snprintf(port->int_aff_name, EHEA_IRQ_NAME_SIZE - 1, "%s-aff",
             dev->name);

    ret = ibmebus_request_irq(port->qp_eq->attr.ist1,
                              ehea_qp_aff_irq_handler,
                              0, port->int_aff_name, port);
    if (ret) {
        netdev_err(dev, "failed registering irq for qp_aff_irq_handler:ist=%X\n",
                   port->qp_eq->attr.ist1);
        goto out_free_qpeq;
    }

    netif_info(port, ifup, dev,
               "irq_handle 0x%X for function qp_aff_irq_handler registered\n",
               port->qp_eq->attr.ist1);

    for (i = 0; i < port->num_def_qps; i++) {
        pr = &port->port_res[i];
        snprintf(pr->int_send_name, EHEA_IRQ_NAME_SIZE - 1,
                 "%s-queue%d", dev->name, i);
        ret = ibmebus_request_irq(pr->eq->attr.ist1,
                                  ehea_recv_irq_handler,
                                  0, pr->int_send_name, pr);
        if (ret) {
            netdev_err(dev, "failed registering irq for ehea_queue port_res_nr:%d, ist=%X\n",
                       i, pr->eq->attr.ist1);
            goto out_free_req;
        }
        netif_info(port, ifup, dev,
                   "irq_handle 0x%X for function ehea_queue_int %d registered\n",
                   pr->eq->attr.ist1, i);
    }
out:
    return ret;

out_free_req:
    while (--i >= 0) {
        u32 ist = port->port_res[i].eq->attr.ist1;

        ibmebus_free_irq(ist, &port->port_res[i]);
    }

out_free_qpeq:
    ibmebus_free_irq(port->qp_eq->attr.ist1, port);
    i = port->num_def_qps;

    goto out;
}

static void ehea_free_interrupts(struct net_device *dev)
{
    struct ehea_port *port = netdev_priv(dev);
    struct ehea_port_res *pr;
    int i;

    /* send */
    for (i = 0; i < port->num_def_qps; i++) {
        pr = &port->port_res[i];
        ibmebus_free_irq(pr->eq->attr.ist1, pr);
        netif_info(port, intr, dev,
                   "free send irq for res %d with handle 0x%X\n",
                   i, pr->eq->attr.ist1);
    }

    /* associated events */
    ibmebus_free_irq(port->qp_eq->attr.ist1, port);
    netif_info(port, intr, dev,
               "associated event interrupt for handle 0x%X freed\n",
               port->qp_eq->attr.ist1);
}

static int ehea_configure_port(struct ehea_port *port)
{
    int ret, i;
    u64 hret, mask;
    struct hcp_ehea_port_cb0 *cb0;

    ret = -ENOMEM;
    cb0 = (void *)get_zeroed_page(GFP_KERNEL);
    if (!cb0)
        goto out;

    cb0->port_rc = EHEA_BMASK_SET(PXLY_RC_VALID, 1)
                 | EHEA_BMASK_SET(PXLY_RC_IP_CHKSUM, 1)
                 | EHEA_BMASK_SET(PXLY_RC_TCP_UDP_CHKSUM, 1)
                 | EHEA_BMASK_SET(PXLY_RC_VLAN_XTRACT, 1)
                 | EHEA_BMASK_SET(PXLY_RC_VLAN_TAG_FILTER,
                                  PXLY_RC_VLAN_FILTER)
                 | EHEA_BMASK_SET(PXLY_RC_JUMBO_FRAME, 1);

    for (i = 0; i < port->num_mcs; i++)
        if (use_mcs)
            cb0->default_qpn_arr[i] =
                port->port_res[i].qp->init_attr.qp_nr;
        else
            cb0->default_qpn_arr[i] =
                port->port_res[0].qp->init_attr.qp_nr;

    if (netif_msg_ifup(port))
        ehea_dump(cb0, sizeof(*cb0), "ehea_configure_port");

    mask = EHEA_BMASK_SET(H_PORT_CB0_PRC, 1)
         | EHEA_BMASK_SET(H_PORT_CB0_DEFQPNARRAY, 1);

    hret = ehea_h_modify_ehea_port(port->adapter->handle,
                                   port->logical_port_id,
                                   H_PORT_CB0, mask, cb0);
    ret = -EIO;
    if (hret != H_SUCCESS)
        goto out_free;

    ret = 0;

out_free:
    free_page((unsigned long)cb0);
out:
    return ret;
}

static int ehea_gen_smrs(struct ehea_port_res *pr)
{
    int ret;
    struct ehea_adapter *adapter = pr->port->adapter;

    ret = ehea_gen_smr(adapter, &adapter->mr, &pr->send_mr);
    if (ret)
        goto out;

    ret = ehea_gen_smr(adapter, &adapter->mr, &pr->recv_mr);
    if (ret)
        goto out_free;

    return 0;

out_free:
    ehea_rem_mr(&pr->send_mr);
out:
    pr_err("Generating SMRS failed\n");
    return -EIO;
}

static int ehea_rem_smrs(struct ehea_port_res *pr)
{
    if ((ehea_rem_mr(&pr->send_mr)) ||
        (ehea_rem_mr(&pr->recv_mr)))
        return -EIO;
    else
        return 0;
}

static int ehea_init_q_skba(struct ehea_q_skb_arr *q_skba, int max_q_entries)
{
    int arr_size = sizeof(void *) * max_q_entries;

    q_skba->arr = vzalloc(arr_size);
    if (!q_skba->arr)
        return -ENOMEM;

    q_skba->len = max_q_entries;
    q_skba->index = 0;
    q_skba->os_skbs = 0;

    return 0;
}
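/*
 * Build one port resource set: event queue, send/receive CQs, the QP
 * with its three receive queues, the skb tracking arrays and the NAPI
 * context. The byte/packet counters are carried across the memset so a
 * port reset does not zero the interface statistics.
 */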
  1223. static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
  1224. struct port_res_cfg *pr_cfg, int queue_token)
  1225. {
  1226. struct ehea_adapter *adapter = port->adapter;
  1227. enum ehea_eq_type eq_type = EHEA_EQ;
  1228. struct ehea_qp_init_attr *init_attr = NULL;
  1229. int ret = -EIO;
  1230. u64 tx_bytes, rx_bytes, tx_packets, rx_packets;
  1231. tx_bytes = pr->tx_bytes;
  1232. tx_packets = pr->tx_packets;
  1233. rx_bytes = pr->rx_bytes;
  1234. rx_packets = pr->rx_packets;
  1235. memset(pr, 0, sizeof(struct ehea_port_res));
  1236. pr->tx_bytes = rx_bytes;
  1237. pr->tx_packets = tx_packets;
  1238. pr->rx_bytes = rx_bytes;
  1239. pr->rx_packets = rx_packets;

	pr->port = port;

	pr->eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0);
	if (!pr->eq) {
		pr_err("create_eq failed (eq)\n");
		goto out_free;
	}

	pr->recv_cq = ehea_create_cq(adapter, pr_cfg->max_entries_rcq,
				     pr->eq->fw_handle,
				     port->logical_port_id);
	if (!pr->recv_cq) {
		pr_err("create_cq failed (cq_recv)\n");
		goto out_free;
	}

	pr->send_cq = ehea_create_cq(adapter, pr_cfg->max_entries_scq,
				     pr->eq->fw_handle,
				     port->logical_port_id);
	if (!pr->send_cq) {
		pr_err("create_cq failed (cq_send)\n");
		goto out_free;
	}

	if (netif_msg_ifup(port))
		pr_info("Send CQ: act_nr_cqes=%d, Recv CQ: act_nr_cqes=%d\n",
			pr->send_cq->attr.act_nr_of_cqes,
			pr->recv_cq->attr.act_nr_of_cqes);

	init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
	if (!init_attr) {
		ret = -ENOMEM;
		pr_err("no mem for ehea_qp_init_attr\n");
		goto out_free;
	}

	init_attr->low_lat_rq1 = 1;
	init_attr->signalingtype = 1; /* generate CQE if specified in WQE */
	init_attr->rq_count = 3;
	init_attr->qp_token = queue_token;
	init_attr->max_nr_send_wqes = pr_cfg->max_entries_sq;
	init_attr->max_nr_rwqes_rq1 = pr_cfg->max_entries_rq1;
	init_attr->max_nr_rwqes_rq2 = pr_cfg->max_entries_rq2;
	init_attr->max_nr_rwqes_rq3 = pr_cfg->max_entries_rq3;
	init_attr->wqe_size_enc_sq = EHEA_SG_SQ;
	init_attr->wqe_size_enc_rq1 = EHEA_SG_RQ1;
	init_attr->wqe_size_enc_rq2 = EHEA_SG_RQ2;
	init_attr->wqe_size_enc_rq3 = EHEA_SG_RQ3;
	init_attr->rq2_threshold = EHEA_RQ2_THRESHOLD;
	init_attr->rq3_threshold = EHEA_RQ3_THRESHOLD;
	init_attr->port_nr = port->logical_port_id;
	init_attr->send_cq_handle = pr->send_cq->fw_handle;
	init_attr->recv_cq_handle = pr->recv_cq->fw_handle;
	init_attr->aff_eq_handle = port->qp_eq->fw_handle;

	pr->qp = ehea_create_qp(adapter, adapter->pd, init_attr);
	if (!pr->qp) {
		pr_err("create_qp failed\n");
		ret = -EIO;
		goto out_free;
	}

	if (netif_msg_ifup(port))
		pr_info("QP: qp_nr=%d\n act_nr_snd_wqe=%d\n nr_rwqe_rq1=%d\n nr_rwqe_rq2=%d\n nr_rwqe_rq3=%d\n",
			init_attr->qp_nr,
			init_attr->act_nr_send_wqes,
			init_attr->act_nr_rwqes_rq1,
			init_attr->act_nr_rwqes_rq2,
			init_attr->act_nr_rwqes_rq3);

	pr->sq_skba_size = init_attr->act_nr_send_wqes + 1;

	ret = ehea_init_q_skba(&pr->sq_skba, pr->sq_skba_size);
	ret |= ehea_init_q_skba(&pr->rq1_skba, init_attr->act_nr_rwqes_rq1 + 1);
	ret |= ehea_init_q_skba(&pr->rq2_skba, init_attr->act_nr_rwqes_rq2 + 1);
	ret |= ehea_init_q_skba(&pr->rq3_skba, init_attr->act_nr_rwqes_rq3 + 1);
	if (ret)
		goto out_free;

	pr->swqe_refill_th = init_attr->act_nr_send_wqes / 10;
	if (ehea_gen_smrs(pr) != 0) {
		ret = -EIO;
		goto out_free;
	}

	atomic_set(&pr->swqe_avail, init_attr->act_nr_send_wqes - 1);

	kfree(init_attr);

	netif_napi_add(pr->port->netdev, &pr->napi, ehea_poll, 64);

	ret = 0;
	goto out;

out_free:
	kfree(init_attr);
	vfree(pr->sq_skba.arr);
	vfree(pr->rq1_skba.arr);
	vfree(pr->rq2_skba.arr);
	vfree(pr->rq3_skba.arr);
	ehea_destroy_qp(pr->qp);
	ehea_destroy_cq(pr->send_cq);
	ehea_destroy_cq(pr->recv_cq);
	ehea_destroy_eq(pr->eq);
out:
	return ret;
}
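
/*
 * Tear-down counterpart of ehea_init_port_res(): destroy the QP, CQs
 * and EQ, free any skbs still referenced by the queue arrays, then
 * drop the shared memory regions.
 */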
static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr)
{
	int ret, i;

	if (pr->qp)
		netif_napi_del(&pr->napi);

	ret = ehea_destroy_qp(pr->qp);
	if (!ret) {
		ehea_destroy_cq(pr->send_cq);
		ehea_destroy_cq(pr->recv_cq);
		ehea_destroy_eq(pr->eq);

		for (i = 0; i < pr->rq1_skba.len; i++)
			if (pr->rq1_skba.arr[i])
				dev_kfree_skb(pr->rq1_skba.arr[i]);

		for (i = 0; i < pr->rq2_skba.len; i++)
			if (pr->rq2_skba.arr[i])
				dev_kfree_skb(pr->rq2_skba.arr[i]);

		for (i = 0; i < pr->rq3_skba.len; i++)
			if (pr->rq3_skba.arr[i])
				dev_kfree_skb(pr->rq3_skba.arr[i]);

		for (i = 0; i < pr->sq_skba.len; i++)
			if (pr->sq_skba.arr[i])
				dev_kfree_skb(pr->sq_skba.arr[i]);

		vfree(pr->rq1_skba.arr);
		vfree(pr->rq2_skba.arr);
		vfree(pr->rq3_skba.arr);
		vfree(pr->sq_skba.arr);
		ret = ehea_rem_smrs(pr);
	}
	return ret;
}
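
/*
 * Fill the immediate-data area of a type 2 SWQE. Small linear data is
 * copied entirely; for larger (or TSO) packets only up to SWQE2_MAX_IMM
 * bytes (for TSO: just the Ethernet/IP/TCP headers) go into the
 * immediate area and the rest of the linear data is referenced via
 * sg1entry.
 */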
static void write_swqe2_immediate(struct sk_buff *skb, struct ehea_swqe *swqe,
				  u32 lkey)
{
	int skb_data_size = skb_headlen(skb);
	u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
	struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
	unsigned int immediate_len = SWQE2_MAX_IMM;

	swqe->descriptors = 0;

	if (skb_is_gso(skb)) {
		swqe->tx_control |= EHEA_SWQE_TSO;
		swqe->mss = skb_shinfo(skb)->gso_size;
		/*
		 * For TSO packets we only copy the headers into the
		 * immediate area.
		 */
		immediate_len = ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);
	}

	if (skb_is_gso(skb) || skb_data_size >= SWQE2_MAX_IMM) {
		skb_copy_from_linear_data(skb, imm_data, immediate_len);
		swqe->immediate_data_length = immediate_len;

		if (skb_data_size > immediate_len) {
			sg1entry->l_key = lkey;
			sg1entry->len = skb_data_size - immediate_len;
			sg1entry->vaddr =
				ehea_map_vaddr(skb->data + immediate_len);
			swqe->descriptors++;
		}
	} else {
		skb_copy_from_linear_data(skb, imm_data, skb_data_size);
		swqe->immediate_data_length = skb_data_size;
	}
}
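
/*
 * Build the scatter-gather list for a type 2 SWQE: immediate data
 * first, then one descriptor per page fragment, filling sg1entry
 * before spilling into the sg_list array.
 */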
static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev,
				    struct ehea_swqe *swqe, u32 lkey)
{
	struct ehea_vsgentry *sg_list, *sg1entry, *sgentry;
	skb_frag_t *frag;
	int nfrags, sg1entry_contains_frag_data, i;

	nfrags = skb_shinfo(skb)->nr_frags;
	sg1entry = &swqe->u.immdata_desc.sg_entry;
	sg_list = (struct ehea_vsgentry *)&swqe->u.immdata_desc.sg_list;
	sg1entry_contains_frag_data = 0;

	write_swqe2_immediate(skb, swqe, lkey);

	/* write descriptors */
	if (nfrags > 0) {
		if (swqe->descriptors == 0) {
			/* sg1entry not yet used */
			frag = &skb_shinfo(skb)->frags[0];

			/* copy sg1entry data */
			sg1entry->l_key = lkey;
			sg1entry->len = skb_frag_size(frag);
			sg1entry->vaddr =
				ehea_map_vaddr(skb_frag_address(frag));
			swqe->descriptors++;
			sg1entry_contains_frag_data = 1;
		}

		for (i = sg1entry_contains_frag_data; i < nfrags; i++) {
			frag = &skb_shinfo(skb)->frags[i];
			sgentry = &sg_list[i - sg1entry_contains_frag_data];

			sgentry->l_key = lkey;
			sgentry->len = skb_frag_size(frag);
			sgentry->vaddr = ehea_map_vaddr(skb_frag_address(frag));
			swqe->descriptors++;
		}
	}
}
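
/*
 * (De)register the port's MAC for broadcast reception with the
 * hypervisor, once for untagged frames and once for all VLANs;
 * hcallid selects H_REG_BCMC or H_DEREG_BCMC.
 */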
static int ehea_broadcast_reg_helper(struct ehea_port *port, u32 hcallid)
{
	int ret = 0;
	u64 hret;
	u8 reg_type;

	/* De/Register untagged packets */
	reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_UNTAGGED;
	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, port->mac_addr, 0, hcallid);
	if (hret != H_SUCCESS) {
		pr_err("%sregistering bc address failed (untagged)\n",
		       hcallid == H_REG_BCMC ? "" : "de");
		ret = -EIO;
		goto out_herr;
	}

	/* De/Register VLAN packets */
	reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_VLANID_ALL;
	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, port->mac_addr, 0, hcallid);
	if (hret != H_SUCCESS) {
		pr_err("%sregistering bc address failed (vlan)\n",
		       hcallid == H_REG_BCMC ? "" : "de");
		ret = -EIO;
	}
out_herr:
	return ret;
}

static int ehea_set_mac_addr(struct net_device *dev, void *sa)
{
	struct ehea_port *port = netdev_priv(dev);
	struct sockaddr *mac_addr = sa;
	struct hcp_ehea_port_cb0 *cb0;
	int ret;
	u64 hret;

	if (!is_valid_ether_addr(mac_addr->sa_data)) {
		ret = -EADDRNOTAVAIL;
		goto out;
	}

	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb0) {
		pr_err("no mem for cb0\n");
		ret = -ENOMEM;
		goto out;
	}

	memcpy(&(cb0->port_mac_addr), &(mac_addr->sa_data[0]), ETH_ALEN);

	cb0->port_mac_addr = cb0->port_mac_addr >> 16;

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id, H_PORT_CB0,
				       EHEA_BMASK_SET(H_PORT_CB0_MAC, 1), cb0);
	if (hret != H_SUCCESS) {
		ret = -EIO;
		goto out_free;
	}

	memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len);

	/* Deregister old MAC in pHYP */
	if (port->state == EHEA_PORT_UP) {
		ret = ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
		if (ret)
			goto out_upregs;
	}

	port->mac_addr = cb0->port_mac_addr << 16;

	/* Register new MAC in pHYP */
	if (port->state == EHEA_PORT_UP) {
		ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
		if (ret)
			goto out_upregs;
	}

	ret = 0;

out_upregs:
	ehea_update_bcmc_registrations();
out_free:
	free_page((unsigned long)cb0);
out:
	return ret;
}

static void ehea_promiscuous_error(u64 hret, int enable)
{
	if (hret == H_AUTHORITY)
		pr_info("Hypervisor denied %sabling promiscuous mode\n",
			enable == 1 ? "en" : "dis");
	else
		pr_err("failed %sabling promiscuous mode\n",
		       enable == 1 ? "en" : "dis");
}

static void ehea_promiscuous(struct net_device *dev, int enable)
{
	struct ehea_port *port = netdev_priv(dev);
	struct hcp_ehea_port_cb7 *cb7;
	u64 hret;

	if (enable == port->promisc)
		return;

	cb7 = (void *)get_zeroed_page(GFP_ATOMIC);
	if (!cb7) {
		pr_err("no mem for cb7\n");
		goto out;
	}

	/* Modify Pxs_DUCQPN in CB7 */
	cb7->def_uc_qpn = enable == 1 ? port->port_res[0].qp->fw_handle : 0;

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB7, H_PORT_CB7_DUCQPN, cb7);
	if (hret) {
		ehea_promiscuous_error(hret, enable);
		goto out;
	}

	port->promisc = enable;
out:
	free_page((unsigned long)cb7);
}

static u64 ehea_multicast_reg_helper(struct ehea_port *port, u64 mc_mac_addr,
				     u32 hcallid)
{
	u64 hret;
	u8 reg_type;

	reg_type = EHEA_BCMC_MULTICAST | EHEA_BCMC_UNTAGGED;
	if (mc_mac_addr == 0)
		reg_type |= EHEA_BCMC_SCOPE_ALL;

	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, mc_mac_addr, 0, hcallid);
	if (hret)
		goto out;

	reg_type = EHEA_BCMC_MULTICAST | EHEA_BCMC_VLANID_ALL;
	if (mc_mac_addr == 0)
		reg_type |= EHEA_BCMC_SCOPE_ALL;

	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, mc_mac_addr, 0, hcallid);
out:
	return hret;
}

static int ehea_drop_multicast_list(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_mc_list *mc_entry = port->mc_list;
	struct list_head *pos;
	struct list_head *temp;
	int ret = 0;
	u64 hret;

	list_for_each_safe(pos, temp, &(port->mc_list->list)) {
		mc_entry = list_entry(pos, struct ehea_mc_list, list);

		hret = ehea_multicast_reg_helper(port, mc_entry->macaddr,
						 H_DEREG_BCMC);
		if (hret) {
			pr_err("failed deregistering mcast MAC\n");
			ret = -EIO;
		}

		list_del(pos);
		kfree(mc_entry);
	}
	return ret;
}

static void ehea_allmulti(struct net_device *dev, int enable)
{
	struct ehea_port *port = netdev_priv(dev);
	u64 hret;

	if (!port->allmulti) {
		if (enable) {
			/* Enable ALLMULTI */
			ehea_drop_multicast_list(dev);
			hret = ehea_multicast_reg_helper(port, 0, H_REG_BCMC);
			if (!hret)
				port->allmulti = 1;
			else
				netdev_err(dev,
					   "failed enabling IFF_ALLMULTI\n");
		}
	} else {
		if (!enable) {
			/* Disable ALLMULTI */
			hret = ehea_multicast_reg_helper(port, 0, H_DEREG_BCMC);
			if (!hret)
				port->allmulti = 0;
			else
				netdev_err(dev,
					   "failed disabling IFF_ALLMULTI\n");
		}
	}
}

static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr)
{
	struct ehea_mc_list *ehea_mcl_entry;
	u64 hret;

	ehea_mcl_entry = kzalloc(sizeof(*ehea_mcl_entry), GFP_ATOMIC);
	if (!ehea_mcl_entry)
		return;

	INIT_LIST_HEAD(&ehea_mcl_entry->list);

	memcpy(&ehea_mcl_entry->macaddr, mc_mac_addr, ETH_ALEN);

	hret = ehea_multicast_reg_helper(port, ehea_mcl_entry->macaddr,
					 H_REG_BCMC);
	if (!hret)
		list_add(&ehea_mcl_entry->list, &port->mc_list->list);
	else {
		pr_err("failed registering mcast MAC\n");
		kfree(ehea_mcl_entry);
	}
}

static void ehea_set_multicast_list(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	int ret;

	ehea_promiscuous(dev, !!(dev->flags & IFF_PROMISC));

	if (dev->flags & IFF_ALLMULTI) {
		ehea_allmulti(dev, 1);
		goto out;
	}
	ehea_allmulti(dev, 0);

	if (!netdev_mc_empty(dev)) {
		ret = ehea_drop_multicast_list(dev);
		if (ret) {
			/* Dropping the current multicast list failed.
			 * Enabling ALL_MULTI is the best we can do.
			 */
			ehea_allmulti(dev, 1);
		}

		if (netdev_mc_count(dev) > port->adapter->max_mc_mac) {
			pr_info("Mcast registration limit reached (0x%llx). Use ALLMULTI!\n",
				port->adapter->max_mc_mac);
			goto out;
		}

		netdev_for_each_mc_addr(ha, dev)
			ehea_add_multicast_entry(port, ha->addr);
	}
out:
	ehea_update_bcmc_registrations();
}

static int ehea_change_mtu(struct net_device *dev, int new_mtu)
{
	if ((new_mtu < 68) || (new_mtu > EHEA_MAX_PACKET_SIZE))
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}
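
/*
 * Common SWQE setup for both transmit variants: immediate data and CRC
 * are always flagged; for IPv4 packets the IP/TCP/UDP checksum offload
 * fields are filled in when the stack requests CHECKSUM_PARTIAL.
 */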
static void xmit_common(struct sk_buff *skb, struct ehea_swqe *swqe)
{
	swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT | EHEA_SWQE_CRC;

	if (vlan_get_protocol(skb) != htons(ETH_P_IP))
		return;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		swqe->tx_control |= EHEA_SWQE_IP_CHECKSUM;

	swqe->ip_start = skb_network_offset(skb);
	swqe->ip_end = swqe->ip_start + ip_hdrlen(skb) - 1;

	switch (ip_hdr(skb)->protocol) {
	case IPPROTO_UDP:
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			swqe->tx_control |= EHEA_SWQE_TCP_CHECKSUM;

		swqe->tcp_offset = swqe->ip_end + 1 +
				   offsetof(struct udphdr, check);
		break;

	case IPPROTO_TCP:
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			swqe->tx_control |= EHEA_SWQE_TCP_CHECKSUM;

		swqe->tcp_offset = swqe->ip_end + 1 +
				   offsetof(struct tcphdr, check);
		break;
	}
}

static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev,
		       struct ehea_swqe *swqe, u32 lkey)
{
	swqe->tx_control |= EHEA_SWQE_DESCRIPTORS_PRESENT;

	xmit_common(skb, swqe);

	write_swqe2_data(skb, dev, swqe, lkey);
}

static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
		       struct ehea_swqe *swqe)
{
	u8 *imm_data = &swqe->u.immdata_nodesc.immediate_data[0];

	xmit_common(skb, swqe);

	if (!skb->data_len)
		skb_copy_from_linear_data(skb, imm_data, skb->len);
	else
		skb_copy_bits(skb, 0, imm_data, skb->len);

	swqe->immediate_data_length = skb->len;
	dev_consume_skb_any(skb);
}
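
/*
 * Transmit entry point. Packets that fit into SWQE3_MAX_IMM bytes are
 * sent entirely as immediate data (type 3) and the skb is consumed
 * right away; a signalled completion is requested only every
 * sig_comp_iv packets to limit TX interrupts. Larger packets use
 * descriptors (type 2), so the skb is parked in sq_skba until its
 * completion arrives and every such WQE is signalled.
 */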
static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_swqe *swqe;
	u32 lkey;
	int swqe_index;
	struct ehea_port_res *pr;
	struct netdev_queue *txq;

	pr = &port->port_res[skb_get_queue_mapping(skb)];
	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

	swqe = ehea_get_swqe(pr->qp, &swqe_index);
	memset(swqe, 0, SWQE_HEADER_SIZE);
	atomic_dec(&pr->swqe_avail);

	if (skb_vlan_tag_present(skb)) {
		swqe->tx_control |= EHEA_SWQE_VLAN_INSERT;
		swqe->vlan_tag = skb_vlan_tag_get(skb);
	}

	pr->tx_packets++;
	pr->tx_bytes += skb->len;

	if (skb->len <= SWQE3_MAX_IMM) {
		u32 sig_iv = port->sig_comp_iv;
		u32 swqe_num = pr->swqe_id_counter;
		ehea_xmit3(skb, dev, swqe);
		swqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE3_TYPE)
			| EHEA_BMASK_SET(EHEA_WR_ID_COUNT, swqe_num);
		if (pr->swqe_ll_count >= (sig_iv - 1)) {
			swqe->wr_id |= EHEA_BMASK_SET(EHEA_WR_ID_REFILL,
						      sig_iv);
			swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
			pr->swqe_ll_count = 0;
		} else
			pr->swqe_ll_count += 1;
	} else {
		swqe->wr_id =
			EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE2_TYPE)
		      | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, pr->swqe_id_counter)
		      | EHEA_BMASK_SET(EHEA_WR_ID_REFILL, 1)
		      | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, pr->sq_skba.index);
		pr->sq_skba.arr[pr->sq_skba.index] = skb;

		pr->sq_skba.index++;
		pr->sq_skba.index &= (pr->sq_skba.len - 1);

		lkey = pr->send_mr.lkey;
		ehea_xmit2(skb, dev, swqe, lkey);
		swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
	}
	pr->swqe_id_counter += 1;

	netif_info(port, tx_queued, dev,
		   "post swqe on QP %d\n", pr->qp->init_attr.qp_nr);
	if (netif_msg_tx_queued(port))
		ehea_dump(swqe, 512, "swqe");

	if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
		netif_tx_stop_queue(txq);
		swqe->tx_control |= EHEA_SWQE_PURGE;
	}

	ehea_post_swqe(pr->qp, swqe);

	if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
		pr->p_stats.queue_stopped++;
		netif_tx_stop_queue(txq);
	}

	return NETDEV_TX_OK;
}
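
/*
 * The hardware VLAN filter is a 4096-bit map stored MSB-first in u64
 * words: the word index is vid / 64 and the bit within the word is
 * 0x8000000000000000 >> (vid & 0x3F). For example, vid 100 lives in
 * vlan_filter[1], 36 bits down from the most significant bit.
 */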
static int ehea_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	struct hcp_ehea_port_cb1 *cb1;
	int index;
	u64 hret;
	int err = 0;

	cb1 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb1) {
		pr_err("no mem for cb1\n");
		err = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
				      H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS) {
		pr_err("query_ehea_port failed\n");
		err = -EINVAL;
		goto out;
	}

	index = (vid / 64);
	cb1->vlan_filter[index] |= ((u64)(0x8000000000000000 >> (vid & 0x3F)));

	hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
				       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS) {
		pr_err("modify_ehea_port failed\n");
		err = -EINVAL;
	}
out:
	free_page((unsigned long)cb1);
	return err;
}

static int ehea_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	struct hcp_ehea_port_cb1 *cb1;
	int index;
	u64 hret;
	int err = 0;

	cb1 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb1) {
		pr_err("no mem for cb1\n");
		err = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
				      H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS) {
		pr_err("query_ehea_port failed\n");
		err = -EINVAL;
		goto out;
	}

	index = (vid / 64);
	cb1->vlan_filter[index] &= ~((u64)(0x8000000000000000 >> (vid & 0x3F)));

	hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
				       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS) {
		pr_err("modify_ehea_port failed\n");
		err = -EINVAL;
	}
out:
	free_page((unsigned long)cb1);
	return err;
}
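
/*
 * Walk the QP through its firmware state machine: INITIALIZED, then
 * ENABLED+INITIALIZED, then ENABLED+RDY2SND, re-querying the control
 * block before each transition.
 */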
static int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
{
	int ret = -EIO;
	u64 hret;
	u16 dummy16 = 0;
	u64 dummy64 = 0;
	struct hcp_modify_qp_cb0 *cb0;

	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb0) {
		ret = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		pr_err("query_ehea_qp failed (1)\n");
		goto out;
	}

	cb0->qp_ctl_reg = H_QP_CR_STATE_INITIALIZED;
	hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
				     &dummy64, &dummy64, &dummy16, &dummy16);
	if (hret != H_SUCCESS) {
		pr_err("modify_ehea_qp failed (1)\n");
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		pr_err("query_ehea_qp failed (2)\n");
		goto out;
	}

	cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_INITIALIZED;
	hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
				     &dummy64, &dummy64, &dummy16, &dummy16);
	if (hret != H_SUCCESS) {
		pr_err("modify_ehea_qp failed (2)\n");
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		pr_err("query_ehea_qp failed (3)\n");
		goto out;
	}

	cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_RDY2SND;
	hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
				     &dummy64, &dummy64, &dummy16, &dummy16);
	if (hret != H_SUCCESS) {
		pr_err("modify_ehea_qp failed (3)\n");
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		pr_err("query_ehea_qp failed (4)\n");
		goto out;
	}

	ret = 0;
out:
	free_page((unsigned long)cb0);
	return ret;
}

static int ehea_port_res_setup(struct ehea_port *port, int def_qps)
{
	int ret, i;
	struct port_res_cfg pr_cfg;
	enum ehea_eq_type eq_type = EHEA_EQ;

	port->qp_eq = ehea_create_eq(port->adapter, eq_type,
				     EHEA_MAX_ENTRIES_EQ, 1);
	if (!port->qp_eq) {
		ret = -EINVAL;
		pr_err("ehea_create_eq failed (qp_eq)\n");
		goto out_kill_eq;
	}

	pr_cfg.max_entries_rcq = rq1_entries + rq2_entries + rq3_entries;
	pr_cfg.max_entries_scq = sq_entries * 2;
	pr_cfg.max_entries_sq = sq_entries;
	pr_cfg.max_entries_rq1 = rq1_entries;
	pr_cfg.max_entries_rq2 = rq2_entries;
	pr_cfg.max_entries_rq3 = rq3_entries;

	for (i = 0; i < def_qps; i++) {
		ret = ehea_init_port_res(port, &port->port_res[i], &pr_cfg, i);
		if (ret)
			goto out_clean_pr;
	}

	return 0;

out_clean_pr:
	while (--i >= 0)
		ehea_clean_portres(port, &port->port_res[i]);

out_kill_eq:
	ehea_destroy_eq(port->qp_eq);
	return ret;
}

static int ehea_clean_all_portres(struct ehea_port *port)
{
	int ret = 0;
	int i;

	for (i = 0; i < port->num_def_qps; i++)
		ret |= ehea_clean_portres(port, &port->port_res[i]);

	ret |= ehea_destroy_eq(port->qp_eq);

	return ret;
}

static void ehea_remove_adapter_mr(struct ehea_adapter *adapter)
{
	if (adapter->active_ports)
		return;

	ehea_rem_mr(&adapter->mr);
}

static int ehea_add_adapter_mr(struct ehea_adapter *adapter)
{
	if (adapter->active_ports)
		return 0;

	return ehea_reg_kernel_mr(adapter, &adapter->mr);
}

static int ehea_up(struct net_device *dev)
{
	int ret, i;
	struct ehea_port *port = netdev_priv(dev);

	if (port->state == EHEA_PORT_UP)
		return 0;

	ret = ehea_port_res_setup(port, port->num_def_qps);
	if (ret) {
		netdev_err(dev, "ehea_port_res_setup failed\n");
		goto out;
	}

	/* Set default QP for this port */
	ret = ehea_configure_port(port);
	if (ret) {
		netdev_err(dev, "ehea_configure_port failed. ret:%d\n", ret);
		goto out_clean_pr;
	}

	ret = ehea_reg_interrupts(dev);
	if (ret) {
		netdev_err(dev, "reg_interrupts failed. ret:%d\n", ret);
		goto out_clean_pr;
	}

	for (i = 0; i < port->num_def_qps; i++) {
		ret = ehea_activate_qp(port->adapter, port->port_res[i].qp);
		if (ret) {
			netdev_err(dev, "activate_qp failed\n");
			goto out_free_irqs;
		}
	}

	for (i = 0; i < port->num_def_qps; i++) {
		ret = ehea_fill_port_res(&port->port_res[i]);
		if (ret) {
			netdev_err(dev, "ehea_fill_port_res failed\n");
			goto out_free_irqs;
		}
	}

	ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
	if (ret) {
		ret = -EIO;
		goto out_free_irqs;
	}

	port->state = EHEA_PORT_UP;

	ret = 0;
	goto out;

out_free_irqs:
	ehea_free_interrupts(dev);

out_clean_pr:
	ehea_clean_all_portres(port);
out:
	if (ret)
		netdev_info(dev, "Failed starting. ret=%i\n", ret);

	ehea_update_bcmc_registrations();
	ehea_update_firmware_handles();

	return ret;
}

static void port_napi_disable(struct ehea_port *port)
{
	int i;

	for (i = 0; i < port->num_def_qps; i++)
		napi_disable(&port->port_res[i].napi);
}

static void port_napi_enable(struct ehea_port *port)
{
	int i;

	for (i = 0; i < port->num_def_qps; i++)
		napi_enable(&port->port_res[i].napi);
}

static int ehea_open(struct net_device *dev)
{
	int ret;
	struct ehea_port *port = netdev_priv(dev);

	mutex_lock(&port->port_lock);

	netif_info(port, ifup, dev, "enabling port\n");

	netif_carrier_off(dev);

	ret = ehea_up(dev);
	if (!ret) {
		port_napi_enable(port);
		netif_tx_start_all_queues(dev);
	}

	mutex_unlock(&port->port_lock);
	schedule_delayed_work(&port->stats_work,
			      round_jiffies_relative(msecs_to_jiffies(1000)));

	return ret;
}

static int ehea_down(struct net_device *dev)
{
	int ret;
	struct ehea_port *port = netdev_priv(dev);

	if (port->state == EHEA_PORT_DOWN)
		return 0;

	ehea_drop_multicast_list(dev);
	ehea_allmulti(dev, 0);
	ehea_broadcast_reg_helper(port, H_DEREG_BCMC);

	ehea_free_interrupts(dev);

	port->state = EHEA_PORT_DOWN;

	ehea_update_bcmc_registrations();

	ret = ehea_clean_all_portres(port);
	if (ret)
		netdev_info(dev, "Failed freeing resources. ret=%i\n", ret);

	ehea_update_firmware_handles();

	return ret;
}

static int ehea_stop(struct net_device *dev)
{
	int ret;
	struct ehea_port *port = netdev_priv(dev);

	netif_info(port, ifdown, dev, "disabling port\n");

	set_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);
	cancel_work_sync(&port->reset_task);
	cancel_delayed_work_sync(&port->stats_work);
	mutex_lock(&port->port_lock);
	netif_tx_stop_all_queues(dev);
	port_napi_disable(port);
	ret = ehea_down(dev);
	mutex_unlock(&port->port_lock);
	clear_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);
	return ret;
}
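
/*
 * Used on the DLPAR memory-change path: ehea_purge_sq() marks every
 * send WQE with the PURGE flag, and ehea_flush_sq() then waits (with a
 * 100 ms timeout per queue) until each send queue has drained.
 */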
static void ehea_purge_sq(struct ehea_qp *orig_qp)
{
	struct ehea_qp qp = *orig_qp;
	struct ehea_qp_init_attr *init_attr = &qp.init_attr;
	struct ehea_swqe *swqe;
	int wqe_index;
	int i;

	for (i = 0; i < init_attr->act_nr_send_wqes; i++) {
		swqe = ehea_get_swqe(&qp, &wqe_index);
		swqe->tx_control |= EHEA_SWQE_PURGE;
	}
}

static void ehea_flush_sq(struct ehea_port *port)
{
	int i;

	for (i = 0; i < port->num_def_qps; i++) {
		struct ehea_port_res *pr = &port->port_res[i];
		int swqe_max = pr->sq_skba_size - 2 - pr->swqe_ll_count;
		int ret;

		ret = wait_event_timeout(port->swqe_avail_wq,
					 atomic_read(&pr->swqe_avail) >= swqe_max,
					 msecs_to_jiffies(100));

		if (!ret) {
			pr_err("WARNING: sq not flushed completely\n");
			break;
		}
	}
}

static int ehea_stop_qps(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	struct hcp_modify_qp_cb0 *cb0;
	int ret = -EIO;
	int dret;
	int i;
	u64 hret;
	u64 dummy64 = 0;
	u16 dummy16 = 0;

	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb0) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < (port->num_def_qps); i++) {
		struct ehea_port_res *pr = &port->port_res[i];
		struct ehea_qp *qp = pr->qp;

		/* Purge send queue */
		ehea_purge_sq(qp);

		/* Disable queue pair */
		hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
					    cb0);
		if (hret != H_SUCCESS) {
			pr_err("query_ehea_qp failed (1)\n");
			goto out;
		}

		cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8;
		cb0->qp_ctl_reg &= ~H_QP_CR_ENABLED;

		hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
					     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG,
							    1), cb0, &dummy64,
					     &dummy64, &dummy16, &dummy16);
		if (hret != H_SUCCESS) {
			pr_err("modify_ehea_qp failed (1)\n");
			goto out;
		}

		hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
					    cb0);
		if (hret != H_SUCCESS) {
			pr_err("query_ehea_qp failed (2)\n");
			goto out;
		}

		/* deregister shared memory regions */
		dret = ehea_rem_smrs(pr);
		if (dret) {
			pr_err("unreg shared memory region failed\n");
			goto out;
		}
	}

	ret = 0;
out:
	free_page((unsigned long)cb0);

	return ret;
}
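
/*
 * After the memory regions have been re-registered, pending RWQEs in
 * RQ2/RQ3 still carry stale l_key/vaddr values; rewrite them from the
 * new receive MR and the skbs recorded in the rq2/rq3 arrays.
 */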
static void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res *pr)
{
	struct ehea_qp qp = *orig_qp;
	struct ehea_qp_init_attr *init_attr = &qp.init_attr;
	struct ehea_rwqe *rwqe;
	struct sk_buff **skba_rq2 = pr->rq2_skba.arr;
	struct sk_buff **skba_rq3 = pr->rq3_skba.arr;
	struct sk_buff *skb;
	u32 lkey = pr->recv_mr.lkey;
	int i;
	int index;

	for (i = 0; i < init_attr->act_nr_rwqes_rq2 + 1; i++) {
		rwqe = ehea_get_next_rwqe(&qp, 2);
		rwqe->sg_list[0].l_key = lkey;
		index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id);
		skb = skba_rq2[index];
		if (skb)
			rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
	}

	for (i = 0; i < init_attr->act_nr_rwqes_rq3 + 1; i++) {
		rwqe = ehea_get_next_rwqe(&qp, 3);
		rwqe->sg_list[0].l_key = lkey;
		index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id);
		skb = skba_rq3[index];
		if (skb)
			rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
	}
}

static int ehea_restart_qps(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	int ret = 0;
	int i;
	struct hcp_modify_qp_cb0 *cb0;
	u64 hret;
	u64 dummy64 = 0;
	u16 dummy16 = 0;

	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb0) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < (port->num_def_qps); i++) {
		struct ehea_port_res *pr = &port->port_res[i];
		struct ehea_qp *qp = pr->qp;

		ret = ehea_gen_smrs(pr);
		if (ret) {
			netdev_err(dev, "creation of shared memory regions failed\n");
			goto out;
		}

		ehea_update_rqs(qp, pr);

		/* Enable queue pair */
		hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
					    cb0);
		if (hret != H_SUCCESS) {
			netdev_err(dev, "query_ehea_qp failed (1)\n");
			goto out;
		}

		cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8;
		cb0->qp_ctl_reg |= H_QP_CR_ENABLED;

		hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
					     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG,
							    1), cb0, &dummy64,
					     &dummy64, &dummy16, &dummy16);
		if (hret != H_SUCCESS) {
			netdev_err(dev, "modify_ehea_qp failed (1)\n");
			goto out;
		}

		hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
					    cb0);
		if (hret != H_SUCCESS) {
			netdev_err(dev, "query_ehea_qp failed (2)\n");
			goto out;
		}

		/* refill entire queue */
		ehea_refill_rq1(pr, pr->rq1_skba.index, 0);
		ehea_refill_rq2(pr, 0);
		ehea_refill_rq3(pr, 0);
	}
out:
	free_page((unsigned long)cb0);

	return ret;
}

static void ehea_reset_port(struct work_struct *work)
{
	int ret;
	struct ehea_port *port =
		container_of(work, struct ehea_port, reset_task);
	struct net_device *dev = port->netdev;

	mutex_lock(&dlpar_mem_lock);
	port->resets++;
	mutex_lock(&port->port_lock);
	netif_tx_disable(dev);

	port_napi_disable(port);

	ehea_down(dev);

	ret = ehea_up(dev);
	if (ret)
		goto out;

	ehea_set_multicast_list(dev);

	netif_info(port, timer, dev, "reset successful\n");

	port_napi_enable(port);

	netif_tx_wake_all_queues(dev);
out:
	mutex_unlock(&port->port_lock);
	mutex_unlock(&dlpar_mem_lock);
}
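
/*
 * LPAR memory layout changed (DLPAR add/remove): quiesce every active
 * port, drop each adapter's kernel memory region, register a fresh
 * one, then restart the QPs and refill the receive queues.
 */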
static void ehea_rereg_mrs(void)
{
	int ret, i;
	struct ehea_adapter *adapter;

	pr_info("LPAR memory changed - re-initializing driver\n");

	list_for_each_entry(adapter, &adapter_list, list)
		if (adapter->active_ports) {
			/* Shutdown all ports */
			for (i = 0; i < EHEA_MAX_PORTS; i++) {
				struct ehea_port *port = adapter->port[i];
				struct net_device *dev;

				if (!port)
					continue;

				dev = port->netdev;

				if (dev->flags & IFF_UP) {
					mutex_lock(&port->port_lock);
					netif_tx_disable(dev);
					ehea_flush_sq(port);
					ret = ehea_stop_qps(dev);
					if (ret) {
						mutex_unlock(&port->port_lock);
						goto out;
					}
					port_napi_disable(port);
					mutex_unlock(&port->port_lock);
				}
				reset_sq_restart_flag(port);
			}

			/* Unregister old memory region */
			ret = ehea_rem_mr(&adapter->mr);
			if (ret) {
				pr_err("unregister MR failed - driver inoperable!\n");
				goto out;
			}
		}

	clear_bit(__EHEA_STOP_XFER, &ehea_driver_flags);

	list_for_each_entry(adapter, &adapter_list, list)
		if (adapter->active_ports) {
			/* Register new memory region */
			ret = ehea_reg_kernel_mr(adapter, &adapter->mr);
			if (ret) {
				pr_err("register MR failed - driver inoperable!\n");
				goto out;
			}

			/* Restart all ports */
			for (i = 0; i < EHEA_MAX_PORTS; i++) {
				struct ehea_port *port = adapter->port[i];

				if (port) {
					struct net_device *dev = port->netdev;

					if (dev->flags & IFF_UP) {
						mutex_lock(&port->port_lock);
						ret = ehea_restart_qps(dev);
						if (!ret) {
							check_sqs(port);
							port_napi_enable(port);
							netif_tx_wake_all_queues(dev);
						} else {
							netdev_err(dev, "Unable to restart QPS\n");
						}
						mutex_unlock(&port->port_lock);
					}
				}
			}
		}
	pr_info("re-initializing driver complete\n");
out:
	return;
}

static void ehea_tx_watchdog(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);

	if (netif_carrier_ok(dev) &&
	    !test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))
		ehea_schedule_port_reset(port);
}

static int ehea_sense_adapter_attr(struct ehea_adapter *adapter)
{
	struct hcp_query_ehea *cb;
	u64 hret;
	int ret;

	cb = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb) {
		ret = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea(adapter->handle, cb);
	if (hret != H_SUCCESS) {
		ret = -EIO;
		goto out_herr;
	}

	adapter->max_mc_mac = cb->max_mc_mac - 1;
	ret = 0;

out_herr:
	free_page((unsigned long)cb);
out:
	return ret;
}

static int ehea_get_jumboframe_status(struct ehea_port *port, int *jumbo)
{
	struct hcp_ehea_port_cb4 *cb4;
	u64 hret;
	int ret = 0;

	*jumbo = 0;

	/* (Try to) enable *jumbo frames */
	cb4 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb4) {
		pr_err("no mem for cb4\n");
		ret = -ENOMEM;
		goto out;
	} else {
		hret = ehea_h_query_ehea_port(port->adapter->handle,
					      port->logical_port_id,
					      H_PORT_CB4,
					      H_PORT_CB4_JUMBO, cb4);
		if (hret == H_SUCCESS) {
			if (cb4->jumbo_frame)
				*jumbo = 1;
			else {
				cb4->jumbo_frame = 1;
				hret = ehea_h_modify_ehea_port(port->adapter->handle,
							       port->logical_port_id,
							       H_PORT_CB4,
							       H_PORT_CB4_JUMBO,
							       cb4);
				if (hret == H_SUCCESS)
					*jumbo = 1;
			}
		} else
			ret = -EINVAL;

		free_page((unsigned long)cb4);
	}
out:
	return ret;
}

static ssize_t ehea_show_port_id(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
	return sprintf(buf, "%d", port->logical_port_id);
}

static DEVICE_ATTR(log_port_id, S_IRUSR | S_IRGRP | S_IROTH, ehea_show_port_id,
		   NULL);

static void logical_port_release(struct device *dev)
{
	struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
	of_node_put(port->ofdev.dev.of_node);
}

static struct device *ehea_register_port(struct ehea_port *port,
					 struct device_node *dn)
{
	int ret;

	port->ofdev.dev.of_node = of_node_get(dn);
	port->ofdev.dev.parent = &port->adapter->ofdev->dev;
	port->ofdev.dev.bus = &ibmebus_bus_type;

	dev_set_name(&port->ofdev.dev, "port%d", port_name_cnt++);
	port->ofdev.dev.release = logical_port_release;

	ret = of_device_register(&port->ofdev);
	if (ret) {
		pr_err("failed to register device. ret=%d\n", ret);
		goto out;
	}

	ret = device_create_file(&port->ofdev.dev, &dev_attr_log_port_id);
	if (ret) {
		pr_err("failed to register attributes, ret=%d\n", ret);
		goto out_unreg_of_dev;
	}

	return &port->ofdev.dev;

out_unreg_of_dev:
	of_device_unregister(&port->ofdev);
out:
	return NULL;
}

static void ehea_unregister_port(struct ehea_port *port)
{
	device_remove_file(&port->ofdev.dev, &dev_attr_log_port_id);
	of_device_unregister(&port->ofdev);
}

static const struct net_device_ops ehea_netdev_ops = {
	.ndo_open		= ehea_open,
	.ndo_stop		= ehea_stop,
	.ndo_start_xmit		= ehea_start_xmit,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ehea_netpoll,
#endif
	.ndo_get_stats64	= ehea_get_stats64,
	.ndo_set_mac_address	= ehea_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= ehea_set_multicast_list,
	.ndo_change_mtu		= ehea_change_mtu,
	.ndo_vlan_rx_add_vid	= ehea_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= ehea_vlan_rx_kill_vid,
	.ndo_tx_timeout		= ehea_tx_watchdog,
};

static struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
						u32 logical_port_id,
						struct device_node *dn)
{
	int ret;
	struct net_device *dev;
	struct ehea_port *port;
	struct device *port_dev;
	int jumbo;

	/* allocate memory for the port structures */
	dev = alloc_etherdev_mq(sizeof(struct ehea_port), EHEA_MAX_PORT_RES);
	if (!dev) {
		ret = -ENOMEM;
		goto out_err;
	}

	port = netdev_priv(dev);

	mutex_init(&port->port_lock);
	port->state = EHEA_PORT_DOWN;
	port->sig_comp_iv = sq_entries / 10;

	port->adapter = adapter;
	port->netdev = dev;
	port->logical_port_id = logical_port_id;

	port->msg_enable = netif_msg_init(msg_level, EHEA_MSG_DEFAULT);

	port->mc_list = kzalloc(sizeof(struct ehea_mc_list), GFP_KERNEL);
	if (!port->mc_list) {
		ret = -ENOMEM;
		goto out_free_ethdev;
	}

	INIT_LIST_HEAD(&port->mc_list->list);

	ret = ehea_sense_port_attr(port);
	if (ret)
		goto out_free_mc_list;

	netif_set_real_num_rx_queues(dev, port->num_def_qps);
	netif_set_real_num_tx_queues(dev, port->num_def_qps);

	port_dev = ehea_register_port(port, dn);
	if (!port_dev)
		goto out_free_mc_list;

	SET_NETDEV_DEV(dev, port_dev);

	/* initialize net_device structure */
	memcpy(dev->dev_addr, &port->mac_addr, ETH_ALEN);

	dev->netdev_ops = &ehea_netdev_ops;
	ehea_set_ethtool_ops(dev);

	dev->hw_features = NETIF_F_SG | NETIF_F_TSO |
			   NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_CTAG_TX;
	dev->features = NETIF_F_SG | NETIF_F_TSO |
			NETIF_F_HIGHDMA | NETIF_F_IP_CSUM |
			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
			NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXCSUM;
	dev->vlan_features = NETIF_F_SG | NETIF_F_TSO | NETIF_F_HIGHDMA |
			     NETIF_F_IP_CSUM;
	dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;

	INIT_WORK(&port->reset_task, ehea_reset_port);
	INIT_DELAYED_WORK(&port->stats_work, ehea_update_stats);

	init_waitqueue_head(&port->swqe_avail_wq);
	init_waitqueue_head(&port->restart_wq);

	memset(&port->stats, 0, sizeof(struct net_device_stats));
	ret = register_netdev(dev);
	if (ret) {
		pr_err("register_netdev failed. ret=%d\n", ret);
		goto out_unreg_port;
	}

	ret = ehea_get_jumboframe_status(port, &jumbo);
	if (ret)
		netdev_err(dev, "failed determining jumbo frame status\n");

	netdev_info(dev, "Jumbo frames are %sabled\n",
		    jumbo == 1 ? "en" : "dis");

	adapter->active_ports++;

	return port;

out_unreg_port:
	ehea_unregister_port(port);

out_free_mc_list:
	kfree(port->mc_list);

out_free_ethdev:
	free_netdev(dev);

out_err:
	pr_err("setting up logical port with id=%d failed, ret=%d\n",
	       logical_port_id, ret);
	return NULL;
}

static void ehea_shutdown_single_port(struct ehea_port *port)
{
	struct ehea_adapter *adapter = port->adapter;

	cancel_work_sync(&port->reset_task);
	cancel_delayed_work_sync(&port->stats_work);
	unregister_netdev(port->netdev);
	ehea_unregister_port(port);
	kfree(port->mc_list);
	free_netdev(port->netdev);
	adapter->active_ports--;
}

static int ehea_setup_ports(struct ehea_adapter *adapter)
{
	struct device_node *lhea_dn;
	struct device_node *eth_dn = NULL;
	const u32 *dn_log_port_id;
	int i = 0;

	lhea_dn = adapter->ofdev->dev.of_node;
	while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {

		dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
						 NULL);
		if (!dn_log_port_id) {
			pr_err("bad device node: eth_dn name=%s\n",
			       eth_dn->full_name);
			continue;
		}

		if (ehea_add_adapter_mr(adapter)) {
			pr_err("creating MR failed\n");
			of_node_put(eth_dn);
			return -EIO;
		}

		adapter->port[i] = ehea_setup_single_port(adapter,
							  *dn_log_port_id,
							  eth_dn);
		if (adapter->port[i])
			netdev_info(adapter->port[i]->netdev,
				    "logical port id #%d\n", *dn_log_port_id);
		else
			ehea_remove_adapter_mr(adapter);

		i++;
	}
	return 0;
}

static struct device_node *ehea_get_eth_dn(struct ehea_adapter *adapter,
					   u32 logical_port_id)
{
	struct device_node *lhea_dn;
	struct device_node *eth_dn = NULL;
	const u32 *dn_log_port_id;

	lhea_dn = adapter->ofdev->dev.of_node;
	while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {

		dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
						 NULL);
		if (dn_log_port_id)
			if (*dn_log_port_id == logical_port_id)
				return eth_dn;
	}

	return NULL;
}

static ssize_t ehea_probe_port(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	struct ehea_adapter *adapter = dev_get_drvdata(dev);
	struct ehea_port *port;
	struct device_node *eth_dn = NULL;
	int i;
	u32 logical_port_id;

	sscanf(buf, "%d", &logical_port_id);

	port = ehea_get_port(adapter, logical_port_id);
	if (port) {
		netdev_info(port->netdev, "adding port with logical port id=%d failed: port already configured\n",
			    logical_port_id);
		return -EINVAL;
	}

	eth_dn = ehea_get_eth_dn(adapter, logical_port_id);
	if (!eth_dn) {
		pr_info("no logical port with id %d found\n", logical_port_id);
		return -EINVAL;
	}

	if (ehea_add_adapter_mr(adapter)) {
		pr_err("creating MR failed\n");
		return -EIO;
	}

	port = ehea_setup_single_port(adapter, logical_port_id, eth_dn);

	of_node_put(eth_dn);

	if (port) {
		for (i = 0; i < EHEA_MAX_PORTS; i++)
			if (!adapter->port[i]) {
				adapter->port[i] = port;
				break;
			}

		netdev_info(port->netdev, "added: (logical port id=%d)\n",
			    logical_port_id);
	} else {
		ehea_remove_adapter_mr(adapter);
		return -EIO;
	}

	return (ssize_t) count;
}

static ssize_t ehea_remove_port(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct ehea_adapter *adapter = dev_get_drvdata(dev);
	struct ehea_port *port;
	int i;
	u32 logical_port_id;

	sscanf(buf, "%d", &logical_port_id);

	port = ehea_get_port(adapter, logical_port_id);
	if (port) {
		netdev_info(port->netdev, "removed: (logical port id=%d)\n",
			    logical_port_id);

		ehea_shutdown_single_port(port);

		for (i = 0; i < EHEA_MAX_PORTS; i++)
			if (adapter->port[i] == port) {
				adapter->port[i] = NULL;
				break;
			}
	} else {
		pr_err("removing port with logical port id=%d failed. port not configured.\n",
		       logical_port_id);
		return -EINVAL;
	}

	ehea_remove_adapter_mr(adapter);

	return (ssize_t) count;
}

static DEVICE_ATTR(probe_port, S_IWUSR, NULL, ehea_probe_port);
static DEVICE_ATTR(remove_port, S_IWUSR, NULL, ehea_remove_port);

static int ehea_create_device_sysfs(struct platform_device *dev)
{
	int ret = device_create_file(&dev->dev, &dev_attr_probe_port);
	if (ret)
		goto out;

	ret = device_create_file(&dev->dev, &dev_attr_remove_port);
out:
	return ret;
}

static void ehea_remove_device_sysfs(struct platform_device *dev)
{
	device_remove_file(&dev->dev, &dev_attr_probe_port);
	device_remove_file(&dev->dev, &dev_attr_remove_port);
}

static int ehea_reboot_notifier(struct notifier_block *nb,
				unsigned long action, void *unused)
{
	if (action == SYS_RESTART) {
		pr_info("Reboot: freeing all eHEA resources\n");
		ibmebus_unregister_driver(&ehea_driver);
	}
	return NOTIFY_DONE;
}

static struct notifier_block ehea_reboot_nb = {
	.notifier_call = ehea_reboot_notifier,
};
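
/*
 * Memory hotplug notifier: update the section busmap and re-register
 * the memory regions whenever memory goes online or offline, holding
 * dlpar_mem_lock to serialize against port resets.
 */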
static int ehea_mem_notifier(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	int ret = NOTIFY_BAD;
	struct memory_notify *arg = data;

	mutex_lock(&dlpar_mem_lock);

	switch (action) {
	case MEM_CANCEL_OFFLINE:
		pr_info("memory offlining canceled");
		/* Fall through: re-add canceled memory block */
	case MEM_ONLINE:
		pr_info("memory is going online");
		set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
		if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))
			goto out_unlock;
		ehea_rereg_mrs();
		break;
	case MEM_GOING_OFFLINE:
		pr_info("memory is going offline");
		set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
		if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
			goto out_unlock;
		ehea_rereg_mrs();
		break;
	default:
		break;
	}

	ehea_update_firmware_handles();
	ret = NOTIFY_OK;

out_unlock:
	mutex_unlock(&dlpar_mem_lock);
	return ret;
}

static struct notifier_block ehea_mem_nb = {
	.notifier_call = ehea_mem_notifier,
};

static void ehea_crash_handler(void)
{
	int i;

	if (ehea_fw_handles.arr)
		for (i = 0; i < ehea_fw_handles.num_entries; i++)
			ehea_h_free_resource(ehea_fw_handles.arr[i].adh,
					     ehea_fw_handles.arr[i].fwh,
					     FORCE_FREE);

	if (ehea_bcmc_regs.arr)
		for (i = 0; i < ehea_bcmc_regs.num_entries; i++)
			ehea_h_reg_dereg_bcmc(ehea_bcmc_regs.arr[i].adh,
					      ehea_bcmc_regs.arr[i].port_id,
					      ehea_bcmc_regs.arr[i].reg_type,
					      ehea_bcmc_regs.arr[i].macaddr,
					      0, H_DEREG_BCMC);
}

static atomic_t ehea_memory_hooks_registered;

/* Register memory hooks on probe of first adapter */
static int ehea_register_memory_hooks(void)
{
	int ret = 0;

	if (atomic_inc_return(&ehea_memory_hooks_registered) > 1)
		return 0;

	ret = ehea_create_busmap();
	if (ret) {
		pr_info("ehea_create_busmap failed\n");
		goto out;
	}

	ret = register_reboot_notifier(&ehea_reboot_nb);
	if (ret) {
		pr_info("register_reboot_notifier failed\n");
		goto out;
	}

	ret = register_memory_notifier(&ehea_mem_nb);
	if (ret) {
		pr_info("register_memory_notifier failed\n");
		goto out2;
	}

	ret = crash_shutdown_register(ehea_crash_handler);
	if (ret) {
		pr_info("crash_shutdown_register failed\n");
		goto out3;
	}

	return 0;

out3:
	unregister_memory_notifier(&ehea_mem_nb);
out2:
	unregister_reboot_notifier(&ehea_reboot_nb);
out:
	atomic_dec(&ehea_memory_hooks_registered);
	return ret;
}

static void ehea_unregister_memory_hooks(void)
{
	/* Only remove the hooks if we've registered them */
	if (atomic_read(&ehea_memory_hooks_registered) == 0)
		return;

	unregister_reboot_notifier(&ehea_reboot_nb);
	if (crash_shutdown_unregister(ehea_crash_handler))
		pr_info("failed unregistering crash handler\n");
	unregister_memory_notifier(&ehea_mem_nb);
}

static int ehea_probe_adapter(struct platform_device *dev)
{
	struct ehea_adapter *adapter;
	const u64 *adapter_handle;
	int ret;
	int i;

	ret = ehea_register_memory_hooks();
	if (ret)
		return ret;

	if (!dev || !dev->dev.of_node) {
		pr_err("Invalid ibmebus device probed\n");
		return -EINVAL;
	}

	adapter = devm_kzalloc(&dev->dev, sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		ret = -ENOMEM;
		dev_err(&dev->dev, "no mem for ehea_adapter\n");
		goto out;
	}

	list_add(&adapter->list, &adapter_list);

	adapter->ofdev = dev;

	adapter_handle = of_get_property(dev->dev.of_node, "ibm,hea-handle",
					 NULL);
	if (adapter_handle)
		adapter->handle = *adapter_handle;

	if (!adapter->handle) {
		dev_err(&dev->dev, "failed getting handle for adapter"
			" '%s'\n", dev->dev.of_node->full_name);
		ret = -ENODEV;
		goto out_free_ad;
	}

	adapter->pd = EHEA_PD_ID;

	platform_set_drvdata(dev, adapter);

	/* initialize adapter and ports */
	/* get adapter properties */
	ret = ehea_sense_adapter_attr(adapter);
	if (ret) {
		dev_err(&dev->dev, "sense_adapter_attr failed: %d\n", ret);
		goto out_free_ad;
	}

	adapter->neq = ehea_create_eq(adapter,
				      EHEA_NEQ, EHEA_MAX_ENTRIES_EQ, 1);
	if (!adapter->neq) {
		ret = -EIO;
		dev_err(&dev->dev, "NEQ creation failed\n");
		goto out_free_ad;
	}

	tasklet_init(&adapter->neq_tasklet, ehea_neq_tasklet,
		     (unsigned long)adapter);

	ret = ehea_create_device_sysfs(dev);
	if (ret)
		goto out_kill_eq;

	ret = ehea_setup_ports(adapter);
	if (ret) {
		dev_err(&dev->dev, "setup_ports failed\n");
		goto out_rem_dev_sysfs;
	}

	ret = ibmebus_request_irq(adapter->neq->attr.ist1,
				  ehea_interrupt_neq, 0,
				  "ehea_neq", adapter);
	if (ret) {
		dev_err(&dev->dev, "requesting NEQ IRQ failed\n");
		goto out_shutdown_ports;
	}

	/* Handle any events that might be pending. */
	tasklet_hi_schedule(&adapter->neq_tasklet);

	ret = 0;
	goto out;

out_shutdown_ports:
	for (i = 0; i < EHEA_MAX_PORTS; i++)
		if (adapter->port[i]) {
			ehea_shutdown_single_port(adapter->port[i]);
			adapter->port[i] = NULL;
		}

out_rem_dev_sysfs:
	ehea_remove_device_sysfs(dev);

out_kill_eq:
	ehea_destroy_eq(adapter->neq);

out_free_ad:
	list_del(&adapter->list);

out:
	ehea_update_firmware_handles();

	return ret;
}

static int ehea_remove(struct platform_device *dev)
{
	struct ehea_adapter *adapter = platform_get_drvdata(dev);
	int i;

	for (i = 0; i < EHEA_MAX_PORTS; i++)
		if (adapter->port[i]) {
			ehea_shutdown_single_port(adapter->port[i]);
			adapter->port[i] = NULL;
		}

	ehea_remove_device_sysfs(dev);

	ibmebus_free_irq(adapter->neq->attr.ist1, adapter);
	tasklet_kill(&adapter->neq_tasklet);

	ehea_destroy_eq(adapter->neq);
	ehea_remove_adapter_mr(adapter);
	list_del(&adapter->list);

	ehea_update_firmware_handles();

	return 0;
}

static int check_module_parm(void)
{
	int ret = 0;

	if ((rq1_entries < EHEA_MIN_ENTRIES_QP) ||
	    (rq1_entries > EHEA_MAX_ENTRIES_RQ1)) {
		pr_info("Bad parameter: rq1_entries\n");
		ret = -EINVAL;
	}
	if ((rq2_entries < EHEA_MIN_ENTRIES_QP) ||
	    (rq2_entries > EHEA_MAX_ENTRIES_RQ2)) {
		pr_info("Bad parameter: rq2_entries\n");
		ret = -EINVAL;
	}
	if ((rq3_entries < EHEA_MIN_ENTRIES_QP) ||
	    (rq3_entries > EHEA_MAX_ENTRIES_RQ3)) {
		pr_info("Bad parameter: rq3_entries\n");
		ret = -EINVAL;
	}
	if ((sq_entries < EHEA_MIN_ENTRIES_QP) ||
	    (sq_entries > EHEA_MAX_ENTRIES_SQ)) {
		pr_info("Bad parameter: sq_entries\n");
		ret = -EINVAL;
	}

	return ret;
}

static ssize_t ehea_show_capabilities(struct device_driver *drv,
				      char *buf)
{
	return sprintf(buf, "%d", EHEA_CAPABILITIES);
}

static DRIVER_ATTR(capabilities, S_IRUSR | S_IRGRP | S_IROTH,
		   ehea_show_capabilities, NULL);

static int __init ehea_module_init(void)
{
	int ret;

	pr_info("IBM eHEA ethernet device driver (Release %s)\n", DRV_VERSION);

	memset(&ehea_fw_handles, 0, sizeof(ehea_fw_handles));
	memset(&ehea_bcmc_regs, 0, sizeof(ehea_bcmc_regs));

	mutex_init(&ehea_fw_handles.lock);
	spin_lock_init(&ehea_bcmc_regs.lock);

	ret = check_module_parm();
	if (ret)
		goto out;

	ret = ibmebus_register_driver(&ehea_driver);
	if (ret) {
		pr_err("failed registering eHEA device driver on ebus\n");
		goto out;
	}

	ret = driver_create_file(&ehea_driver.driver,
				 &driver_attr_capabilities);
	if (ret) {
		pr_err("failed to register capabilities attribute, ret=%d\n",
		       ret);
		goto out2;
	}

	return ret;

out2:
	ibmebus_unregister_driver(&ehea_driver);
out:
	return ret;
}

static void __exit ehea_module_exit(void)
{
	driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities);
	ibmebus_unregister_driver(&ehea_driver);
	ehea_unregister_memory_hooks();
	kfree(ehea_fw_handles.arr);
	kfree(ehea_bcmc_regs.arr);
	ehea_destroy_busmap();
}

module_init(ehea_module_init);
module_exit(ehea_module_exit);