/* Qualcomm Secure Execution Environment Communicator (QSEECOM) driver
 *
 * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) "QSEECOM: %s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/platform_device.h>
#include <linux/debugfs.h>
#include <linux/cdev.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/io.h>
#include <linux/msm_ion.h>
#include <linux/types.h>
#include <linux/clk.h>
#include <linux/qseecom.h>
#include <linux/elf.h>
#include <linux/firmware.h>
#include <linux/freezer.h>
#include <linux/scatterlist.h>
#include <mach/board.h>
#include <mach/msm_bus.h>
#include <mach/msm_bus_board.h>
#include <mach/scm.h>
#include <mach/subsystem_restart.h>
#include <mach/socinfo.h>
#include <mach/qseecomi.h>
#include <asm/cacheflush.h>
#ifdef CONFIG_SEC_DEBUG
#include <mach/sec_debug.h>
#endif
#include "qseecom_legacy.h"
#include "qseecom_kernel.h"

#define QSEECOM_DEV "qseecom"
#define QSEOS_VERSION_14 0x14
#define QSEE_VERSION_00 0x400000
#define QSEE_VERSION_01 0x401000
#define QSEE_VERSION_02 0x402000
#define QSEE_VERSION_03 0x403000
#define QSEE_VERSION_04 0x404000
#define QSEE_VERSION_05 0x405000

#define QSEOS_CHECK_VERSION_CMD 0x00001803

#define QSEE_CE_CLK_100MHZ 100000000

#define QSEECOM_MAX_SG_ENTRY 512
#define QSEECOM_INVALID_KEY_ID 0xff

/* Save partition image hash for authentication check */
#define SCM_SAVE_PARTITION_HASH_ID 0x01

/* Check if enterprise security is activated */
#define SCM_IS_ACTIVATED_ID 0x02

#define RPMB_SERVICE 0x2000

#define QSEECOM_SEND_CMD_CRYPTO_TIMEOUT 2000
#define QSEECOM_LOAD_APP_CRYPTO_TIMEOUT 2000
#define TWO 2
#define U32_MAX ((u32)~0U)

enum qseecom_clk_definitions {
    CLK_DFAB = 0,
    CLK_SFPB,
};

enum qseecom_client_handle_type {
    QSEECOM_CLIENT_APP = 1,
    QSEECOM_LISTENER_SERVICE,
    QSEECOM_SECURE_SERVICE,
    QSEECOM_GENERIC,
    QSEECOM_UNAVAILABLE_CLIENT_APP,
};

enum qseecom_ce_hw_instance {
    CLK_QSEE = 0,
    CLK_CE_DRV,
};

static struct class *driver_class;
static dev_t qseecom_device_no;

static DEFINE_MUTEX(qsee_bw_mutex);
static DEFINE_MUTEX(app_access_lock);
static DEFINE_MUTEX(clk_access_lock);

struct qseecom_registered_listener_list {
    struct list_head list;
    struct qseecom_register_listener_req svc;
    uint32_t user_virt_sb_base;
    u8 *sb_virt;
    s32 sb_phys;
    size_t sb_length;
    struct ion_handle *ihandle; /* Retrieve phy addr */
    wait_queue_head_t rcv_req_wq;
    int rcv_req_flag;
};

struct qseecom_registered_app_list {
    struct list_head list;
    u32 app_id;
    u32 ref_cnt;
    char app_name[MAX_APP_NAME_SIZE];
};

struct qseecom_registered_kclient_list {
    struct list_head list;
    struct qseecom_handle *handle;
};

struct ce_hw_usage_info {
    uint32_t qsee_ce_hw_instance;
    uint32_t hlos_ce_hw_instance;
    uint32_t disk_encrypt_pipe;
    uint32_t file_encrypt_pipe;
};

struct qseecom_clk {
    enum qseecom_ce_hw_instance instance;
    struct clk *ce_core_clk;
    struct clk *ce_clk;
    struct clk *ce_core_src_clk;
    struct clk *ce_bus_clk;
    uint32_t clk_access_cnt;
};

struct qseecom_control {
    struct ion_client *ion_clnt; /* Ion client */
    struct list_head registered_listener_list_head;
    spinlock_t registered_listener_list_lock;
    struct list_head registered_app_list_head;
    spinlock_t registered_app_list_lock;
    struct list_head registered_kclient_list_head;
    spinlock_t registered_kclient_list_lock;
    wait_queue_head_t send_resp_wq;
    int send_resp_flag;
    uint32_t qseos_version;
    uint32_t qsee_version;
    struct device *pdev;
    bool commonlib_loaded;
    struct ion_handle *cmnlib_ion_handle;
    struct ce_hw_usage_info ce_info;
    int qsee_bw_count;
    int qsee_sfpb_bw_count;
    uint32_t qsee_perf_client;
    struct qseecom_clk qsee;
    struct qseecom_clk ce_drv;
    bool support_bus_scaling;
    bool support_fde;
    bool support_pfe;
    uint32_t cumulative_mode;
    enum qseecom_bandwidth_request_mode current_mode;
    struct timer_list bw_scale_down_timer;
    struct work_struct bw_inactive_req_ws;
    struct cdev cdev;
    bool timer_running;
    bool appsbl_qseecom_support;
};

struct qseecom_client_handle {
    u32 app_id;
    u8 *sb_virt;
    s32 sb_phys;
    uint32_t user_virt_sb_base;
    size_t sb_length;
    struct ion_handle *ihandle; /* Retrieve phy addr */
    char app_name[MAX_APP_NAME_SIZE];
};

struct qseecom_listener_handle {
    u32 id;
};

static struct qseecom_control qseecom;

struct qseecom_dev_handle {
    enum qseecom_client_handle_type type;
    union {
        struct qseecom_client_handle client;
        struct qseecom_listener_handle listener;
    };
    bool released;
    int abort;
    wait_queue_head_t abort_wq;
    atomic_t ioctl_count;
    bool perf_enabled;
    bool fast_load_enabled;
    enum qseecom_bandwidth_request_mode mode;
};

enum qseecom_set_clear_key_flag {
    QSEECOM_CLEAR_CE_KEY_CMD = 0,
    QSEECOM_SET_CE_KEY_CMD,
};

struct qseecom_set_key_parameter {
    uint32_t ce_hw;
    uint32_t pipe;
    uint32_t flags;
    uint8_t key_id[QSEECOM_KEY_ID_SIZE];
    unsigned char hash32[QSEECOM_HASH_SIZE];
    enum qseecom_set_clear_key_flag set_clear_key_flag;
};

struct qseecom_sg_entry {
    uint32_t phys_addr;
    uint32_t len;
};

struct qseecom_key_id_usage_desc {
    uint8_t desc[QSEECOM_KEY_ID_SIZE];
};

static struct qseecom_key_id_usage_desc key_id_array[] = {
    {
        .desc = "Undefined Usage Index",
    },
    {
        .desc = "Full Disk Encryption",
    },
    {
        .desc = "Per File Encryption",
    },
};

/* Function prototypes */
static int qsee_vote_for_clock(struct qseecom_dev_handle *, int32_t);
static void qsee_disable_clock_vote(struct qseecom_dev_handle *, int32_t);
static int __qseecom_enable_clk(enum qseecom_ce_hw_instance ce);
static void __qseecom_disable_clk(enum qseecom_ce_hw_instance ce);
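
/*
 * Return 1 if no registered listener already uses svc->listener_id,
 * 0 otherwise. Walks the listener list under its spinlock.
 */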
static int __qseecom_is_svc_unique(struct qseecom_dev_handle *data,
        struct qseecom_register_listener_req *svc)
{
    struct qseecom_registered_listener_list *ptr;
    int unique = 1;
    unsigned long flags;

    spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
    list_for_each_entry(ptr, &qseecom.registered_listener_list_head, list) {
        if (ptr->svc.listener_id == svc->listener_id) {
            pr_err("Service id: %u is already registered\n",
                    ptr->svc.listener_id);
            unique = 0;
            break;
        }
    }
    spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
    return unique;
}
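
/*
 * Look up a registered listener entry by listener_id; returns the entry,
 * or NULL (with an error log) when the id is not found.
 */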
static struct qseecom_registered_listener_list *__qseecom_find_svc(
        int32_t listener_id)
{
    struct qseecom_registered_listener_list *entry = NULL;
    unsigned long flags;

    spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
    list_for_each_entry(entry, &qseecom.registered_listener_list_head, list) {
        if (entry->svc.listener_id == listener_id)
            break;
    }
    spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);

    if ((entry != NULL) && (entry->svc.listener_id != listener_id)) {
        pr_err("Service id: %u is not found\n", listener_id);
        return NULL;
    }
    return entry;
}
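
/*
 * Map the listener's shared buffer: import the ION buffer from the fd,
 * resolve its physical address, map it into the kernel, and register
 * the buffer with QSEE via an SCM call (QSEOS_REGISTER_LISTENER).
 */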
static int __qseecom_set_sb_memory(struct qseecom_registered_listener_list *svc,
        struct qseecom_dev_handle *handle,
        struct qseecom_register_listener_req *listener)
{
    int ret = 0;
    struct qseecom_register_listener_ireq req;
    struct qseecom_command_scm_resp resp;
    ion_phys_addr_t pa;

    /* Get the handle of the shared fd */
    svc->ihandle = ion_import_dma_buf(qseecom.ion_clnt,
                listener->ifd_data_fd);
    if (IS_ERR_OR_NULL(svc->ihandle)) {
        pr_err("Ion client could not retrieve the handle\n");
        return -ENOMEM;
    }

    /* Get the physical address of the ION BUF */
    ret = ion_phys(qseecom.ion_clnt, svc->ihandle, &pa, &svc->sb_length);
    if (ret) {
        pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
                ret);
        return ret;
    }

    /* Populate the structure for sending scm call to load image */
    svc->sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt, svc->ihandle);
    svc->sb_phys = pa;
    req.qsee_cmd_id = QSEOS_REGISTER_LISTENER;
    req.listener_id = svc->svc.listener_id;
    req.sb_len = svc->sb_length;
    req.sb_ptr = (void *)svc->sb_phys;
    resp.result = QSEOS_RESULT_INCOMPLETE;

    ret = scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
                sizeof(req), &resp, sizeof(resp));
    if (ret) {
        pr_err("qseecom_scm_call failed with err: %d\n", ret);
        return -EINVAL;
    }
    if (resp.result != QSEOS_RESULT_SUCCESS) {
        pr_err("Error SB registration req: resp.result = %d\n",
                resp.result);
        return -EPERM;
    }
    return 0;
}
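
/*
 * Register-listener ioctl handler: validate the userspace request,
 * reject duplicate listener ids, allocate a list entry, register the
 * shared buffer with QSEE, and add the listener to the global list.
 */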
static int qseecom_register_listener(struct qseecom_dev_handle *data,
        void __user *argp)
{
    int ret = 0;
    unsigned long flags;
    struct qseecom_register_listener_req rcvd_lstnr;
    struct qseecom_registered_listener_list *new_entry;

    ret = copy_from_user(&rcvd_lstnr, argp, sizeof(rcvd_lstnr));
    if (ret) {
        pr_err("copy_from_user failed\n");
        return ret;
    }
    if (!access_ok(VERIFY_WRITE, (void __user *)rcvd_lstnr.virt_sb_base,
            rcvd_lstnr.sb_size))
        return -EFAULT;

    data->listener.id = 0;
    if (!__qseecom_is_svc_unique(data, &rcvd_lstnr)) {
        pr_err("Service is not unique and is already registered\n");
        data->released = true;
        return -EBUSY;
    }

    new_entry = kmalloc(sizeof(*new_entry), GFP_KERNEL);
    if (!new_entry) {
        pr_err("kmalloc failed\n");
        return -ENOMEM;
    }
    memcpy(&new_entry->svc, &rcvd_lstnr, sizeof(rcvd_lstnr));
    new_entry->rcv_req_flag = 0;
    new_entry->svc.listener_id = rcvd_lstnr.listener_id;
    new_entry->sb_length = rcvd_lstnr.sb_size;
    new_entry->user_virt_sb_base = rcvd_lstnr.virt_sb_base;
    if (__qseecom_set_sb_memory(new_entry, data, &rcvd_lstnr)) {
        pr_err("qseecom_set_sb_memory failed\n");
        kzfree(new_entry);
        return -ENOMEM;
    }

    data->listener.id = rcvd_lstnr.listener_id;
    init_waitqueue_head(&new_entry->rcv_req_wq);

    spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
    list_add_tail(&new_entry->list, &qseecom.registered_listener_list_head);
    spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);

    return ret;
}
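
/*
 * Deregister a listener with QSEE, abort any ioctl still waiting on it,
 * then remove it from the list and release its ION shared buffer.
 */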
static int qseecom_unregister_listener(struct qseecom_dev_handle *data)
{
    int ret = 0;
    unsigned long flags;
    uint32_t unmap_mem = 0;
    struct qseecom_register_listener_ireq req;
    struct qseecom_registered_listener_list *ptr_svc = NULL;
    struct qseecom_command_scm_resp resp;
    struct ion_handle *ihandle = NULL; /* Retrieve phy addr */

    req.qsee_cmd_id = QSEOS_DEREGISTER_LISTENER;
    req.listener_id = data->listener.id;
    resp.result = QSEOS_RESULT_INCOMPLETE;

    ret = scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
                sizeof(req), &resp, sizeof(resp));
    if (ret) {
        pr_err("scm_call() failed with err: %d (lstnr id=%d)\n",
                ret, data->listener.id);
        return ret;
    }
    if (resp.result != QSEOS_RESULT_SUCCESS) {
        pr_err("Failed resp.result=%d,(lstnr id=%d)\n",
                resp.result, data->listener.id);
        return -EPERM;
    }

    data->abort = 1;
    spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
    list_for_each_entry(ptr_svc, &qseecom.registered_listener_list_head,
            list) {
        if (ptr_svc->svc.listener_id == data->listener.id) {
            wake_up_all(&ptr_svc->rcv_req_wq);
            break;
        }
    }
    spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);

    while (atomic_read(&data->ioctl_count) > 1) {
        if (wait_event_freezable(data->abort_wq,
                atomic_read(&data->ioctl_count) <= 1)) {
            pr_err("Interrupted from abort\n");
            ret = -ERESTARTSYS;
            break;
        }
    }

    spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
    list_for_each_entry(ptr_svc, &qseecom.registered_listener_list_head,
            list) {
        if (ptr_svc->svc.listener_id == data->listener.id) {
            if (ptr_svc->sb_virt) {
                unmap_mem = 1;
                ihandle = ptr_svc->ihandle;
            }
            list_del(&ptr_svc->list);
            kzfree(ptr_svc);
            break;
        }
    }
    spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);

    /* Unmap the memory */
    if (unmap_mem) {
        if (!IS_ERR_OR_NULL(ihandle)) {
            ion_unmap_kernel(qseecom.ion_clnt, ihandle);
            ion_free(qseecom.ion_clnt, ihandle);
        }
    }
    data->released = true;
    return ret;
}
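
/*
 * Apply a bandwidth mode to the msm bus scaling client. When a core src
 * clock exists, the QSEE clocks are enabled/disabled to match the mode;
 * on a failed bus request the clock change is rolled back.
 */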
static int __qseecom_set_msm_bus_request(uint32_t mode)
{
    int ret = 0;
    struct qseecom_clk *qclk;

    qclk = &qseecom.qsee;
    if (qclk->ce_core_src_clk != NULL) {
        if (mode == INACTIVE) {
            __qseecom_disable_clk(CLK_QSEE);
        } else {
            ret = __qseecom_enable_clk(CLK_QSEE);
            if (ret)
                pr_err("CLK enabling failed (%d) MODE (%d)\n",
                        ret, mode);
        }
    }

    if ((!ret) && (qseecom.current_mode != mode)) {
        ret = msm_bus_scale_client_update_request(
                qseecom.qsee_perf_client, mode);
        if (ret) {
            pr_err("Bandwidth req failed(%d) MODE (%d)\n",
                    ret, mode);
            if (qclk->ce_core_src_clk != NULL) {
                if (mode == INACTIVE)
                    __qseecom_enable_clk(CLK_QSEE);
                else
                    __qseecom_disable_clk(CLK_QSEE);
            }
        }
        qseecom.current_mode = mode;
    }
    return ret;
}
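
/*
 * Deferred work scheduled by the scale-down timer: drop the bus request
 * to INACTIVE once the timer fires, i.e. after a period of inactivity.
 */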
static void qseecom_bw_inactive_req_work(struct work_struct *work)
{
    mutex_lock(&app_access_lock);
    mutex_lock(&qsee_bw_mutex);
    __qseecom_set_msm_bus_request(INACTIVE);
    pr_debug("current_mode = %d, cumulative_mode = %d\n",
            qseecom.current_mode, qseecom.cumulative_mode);
    qseecom.timer_running = false;
    mutex_unlock(&qsee_bw_mutex);
    mutex_unlock(&app_access_lock);
    return;
}

static void qseecom_scale_bus_bandwidth_timer_callback(unsigned long data)
{
    schedule_work(&qseecom.bw_inactive_req_ws);
    return;
}

static int __qseecom_decrease_clk_ref_count(enum qseecom_ce_hw_instance ce)
{
    struct qseecom_clk *qclk;
    int ret = 0;

    mutex_lock(&clk_access_lock);
    if (ce == CLK_QSEE)
        qclk = &qseecom.qsee;
    else
        qclk = &qseecom.ce_drv;

    if (qclk->clk_access_cnt > 2) {
        pr_err("Invalid clock ref count %d\n", qclk->clk_access_cnt);
        ret = -EINVAL;
        goto err_dec_ref_cnt;
    }
    if (qclk->clk_access_cnt == 2)
        qclk->clk_access_cnt--;
err_dec_ref_cnt:
    mutex_unlock(&clk_access_lock);
    return ret;
}
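
/*
 * Raise the bus request to the given mode (or to the cumulative demand,
 * capped at HIGH, when mode is 0) and cancel a pending scale-down timer.
 */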
static int qseecom_scale_bus_bandwidth_timer(uint32_t mode)
{
    int32_t ret = 0;
    int32_t request_mode = INACTIVE;

    mutex_lock(&qsee_bw_mutex);
    if (mode == 0) {
        if (qseecom.cumulative_mode > MEDIUM)
            request_mode = HIGH;
        else
            request_mode = qseecom.cumulative_mode;
    } else {
        request_mode = mode;
    }

    ret = __qseecom_set_msm_bus_request(request_mode);
    if (ret) {
        pr_err("set msm bus request failed (%d),request_mode (%d)\n",
                ret, request_mode);
        goto err_scale_timer;
    }

    if (qseecom.timer_running) {
        ret = __qseecom_decrease_clk_ref_count(CLK_QSEE);
        if (ret) {
            pr_err("Failed to decrease clk ref count.\n");
            goto err_scale_timer;
        }
        del_timer_sync(&(qseecom.bw_scale_down_timer));
        qseecom.timer_running = false;
    }
err_scale_timer:
    mutex_unlock(&qsee_bw_mutex);
    return ret;
}

static int qseecom_unregister_bus_bandwidth_needs(
        struct qseecom_dev_handle *data)
{
    int32_t ret = 0;

    qseecom.cumulative_mode -= data->mode;
    data->mode = INACTIVE;
    return ret;
}

static int __qseecom_register_bus_bandwidth_needs(
        struct qseecom_dev_handle *data, uint32_t request_mode)
{
    int32_t ret = 0;

    if (data->mode == INACTIVE) {
        qseecom.cumulative_mode += request_mode;
        data->mode = request_mode;
    } else {
        if (data->mode != request_mode) {
            qseecom.cumulative_mode -= data->mode;
            qseecom.cumulative_mode += request_mode;
            data->mode = request_mode;
        }
    }
    return ret;
}
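
/*
 * Vote for both DFAB and SFPB clocks on behalf of a client; undo the
 * DFAB vote if the SFPB vote fails so the votes stay balanced.
 */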
static int qseecom_perf_enable(struct qseecom_dev_handle *data)
{
    int ret = 0;

    ret = qsee_vote_for_clock(data, CLK_DFAB);
    if (ret) {
        pr_err("Failed to vote for DFAB clock with err %d\n", ret);
        goto perf_enable_exit;
    }
    ret = qsee_vote_for_clock(data, CLK_SFPB);
    if (ret) {
        qsee_disable_clock_vote(data, CLK_DFAB);
        pr_err("Failed to vote for SFPB clock with err %d\n", ret);
        goto perf_enable_exit;
    }
perf_enable_exit:
    return ret;
}

static int qseecom_scale_bus_bandwidth(struct qseecom_dev_handle *data,
        void __user *argp)
{
    int32_t ret = 0;
    int32_t req_mode;

    ret = copy_from_user(&req_mode, argp, sizeof(req_mode));
    if (ret) {
        pr_err("copy_from_user failed\n");
        return ret;
    }
    if (req_mode > HIGH) {
        pr_err("Invalid bandwidth mode (%d)\n", req_mode);
        return -EINVAL;
    }

    /*
     * Register bus bandwidth needs if bus scaling feature is enabled;
     * otherwise, qseecom enable/disable clocks for the client directly.
     */
    if (qseecom.support_bus_scaling) {
        mutex_lock(&qsee_bw_mutex);
        ret = __qseecom_register_bus_bandwidth_needs(data, req_mode);
        mutex_unlock(&qsee_bw_mutex);
    } else {
        pr_debug("Bus scaling feature is NOT enabled\n");
        pr_debug("request bandwidth mode %d for the client\n",
                req_mode);
        if (req_mode != INACTIVE) {
            ret = qseecom_perf_enable(data);
            if (ret)
                pr_err("Failed to vote for clock with err %d\n",
                        ret);
        } else {
            qsee_disable_clock_vote(data, CLK_DFAB);
            qsee_disable_clock_vote(data, CLK_SFPB);
        }
    }
    return ret;
}

static void __qseecom_add_bw_scale_down_timer(uint32_t duration)
{
    mutex_lock(&qsee_bw_mutex);
    qseecom.bw_scale_down_timer.expires = jiffies +
            msecs_to_jiffies(duration);
    mod_timer(&(qseecom.bw_scale_down_timer),
            qseecom.bw_scale_down_timer.expires);
    qseecom.timer_running = true;
    mutex_unlock(&qsee_bw_mutex);
}

static void __qseecom_disable_clk_scale_down(struct qseecom_dev_handle *data)
{
    if (!qseecom.support_bus_scaling)
        qsee_disable_clock_vote(data, CLK_SFPB);
    else
        __qseecom_add_bw_scale_down_timer(
                QSEECOM_LOAD_APP_CRYPTO_TIMEOUT);
    return;
}

static int __qseecom_enable_clk_scale_up(struct qseecom_dev_handle *data)
{
    int ret = 0;

    if (qseecom.support_bus_scaling) {
        ret = qseecom_scale_bus_bandwidth_timer(MEDIUM);
        if (ret)
            pr_err("Failed to set bw MEDIUM.\n");
    } else {
        ret = qsee_vote_for_clock(data, CLK_SFPB);
        if (ret)
            pr_err("Fail vote for clk SFPB ret %d\n", ret);
    }
    return ret;
}
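
/*
 * Set-mem-param ioctl handler: validate the client's shared buffer
 * parameters, import the ION buffer, and record its physical and
 * kernel-mapped addresses in the client handle.
 */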
static int qseecom_set_client_mem_param(struct qseecom_dev_handle *data,
        void __user *argp)
{
    ion_phys_addr_t pa;
    int32_t ret;
    struct qseecom_set_sb_mem_param_req req;
    uint32_t len;

    /* Copy the relevant information needed for loading the image */
    if (copy_from_user(&req, (void __user *)argp, sizeof(req)))
        return -EFAULT;

    if ((req.ifd_data_fd <= 0) || (req.virt_sb_base == 0) ||
            (req.sb_len == 0)) {
        pr_err("Invalid input(s): ion_fd(%d), sb_len(%d), vaddr(0x%x)\n",
                req.ifd_data_fd, req.sb_len, req.virt_sb_base);
        return -EFAULT;
    }
    if (!access_ok(VERIFY_WRITE, (void __user *)req.virt_sb_base,
            req.sb_len))
        return -EFAULT;

    /* Get the handle of the shared fd */
    data->client.ihandle = ion_import_dma_buf(qseecom.ion_clnt,
                req.ifd_data_fd);
    if (IS_ERR_OR_NULL(data->client.ihandle)) {
        pr_err("Ion client could not retrieve the handle\n");
        return -ENOMEM;
    }
    /* Get the physical address of the ION BUF */
    ret = ion_phys(qseecom.ion_clnt, data->client.ihandle, &pa, &len);
    if (ret) {
        pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
                ret);
        return ret;
    }
    /* Populate the structure for sending scm call to load image */
    data->client.sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt,
                data->client.ihandle);
    data->client.sb_phys = pa;
    data->client.sb_length = req.sb_len;
    data->client.user_virt_sb_base = req.virt_sb_base;
    return 0;
}

static int __qseecom_listener_has_sent_rsp(struct qseecom_dev_handle *data)
{
    int ret;

    ret = (qseecom.send_resp_flag != 0);
    return ret || data->abort;
}
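
/*
 * Service QSEOS_RESULT_INCOMPLETE responses: each round, wake the listener
 * named in resp->data, wait (with signals blocked) for its response, then
 * return the listener's status to QSEE via QSEOS_LISTENER_DATA_RSP_COMMAND.
 * Loops until QSEE stops returning INCOMPLETE.
 */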
static int __qseecom_process_incomplete_cmd(struct qseecom_dev_handle *data,
        struct qseecom_command_scm_resp *resp)
{
    int ret = 0;
    int rc = 0;
    uint32_t lstnr;
    unsigned long flags;
    struct qseecom_client_listener_data_irsp send_data_rsp;
    struct qseecom_registered_listener_list *ptr_svc = NULL;
    sigset_t new_sigset;
    sigset_t old_sigset;

    while (resp->result == QSEOS_RESULT_INCOMPLETE) {
        lstnr = resp->data;
        /*
         * Wake up the blocking listener service with the lstnr id
         */
        spin_lock_irqsave(&qseecom.registered_listener_list_lock,
                flags);
        list_for_each_entry(ptr_svc,
                &qseecom.registered_listener_list_head, list) {
            if (ptr_svc->svc.listener_id == lstnr) {
                ptr_svc->rcv_req_flag = 1;
                wake_up_interruptible(&ptr_svc->rcv_req_wq);
                break;
            }
        }
        spin_unlock_irqrestore(&qseecom.registered_listener_list_lock,
                flags);

        if (ptr_svc == NULL) {
            pr_err("Listener Svc %d does not exist\n", lstnr);
            return -EINVAL;
        }
        if (ptr_svc->svc.listener_id != lstnr) {
            pr_warning("Service requested does not exist\n");
            return -ERESTARTSYS;
        }
        pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");

        /* initialize the new signal mask with all signals */
        sigfillset(&new_sigset);
        /* block all signals */
        sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);

        do {
            if (!wait_event_freezable(qseecom.send_resp_wq,
                    __qseecom_listener_has_sent_rsp(data)))
                break;
        } while (1);

        /* restore signal mask */
        sigprocmask(SIG_SETMASK, &old_sigset, NULL);
        if (data->abort) {
            pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d",
                    data->client.app_id, lstnr, ret);
            rc = -ENODEV;
            send_data_rsp.status = QSEOS_RESULT_FAILURE;
        } else {
            send_data_rsp.status = QSEOS_RESULT_SUCCESS;
        }

        qseecom.send_resp_flag = 0;
        send_data_rsp.qsee_cmd_id = QSEOS_LISTENER_DATA_RSP_COMMAND;
        send_data_rsp.listener_id = lstnr;
        if (ptr_svc)
            msm_ion_do_cache_op(qseecom.ion_clnt, ptr_svc->ihandle,
                    ptr_svc->sb_virt, ptr_svc->sb_length,
                    ION_IOC_CLEAN_INV_CACHES);

        if (lstnr == RPMB_SERVICE)
            __qseecom_enable_clk(CLK_QSEE);

        ret = scm_call(SCM_SVC_TZSCHEDULER, 1,
                (const void *)&send_data_rsp,
                sizeof(send_data_rsp), resp,
                sizeof(*resp));
        if (ret) {
            pr_err("scm_call() failed with err: %d (app_id = %d)\n",
                    ret, data->client.app_id);
            if (lstnr == RPMB_SERVICE)
                __qseecom_disable_clk(CLK_QSEE);
            return ret;
        }
        if ((resp->result != QSEOS_RESULT_SUCCESS) &&
                (resp->result != QSEOS_RESULT_INCOMPLETE)) {
            pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
                    resp->result, data->client.app_id, lstnr);
            ret = -EINVAL;
        }
        if (lstnr == RPMB_SERVICE)
            __qseecom_disable_clk(CLK_QSEE);
    }
    if (rc)
        return rc;
    return ret;
}
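
/*
 * Ask QSEE whether an app with the given name is already loaded.
 * Returns the app_id if it exists, 0 if not, or a negative errno.
 */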
static int __qseecom_check_app_exists(struct qseecom_check_app_ireq req)
{
    int32_t ret;
    struct qseecom_command_scm_resp resp;

    /* SCM_CALL to check if app_id for the mentioned app exists */
    ret = scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
                sizeof(struct qseecom_check_app_ireq),
                &resp, sizeof(resp));
    if (ret) {
        pr_err("scm_call to check if app is already loaded failed\n");
        return -EINVAL;
    }

    if (resp.result == QSEOS_RESULT_FAILURE) {
        return 0;
    } else {
        switch (resp.resp_type) {
        /* qsee returned listener type response */
        case QSEOS_LISTENER_ID:
            pr_err("resp type is of listener type instead of app");
            return -EINVAL;
            break;
        case QSEOS_APP_ID:
            return resp.data;
        default:
            pr_err("invalid resp type (%d) from qsee",
                    resp.resp_type);
            return -ENODEV;
            break;
        }
    }
}
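
/*
 * Load-app ioctl handler: if the named app is already loaded, just bump
 * its refcount; otherwise import the app image from the fd and ask QSEE
 * to start it (QSEOS_APP_START_COMMAND), then track it in the registered
 * app list. Bus bandwidth/clock votes are held across the SCM call.
 */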
static int qseecom_load_app(struct qseecom_dev_handle *data, void __user *argp)
{
    struct qseecom_registered_app_list *entry = NULL;
    unsigned long flags = 0;
    u32 app_id = 0;
    struct ion_handle *ihandle; /* Ion handle */
    struct qseecom_load_img_req load_img_req;
    int32_t ret = 0;
    ion_phys_addr_t pa = 0;
    uint32_t len;
    struct qseecom_command_scm_resp resp;
    struct qseecom_check_app_ireq req;
    struct qseecom_load_app_ireq load_req;
    bool first_time = false;

    /* Copy the relevant information needed for loading the image */
    if (copy_from_user(&load_img_req,
                (void __user *)argp,
                sizeof(struct qseecom_load_img_req))) {
        pr_err("copy_from_user failed\n");
        return -EFAULT;
    }

    if (qseecom.support_bus_scaling) {
        mutex_lock(&qsee_bw_mutex);
        ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
        mutex_unlock(&qsee_bw_mutex);
        if (ret)
            return ret;
    }

    /* Vote for the SFPB clock */
    ret = __qseecom_enable_clk_scale_up(data);
    if (ret)
        goto enable_clk_err;

    req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
    load_img_req.img_name[MAX_APP_NAME_SIZE-1] = '\0';
    strlcpy(req.app_name, load_img_req.img_name, MAX_APP_NAME_SIZE);

    ret = __qseecom_check_app_exists(req);
    if (ret < 0)
        goto loadapp_err;

    app_id = ret;
    if (app_id) {
        pr_debug("App id %d (%s) already exists\n", app_id,
                (char *)(req.app_name));
        spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
        list_for_each_entry(entry,
                &qseecom.registered_app_list_head, list) {
            if (entry->app_id == app_id) {
                entry->ref_cnt++;
                break;
            }
        }
        spin_unlock_irqrestore(
                &qseecom.registered_app_list_lock, flags);
        ret = 0;
    } else {
        first_time = true;
        pr_warn("App (%s) doesn't exist, loading app for first time\n",
                (char *)(load_img_req.img_name));
        /* Get the handle of the shared fd */
        ihandle = ion_import_dma_buf(qseecom.ion_clnt,
                    load_img_req.ifd_data_fd);
        if (IS_ERR_OR_NULL(ihandle)) {
            pr_err("Ion client could not retrieve the handle\n");
            ret = -ENOMEM;
            goto loadapp_err;
        }

        /* Get the physical address of the ION BUF */
        ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
        if (ret) {
            pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
                    ret);
            goto loadapp_err;
        }

        /* Populate the structure for sending scm call to load image */
        strlcpy(load_req.app_name, load_img_req.img_name,
                MAX_APP_NAME_SIZE);
        load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
        load_req.mdt_len = load_img_req.mdt_len;
        load_req.img_len = load_img_req.img_len;
        load_req.phy_addr = pa;
        msm_ion_do_cache_op(qseecom.ion_clnt, ihandle, NULL, len,
                ION_IOC_CLEAN_INV_CACHES);

        /* SCM_CALL to load the app and get the app_id back */
        ret = scm_call(SCM_SVC_TZSCHEDULER, 1, &load_req,
                sizeof(struct qseecom_load_app_ireq),
                &resp, sizeof(resp));
        if (ret) {
            pr_err("scm_call to load app failed\n");
            if (!IS_ERR_OR_NULL(ihandle))
                ion_free(qseecom.ion_clnt, ihandle);
            ret = -EINVAL;
            goto loadapp_err;
        }

        if (resp.result == QSEOS_RESULT_FAILURE) {
            pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
            if (!IS_ERR_OR_NULL(ihandle))
                ion_free(qseecom.ion_clnt, ihandle);
            ret = -EFAULT;
            goto loadapp_err;
        }

        if (resp.result == QSEOS_RESULT_INCOMPLETE) {
            ret = __qseecom_process_incomplete_cmd(data, &resp);
            if (ret) {
                pr_err("process_incomplete_cmd failed err: %d\n",
                        ret);
                if (!IS_ERR_OR_NULL(ihandle))
                    ion_free(qseecom.ion_clnt, ihandle);
                ret = -EFAULT;
                goto loadapp_err;
            }
        }

        if (resp.result != QSEOS_RESULT_SUCCESS) {
            pr_err("scm_call failed resp.result unknown, %d\n",
                    resp.result);
            if (!IS_ERR_OR_NULL(ihandle))
                ion_free(qseecom.ion_clnt, ihandle);
            ret = -EFAULT;
            goto loadapp_err;
        }

        app_id = resp.data;

        entry = kmalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry) {
            pr_err("kmalloc failed\n");
            ret = -ENOMEM;
            goto loadapp_err;
        }
        entry->app_id = app_id;
        entry->ref_cnt = 1;
        strlcpy(entry->app_name, load_img_req.img_name,
                MAX_APP_NAME_SIZE);

        /* Deallocate the handle */
        if (!IS_ERR_OR_NULL(ihandle))
            ion_free(qseecom.ion_clnt, ihandle);

        spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
        list_add_tail(&entry->list, &qseecom.registered_app_list_head);
        spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
                flags);

        pr_warn("App with id %d (%s) now loaded\n", app_id,
                (char *)(load_img_req.img_name));
    }
    data->client.app_id = app_id;
    strlcpy(data->client.app_name, load_img_req.img_name,
            MAX_APP_NAME_SIZE);
    load_img_req.app_id = app_id;
    if (copy_to_user(argp, &load_img_req, sizeof(load_img_req))) {
        pr_err("copy_to_user failed\n");
        ret = -EFAULT;
        if (first_time == true) {
            spin_lock_irqsave(
                    &qseecom.registered_app_list_lock, flags);
            list_del(&entry->list);
            spin_unlock_irqrestore(
                    &qseecom.registered_app_list_lock, flags);
            kzfree(entry);
        }
    }
loadapp_err:
    __qseecom_disable_clk_scale_down(data);
enable_clk_err:
    if (qseecom.support_bus_scaling) {
        mutex_lock(&qsee_bw_mutex);
        qseecom_unregister_bus_bandwidth_needs(data);
        mutex_unlock(&qsee_bw_mutex);
    }
    return ret;
}
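
/*
 * Wake any listener response waiters and wait for outstanding ioctls on
 * this handle to drain before the app is torn down.
 */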
static int __qseecom_cleanup_app(struct qseecom_dev_handle *data)
{
    wake_up_all(&qseecom.send_resp_wq);
    while (atomic_read(&data->ioctl_count) > 1) {
        if (wait_event_freezable(data->abort_wq,
                atomic_read(&data->ioctl_count) <= 1)) {
            pr_err("Interrupted from abort\n");
            return -ERESTARTSYS;
        }
    }
    /* Set unload app */
    return 1;
}

static int qseecom_unmap_ion_allocated_memory(struct qseecom_dev_handle *data)
{
    int ret = 0;

    if (!IS_ERR_OR_NULL(data->client.ihandle)) {
        ion_unmap_kernel(qseecom.ion_clnt, data->client.ihandle);
        ion_free(qseecom.ion_clnt, data->client.ihandle);
        memset((void *)&data->client,
                0, sizeof(struct qseecom_client_handle));
    }
    return ret;
}
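
/*
 * Drop a reference on the client's app and, when this is the last
 * reference (or the app crashed), send QSEOS_APP_SHUTDOWN_COMMAND to
 * QSEE and remove the app from the registered list. The keymaster app
 * is never unloaded from TZ.
 */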
static int qseecom_unload_app(struct qseecom_dev_handle *data,
				bool app_crash)
{
	unsigned long flags;
	unsigned long flags1;
	int ret = 0;
	struct qseecom_command_scm_resp resp;
	struct qseecom_registered_app_list *ptr_app = NULL;
	bool unload = false;
	bool found_app = false;
	bool found_dead_app = false;

	if (!memcmp(data->client.app_name, "keymaste", strlen("keymaste"))) {
		pr_debug("Do not unload keymaster app from tz\n");
		goto unload_exit;
	}

	if (data->client.app_id > 0) {
		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
		list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
									list) {
			if (ptr_app->app_id == data->client.app_id) {
				if (!memcmp((void *)ptr_app->app_name,
					(void *)data->client.app_name,
					strlen(data->client.app_name))) {
					found_app = true;
					if (app_crash || ptr_app->ref_cnt == 1)
						unload = true;
					break;
				} else {
					found_dead_app = true;
					break;
				}
			}
		}
		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
								flags);
		if (found_app == false && found_dead_app == false) {
			pr_err("Cannot find app with id = %d (%s)\n",
				data->client.app_id,
				(char *)data->client.app_name);
			return -EINVAL;
		}
	}

	if (found_dead_app) {
		pr_warn("cleanup app_id %d (%s)\n", data->client.app_id,
			(char *)data->client.app_name);
		__qseecom_cleanup_app(data);
	}

	if (unload) {
		struct qseecom_unload_app_ireq req;

		/* Populate the structure for the app shutdown scm call */
		req.qsee_cmd_id = QSEOS_APP_SHUTDOWN_COMMAND;
		req.app_id = data->client.app_id;

		/* SCM_CALL to unload the app */
		ret = scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
				sizeof(struct qseecom_unload_app_ireq),
				&resp, sizeof(resp));
		if (ret) {
			pr_err("scm_call to unload app (id = %d) failed\n",
								req.app_id);
			ret = -EFAULT;
			goto unload_exit;
		} else {
			pr_warn("App id %d now unloaded\n", req.app_id);
		}
		if (resp.result == QSEOS_RESULT_FAILURE) {
			pr_err("app (%d) unload failed\n",
					data->client.app_id);
			ret = -EFAULT;
			goto unload_exit;
		}
		if (resp.result == QSEOS_RESULT_SUCCESS)
			pr_debug("App (%d) is unloaded\n",
					data->client.app_id);
		__qseecom_cleanup_app(data);
		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
			ret = __qseecom_process_incomplete_cmd(data, &resp);
			if (ret) {
				pr_err("process_incomplete_cmd fail err: %d\n",
									ret);
				goto unload_exit;
			}
		}
	}

	if (found_app) {
		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags1);
		if (app_crash) {
			ptr_app->ref_cnt = 0;
			pr_debug("app_crash: ref_count = 0\n");
		} else {
			if (ptr_app->ref_cnt == 1) {
				ptr_app->ref_cnt = 0;
				pr_debug("ref_count set to 0\n");
			} else {
				ptr_app->ref_cnt--;
				pr_debug("Can't unload app(%d) inuse\n",
							ptr_app->app_id);
			}
		}
		if (unload) {
			list_del(&ptr_app->list);
			kzfree(ptr_app);
		}
		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
								flags1);
	}

unload_exit:
	qseecom_unmap_ion_allocated_memory(data);
	data->released = true;
	return ret;
}
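
/*
 * Note on the ref_cnt handling above (descriptive, not new behavior): the
 * app is actually shut down in TZ only when "unload" is set, i.e. on an
 * app crash or when the last reference is dropped; otherwise only ref_cnt
 * is decremented and the registered-app entry stays on the list.
 */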
static uint32_t __qseecom_uvirt_to_kphys(struct qseecom_dev_handle *data,
						uint32_t virt)
{
	return data->client.sb_phys + (virt - data->client.user_virt_sb_base);
}

static uint32_t __qseecom_uvirt_to_kvirt(struct qseecom_dev_handle *data,
						uint32_t virt)
{
	return (uint32_t)data->client.sb_virt +
		(virt - data->client.user_virt_sb_base);
}
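
/*
 * Worked example for the two helpers above (all addresses are made up for
 * illustration; only the offset arithmetic matters): if the shared buffer
 * is mapped at user virtual 0x40001000 (user_virt_sb_base) and its
 * physical base (sb_phys) is 0x8e000000, then a client pointer
 * virt == 0x40001100 translates to 0x8e000000 + 0x100 = 0x8e000100 via
 * __qseecom_uvirt_to_kphys(); __qseecom_uvirt_to_kvirt() applies the same
 * 0x100 offset to the kernel mapping sb_virt instead.
 */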
int __qseecom_process_rpmb_svc_cmd(struct qseecom_dev_handle *data_ptr,
		struct qseecom_send_svc_cmd_req *req_ptr,
		struct qseecom_client_send_service_ireq *send_svc_ireq_ptr)
{
	int ret = 0;
	void *req_buf = NULL;

	if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) {
		pr_err("Error with pointer: req_ptr = %pK, send_svc_ptr = %pK\n",
			req_ptr, send_svc_ireq_ptr);
		return -EINVAL;
	}

	/* Clients need to ensure req_buf is at base offset of shared buffer */
	if ((uint32_t)req_ptr->cmd_req_buf !=
			data_ptr->client.user_virt_sb_base) {
		pr_err("cmd buf not pointing to base offset of shared buffer\n");
		return -EINVAL;
	}

	if (((uint32_t)req_ptr->cmd_req_buf <
			data_ptr->client.user_virt_sb_base) ||
		((uint32_t)req_ptr->cmd_req_buf >=
			(data_ptr->client.user_virt_sb_base +
			data_ptr->client.sb_length))) {
		pr_err("cmd buffer address not within shared buffer\n");
		return -EINVAL;
	}

	if (data_ptr->client.sb_length <
			sizeof(struct qseecom_rpmb_provision_key)) {
		pr_err("shared buffer is too small to hold key type\n");
		return -EINVAL;
	}
	req_buf = data_ptr->client.sb_virt;

	send_svc_ireq_ptr->qsee_cmd_id = req_ptr->cmd_id;
	send_svc_ireq_ptr->key_type =
		((struct qseecom_rpmb_provision_key *)req_buf)->key_type;
	send_svc_ireq_ptr->req_len = req_ptr->cmd_req_len;
	send_svc_ireq_ptr->rsp_ptr = (void *)(__qseecom_uvirt_to_kphys(data_ptr,
					(uint32_t)req_ptr->resp_buf));
	send_svc_ireq_ptr->rsp_len = req_ptr->resp_len;
	return ret;
}
static int __validate_send_service_cmd_inputs(struct qseecom_dev_handle *data,
				struct qseecom_send_svc_cmd_req *req)
{
	if (!req || !req->resp_buf || !req->cmd_req_buf) {
		pr_err("req or cmd buffer or response buffer is null\n");
		return -EINVAL;
	}
	if (data->client.sb_virt == NULL) {
		pr_err("sb_virt null\n");
		return -EINVAL;
	}
	if (data->client.user_virt_sb_base == 0) {
		pr_err("user_virt_sb_base is null\n");
		return -EINVAL;
	}
	if (data->client.sb_length == 0) {
		pr_err("sb_length is 0\n");
		return -EINVAL;
	}
	if (((uintptr_t)req->cmd_req_buf <
			data->client.user_virt_sb_base) ||
		((uintptr_t)req->cmd_req_buf >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("cmd buffer address not within shared buffer\n");
		return -EINVAL;
	}
	if (((uintptr_t)req->resp_buf <
			data->client.user_virt_sb_base) ||
		((uintptr_t)req->resp_buf >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("response buffer address not within shared buffer\n");
		return -EINVAL;
	}
	if ((req->cmd_req_len == 0) || (req->resp_len == 0) ||
		(req->cmd_req_len > data->client.sb_length) ||
		(req->resp_len > data->client.sb_length)) {
		pr_err("cmd buf length or response buf length not valid\n");
		return -EINVAL;
	}
	if (req->cmd_req_len > UINT_MAX - req->resp_len) {
		pr_err("Integer overflow detected in req_len & rsp_len\n");
		return -EINVAL;
	}
	if ((req->cmd_req_len + req->resp_len) > data->client.sb_length) {
		pr_debug("Not enough memory to fit cmd_buf and resp_buf. Required: %u, Available: %zu\n",
			(req->cmd_req_len + req->resp_len),
			data->client.sb_length);
		return -ENOMEM;
	}
	if ((uintptr_t)req->cmd_req_buf > (ULONG_MAX - req->cmd_req_len)) {
		pr_err("Integer overflow in req_len & cmd_req_buf\n");
		return -EINVAL;
	}
	if ((uintptr_t)req->resp_buf > (ULONG_MAX - req->resp_len)) {
		pr_err("Integer overflow in resp_len & resp_buf\n");
		return -EINVAL;
	}
	if (data->client.user_virt_sb_base >
			(ULONG_MAX - data->client.sb_length)) {
		pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
		return -EINVAL;
	}
	if ((((uintptr_t)req->cmd_req_buf + req->cmd_req_len) >
		((uintptr_t)data->client.user_virt_sb_base +
					data->client.sb_length)) ||
		(((uintptr_t)req->resp_buf + req->resp_len) >
		((uintptr_t)data->client.user_virt_sb_base +
					data->client.sb_length))) {
		pr_err("cmd buf or resp buf is out of shared buffer region\n");
		return -EINVAL;
	}
	return 0;
}
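
/*
 * Example of the wrap the UINT_MAX check above catches (values are
 * illustrative): with cmd_req_len == 0xFFFFFF00 and resp_len == 0x200,
 * the 32-bit sum wraps to 0x100 and would sail past a naive
 * "sum <= sb_length" test, so cmd_req_len > UINT_MAX - resp_len must be
 * rejected before the sum is ever formed. The later ULONG_MAX checks
 * guard the pointer-plus-length additions the same way.
 */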
static int qseecom_send_service_cmd(struct qseecom_dev_handle *data,
				void __user *argp)
{
	int ret = 0;
	struct qseecom_client_send_service_ireq send_svc_ireq;
	struct qseecom_command_scm_resp resp;
	struct qseecom_send_svc_cmd_req req;

	if (copy_from_user(&req, (void __user *)argp, sizeof(req))) {
		pr_err("copy_from_user failed\n");
		return -EFAULT;
	}

	if (__validate_send_service_cmd_inputs(data, &req))
		return -EINVAL;

	data->type = QSEECOM_SECURE_SERVICE;

	switch (req.cmd_id) {
	case QSEOS_RPMB_PROVISION_KEY_COMMAND:
	case QSEOS_RPMB_ERASE_COMMAND:
		if (__qseecom_process_rpmb_svc_cmd(data, &req,
				&send_svc_ireq))
			return -EINVAL;
		break;
	default:
		pr_err("Unsupported cmd_id %d\n", req.cmd_id);
		return -EINVAL;
	}

	if (qseecom.support_bus_scaling) {
		ret = qseecom_scale_bus_bandwidth_timer(HIGH);
		if (ret) {
			pr_err("Failed to set bw to HIGH\n");
			return ret;
		}
	} else {
		ret = qseecom_perf_enable(data);
		if (ret) {
			pr_err("Failed to vote for clocks with err %d\n", ret);
			goto exit;
		}
	}

	msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
				data->client.sb_virt, data->client.sb_length,
				ION_IOC_CLEAN_INV_CACHES);
	ret = scm_call(SCM_SVC_TZSCHEDULER, 1, (const void *)&send_svc_ireq,
				sizeof(send_svc_ireq),
				&resp, sizeof(resp));
	msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
				data->client.sb_virt, data->client.sb_length,
				ION_IOC_INV_CACHES);
	if (ret) {
		pr_err("qseecom_scm_call failed with err: %d\n", ret);
		if (!qseecom.support_bus_scaling) {
			qsee_disable_clock_vote(data, CLK_DFAB);
			qsee_disable_clock_vote(data, CLK_SFPB);
		} else {
			__qseecom_add_bw_scale_down_timer(
				QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
		}
		goto exit;
	}

	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_INCOMPLETE:
		pr_err("qseos_result_incomplete\n");
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		if (ret)
			pr_err("process_incomplete_cmd fail: err: %d\n", ret);
		break;
	case QSEOS_RESULT_FAILURE:
		pr_err("scm call failed with resp.result: QSEOS_RESULT_FAILURE\n");
		break;
	default:
		pr_err("Response result %d not supported\n",
				resp.result);
		ret = -EINVAL;
		break;
	}

	if (!qseecom.support_bus_scaling) {
		qsee_disable_clock_vote(data, CLK_DFAB);
		qsee_disable_clock_vote(data, CLK_SFPB);
	} else {
		__qseecom_add_bw_scale_down_timer(
			QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
	}
exit:
	return ret;
}
static int __validate_send_cmd_inputs(struct qseecom_dev_handle *data,
				struct qseecom_send_cmd_req *req)
{
	if (!data || !data->client.ihandle) {
		pr_err("Client or client handle is not initialized\n");
		return -EINVAL;
	}
	if (((req->resp_buf == NULL) && (req->resp_len != 0)) ||
		(req->cmd_req_buf == NULL)) {
		pr_err("cmd buffer or response buffer is null\n");
		return -EINVAL;
	}
	if (((uint32_t)req->cmd_req_buf < data->client.user_virt_sb_base) ||
		((uint32_t)req->cmd_req_buf >= (data->client.user_virt_sb_base +
					data->client.sb_length))) {
		pr_err("cmd buffer address not within shared buffer\n");
		return -EINVAL;
	}
	if (((uintptr_t)req->resp_buf <
			data->client.user_virt_sb_base) ||
		((uintptr_t)req->resp_buf >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("response buffer address not within shared buffer\n");
		return -EINVAL;
	}
	if ((req->cmd_req_len == 0) ||
		(req->cmd_req_len > data->client.sb_length) ||
		(req->resp_len > data->client.sb_length)) {
		pr_err("cmd buf length or response buf length not valid\n");
		return -EINVAL;
	}
	if (req->cmd_req_len > UINT_MAX - req->resp_len) {
		pr_err("Integer overflow detected in req_len & rsp_len\n");
		return -EINVAL;
	}
	if ((req->cmd_req_len + req->resp_len) > data->client.sb_length) {
		pr_debug("Not enough memory to fit cmd_buf and resp_buf. Required: %u, Available: %zu\n",
			(req->cmd_req_len + req->resp_len),
			data->client.sb_length);
		return -ENOMEM;
	}
	if ((uintptr_t)req->cmd_req_buf > (ULONG_MAX - req->cmd_req_len)) {
		pr_err("Integer overflow in req_len & cmd_req_buf\n");
		return -EINVAL;
	}
	if ((uintptr_t)req->resp_buf > (ULONG_MAX - req->resp_len)) {
		pr_err("Integer overflow in resp_len & resp_buf\n");
		return -EINVAL;
	}
	if (data->client.user_virt_sb_base >
			(ULONG_MAX - data->client.sb_length)) {
		pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
		return -EINVAL;
	}
	if ((((uintptr_t)req->cmd_req_buf + req->cmd_req_len) >
		((uintptr_t)data->client.user_virt_sb_base +
					data->client.sb_length)) ||
		(((uintptr_t)req->resp_buf + req->resp_len) >
		((uintptr_t)data->client.user_virt_sb_base +
					data->client.sb_length))) {
		pr_err("cmd buf or resp buf is out of shared buffer region\n");
		return -EINVAL;
	}
	return 0;
}
static int __qseecom_send_cmd(struct qseecom_dev_handle *data,
				struct qseecom_send_cmd_req *req)
{
	int ret = 0;
	u32 reqd_len_sb_in = 0;
	struct qseecom_client_send_data_ireq send_data_req;
	struct qseecom_command_scm_resp resp;
	unsigned long flags;
	struct qseecom_registered_app_list *ptr_app;
	bool found_app = false;
	int name_len = 0;

	reqd_len_sb_in = req->cmd_req_len + req->resp_len;

	/* find app_id & img_name from list */
	spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
	list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
								list) {
		name_len = min(strlen(data->client.app_name),
				strlen(ptr_app->app_name));
		if ((ptr_app->app_id == data->client.app_id) &&
			(!memcmp(ptr_app->app_name,
				data->client.app_name, name_len))) {
			found_app = true;
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);

	if (!found_app) {
		pr_err("app_id %d (%s) is not found\n", data->client.app_id,
			(char *)data->client.app_name);
		return -EINVAL;
	}

	if (req->cmd_req_buf == NULL || req->resp_buf == NULL) {
		pr_err("cmd buffer or response buffer is null\n");
		return -EINVAL;
	}
	if (((uint32_t)req->cmd_req_buf < data->client.user_virt_sb_base) ||
		((uint32_t)req->cmd_req_buf >= (data->client.user_virt_sb_base +
					data->client.sb_length))) {
		pr_err("cmd buffer address not within shared buffer\n");
		return -EINVAL;
	}
	if (((uint32_t)req->resp_buf < data->client.user_virt_sb_base) ||
		((uint32_t)req->resp_buf >= (data->client.user_virt_sb_base +
					data->client.sb_length))) {
		pr_err("response buffer address not within shared buffer\n");
		return -EINVAL;
	}
	if ((req->cmd_req_len == 0) || (req->resp_len == 0) ||
		req->cmd_req_len > data->client.sb_length ||
		req->resp_len > data->client.sb_length) {
		pr_err("cmd buffer length or response buffer length not valid\n");
		return -EINVAL;
	}
	if (req->cmd_req_len > UINT_MAX - req->resp_len) {
		pr_err("Integer overflow detected in req_len & rsp_len, exiting now\n");
		return -EINVAL;
	}

	send_data_req.qsee_cmd_id = QSEOS_CLIENT_SEND_DATA_COMMAND;
	send_data_req.app_id = data->client.app_id;
	send_data_req.req_ptr = (void *)(__qseecom_uvirt_to_kphys(data,
					(uint32_t)req->cmd_req_buf));
	send_data_req.req_len = req->cmd_req_len;
	send_data_req.rsp_ptr = (void *)(__qseecom_uvirt_to_kphys(data,
					(uint32_t)req->resp_buf));
	send_data_req.rsp_len = req->resp_len;

	msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
					data->client.sb_virt,
					reqd_len_sb_in,
					ION_IOC_CLEAN_INV_CACHES);

	ret = scm_call(SCM_SVC_TZSCHEDULER, 1, (const void *)&send_data_req,
					sizeof(send_data_req),
					&resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call() failed with err: %d (app_id = %d)\n",
					ret, data->client.app_id);
		return ret;
	}

	if (resp.result == QSEOS_RESULT_INCOMPLETE) {
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		if (ret) {
			pr_err("process_incomplete_cmd failed err: %d\n", ret);
			return ret;
		}
	} else {
		if (resp.result != QSEOS_RESULT_SUCCESS) {
			pr_err("Response result %d not supported\n",
							resp.result);
			ret = -EINVAL;
		}
	}
	msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
				data->client.sb_virt, data->client.sb_length,
				ION_IOC_INV_CACHES);
	return ret;
}
static int qseecom_send_cmd(struct qseecom_dev_handle *data, void __user *argp)
{
	struct qseecom_send_cmd_req req;

	/* copy_from_user returns the number of bytes NOT copied;
	 * report that as -EFAULT rather than returning the raw count.
	 */
	if (copy_from_user(&req, argp, sizeof(req))) {
		pr_err("copy_from_user failed\n");
		return -EFAULT;
	}
	if (__validate_send_cmd_inputs(data, &req))
		return -EINVAL;
	return __qseecom_send_cmd(data, &req);
}
int __boundary_checks_offset(struct qseecom_send_modfd_cmd_req *req,
			struct qseecom_send_modfd_listener_resp *lstnr_resp,
			struct qseecom_dev_handle *data, bool qteec,
			int i)
{
	if ((data->type != QSEECOM_LISTENER_SERVICE) &&
			(req->ifd_data[i].fd > 0)) {
		if (qteec) {
			if ((req->cmd_req_len < (TWO * sizeof(uint32_t))) ||
				(req->ifd_data[i].cmd_buf_offset >
				req->cmd_req_len - (TWO * sizeof(uint32_t)))) {
				pr_err("Invalid offset (QTEEC req len) 0x%x\n",
					req->ifd_data[i].cmd_buf_offset);
				return -EINVAL;
			}
		} else {
			if ((req->cmd_req_len < sizeof(uint32_t)) ||
				(req->ifd_data[i].cmd_buf_offset >
				req->cmd_req_len - sizeof(uint32_t))) {
				pr_err("Invalid offset (req len) 0x%x\n",
					req->ifd_data[i].cmd_buf_offset);
				return -EINVAL;
			}
		}
	} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
			(lstnr_resp->ifd_data[i].fd > 0)) {
		if (qteec) {
			if ((lstnr_resp->resp_len < TWO * sizeof(uint32_t)) ||
				(lstnr_resp->ifd_data[i].cmd_buf_offset >
				lstnr_resp->resp_len - TWO * sizeof(uint32_t))) {
				pr_err("Invalid offset (QTEEC resp len) 0x%x\n",
					lstnr_resp->ifd_data[i].cmd_buf_offset);
				return -EINVAL;
			}
		} else {
			if ((lstnr_resp->resp_len < sizeof(uint32_t)) ||
				(lstnr_resp->ifd_data[i].cmd_buf_offset >
				lstnr_resp->resp_len - sizeof(uint32_t))) {
				pr_err("Invalid offset (lstnr resp len) 0x%x\n",
					lstnr_resp->ifd_data[i].cmd_buf_offset);
				return -EINVAL;
			}
		}
	}
	return 0;
}
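
/*
 * Example of what the non-QTEEC branch above prevents (values are
 * illustrative): with cmd_req_len == 0x100, an offset of 0xFD leaves only
 * 3 bytes before the end of the buffer, so patching a 4-byte physical
 * address there would overflow it; only offsets up to
 * cmd_req_len - sizeof(uint32_t) == 0xFC are admitted. The QTEEC branch
 * reserves TWO uint32_t words per fd instead of one.
 */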
#define SG_ENTRY_SZ sizeof(struct qseecom_sg_entry)

static int __qseecom_update_cmd_buf(void *msg, bool cleanup,
					struct qseecom_dev_handle *data,
					bool listener_svc)
{
	struct ion_handle *ihandle;
	char *field;
	int ret = 0;
	int i = 0;
	uint32_t len = 0;
	struct scatterlist *sg;
	struct qseecom_send_modfd_cmd_req *cmd_req = NULL;
	struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL;
	struct qseecom_registered_listener_list *this_lstnr = NULL;

	if (msg == NULL) {
		pr_err("Invalid address\n");
		return -EINVAL;
	}
	if (listener_svc) {
		lstnr_resp = (struct qseecom_send_modfd_listener_resp *)msg;
		this_lstnr = __qseecom_find_svc(data->listener.id);
		if (IS_ERR_OR_NULL(this_lstnr)) {
			pr_err("Invalid listener ID\n");
			return -ENOMEM;
		}
	} else {
		cmd_req = (struct qseecom_send_modfd_cmd_req *)msg;
	}

	for (i = 0; i < MAX_ION_FD; i++) {
		struct sg_table *sg_ptr = NULL;

		if ((!listener_svc) && (cmd_req->ifd_data[i].fd > 0)) {
			ihandle = ion_import_dma_buf(qseecom.ion_clnt,
					cmd_req->ifd_data[i].fd);
			if (IS_ERR_OR_NULL(ihandle)) {
				pr_err("Ion client can't retrieve the handle\n");
				return -ENOMEM;
			}
			field = (char *)cmd_req->cmd_req_buf +
				cmd_req->ifd_data[i].cmd_buf_offset;
		} else if ((listener_svc) &&
				(lstnr_resp->ifd_data[i].fd > 0)) {
			ihandle = ion_import_dma_buf(qseecom.ion_clnt,
					lstnr_resp->ifd_data[i].fd);
			if (IS_ERR_OR_NULL(ihandle)) {
				pr_err("Ion client can't retrieve the handle\n");
				return -ENOMEM;
			}
			field = lstnr_resp->resp_buf_ptr +
				lstnr_resp->ifd_data[i].cmd_buf_offset;
		} else {
			return ret;
		}

		/* Populate the cmd data structure with the phys_addr */
		sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
		if (sg_ptr == NULL) {
			pr_err("Ion client could not retrieve sg table\n");
			goto err;
		}
		if (sg_ptr->nents == 0) {
			pr_err("Num of scattered entries is 0\n");
			goto err;
		}
		if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) {
			pr_err("Num of scattered entries (%d) is greater than max supported %d\n",
				sg_ptr->nents, QSEECOM_MAX_SG_ENTRY);
			goto err;
		}
		sg = sg_ptr->sgl;
		if (sg_ptr->nents == 1) {
			uint32_t *update;

			update = (uint32_t *)field;
			if (__boundary_checks_offset(cmd_req, lstnr_resp, data,
							false, i))
				goto err;
			if (cleanup)
				*update = 0;
			else
				*update = (uint32_t)sg_dma_address(sg_ptr->sgl);
			len += (uint32_t)sg->length;
		} else {
			struct qseecom_sg_entry *update;
			int j = 0;

			if ((data->type != QSEECOM_LISTENER_SERVICE) &&
					(cmd_req->ifd_data[i].fd > 0)) {
				if ((cmd_req->cmd_req_len <
					SG_ENTRY_SZ * sg_ptr->nents) ||
					(cmd_req->ifd_data[i].cmd_buf_offset >
					(cmd_req->cmd_req_len -
					SG_ENTRY_SZ * sg_ptr->nents))) {
					pr_err("Invalid offset = 0x%x\n",
						cmd_req->ifd_data[i].cmd_buf_offset);
					goto err;
				}
			} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
					(lstnr_resp->ifd_data[i].fd > 0)) {
				if ((lstnr_resp->resp_len <
					SG_ENTRY_SZ * sg_ptr->nents) ||
					(lstnr_resp->ifd_data[i].cmd_buf_offset >
					(lstnr_resp->resp_len -
					SG_ENTRY_SZ * sg_ptr->nents))) {
					pr_err("Invalid offset = 0x%x\n",
						lstnr_resp->ifd_data[i].cmd_buf_offset);
					goto err;
				}
			}
			update = (struct qseecom_sg_entry *)field;
			for (j = 0; j < sg_ptr->nents; j++) {
				if (cleanup) {
					update->phys_addr = 0;
					update->len = 0;
				} else {
					update->phys_addr =
						(uint32_t)sg_dma_address(sg);
					update->len = sg->length;
				}
				len += sg->length;
				update++;
				sg = sg_next(sg);
			}
		}
		if (cleanup)
			msm_ion_do_cache_op(qseecom.ion_clnt,
					ihandle, NULL, len,
					ION_IOC_INV_CACHES);
		else
			msm_ion_do_cache_op(qseecom.ion_clnt,
					ihandle, NULL, len,
					ION_IOC_CLEAN_INV_CACHES);
		/* Deallocate the handle */
		if (!IS_ERR_OR_NULL(ihandle))
			ion_free(qseecom.ion_clnt, ihandle);
	}
	return ret;
err:
	if (!IS_ERR_OR_NULL(ihandle))
		ion_free(qseecom.ion_clnt, ihandle);
	return -ENOMEM;
}
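
/*
 * Illustrative sketch of what the multi-entry branch above serializes at
 * cmd_buf_offset inside the command buffer, for a 3-entry SG table (field
 * names follow struct qseecom_sg_entry as used above; the 8-byte stride
 * is an assumption based on two 32-bit fields per entry):
 *
 *	offset + 0x00: { .phys_addr = addr0, .len = len0 }
 *	offset + 0x08: { .phys_addr = addr1, .len = len1 }
 *	offset + 0x10: { .phys_addr = addr2, .len = len2 }
 *
 * The secure side parses this array in place, which is why the bounds
 * checks require SG_ENTRY_SZ * nents bytes of headroom at the offset.
 */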
static int qseecom_send_modfd_cmd(struct qseecom_dev_handle *data,
					void __user *argp)
{
	int ret = 0;
	int i;
	struct qseecom_send_modfd_cmd_req req;
	struct qseecom_send_cmd_req send_cmd_req;

	if (copy_from_user(&req, argp, sizeof(req))) {
		pr_err("copy_from_user failed\n");
		return -EFAULT;
	}
	if (req.cmd_req_len == 0 || req.cmd_req_len > data->client.sb_length ||
		req.resp_len > data->client.sb_length) {
		pr_err("cmd or response buffer length not valid\n");
		return -EINVAL;
	}
	if (req.cmd_req_buf == NULL || req.resp_buf == NULL) {
		pr_err("cmd buffer or response buffer is null\n");
		return -EINVAL;
	}
	if (((uint32_t)req.cmd_req_buf < data->client.user_virt_sb_base) ||
		((uint32_t)req.cmd_req_buf >= (data->client.user_virt_sb_base +
					data->client.sb_length))) {
		pr_err("cmd buffer address not within shared buffer\n");
		return -EINVAL;
	}
	if (((uint32_t)req.resp_buf < data->client.user_virt_sb_base) ||
		((uint32_t)req.resp_buf >= (data->client.user_virt_sb_base +
					data->client.sb_length))) {
		pr_err("response buffer address not within shared buffer\n");
		return -EINVAL;
	}
	send_cmd_req.cmd_req_buf = req.cmd_req_buf;
	send_cmd_req.cmd_req_len = req.cmd_req_len;
	send_cmd_req.resp_buf = req.resp_buf;
	send_cmd_req.resp_len = req.resp_len;
	if (__validate_send_cmd_inputs(data, &send_cmd_req))
		return -EINVAL;

	/* validate offsets */
	for (i = 0; i < MAX_ION_FD; i++) {
		if (req.ifd_data[i].cmd_buf_offset >= req.cmd_req_len) {
			pr_err("Invalid offset %d = 0x%x\n",
				i, req.ifd_data[i].cmd_buf_offset);
			return -EINVAL;
		}
	}
	req.cmd_req_buf = (void *)__qseecom_uvirt_to_kvirt(data,
						(uint32_t)req.cmd_req_buf);
	req.resp_buf = (void *)__qseecom_uvirt_to_kvirt(data,
						(uint32_t)req.resp_buf);

	ret = __qseecom_update_cmd_buf(&req, false, data, false);
	if (ret)
		return ret;
	ret = __qseecom_send_cmd(data, &send_cmd_req);
	if (ret)
		return ret;
	return __qseecom_update_cmd_buf(&req, true, data, false);
}
static int __qseecom_listener_has_rcvd_req(struct qseecom_dev_handle *data,
			struct qseecom_registered_listener_list *svc)
{
	int ret;

	ret = (svc->rcv_req_flag != 0);
	return ret || data->abort;
}

static int qseecom_receive_req(struct qseecom_dev_handle *data)
{
	int ret = 0;
	struct qseecom_registered_listener_list *this_lstnr;

	this_lstnr = __qseecom_find_svc(data->listener.id);
	if (!this_lstnr) {
		pr_err("Invalid listener ID\n");
		return -ENODATA;
	}

	while (1) {
		if (wait_event_freezable(this_lstnr->rcv_req_wq,
				__qseecom_listener_has_rcvd_req(data,
				this_lstnr))) {
			pr_debug("Interrupted: exiting Listener Service = %d\n",
				(uint32_t)data->listener.id);
			/* woken up for different reason */
			return -ERESTARTSYS;
		}

		if (data->abort) {
			pr_err("Aborting Listener Service = %d\n",
				(uint32_t)data->listener.id);
			return -ENODEV;
		}
		this_lstnr->rcv_req_flag = 0;
		break;
	}
	return ret;
}
static bool __qseecom_is_fw_image_valid(const struct firmware *fw_entry)
{
	struct elf32_hdr *ehdr;

	if (fw_entry->size < sizeof(*ehdr)) {
		pr_err("%s: Not big enough to be an elf header\n",
				qseecom.pdev->init_name);
		return false;
	}
	ehdr = (struct elf32_hdr *)fw_entry->data;
	if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
		pr_err("%s: Not an elf header\n",
				qseecom.pdev->init_name);
		return false;
	}
	if (ehdr->e_phnum == 0) {
		pr_err("%s: No loadable segments\n",
				qseecom.pdev->init_name);
		return false;
	}
	if (sizeof(struct elf32_phdr) * ehdr->e_phnum +
	    sizeof(struct elf32_hdr) > fw_entry->size) {
		pr_err("%s: Program headers not within mdt\n",
				qseecom.pdev->init_name);
		return false;
	}
	return true;
}
static int __qseecom_get_fw_size(const char *appname, uint32_t *fw_size)
{
	int ret = -1;
	int i = 0, rc = 0;
	const struct firmware *fw_entry = NULL;
	struct elf32_phdr *phdr;
	char fw_name[MAX_APP_NAME_SIZE];
	struct elf32_hdr *ehdr;
	int num_images = 0;

	snprintf(fw_name, sizeof(fw_name), "%s.mdt", appname);
	rc = request_firmware(&fw_entry, fw_name, qseecom.pdev);
	if (rc) {
		pr_err("error with request_firmware\n");
		ret = -EIO;
		goto err;
	}
	if (!__qseecom_is_fw_image_valid(fw_entry)) {
		ret = -EIO;
		goto err;
	}
	*fw_size = fw_entry->size;
	phdr = (struct elf32_phdr *)(fw_entry->data + sizeof(struct elf32_hdr));
	ehdr = (struct elf32_hdr *)fw_entry->data;
	num_images = ehdr->e_phnum;
	release_firmware(fw_entry);
	fw_entry = NULL;
	for (i = 0; i < num_images; i++, phdr++) {
		memset(fw_name, 0, sizeof(fw_name));
		snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, i);
		ret = request_firmware(&fw_entry, fw_name, qseecom.pdev);
		if (ret)
			goto err;
		if (*fw_size > U32_MAX - fw_entry->size) {
			pr_err("QSEE %s app file size overflow\n", appname);
			ret = -EINVAL;
			goto err;
		}
		*fw_size += fw_entry->size;
		release_firmware(fw_entry);
		fw_entry = NULL;
	}
	return ret;
err:
	if (fw_entry)
		release_firmware(fw_entry);
	*fw_size = 0;
	return ret;
}
static int __qseecom_get_fw_data(const char *appname, u8 *img_data,
					uint32_t fw_size,
					struct qseecom_load_app_ireq *load_req)
{
	int ret = -1;
	int i = 0, rc = 0;
	const struct firmware *fw_entry = NULL;
	char fw_name[MAX_APP_NAME_SIZE];
	u8 *img_data_ptr = img_data;
	struct elf32_hdr *ehdr;
	int num_images = 0;

	snprintf(fw_name, sizeof(fw_name), "%s.mdt", appname);
	rc = request_firmware(&fw_entry, fw_name, qseecom.pdev);
	if (rc) {
		ret = -EIO;
		goto err;
	}
	load_req->img_len = fw_entry->size;
	if (load_req->img_len > fw_size) {
		pr_err("app %s size %zu is larger than buf size %u\n",
			appname, fw_entry->size, fw_size);
		ret = -EINVAL;
		goto err;
	}
	memcpy(img_data_ptr, fw_entry->data, fw_entry->size);
	img_data_ptr = img_data_ptr + fw_entry->size;
	load_req->mdt_len = fw_entry->size; /* Get MDT LEN */
	ehdr = (struct elf32_hdr *)fw_entry->data;
	num_images = ehdr->e_phnum;
	release_firmware(fw_entry);
	fw_entry = NULL;
	for (i = 0; i < num_images; i++) {
		snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, i);
		ret = request_firmware(&fw_entry, fw_name, qseecom.pdev);
		if (ret) {
			pr_err("Failed to locate blob %s\n", fw_name);
			goto err;
		}
		if ((fw_entry->size > U32_MAX - load_req->img_len) ||
			(fw_entry->size + load_req->img_len > fw_size)) {
			pr_err("Invalid file size for %s\n", fw_name);
			ret = -EINVAL;
			goto err;
		}
		memcpy(img_data_ptr, fw_entry->data, fw_entry->size);
		img_data_ptr = img_data_ptr + fw_entry->size;
		load_req->img_len += fw_entry->size;
		release_firmware(fw_entry);
		fw_entry = NULL;
	}
	load_req->phy_addr = virt_to_phys(img_data);
	return ret;
err:
	release_firmware(fw_entry);
	return ret;
}
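
/*
 * Illustrative note on the split-image layout the two helpers above
 * handle (the app name "sampleapp" is an assumption for the example): a
 * QSEE app ships as sampleapp.mdt (ELF header + program headers) plus one
 * sampleapp.bNN blob per program header. __qseecom_get_fw_size() sums the
 * sizes; __qseecom_get_fw_data() concatenates them into img_data in that
 * same order, so mdt_len covers only the .mdt while img_len covers the
 * whole concatenated image handed to TZ.
 */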
static int __qseecom_allocate_img_data(struct ion_handle **pihandle,
			u8 **data, uint32_t fw_size, ion_phys_addr_t *paddr)
{
	size_t len = 0;
	int ret = 0;
	ion_phys_addr_t pa;
	struct ion_handle *ihandle = NULL;
	u8 *img_data = NULL;

	ihandle = ion_alloc(qseecom.ion_clnt, fw_size,
			SZ_4K, ION_HEAP(ION_QSECOM_HEAP_ID), 0);
	if (IS_ERR_OR_NULL(ihandle)) {
		pr_err("ION alloc failed\n");
		return -ENOMEM;
	}
	img_data = (u8 *)ion_map_kernel(qseecom.ion_clnt, ihandle);
	if (IS_ERR_OR_NULL(img_data)) {
		pr_err("ION memory mapping for image loading failed\n");
		ret = -ENOMEM;
		goto exit_ion_free;
	}
	/* Get the physical address of the ION BUF */
	ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
	if (ret) {
		pr_err("physical memory retrieval failure\n");
		ret = -EIO;
		goto exit_ion_unmap_kernel;
	}
	*pihandle = ihandle;
	*data = img_data;
	*paddr = pa;
	return ret;

exit_ion_unmap_kernel:
	ion_unmap_kernel(qseecom.ion_clnt, ihandle);
exit_ion_free:
	ion_free(qseecom.ion_clnt, ihandle);
	ihandle = NULL;
	return ret;
}

static void __qseecom_free_img_data(struct ion_handle **ihandle)
{
	ion_unmap_kernel(qseecom.ion_clnt, *ihandle);
	ion_free(qseecom.ion_clnt, *ihandle);
	*ihandle = NULL;
}
static int __qseecom_load_fw(struct qseecom_dev_handle *data, char *appname)
{
	int ret = -1;
	uint32_t fw_size = 0;
	struct qseecom_load_app_ireq load_req = {0, 0, 0, 0};
	struct qseecom_command_scm_resp resp;
	u8 *img_data = NULL;
	ion_phys_addr_t pa = 0;
	struct ion_handle *ihandle = NULL;

	if (__qseecom_get_fw_size(appname, &fw_size))
		return -EIO;

	ret = __qseecom_allocate_img_data(&ihandle, &img_data, fw_size, &pa);
	if (ret)
		return ret;

	ret = __qseecom_get_fw_data(appname, img_data, fw_size, &load_req);
	if (ret) {
		ret = -EIO;
		goto exit_free_img_data;
	}

	/* Populate the remaining parameters */
	load_req.phy_addr = (uint32_t)pa;
	load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
	strlcpy(load_req.app_name, appname, MAX_APP_NAME_SIZE);

	if (qseecom.support_bus_scaling) {
		mutex_lock(&qsee_bw_mutex);
		ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
		mutex_unlock(&qsee_bw_mutex);
		if (ret) {
			ret = -EIO;
			goto exit_free_img_data;
		}
	}

	ret = __qseecom_enable_clk_scale_up(data);
	if (ret) {
		ret = -EIO;
		goto exit_unregister_bus_bw_need;
	}

	__cpuc_flush_dcache_area((void *)img_data, fw_size);
	/* SCM_CALL to load the image */
	ret = scm_call(SCM_SVC_TZSCHEDULER, 1, &load_req,
			sizeof(struct qseecom_load_app_ireq),
			&resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call to load failed : ret %d\n", ret);
		ret = -EIO;
		goto exit_disable_clk_vote;
	}

	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		ret = resp.data;
		break;
	case QSEOS_RESULT_INCOMPLETE:
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		if (ret)
			pr_err("process_incomplete_cmd FAILED\n");
		else
			ret = resp.data;
		break;
	case QSEOS_RESULT_FAILURE:
		pr_err("scm call failed with response QSEOS_RESULT_FAILURE\n");
		break;
	default:
		pr_err("scm call return unknown response %d\n", resp.result);
		ret = -EINVAL;
		break;
	}

exit_disable_clk_vote:
	__qseecom_disable_clk_scale_down(data);

exit_unregister_bus_bw_need:
	if (qseecom.support_bus_scaling) {
		mutex_lock(&qsee_bw_mutex);
		qseecom_unregister_bus_bandwidth_needs(data);
		mutex_unlock(&qsee_bw_mutex);
	}

exit_free_img_data:
	__qseecom_free_img_data(&ihandle);
	return ret;
}
static int qseecom_load_commonlib_image(struct qseecom_dev_handle *data)
{
	int ret = 0;
	uint32_t fw_size = 0;
	struct qseecom_load_app_ireq load_req = {0, 0, 0, 0};
	struct qseecom_command_scm_resp resp;
	u8 *img_data = NULL;
	ion_phys_addr_t pa = 0;

	if (__qseecom_get_fw_size("cmnlib", &fw_size))
		return -EIO;

	ret = __qseecom_allocate_img_data(&qseecom.cmnlib_ion_handle,
						&img_data, fw_size, &pa);
	if (ret)
		return -EIO;

	ret = __qseecom_get_fw_data("cmnlib", img_data, fw_size, &load_req);
	if (ret) {
		ret = -EIO;
		goto exit_free_img_data;
	}

	load_req.phy_addr = (uint32_t)pa;
	load_req.qsee_cmd_id = QSEOS_LOAD_SERV_IMAGE_COMMAND;

	if (qseecom.support_bus_scaling) {
		mutex_lock(&qsee_bw_mutex);
		ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
		mutex_unlock(&qsee_bw_mutex);
		if (ret) {
			ret = -EIO;
			goto exit_free_img_data;
		}
	}

	/* Vote for the SFPB clock */
	ret = __qseecom_enable_clk_scale_up(data);
	if (ret) {
		ret = -EIO;
		goto exit_unregister_bus_bw_need;
	}

	__cpuc_flush_dcache_area((void *)img_data, fw_size);
	/* SCM_CALL to load the image */
	ret = scm_call(SCM_SVC_TZSCHEDULER, 1, &load_req,
			sizeof(struct qseecom_load_lib_image_ireq),
			&resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call to load failed : ret %d\n", ret);
		ret = -EIO;
		goto exit_disable_clk_vote;
	}

	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_FAILURE:
		pr_err("scm call failed with response result %d\n",
				resp.result);
		ret = -EINVAL;
		goto exit_disable_clk_vote;
	case QSEOS_RESULT_INCOMPLETE:
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		if (ret) {
			pr_err("process_incomplete_cmd failed err: %d\n", ret);
			goto exit_disable_clk_vote;
		}
		break;
	default:
		pr_err("scm call return unknown response %d\n", resp.result);
		ret = -EINVAL;
		goto exit_disable_clk_vote;
	}

	__qseecom_disable_clk_scale_down(data);
	if (qseecom.support_bus_scaling) {
		mutex_lock(&qsee_bw_mutex);
		qseecom_unregister_bus_bandwidth_needs(data);
		mutex_unlock(&qsee_bw_mutex);
	}
	return ret;

exit_disable_clk_vote:
	__qseecom_disable_clk_scale_down(data);

exit_unregister_bus_bw_need:
	if (qseecom.support_bus_scaling) {
		mutex_lock(&qsee_bw_mutex);
		qseecom_unregister_bus_bandwidth_needs(data);
		mutex_unlock(&qsee_bw_mutex);
	}

exit_free_img_data:
	__qseecom_free_img_data(&qseecom.cmnlib_ion_handle);
	return ret;
}
static int qseecom_unload_commonlib_image(void)
{
	int ret = -EINVAL;
	struct qseecom_unload_lib_image_ireq unload_req = {0};
	struct qseecom_command_scm_resp resp;

	/* Populate the remaining parameters */
	unload_req.qsee_cmd_id = QSEOS_UNLOAD_SERV_IMAGE_COMMAND;

	/* SCM_CALL to unload the image */
	ret = scm_call(SCM_SVC_TZSCHEDULER, 1, &unload_req,
			sizeof(struct qseecom_unload_lib_image_ireq),
			&resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call to unload lib failed : ret %d\n", ret);
		ret = -EIO;
	} else {
		switch (resp.result) {
		case QSEOS_RESULT_SUCCESS:
			break;
		case QSEOS_RESULT_FAILURE:
			pr_err("scm fail resp.result QSEOS_RESULT_FAILURE\n");
			break;
		default:
			pr_err("scm call return unknown response %d\n",
					resp.result);
			ret = -EINVAL;
			break;
		}
	}
	__qseecom_free_img_data(&qseecom.cmnlib_ion_handle);
	return ret;
}
int qseecom_start_app(struct qseecom_handle **handle,
						char *app_name, uint32_t size)
{
	int32_t ret = 0;
	unsigned long flags = 0;
	struct qseecom_dev_handle *data = NULL;
	struct qseecom_check_app_ireq app_ireq;
	struct qseecom_registered_app_list *entry = NULL;
	struct qseecom_registered_kclient_list *kclient_entry = NULL;
	bool found_app = false;
	uint32_t len;
	ion_phys_addr_t pa;

	if (!app_name || strlen(app_name) >= MAX_APP_NAME_SIZE) {
		pr_err("The app_name (%s) is not valid\n", app_name);
		return -EINVAL;
	}

	*handle = kzalloc(sizeof(struct qseecom_handle), GFP_KERNEL);
	if (!(*handle)) {
		pr_err("failed to allocate memory for kernel client handle\n");
		return -ENOMEM;
	}

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data) {
		pr_err("kzalloc failed\n");
		kfree(*handle);
		*handle = NULL;
		return -ENOMEM;
	}
	data->abort = 0;
	data->type = QSEECOM_CLIENT_APP;
	data->released = false;
	data->client.sb_length = size;
	data->client.user_virt_sb_base = 0;
	data->client.ihandle = NULL;

	init_waitqueue_head(&data->abort_wq);
	atomic_set(&data->ioctl_count, 0);

	data->client.ihandle = ion_alloc(qseecom.ion_clnt, size, 4096,
				ION_HEAP(ION_QSECOM_HEAP_ID), 0);
	if (IS_ERR_OR_NULL(data->client.ihandle)) {
		pr_err("Ion client could not retrieve the handle\n");
		kfree(data);
		kfree(*handle);
		*handle = NULL;
		return -EINVAL;
	}
	mutex_lock(&app_access_lock);
	if (qseecom.qsee_version > QSEEE_VERSION_00) {
		if (qseecom.commonlib_loaded == false) {
			ret = qseecom_load_commonlib_image(data);
			if (ret == 0)
				qseecom.commonlib_loaded = true;
		}
	}
	if (ret) {
		pr_err("Failed to load commonlib image\n");
		ret = -EIO;
		goto err;
	}

	app_ireq.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
	strlcpy(app_ireq.app_name, app_name, MAX_APP_NAME_SIZE);
	ret = __qseecom_check_app_exists(app_ireq);
	if (ret < 0)
		goto err;

	data->client.app_id = ret;
	if (ret > 0) {
		pr_warn("App id %d for [%s] app exists\n", ret,
			(char *)app_ireq.app_name);
		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
		list_for_each_entry(entry,
				&qseecom.registered_app_list_head, list) {
			if (entry->app_id == ret) {
				entry->ref_cnt++;
				found_app = true;
				break;
			}
		}
		spin_unlock_irqrestore(
				&qseecom.registered_app_list_lock, flags);
		if (!found_app)
			pr_warn("App_id %d [%s] was loaded but not registered\n",
					ret, (char *)app_ireq.app_name);
	} else {
		/* load the app and get the app_id */
		pr_debug("%s: Loading app for the first time\n",
				qseecom.pdev->init_name);
		ret = __qseecom_load_fw(data, app_name);
		if (ret < 0)
			goto err;
		data->client.app_id = ret;
		strlcpy(data->client.app_name, app_name, MAX_APP_NAME_SIZE);
	}
	if (!found_app) {
		entry = kmalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry) {
			pr_err("kmalloc for app entry failed\n");
			ret = -ENOMEM;
			goto err;
		}
		entry->app_id = ret;
		entry->ref_cnt = 1;
		strlcpy(entry->app_name, app_name, MAX_APP_NAME_SIZE);

		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
		list_add_tail(&entry->list, &qseecom.registered_app_list_head);
		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
									flags);
	}

	/* Get the physical address of the ION BUF */
	ret = ion_phys(qseecom.ion_clnt, data->client.ihandle, &pa, &len);
	if (ret) {
		pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
			ret);
		goto err;
	}

	/* Populate the client context with the shared buffer mapping */
	data->client.sb_virt = (char *)ion_map_kernel(qseecom.ion_clnt,
							data->client.ihandle);
	data->client.user_virt_sb_base = (uint32_t)data->client.sb_virt;
	data->client.sb_phys = pa;
	(*handle)->dev = (void *)data;
	(*handle)->sbuf = (unsigned char *)data->client.sb_virt;
	(*handle)->sbuf_len = data->client.sb_length;

	kclient_entry = kzalloc(sizeof(*kclient_entry), GFP_KERNEL);
	if (!kclient_entry) {
		pr_err("kzalloc failed\n");
		ret = -ENOMEM;
		goto err;
	}
	kclient_entry->handle = *handle;

	spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
	list_add_tail(&kclient_entry->list,
			&qseecom.registered_kclient_list_head);
	spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);

	mutex_unlock(&app_access_lock);
	return 0;

err:
	kfree(data);
	kfree(*handle);
	*handle = NULL;
	mutex_unlock(&app_access_lock);
	return ret;
}
EXPORT_SYMBOL(qseecom_start_app);
int qseecom_shutdown_app(struct qseecom_handle **handle)
{
	int ret = -EINVAL;
	struct qseecom_dev_handle *data;
	struct qseecom_registered_kclient_list *kclient = NULL;
	unsigned long flags = 0;
	bool found_handle = false;

	if ((handle == NULL) || (*handle == NULL)) {
		pr_err("Handle is not initialized\n");
		return -EINVAL;
	}
	data = (struct qseecom_dev_handle *)((*handle)->dev);
	mutex_lock(&app_access_lock);
	atomic_inc(&data->ioctl_count);

	spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
	list_for_each_entry(kclient, &qseecom.registered_kclient_list_head,
				list) {
		if (kclient->handle == (*handle)) {
			list_del(&kclient->list);
			found_handle = true;
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
	if (!found_handle)
		pr_err("Unable to find the handle, exiting\n");
	else
		ret = qseecom_unload_app(data, false);
	if (qseecom.support_bus_scaling) {
		mutex_lock(&qsee_bw_mutex);
		if (data->mode != INACTIVE) {
			qseecom_unregister_bus_bandwidth_needs(data);
			if (qseecom.cumulative_mode == INACTIVE) {
				ret = __qseecom_set_msm_bus_request(INACTIVE);
				if (ret)
					pr_err("Fail to scale down bus\n");
			}
		}
		mutex_unlock(&qsee_bw_mutex);
	} else {
		if (data->fast_load_enabled == true)
			qsee_disable_clock_vote(data, CLK_SFPB);
		if (data->perf_enabled == true)
			qsee_disable_clock_vote(data, CLK_DFAB);
	}
	atomic_dec(&data->ioctl_count);
	mutex_unlock(&app_access_lock);
	if (ret == 0) {
		kzfree(data);
		kzfree(*handle);
		kzfree(kclient);
		*handle = NULL;
	}
	return ret;
}
EXPORT_SYMBOL(qseecom_shutdown_app);
int qseecom_send_command(struct qseecom_handle *handle, void *send_buf,
			uint32_t sbuf_len, void *resp_buf, uint32_t rbuf_len)
{
	int ret = 0;
	struct qseecom_send_cmd_req req = {0, 0, 0, 0};
	struct qseecom_dev_handle *data;
	bool perf_enabled = false;

	if (handle == NULL) {
		pr_err("Handle is not initialized\n");
		return -EINVAL;
	}
	data = handle->dev;

	req.cmd_req_len = sbuf_len;
	req.resp_len = rbuf_len;
	req.cmd_req_buf = send_buf;
	req.resp_buf = resp_buf;

	if (__validate_send_cmd_inputs(data, &req))
		return -EINVAL;

	mutex_lock(&app_access_lock);
	atomic_inc(&data->ioctl_count);
	if (qseecom.support_bus_scaling) {
		ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
		if (ret) {
			pr_err("Fail to set bw HIGH\n");
			atomic_dec(&data->ioctl_count);
			mutex_unlock(&app_access_lock);
			return ret;
		}
	}
	/*
	 * On targets where crypto clock is handled by HLOS,
	 * if clk_access_cnt is zero and perf_enabled is false,
	 * then the crypto clock was not enabled before sending cmd
	 * to tz, qseecom will enable the clock to avoid service failure.
	 */
	if (!qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
		pr_debug("ce clock is not enabled!\n");
		ret = qseecom_perf_enable(data);
		if (ret) {
			pr_err("Failed to vote for clock with err %d\n",
						ret);
			atomic_dec(&data->ioctl_count);
			mutex_unlock(&app_access_lock);
			return -EINVAL;
		}
		perf_enabled = true;
	}

	ret = __qseecom_send_cmd(data, &req);
	if (qseecom.support_bus_scaling)
		__qseecom_add_bw_scale_down_timer(
			QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
	if (perf_enabled) {
		qsee_disable_clock_vote(data, CLK_DFAB);
		qsee_disable_clock_vote(data, CLK_SFPB);
	}

	atomic_dec(&data->ioctl_count);
	mutex_unlock(&app_access_lock);

	if (ret)
		return ret;

	pr_debug("sending cmd_req->rsp size: %u, ptr: 0x%pK\n",
			req.resp_len, req.resp_buf);
	return ret;
}
EXPORT_SYMBOL(qseecom_send_command);
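
/*
 * Illustrative sketch (not part of the driver, kept out of the build with
 * #if 0): how an in-kernel client is expected to use the three exported
 * entry points above. The app name "sampleapp", the buffer split, and the
 * request/response layout are assumptions for the example; real layouts
 * are defined by the individual trusted app. Note that both buffers must
 * live inside the shared buffer handed back in handle->sbuf, or
 * __validate_send_cmd_inputs() will reject the command.
 */
#if 0
static int qseecom_client_example(void)
{
	struct qseecom_handle *handle = NULL;
	char app_name[] = "sampleapp";	/* hypothetical app name */
	uint32_t sbuf_len = 1024, rbuf_len = 1024;
	int ret;

	/* Loads the app (or takes a reference) and maps a shared buffer */
	ret = qseecom_start_app(&handle, app_name, sbuf_len + rbuf_len);
	if (ret)
		return ret;

	/* Command at the base of the shared buffer, response right after */
	memset(handle->sbuf, 0, sbuf_len + rbuf_len);
	ret = qseecom_send_command(handle, handle->sbuf, sbuf_len,
				   handle->sbuf + sbuf_len, rbuf_len);

	/* Drops the reference; the app unloads when ref_cnt reaches 0 */
	qseecom_shutdown_app(&handle);
	return ret;
}
#endif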
int qseecom_set_bandwidth(struct qseecom_handle *handle, bool high)
{
	int ret = 0;

	if ((handle == NULL) || (handle->dev == NULL)) {
		pr_err("No valid kernel client\n");
		return -EINVAL;
	}
	if (high) {
		if (qseecom.support_bus_scaling) {
			mutex_lock(&qsee_bw_mutex);
			ret = __qseecom_register_bus_bandwidth_needs(
							handle->dev, HIGH);
			mutex_unlock(&qsee_bw_mutex);
			if (ret)
				pr_err("Failed to scale bus (high) %d\n", ret);
		} else {
			ret = qseecom_perf_enable(handle->dev);
			if (ret)
				pr_err("Failed to vote for clock with err %d\n",
						ret);
		}
	} else {
		if (!qseecom.support_bus_scaling) {
			qsee_disable_clock_vote(handle->dev, CLK_DFAB);
			qsee_disable_clock_vote(handle->dev, CLK_SFPB);
		} else {
			mutex_lock(&qsee_bw_mutex);
			qseecom_unregister_bus_bandwidth_needs(handle->dev);
			mutex_unlock(&qsee_bw_mutex);
		}
	}
	return ret;
}
EXPORT_SYMBOL(qseecom_set_bandwidth);
static int qseecom_send_resp(void)
{
	qseecom.send_resp_flag = 1;
	wake_up_interruptible(&qseecom.send_resp_wq);
	return 0;
}
static int __validate_send_modfd_resp_inputs(struct qseecom_dev_handle *data,
			struct qseecom_send_modfd_listener_resp *resp,
			struct qseecom_registered_listener_list *this_lstnr)
{
	int i;

	if (!data || !resp || !this_lstnr) {
		pr_err("listener handle or resp msg is null\n");
		return -EINVAL;
	}

	if (resp->resp_buf_ptr == NULL) {
		pr_err("resp buffer is null\n");
		return -EINVAL;
	}
	/* validate resp buf length */
	if ((resp->resp_len == 0) ||
			(resp->resp_len > this_lstnr->sb_length)) {
		pr_err("resp buf length %d not valid\n", resp->resp_len);
		return -EINVAL;
	}

	if ((uintptr_t)resp->resp_buf_ptr > (ULONG_MAX - resp->resp_len)) {
		pr_err("Integer overflow in resp_len & resp_buf\n");
		return -EINVAL;
	}
	if ((uintptr_t)this_lstnr->user_virt_sb_base >
					(ULONG_MAX - this_lstnr->sb_length)) {
		pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
		return -EINVAL;
	}
	/* validate resp buf */
	if (((uintptr_t)resp->resp_buf_ptr <
		(uintptr_t)this_lstnr->user_virt_sb_base) ||
		((uintptr_t)resp->resp_buf_ptr >=
		((uintptr_t)this_lstnr->user_virt_sb_base +
				this_lstnr->sb_length)) ||
		(((uintptr_t)resp->resp_buf_ptr + resp->resp_len) >
		((uintptr_t)this_lstnr->user_virt_sb_base +
				this_lstnr->sb_length))) {
		pr_err("resp buf is out of shared buffer region\n");
		return -EINVAL;
	}

	/* validate offsets */
	for (i = 0; i < MAX_ION_FD; i++) {
		if (resp->ifd_data[i].cmd_buf_offset >= resp->resp_len) {
			pr_err("Invalid offset %d = 0x%x\n",
				i, resp->ifd_data[i].cmd_buf_offset);
			return -EINVAL;
		}
	}
	return 0;
}
static int __qseecom_send_modfd_resp(struct qseecom_dev_handle *data,
				void __user *argp, bool is_64bit_addr)
{
	struct qseecom_send_modfd_listener_resp resp;
	struct qseecom_registered_listener_list *this_lstnr = NULL;

	if (copy_from_user(&resp, argp, sizeof(resp))) {
		pr_err("copy_from_user failed\n");
		return -EINVAL;
	}

	this_lstnr = __qseecom_find_svc(data->listener.id);
	if (this_lstnr == NULL)
		return -EINVAL;

	if (__validate_send_modfd_resp_inputs(data, &resp, this_lstnr))
		return -EINVAL;

	resp.resp_buf_ptr = this_lstnr->sb_virt +
		(uintptr_t)(resp.resp_buf_ptr - this_lstnr->user_virt_sb_base);
	__qseecom_update_cmd_buf(&resp, false, data, true);
	qseecom.send_resp_flag = 1;
	wake_up_interruptible(&qseecom.send_resp_wq);
	return 0;
}

static int qseecom_send_modfd_resp(struct qseecom_dev_handle *data,
						void __user *argp)
{
	return __qseecom_send_modfd_resp(data, argp, false);
}
static int qseecom_get_qseos_version(struct qseecom_dev_handle *data,
						void __user *argp)
{
	struct qseecom_qseos_version_req req;

	if (copy_from_user(&req, argp, sizeof(req))) {
		pr_err("copy_from_user failed\n");
		return -EINVAL;
	}
	req.qseos_version = qseecom.qseos_version;
	if (copy_to_user(argp, &req, sizeof(req))) {
		pr_err("copy_to_user failed\n");
		return -EINVAL;
	}
	return 0;
}
static int __qseecom_enable_clk(enum qseecom_ce_hw_instance ce)
{
	int rc = 0;
	struct qseecom_clk *qclk;

	if (ce == CLK_QSEE)
		qclk = &qseecom.qsee;
	else
		qclk = &qseecom.ce_drv;

	mutex_lock(&clk_access_lock);

	if (qclk->clk_access_cnt == ULONG_MAX)
		goto err;

	if (qclk->clk_access_cnt > 0) {
		qclk->clk_access_cnt++;
		mutex_unlock(&clk_access_lock);
		return rc;
	}

	/* Enable CE core clk */
	rc = clk_prepare_enable(qclk->ce_core_clk);
	if (rc) {
		pr_err("Unable to enable/prepare CE core clk\n");
		goto err;
	}
	/* Enable CE clk */
	rc = clk_prepare_enable(qclk->ce_clk);
	if (rc) {
		pr_err("Unable to enable/prepare CE iface clk\n");
		goto ce_clk_err;
	}
	/* Enable AXI clk */
	rc = clk_prepare_enable(qclk->ce_bus_clk);
	if (rc) {
		pr_err("Unable to enable/prepare CE bus clk\n");
		goto ce_bus_clk_err;
	}
	qclk->clk_access_cnt++;
	mutex_unlock(&clk_access_lock);
	return 0;

ce_bus_clk_err:
	clk_disable_unprepare(qclk->ce_clk);
ce_clk_err:
	clk_disable_unprepare(qclk->ce_core_clk);
err:
	mutex_unlock(&clk_access_lock);
	return -EIO;
}
static void __qseecom_disable_clk(enum qseecom_ce_hw_instance ce)
{
	struct qseecom_clk *qclk;

	if (ce == CLK_QSEE)
		qclk = &qseecom.qsee;
	else
		qclk = &qseecom.ce_drv;

	mutex_lock(&clk_access_lock);
	if (qclk->clk_access_cnt == 0) {
		mutex_unlock(&clk_access_lock);
		return;
	}
	if (qclk->clk_access_cnt == 1) {
		if (qclk->ce_clk != NULL)
			clk_disable_unprepare(qclk->ce_clk);
		if (qclk->ce_core_clk != NULL)
			clk_disable_unprepare(qclk->ce_core_clk);
		if (qclk->ce_bus_clk != NULL)
			clk_disable_unprepare(qclk->ce_bus_clk);
	}
	qclk->clk_access_cnt--;
	mutex_unlock(&clk_access_lock);
}
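
/*
 * Usage contract for the two helpers above (illustrative trace): calls
 * must be balanced, and the hardware clocks are only gated on the
 * 0 <-> 1 transitions of clk_access_cnt:
 *
 *	__qseecom_enable_clk(CLK_QSEE);   clk_access_cnt: 0 -> 1 (clocks on)
 *	__qseecom_enable_clk(CLK_QSEE);   clk_access_cnt: 1 -> 2
 *	__qseecom_disable_clk(CLK_QSEE);  clk_access_cnt: 2 -> 1
 *	__qseecom_disable_clk(CLK_QSEE);  clk_access_cnt: 1 -> 0 (clocks off)
 */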
static int qsee_vote_for_clock(struct qseecom_dev_handle *data,
						int32_t clk_type)
{
	int ret = 0;
	struct qseecom_clk *qclk;

	qclk = &qseecom.qsee;
	if (!qseecom.qsee_perf_client)
		return ret;

	switch (clk_type) {
	case CLK_DFAB:
		mutex_lock(&qsee_bw_mutex);
		if (!qseecom.qsee_bw_count) {
			if (qseecom.qsee_sfpb_bw_count > 0) {
				ret = msm_bus_scale_client_update_request(
						qseecom.qsee_perf_client, 3);
			} else {
				if (qclk->ce_core_src_clk != NULL)
					ret = __qseecom_enable_clk(CLK_QSEE);
				if (!ret) {
					ret =
					msm_bus_scale_client_update_request(
						qseecom.qsee_perf_client, 1);
					if ((ret) &&
						(qclk->ce_core_src_clk != NULL))
						__qseecom_disable_clk(CLK_QSEE);
				}
			}
			if (ret) {
				pr_err("DFAB Bandwidth req failed (%d)\n",
									ret);
			} else {
				qseecom.qsee_bw_count++;
				data->perf_enabled = true;
			}
		} else {
			qseecom.qsee_bw_count++;
			data->perf_enabled = true;
		}
		mutex_unlock(&qsee_bw_mutex);
		break;
	case CLK_SFPB:
		mutex_lock(&qsee_bw_mutex);
		if (!qseecom.qsee_sfpb_bw_count) {
			if (qseecom.qsee_bw_count > 0) {
				ret = msm_bus_scale_client_update_request(
						qseecom.qsee_perf_client, 3);
			} else {
				if (qclk->ce_core_src_clk != NULL)
					ret = __qseecom_enable_clk(CLK_QSEE);
				if (!ret) {
					ret =
					msm_bus_scale_client_update_request(
						qseecom.qsee_perf_client, 2);
					if ((ret) &&
						(qclk->ce_core_src_clk != NULL))
						__qseecom_disable_clk(CLK_QSEE);
				}
			}
			if (ret) {
				pr_err("SFPB Bandwidth req failed (%d)\n",
									ret);
			} else {
				qseecom.qsee_sfpb_bw_count++;
				data->fast_load_enabled = true;
			}
		} else {
			qseecom.qsee_sfpb_bw_count++;
			data->fast_load_enabled = true;
		}
		mutex_unlock(&qsee_bw_mutex);
		break;
	default:
		pr_err("Clock type not defined\n");
		break;
	}
	return ret;
}
  2676. static void qsee_disable_clock_vote(struct qseecom_dev_handle *data,
  2677. int32_t clk_type)
  2678. {
  2679. int32_t ret = 0;
  2680. struct qseecom_clk *qclk;
  2681. qclk = &qseecom.qsee;
  2682. if (!qseecom.qsee_perf_client)
  2683. return;
  2684. switch (clk_type) {
  2685. case CLK_DFAB:
  2686. mutex_lock(&qsee_bw_mutex);
  2687. if (qseecom.qsee_bw_count == 0) {
  2688. pr_err("Client error.Extra call to disable DFAB clk\n");
  2689. mutex_unlock(&qsee_bw_mutex);
  2690. return;
  2691. }
  2692. if (qseecom.qsee_bw_count == 1) {
  2693. if (qseecom.qsee_sfpb_bw_count > 0)
  2694. ret = msm_bus_scale_client_update_request(
  2695. qseecom.qsee_perf_client, 2);
  2696. else {
  2697. ret = msm_bus_scale_client_update_request(
  2698. qseecom.qsee_perf_client, 0);
  2699. if ((!ret) && (qclk->ce_core_src_clk != NULL))
  2700. __qseecom_disable_clk(CLK_QSEE);
  2701. }
  2702. if (ret)
  2703. pr_err("SFPB Bandwidth req fail (%d)\n",
  2704. ret);
  2705. else {
  2706. qseecom.qsee_bw_count--;
  2707. data->perf_enabled = false;
  2708. }
  2709. } else {
  2710. qseecom.qsee_bw_count--;
  2711. data->perf_enabled = false;
  2712. }
  2713. mutex_unlock(&qsee_bw_mutex);
  2714. break;
  2715. case CLK_SFPB:
  2716. mutex_lock(&qsee_bw_mutex);
  2717. if (qseecom.qsee_sfpb_bw_count == 0) {
  2718. pr_err("Client error.Extra call to disable SFPB clk\n");
  2719. mutex_unlock(&qsee_bw_mutex);
  2720. return;
  2721. }
  2722. if (qseecom.qsee_sfpb_bw_count == 1) {
  2723. if (qseecom.qsee_bw_count > 0)
  2724. ret = msm_bus_scale_client_update_request(
  2725. qseecom.qsee_perf_client, 1);
  2726. else {
  2727. ret = msm_bus_scale_client_update_request(
  2728. qseecom.qsee_perf_client, 0);
  2729. if ((!ret) && (qclk->ce_core_src_clk != NULL))
  2730. __qseecom_disable_clk(CLK_QSEE);
  2731. }
  2732. if (ret)
  2733. pr_err("SFPB Bandwidth req fail (%d)\n",
  2734. ret);
  2735. else {
  2736. qseecom.qsee_sfpb_bw_count--;
  2737. data->fast_load_enabled = false;
  2738. }
  2739. } else {
  2740. qseecom.qsee_sfpb_bw_count--;
  2741. data->fast_load_enabled = false;
  2742. }
  2743. mutex_unlock(&qsee_bw_mutex);
  2744. break;
  2745. default:
  2746. pr_err("Clock type not defined\n");
  2747. break;
  2748. }
  2749. }
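/*
 * qseecom_load_external_elf() - load a non-QSEE ELF image into the secure
 * environment.  Flow: copy the request, import the caller's ION buffer,
 * pin the task to core 0 (the SCM call below is tied to Core0), vote for
 * bus bandwidth and the SFPB clock, clean/invalidate the buffer cache,
 * then issue the load command to TZ.  All error paths unwind in reverse
 * order through the exit_* labels.
 */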
  2750. static int qseecom_load_external_elf(struct qseecom_dev_handle *data,
  2751. void __user *argp)
  2752. {
  2753. struct ion_handle *ihandle; /* Ion handle */
  2754. struct qseecom_load_img_req load_img_req;
  2755. int ret;
  2756. int set_cpu_ret = 0;
  2757. ion_phys_addr_t pa = 0;
  2758. uint32_t len;
  2759. struct cpumask mask;
  2760. struct qseecom_load_app_ireq load_req;
  2761. struct qseecom_command_scm_resp resp;
  2762. /* Copy the relevant information needed for loading the image */
  2763. if (copy_from_user(&load_img_req,
  2764. (void __user *)argp,
  2765. sizeof(struct qseecom_load_img_req))) {
  2766. pr_err("copy_from_user failed\n");
  2767. return -EFAULT;
  2768. }
  2769. /* Get the handle of the shared fd */
  2770. ihandle = ion_import_dma_buf(qseecom.ion_clnt,
  2771. load_img_req.ifd_data_fd);
  2772. if (IS_ERR_OR_NULL(ihandle)) {
  2773. pr_err("Ion client could not retrieve the handle\n");
  2774. return -ENOMEM;
  2775. }
  2776. /* Get the physical address of the ION BUF */
  2777. ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
  2778. if (ret) {
  2779. pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
  2780. ret);
2781. goto exit_ion_free; /* don't leak ihandle on ion_phys failure */
  2782. }
  2783. /* Populate the structure for sending scm call to load image */
  2784. load_req.qsee_cmd_id = QSEOS_LOAD_EXTERNAL_ELF_COMMAND;
  2785. load_req.mdt_len = load_img_req.mdt_len;
  2786. load_req.img_len = load_img_req.img_len;
  2787. load_req.phy_addr = pa;
  2788. /* SCM_CALL tied to Core0 */
  2789. mask = CPU_MASK_CPU0;
  2790. set_cpu_ret = set_cpus_allowed_ptr(current, &mask);
  2791. if (set_cpu_ret) {
  2792. pr_err("set_cpus_allowed_ptr failed : ret %d\n",
  2793. set_cpu_ret);
  2794. ret = -EFAULT;
  2795. goto exit_ion_free;
  2796. }
  2797. if (qseecom.support_bus_scaling) {
  2798. mutex_lock(&qsee_bw_mutex);
  2799. ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
  2800. mutex_unlock(&qsee_bw_mutex);
  2801. if (ret) {
  2802. ret = -EIO;
  2803. goto exit_cpu_restore;
  2804. }
  2805. }
  2806. /* Vote for the SFPB clock */
  2807. ret = __qseecom_enable_clk_scale_up(data);
  2808. if (ret) {
  2809. ret = -EIO;
  2810. goto exit_register_bus_bandwidth_needs;
  2811. }
  2812. msm_ion_do_cache_op(qseecom.ion_clnt, ihandle, NULL, len,
  2813. ION_IOC_CLEAN_INV_CACHES);
  2814. /* SCM_CALL to load the external elf */
  2815. ret = scm_call(SCM_SVC_TZSCHEDULER, 1, &load_req,
  2816. sizeof(struct qseecom_load_app_ireq),
  2817. &resp, sizeof(resp));
  2818. if (ret) {
  2819. pr_err("scm_call to load failed : ret %d\n",
  2820. ret);
  2821. ret = -EFAULT;
  2822. goto exit_disable_clock;
  2823. }
  2824. switch (resp.result) {
  2825. case QSEOS_RESULT_SUCCESS:
  2826. break;
  2827. case QSEOS_RESULT_INCOMPLETE:
  2828. pr_err("%s: qseos result incomplete\n", __func__);
  2829. ret = __qseecom_process_incomplete_cmd(data, &resp);
  2830. if (ret)
  2831. pr_err("process_incomplete_cmd failed: err: %d\n", ret);
  2832. break;
  2833. case QSEOS_RESULT_FAILURE:
  2834. pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
  2835. ret = -EFAULT;
  2836. break;
  2837. default:
  2838. pr_err("scm_call response result %d not supported\n",
  2839. resp.result);
  2840. ret = -EFAULT;
  2841. break;
  2842. }
  2843. exit_disable_clock:
  2844. __qseecom_disable_clk_scale_down(data);
  2845. exit_register_bus_bandwidth_needs:
  2846. if (qseecom.support_bus_scaling) {
  2847. int ret2;
  2848. mutex_lock(&qsee_bw_mutex);
  2849. ret2 = qseecom_unregister_bus_bandwidth_needs(data);
2850. if (ret2) {
2851. pr_err("qseecom_unregister_bus_bandwidth_needs failed: %d\n",
2852. ret2);
2853. if (!ret)
2854. ret = ret2;
2855. }
  2855. mutex_unlock(&qsee_bw_mutex);
  2856. }
  2857. exit_cpu_restore:
  2858. /* Restore the CPU mask */
  2859. mask = CPU_MASK_ALL;
  2860. set_cpu_ret = set_cpus_allowed_ptr(current, &mask);
  2861. if (set_cpu_ret) {
  2862. pr_err("set_cpus_allowed_ptr failed to restore mask: ret %d\n",
  2863. set_cpu_ret);
  2864. ret = -EFAULT;
  2865. }
  2866. exit_ion_free:
  2867. /* Deallocate the handle */
  2868. if (!IS_ERR_OR_NULL(ihandle))
  2869. ion_free(qseecom.ion_clnt, ihandle);
  2870. return ret;
  2871. }
  2872. static int qseecom_unload_external_elf(struct qseecom_dev_handle *data)
  2873. {
  2874. int ret = 0;
  2875. int set_cpu_ret = 0;
  2876. struct qseecom_command_scm_resp resp;
  2877. struct qseecom_unload_app_ireq req;
  2878. struct cpumask mask;
  2879. /* unavailable client app */
  2880. data->type = QSEECOM_UNAVAILABLE_CLIENT_APP;
  2881. /* Populate the structure for sending scm call to unload image */
  2882. req.qsee_cmd_id = QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND;
  2883. /* SCM_CALL tied to Core0 */
  2884. mask = CPU_MASK_CPU0;
  2885. ret = set_cpus_allowed_ptr(current, &mask);
  2886. if (ret) {
  2887. pr_err("set_cpus_allowed_ptr failed : ret %d\n",
  2888. ret);
  2889. return -EFAULT;
  2890. }
  2891. /* SCM_CALL to unload the external elf */
  2892. ret = scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
  2893. sizeof(struct qseecom_unload_app_ireq),
  2894. &resp, sizeof(resp));
  2895. if (ret) {
  2896. pr_err("scm_call to unload failed : ret %d\n",
  2897. ret);
  2898. ret = -EFAULT;
  2899. goto qseecom_unload_external_elf_scm_err;
  2900. }
  2901. if (resp.result == QSEOS_RESULT_INCOMPLETE) {
  2902. ret = __qseecom_process_incomplete_cmd(data, &resp);
  2903. if (ret)
  2904. pr_err("process_incomplete_cmd fail err: %d\n",
  2905. ret);
  2906. } else {
  2907. if (resp.result != QSEOS_RESULT_SUCCESS) {
  2908. pr_err("scm_call to unload image failed resp.result =%d\n",
  2909. resp.result);
  2910. ret = -EFAULT;
  2911. }
  2912. }
  2913. qseecom_unload_external_elf_scm_err:
  2914. /* Restore the CPU mask */
  2915. mask = CPU_MASK_ALL;
  2916. set_cpu_ret = set_cpus_allowed_ptr(current, &mask);
  2917. if (set_cpu_ret) {
  2918. pr_err("set_cpus_allowed_ptr failed to restore mask: ret %d\n",
  2919. set_cpu_ret);
  2920. ret = -EFAULT;
  2921. }
  2922. return ret;
  2923. }
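/*
 * qseecom_query_app_loaded() - ask TZ whether a named app is resident.
 * Returns -EEXIST (and fills in the app_id) if it is already loaded,
 * 0 if not, or a negative errno if the lookup itself failed.  An app that
 * was loaded earlier by appsbl or a kernel client gets a registered-app
 * entry created for it here so later refcounting works.
 */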
  2924. static int qseecom_query_app_loaded(struct qseecom_dev_handle *data,
  2925. void __user *argp)
  2926. {
  2927. int32_t ret;
  2928. struct qseecom_qseos_app_load_query query_req;
  2929. struct qseecom_check_app_ireq req;
  2930. struct qseecom_registered_app_list *entry = NULL;
  2931. unsigned long flags = 0;
  2932. bool found_app = false;
  2933. /* Copy the relevant information needed for loading the image */
  2934. if (copy_from_user(&query_req,
  2935. (void __user *)argp,
  2936. sizeof(struct qseecom_qseos_app_load_query))) {
  2937. pr_err("copy_from_user failed\n");
  2938. return -EFAULT;
  2939. }
  2940. req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
  2941. query_req.app_name[MAX_APP_NAME_SIZE-1] = '\0';
  2942. strlcpy(req.app_name, query_req.app_name, MAX_APP_NAME_SIZE);
  2943. ret = __qseecom_check_app_exists(req);
  2944. if ((ret == -EINVAL) || (ret == -ENODEV)) {
  2945. pr_err(" scm call to check if app is loaded failed");
  2946. return ret; /* scm call failed */
  2947. } else if (ret > 0) {
  2948. pr_debug("App id %d (%s) already exists\n", ret,
  2949. (char *)(req.app_name));
  2950. spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
  2951. list_for_each_entry(entry,
  2952. &qseecom.registered_app_list_head, list){
  2953. if (entry->app_id == ret) {
  2954. entry->ref_cnt++;
  2955. found_app = true;
  2956. break;
  2957. }
  2958. }
  2959. spin_unlock_irqrestore(
  2960. &qseecom.registered_app_list_lock, flags);
  2961. data->client.app_id = ret;
  2962. query_req.app_id = ret;
  2963. strlcpy(data->client.app_name, query_req.app_name,
  2964. MAX_APP_NAME_SIZE);
  2965. /*
  2966. * If app was loaded by appsbl or kernel client before
2967. * and was not registered, register this app now.
  2968. */
  2969. if (!found_app) {
  2970. pr_debug("Register app %d [%s] which was loaded before\n",
  2971. ret, (char *)query_req.app_name);
  2972. entry = kmalloc(sizeof(*entry), GFP_KERNEL);
  2973. if (!entry) {
  2974. pr_err("kmalloc for app entry failed\n");
  2975. return -ENOMEM;
  2976. }
  2977. entry->app_id = ret;
  2978. entry->ref_cnt = 1;
  2979. strlcpy(entry->app_name, data->client.app_name,
  2980. MAX_APP_NAME_SIZE);
  2981. spin_lock_irqsave(&qseecom.registered_app_list_lock,
  2982. flags);
  2983. list_add_tail(&entry->list,
  2984. &qseecom.registered_app_list_head);
  2985. spin_unlock_irqrestore(
  2986. &qseecom.registered_app_list_lock, flags);
  2987. }
  2988. if (copy_to_user(argp, &query_req, sizeof(query_req))) {
  2989. pr_err("copy_to_user failed\n");
  2990. return -EFAULT;
  2991. }
  2992. return -EEXIST; /* app already loaded */
  2993. } else {
  2994. return 0; /* app not loaded */
  2995. }
  2996. }
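/*
 * __qseecom_get_ce_pipe_info() - map a key-management usage type to the
 * crypto-engine pipe and HW instance read from the device tree at probe
 * time.  Fails with -EINVAL when the matching FDE/PFE feature was not
 * declared for this target.
 */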
  2997. static int __qseecom_get_ce_pipe_info(
  2998. enum qseecom_key_management_usage_type usage,
  2999. uint32_t *pipe, uint32_t *ce_hw)
  3000. {
  3001. int ret;
  3002. switch (usage) {
  3003. case QSEOS_KM_USAGE_DISK_ENCRYPTION:
  3004. if (qseecom.support_fde) {
  3005. *pipe = qseecom.ce_info.disk_encrypt_pipe;
  3006. *ce_hw = qseecom.ce_info.hlos_ce_hw_instance;
  3007. ret = 0;
  3008. } else {
  3009. pr_err("info unavailable: disk encr pipe %d ce_hw %d\n",
  3010. qseecom.ce_info.disk_encrypt_pipe,
  3011. qseecom.ce_info.hlos_ce_hw_instance);
  3012. ret = -EINVAL;
  3013. }
  3014. break;
  3015. case QSEOS_KM_USAGE_FILE_ENCRYPTION:
  3016. if (qseecom.support_pfe) {
  3017. *pipe = qseecom.ce_info.file_encrypt_pipe;
  3018. *ce_hw = qseecom.ce_info.hlos_ce_hw_instance;
  3019. ret = 0;
  3020. } else {
  3021. pr_err("info unavailable: file encr pipe %d ce_hw %d\n",
  3022. qseecom.ce_info.file_encrypt_pipe,
  3023. qseecom.ce_info.hlos_ce_hw_instance);
  3024. ret = -EINVAL;
  3025. }
  3026. break;
  3027. default:
  3028. ret = -EINVAL;
  3029. break;
  3030. }
  3031. return ret;
  3032. }
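/*
 * __qseecom_generate_and_save_key() - ask TZ to generate a key and save
 * it in secure storage.  QSEOS_RESULT_FAIL_KEY_ID_EXISTS is treated as
 * success, since the key this caller wants is already provisioned.
 */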
  3033. static int __qseecom_generate_and_save_key(struct qseecom_dev_handle *data,
  3034. enum qseecom_key_management_usage_type usage,
  3035. struct qseecom_key_generate_ireq *ireq)
  3036. {
  3037. struct qseecom_command_scm_resp resp;
  3038. int ret;
  3039. if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
  3040. usage >= QSEOS_KM_USAGE_MAX) {
  3041. pr_err("Error:: unsupported usage %d\n", usage);
  3042. return -EFAULT;
  3043. }
  3044. __qseecom_enable_clk(CLK_QSEE);
  3045. ret = scm_call(SCM_SVC_TZSCHEDULER, 1,
  3046. ireq, sizeof(struct qseecom_key_generate_ireq),
  3047. &resp, sizeof(resp));
  3048. if (ret) {
  3049. pr_err("scm call to generate key failed : %d\n", ret);
  3050. __qseecom_disable_clk(CLK_QSEE);
  3051. return -EFAULT;
  3052. }
  3053. switch (resp.result) {
  3054. case QSEOS_RESULT_SUCCESS:
  3055. break;
  3056. case QSEOS_RESULT_FAIL_KEY_ID_EXISTS:
  3057. pr_debug("Key ID exists.\n");
  3058. break;
  3059. case QSEOS_RESULT_INCOMPLETE:
  3060. ret = __qseecom_process_incomplete_cmd(data, &resp);
  3061. if (ret) {
  3062. if (resp.result == QSEOS_RESULT_FAIL_KEY_ID_EXISTS) {
  3063. pr_debug("Key ID exists.\n");
  3064. ret = 0;
  3065. } else {
  3066. pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
  3067. resp.result);
  3068. }
  3069. }
  3070. break;
  3071. case QSEOS_RESULT_FAILURE:
  3072. default:
  3073. pr_err("gen key scm call failed resp.result %d\n", resp.result);
  3074. ret = -EINVAL;
  3075. break;
  3076. }
  3077. __qseecom_disable_clk(CLK_QSEE);
  3078. return ret;
  3079. }
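/*
 * __qseecom_delete_saved_key() - remove a saved key from secure storage.
 * -ERANGE is reserved for QSEOS_RESULT_FAIL_MAX_ATTEMPT, i.e. the maximum
 * number of password attempts was reached.
 */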
  3080. static int __qseecom_delete_saved_key(struct qseecom_dev_handle *data,
  3081. enum qseecom_key_management_usage_type usage,
  3082. struct qseecom_key_delete_ireq *ireq)
  3083. {
  3084. struct qseecom_command_scm_resp resp;
  3085. int ret;
  3086. if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
  3087. usage >= QSEOS_KM_USAGE_MAX) {
  3088. pr_err("Error:: unsupported usage %d\n", usage);
  3089. return -EFAULT;
  3090. }
  3091. __qseecom_enable_clk(CLK_QSEE);
  3092. ret = scm_call(SCM_SVC_TZSCHEDULER, 1,
  3093. ireq, sizeof(struct qseecom_key_delete_ireq),
  3094. &resp, sizeof(struct qseecom_command_scm_resp));
  3095. if (ret) {
  3096. pr_err("scm call to delete key failed : %d\n", ret);
  3097. __qseecom_disable_clk(CLK_QSEE);
  3098. return -EFAULT;
  3099. }
  3100. switch (resp.result) {
  3101. case QSEOS_RESULT_SUCCESS:
  3102. break;
  3103. case QSEOS_RESULT_INCOMPLETE:
  3104. ret = __qseecom_process_incomplete_cmd(data, &resp);
  3105. if (ret) {
  3106. pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
  3107. resp.result);
  3108. if (resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
  3109. pr_debug("Max attempts to input password reached.\n");
  3110. ret = -ERANGE;
  3111. }
  3112. }
  3113. break;
  3114. case QSEOS_RESULT_FAIL_MAX_ATTEMPT:
  3115. pr_debug("Max attempts to input password reached.\n");
  3116. ret = -ERANGE;
  3117. break;
  3118. case QSEOS_RESULT_FAILURE:
  3119. default:
  3120. pr_err("Delete key scm call failed resp.result %d\n",
  3121. resp.result);
  3122. ret = -EINVAL;
  3123. break;
  3124. }
  3125. __qseecom_disable_clk(CLK_QSEE);
  3126. return ret;
  3127. }
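/*
 * __qseecom_set_clear_ce_key() - program (or clear) a key on the crypto
 * engine pipe.  This may touch a CE instance other than the one QSEE
 * uses, so the CLK_CE_DRV clocks are enabled alongside CLK_QSEE whenever
 * the two instances differ.
 */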
  3128. static int __qseecom_set_clear_ce_key(struct qseecom_dev_handle *data,
  3129. enum qseecom_key_management_usage_type usage,
  3130. struct qseecom_key_select_ireq *ireq)
  3131. {
  3132. struct qseecom_command_scm_resp resp;
  3133. int ret;
  3134. if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
  3135. usage >= QSEOS_KM_USAGE_MAX) {
  3136. pr_err("Error:: unsupported usage %d\n", usage);
  3137. return -EFAULT;
  3138. }
  3139. __qseecom_enable_clk(CLK_QSEE);
  3140. if (qseecom.qsee.instance != qseecom.ce_drv.instance)
  3141. __qseecom_enable_clk(CLK_CE_DRV);
  3142. ret = scm_call(SCM_SVC_TZSCHEDULER, 1,
  3143. ireq, sizeof(struct qseecom_key_select_ireq),
  3144. &resp, sizeof(struct qseecom_command_scm_resp));
  3145. if (ret) {
  3146. pr_err("scm call to set QSEOS_PIPE_ENC key failed : %d\n", ret);
  3147. __qseecom_disable_clk(CLK_QSEE);
  3148. if (qseecom.qsee.instance != qseecom.ce_drv.instance)
  3149. __qseecom_disable_clk(CLK_CE_DRV);
  3150. return -EFAULT;
  3151. }
  3152. switch (resp.result) {
  3153. case QSEOS_RESULT_SUCCESS:
  3154. break;
  3155. case QSEOS_RESULT_INCOMPLETE:
  3156. ret = __qseecom_process_incomplete_cmd(data, &resp);
  3157. if (ret) {
  3158. pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
  3159. resp.result);
  3160. if (resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
  3161. pr_debug("Max attempts to input password reached.\n");
  3162. ret = -ERANGE;
  3163. }
  3164. }
  3165. break;
  3166. case QSEOS_RESULT_FAIL_MAX_ATTEMPT:
  3167. pr_debug("Max attempts to input password reached.\n");
  3168. ret = -ERANGE;
  3169. break;
  3170. case QSEOS_RESULT_FAILURE:
  3171. default:
  3172. pr_err("Set key scm call failed resp.result %d\n", resp.result);
  3173. ret = -EINVAL;
  3174. break;
  3175. }
  3176. __qseecom_disable_clk(CLK_QSEE);
  3177. if (qseecom.qsee.instance != qseecom.ce_drv.instance)
  3178. __qseecom_disable_clk(CLK_CE_DRV);
  3179. return ret;
  3180. }
  3181. static int __qseecom_update_current_key_user_info(
  3182. struct qseecom_dev_handle *data,
  3183. enum qseecom_key_management_usage_type usage,
  3184. struct qseecom_key_userinfo_update_ireq *ireq)
  3185. {
  3186. struct qseecom_command_scm_resp resp;
  3187. int ret;
  3188. if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
  3189. usage >= QSEOS_KM_USAGE_MAX) {
  3190. pr_err("Error:: unsupported usage %d\n", usage);
  3191. return -EFAULT;
  3192. }
  3193. __qseecom_enable_clk(CLK_QSEE);
  3194. ret = scm_call(SCM_SVC_TZSCHEDULER, 1,
  3195. ireq, sizeof(struct qseecom_key_userinfo_update_ireq),
  3196. &resp, sizeof(struct qseecom_command_scm_resp));
  3197. if (ret) {
  3198. pr_err("scm call to update key userinfo failed : %d\n", ret);
  3199. __qseecom_disable_clk(CLK_QSEE);
  3200. return -EFAULT;
  3201. }
  3202. switch (resp.result) {
  3203. case QSEOS_RESULT_SUCCESS:
  3204. break;
  3205. case QSEOS_RESULT_INCOMPLETE:
  3206. ret = __qseecom_process_incomplete_cmd(data, &resp);
  3207. if (ret)
  3208. pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
  3209. resp.result);
  3210. break;
  3211. case QSEOS_RESULT_FAILURE:
  3212. default:
  3213. pr_err("Set key scm call failed resp.result %d\n", resp.result);
  3214. ret = -EINVAL;
  3215. break;
  3216. }
  3217. __qseecom_disable_clk(CLK_QSEE);
  3218. return ret;
  3219. }
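/*
 * qseecom_create_key() - two-step key provisioning for FDE/PFE: first
 * generate and save the key in secure storage, then select it on the CE
 * pipe for both the ENC and ENC_XTS pipe types.  The key id comes from
 * the static key_id_array slot for the requested usage.
 */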
  3220. static int qseecom_create_key(struct qseecom_dev_handle *data,
  3221. void __user *argp)
  3222. {
  3223. uint32_t ce_hw = 0;
  3224. uint32_t pipe = 0;
  3225. int ret = 0;
  3226. uint32_t flags = 0;
  3227. struct qseecom_create_key_req create_key_req;
  3228. struct qseecom_key_generate_ireq generate_key_ireq;
  3229. struct qseecom_key_select_ireq set_key_ireq;
  3230. ret = copy_from_user(&create_key_req, argp, sizeof(create_key_req));
  3231. if (ret) {
  3232. pr_err("copy_from_user failed\n");
  3233. return ret;
  3234. }
  3235. if (create_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
  3236. create_key_req.usage >= QSEOS_KM_USAGE_MAX) {
  3237. pr_err("Error:: unsupported usage %d\n", create_key_req.usage);
  3238. return -EFAULT;
  3239. }
  3240. ret = __qseecom_get_ce_pipe_info(create_key_req.usage, &pipe, &ce_hw);
  3241. if (ret) {
  3242. pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret);
  3243. return -EINVAL;
  3244. }
  3245. generate_key_ireq.flags = flags;
  3246. generate_key_ireq.qsee_command_id = QSEOS_GENERATE_KEY;
  3247. memset((void *)generate_key_ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
  3248. memset((void *)generate_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
  3249. memcpy((void *)generate_key_ireq.key_id,
  3250. (void *)key_id_array[create_key_req.usage].desc,
  3251. QSEECOM_KEY_ID_SIZE);
  3252. memcpy((void *)generate_key_ireq.hash32,
  3253. (void *)create_key_req.hash32, QSEECOM_HASH_SIZE);
  3254. ret = __qseecom_generate_and_save_key(data, create_key_req.usage,
  3255. &generate_key_ireq);
  3256. if (ret) {
  3257. pr_err("Failed to generate key on storage: %d\n", ret);
  3258. return ret;
  3259. }
  3260. set_key_ireq.qsee_command_id = QSEOS_SET_KEY;
  3261. set_key_ireq.ce = ce_hw;
  3262. set_key_ireq.pipe = pipe;
  3263. set_key_ireq.flags = flags;
  3264. /* set both PIPE_ENC and PIPE_ENC_XTS*/
  3265. set_key_ireq.pipe_type = QSEOS_PIPE_ENC|QSEOS_PIPE_ENC_XTS;
  3266. memset((void *)set_key_ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
  3267. memset((void *)set_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
  3268. memcpy((void *)set_key_ireq.key_id,
  3269. (void *)key_id_array[create_key_req.usage].desc,
  3270. QSEECOM_KEY_ID_SIZE);
  3271. memcpy((void *)set_key_ireq.hash32, (void *)create_key_req.hash32,
  3272. QSEECOM_HASH_SIZE);
  3273. ret = __qseecom_set_clear_ce_key(data, create_key_req.usage,
  3274. &set_key_ireq);
  3275. if (ret) {
  3276. pr_err("Failed to create key: pipe %d, ce %d: %d\n",
  3277. pipe, ce_hw, ret);
  3278. return ret;
  3279. }
  3280. return ret;
  3281. }
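/*
 * qseecom_wipe_key() - undo qseecom_create_key().  If wipe_key_flag is
 * set, the key is first deleted from secure storage; in all cases the CE
 * pipe key is then cleared by programming a key id filled with
 * QSEECOM_INVALID_KEY_ID bytes through the same set-key command.
 */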
  3282. static int qseecom_wipe_key(struct qseecom_dev_handle *data,
  3283. void __user *argp)
  3284. {
  3285. uint32_t ce_hw = 0;
  3286. uint32_t pipe = 0;
  3287. int ret = 0;
  3288. uint32_t flags = 0;
  3289. int i;
  3290. struct qseecom_wipe_key_req wipe_key_req;
  3291. struct qseecom_key_delete_ireq delete_key_ireq;
  3292. struct qseecom_key_select_ireq clear_key_ireq;
  3293. ret = copy_from_user(&wipe_key_req, argp, sizeof(wipe_key_req));
  3294. if (ret) {
  3295. pr_err("copy_from_user failed\n");
  3296. return ret;
  3297. }
  3298. if (wipe_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
  3299. wipe_key_req.usage >= QSEOS_KM_USAGE_MAX) {
  3300. pr_err("Error:: unsupported usage %d\n", wipe_key_req.usage);
  3301. return -EFAULT;
  3302. }
  3303. ret = __qseecom_get_ce_pipe_info(wipe_key_req.usage, &pipe, &ce_hw);
  3304. if (ret) {
  3305. pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret);
  3306. return -EINVAL;
  3307. }
  3308. if (wipe_key_req.wipe_key_flag) {
  3309. delete_key_ireq.flags = flags;
  3310. delete_key_ireq.qsee_command_id = QSEOS_DELETE_KEY;
  3311. memset((void *)delete_key_ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
  3312. memcpy((void *)delete_key_ireq.key_id,
  3313. (void *)key_id_array[wipe_key_req.usage].desc,
  3314. QSEECOM_KEY_ID_SIZE);
  3315. memset((void *)delete_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
  3316. ret = __qseecom_delete_saved_key(data, wipe_key_req.usage,
  3317. &delete_key_ireq);
  3318. if (ret) {
  3319. pr_err("Failed to delete key from ssd storage: %d\n",
  3320. ret);
  3321. return -EFAULT;
  3322. }
  3323. }
  3324. clear_key_ireq.qsee_command_id = QSEOS_SET_KEY;
  3325. clear_key_ireq.ce = ce_hw;
  3326. clear_key_ireq.pipe = pipe;
  3327. clear_key_ireq.flags = flags;
  3328. clear_key_ireq.pipe_type = QSEOS_PIPE_ENC|QSEOS_PIPE_ENC_XTS;
  3329. for (i = 0; i < QSEECOM_KEY_ID_SIZE; i++)
  3330. clear_key_ireq.key_id[i] = QSEECOM_INVALID_KEY_ID;
  3331. memset((void *)clear_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
  3332. ret = __qseecom_set_clear_ce_key(data, wipe_key_req.usage,
  3333. &clear_key_ireq);
  3334. if (ret) {
  3335. pr_err("Failed to wipe key: pipe %d, ce %d: %d\n",
  3336. pipe, ce_hw, ret);
  3337. return -EFAULT;
  3338. }
  3339. return ret;
  3340. }
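/*
 * qseecom_update_key_user_info() - swap the user-supplied hash bound to a
 * stored key (e.g. after a credential change) by sending TZ both the
 * current and the new 32-byte hash for the usage's key id.
 */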
  3341. static int qseecom_update_key_user_info(struct qseecom_dev_handle *data,
  3342. void __user *argp)
  3343. {
  3344. int ret = 0;
  3345. uint32_t flags = 0;
  3346. struct qseecom_update_key_userinfo_req update_key_req;
  3347. struct qseecom_key_userinfo_update_ireq ireq;
  3348. ret = copy_from_user(&update_key_req, argp, sizeof(update_key_req));
  3349. if (ret) {
  3350. pr_err("copy_from_user failed\n");
  3351. return ret;
  3352. }
  3353. if (update_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
  3354. update_key_req.usage >= QSEOS_KM_USAGE_MAX) {
  3355. pr_err("Error:: unsupported usage %d\n", update_key_req.usage);
  3356. return -EFAULT;
  3357. }
  3358. ireq.qsee_command_id = QSEOS_UPDATE_KEY_USERINFO;
  3359. ireq.flags = flags;
  3360. memset(ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
  3361. memset((void *)ireq.current_hash32, 0, QSEECOM_HASH_SIZE);
  3362. memset((void *)ireq.new_hash32, 0, QSEECOM_HASH_SIZE);
  3363. memcpy((void *)ireq.key_id,
  3364. (void *)key_id_array[update_key_req.usage].desc,
  3365. QSEECOM_KEY_ID_SIZE);
  3366. memcpy((void *)ireq.current_hash32,
  3367. (void *)update_key_req.current_hash32, QSEECOM_HASH_SIZE);
  3368. memcpy((void *)ireq.new_hash32,
  3369. (void *)update_key_req.new_hash32, QSEECOM_HASH_SIZE);
  3370. ret = __qseecom_update_current_key_user_info(data, update_key_req.usage,
  3371. &ireq);
  3372. if (ret) {
  3373. pr_err("Failed to update key info: %d\n", ret);
  3374. return ret;
  3375. }
  3376. return ret;
  3377. }
  3378. static int qseecom_is_es_activated(void __user *argp)
  3379. {
  3380. struct qseecom_is_es_activated_req req;
  3381. int ret;
  3382. int resp_buf;
  3383. if (qseecom.qsee_version < QSEE_VERSION_04) {
  3384. pr_err("invalid qsee version");
  3385. return -ENODEV;
  3386. }
  3387. if (argp == NULL) {
  3388. pr_err("arg is null");
  3389. return -EINVAL;
  3390. }
  3391. ret = scm_call(SCM_SVC_ES, SCM_IS_ACTIVATED_ID, NULL, 0,
  3392. (void *) &resp_buf, sizeof(resp_buf));
  3393. if (ret) {
  3394. pr_err("scm_call failed");
  3395. return ret;
  3396. }
  3397. req.is_activated = resp_buf;
  3398. ret = copy_to_user(argp, &req, sizeof(req));
  3399. if (ret) {
  3400. pr_err("copy_to_user failed");
  3401. return ret;
  3402. }
  3403. return 0;
  3404. }
  3405. static int qseecom_save_partition_hash(void __user *argp)
  3406. {
  3407. struct qseecom_save_partition_hash_req req;
  3408. int ret;
  3409. if (qseecom.qsee_version < QSEE_VERSION_04) {
  3410. pr_err("invalid qsee version ");
  3411. return -ENODEV;
  3412. }
  3413. if (argp == NULL) {
  3414. pr_err("arg is null");
  3415. return -EINVAL;
  3416. }
  3417. ret = copy_from_user(&req, argp, sizeof(req));
  3418. if (ret) {
  3419. pr_err("copy_from_user failed");
  3420. return ret;
  3421. }
  3422. ret = scm_call(SCM_SVC_ES, SCM_SAVE_PARTITION_HASH_ID,
  3423. (void *) &req, sizeof(req), NULL, 0);
  3424. if (ret) {
  3425. pr_err("qseecom_scm_call failed");
  3426. return ret;
  3427. }
  3428. return 0;
  3429. }
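/*
 * qseecom_ioctl() - entry point for all /dev/qseecom requests.  Every case
 * follows the same pattern: validate the handle type established by
 * earlier ioctls, bump ioctl_count so an abort can wait out in-flight
 * requests, do the work (under app_access_lock when it reaches into TZ),
 * then drop the count and wake abort_wq.
 *
 * A minimal (illustrative, not normative) userspace sequence:
 *
 *	fd = open("/dev/qseecom", O_RDWR);
 *	ioctl(fd, QSEECOM_IOCTL_SET_MEM_PARAM_REQ, &mem_req);
 *	ioctl(fd, QSEECOM_IOCTL_LOAD_APP_REQ, &load_req);
 *	ioctl(fd, QSEECOM_IOCTL_SEND_CMD_REQ, &cmd_req);
 *	ioctl(fd, QSEECOM_IOCTL_UNLOAD_APP_REQ, NULL);
 *
 * The request structs are defined in the qseecom userspace header.
 */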
  3430. static long qseecom_ioctl(struct file *file, unsigned cmd,
  3431. unsigned long arg)
  3432. {
  3433. int ret = 0;
  3434. struct qseecom_dev_handle *data = file->private_data;
  3435. void __user *argp = (void __user *) arg;
  3436. bool perf_enabled = false;
  3437. if (!data) {
  3438. pr_err("Invalid/uninitialized device handle\n");
  3439. return -EINVAL;
  3440. }
  3441. if (data->abort) {
  3442. pr_err("Aborting qseecom driver\n");
  3443. return -ENODEV;
  3444. }
  3445. switch (cmd) {
  3446. case QSEECOM_IOCTL_REGISTER_LISTENER_REQ: {
  3447. if (data->type != QSEECOM_GENERIC) {
  3448. pr_err("reg lstnr req: invalid handle (%d)\n",
  3449. data->type);
  3450. ret = -EINVAL;
  3451. break;
  3452. }
  3453. pr_debug("ioctl register_listener_req()\n");
  3454. atomic_inc(&data->ioctl_count);
  3455. data->type = QSEECOM_LISTENER_SERVICE;
  3456. ret = qseecom_register_listener(data, argp);
  3457. atomic_dec(&data->ioctl_count);
  3458. wake_up_all(&data->abort_wq);
  3459. if (ret)
  3460. pr_err("failed qseecom_register_listener: %d\n", ret);
  3461. break;
  3462. }
  3463. case QSEECOM_IOCTL_UNREGISTER_LISTENER_REQ: {
  3464. if ((data->listener.id == 0) ||
  3465. (data->type != QSEECOM_LISTENER_SERVICE)) {
  3466. pr_err("unreg lstnr req: invalid handle (%d) lid(%d)\n",
  3467. data->type, data->listener.id);
  3468. ret = -EINVAL;
  3469. break;
  3470. }
  3471. pr_debug("ioctl unregister_listener_req()\n");
  3472. atomic_inc(&data->ioctl_count);
  3473. ret = qseecom_unregister_listener(data);
  3474. atomic_dec(&data->ioctl_count);
  3475. wake_up_all(&data->abort_wq);
  3476. if (ret)
  3477. pr_err("failed qseecom_unregister_listener: %d\n", ret);
  3478. break;
  3479. }
  3480. case QSEECOM_IOCTL_SEND_CMD_REQ: {
  3481. pr_debug("qseecom.current_mode %d\n", qseecom.current_mode);
  3482. if ((data->client.app_id == 0) ||
  3483. (data->type != QSEECOM_CLIENT_APP)) {
  3484. pr_err("send cmd req: invalid handle (%d) app_id(%d)\n",
  3485. data->type, data->client.app_id);
  3486. ret = -EINVAL;
  3487. break;
  3488. }
  3489. /* Only one client allowed here at a time */
  3490. mutex_lock(&app_access_lock);
  3491. if (qseecom.support_bus_scaling) {
  3492. /* register bus bw in case the client doesn't do it */
  3493. if (!data->mode) {
  3494. mutex_lock(&qsee_bw_mutex);
  3495. __qseecom_register_bus_bandwidth_needs(
  3496. data, HIGH);
  3497. mutex_unlock(&qsee_bw_mutex);
  3498. }
  3499. ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
  3500. if (ret) {
  3501. pr_err("Failed to set bw.\n");
  3502. ret = -EINVAL;
  3503. mutex_unlock(&app_access_lock);
  3504. break;
  3505. }
  3506. }
  3507. /*
  3508. * On targets where crypto clock is handled by HLOS,
  3509. * if clk_access_cnt is zero and perf_enabled is false,
3510. * then the crypto clock was not enabled before sending the cmd
3511. * to TZ, so qseecom enables the clock here to avoid a service failure.
  3512. */
  3513. if (!qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
  3514. pr_debug("ce clock is not enabled!\n");
  3515. ret = qseecom_perf_enable(data);
  3516. if (ret) {
  3517. pr_err("Failed to vote for clock with err %d\n",
  3518. ret);
  3519. mutex_unlock(&app_access_lock);
  3520. ret = -EINVAL;
  3521. break;
  3522. }
  3523. perf_enabled = true;
  3524. }
  3525. atomic_inc(&data->ioctl_count);
  3526. ret = qseecom_send_cmd(data, argp);
  3527. if (qseecom.support_bus_scaling)
  3528. __qseecom_add_bw_scale_down_timer(
  3529. QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
  3530. if (perf_enabled) {
  3531. qsee_disable_clock_vote(data, CLK_DFAB);
  3532. qsee_disable_clock_vote(data, CLK_SFPB);
  3533. }
  3534. atomic_dec(&data->ioctl_count);
  3535. wake_up_all(&data->abort_wq);
  3536. mutex_unlock(&app_access_lock);
  3537. if (ret)
  3538. pr_err("failed qseecom_send_cmd: %d\n", ret);
  3539. break;
  3540. }
  3541. case QSEECOM_IOCTL_SEND_MODFD_CMD_REQ: {
  3542. pr_debug("qseecom.current_mode %d\n", qseecom.current_mode);
  3543. if ((data->client.app_id == 0) ||
  3544. (data->type != QSEECOM_CLIENT_APP)) {
  3545. pr_err("send mdfd cmd: invalid handle (%d) appid(%d)\n",
  3546. data->type, data->client.app_id);
  3547. ret = -EINVAL;
  3548. break;
  3549. }
  3550. /* Only one client allowed here at a time */
  3551. mutex_lock(&app_access_lock);
  3552. if (qseecom.support_bus_scaling) {
  3553. if (!data->mode) {
  3554. mutex_lock(&qsee_bw_mutex);
  3555. __qseecom_register_bus_bandwidth_needs(
  3556. data, HIGH);
  3557. mutex_unlock(&qsee_bw_mutex);
  3558. }
  3559. ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
  3560. if (ret) {
  3561. pr_err("Failed to set bw.\n");
  3562. mutex_unlock(&app_access_lock);
  3563. ret = -EINVAL;
  3564. break;
  3565. }
  3566. }
  3567. /*
  3568. * On targets where crypto clock is handled by HLOS,
  3569. * if clk_access_cnt is zero and perf_enabled is false,
3570. * then the crypto clock was not enabled before sending the cmd
3571. * to TZ, so qseecom enables the clock here to avoid a service failure.
  3572. */
  3573. if (!qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
  3574. pr_debug("ce clock is not enabled!\n");
  3575. ret = qseecom_perf_enable(data);
  3576. if (ret) {
  3577. pr_err("Failed to vote for clock with err %d\n",
  3578. ret);
  3579. mutex_unlock(&app_access_lock);
  3580. ret = -EINVAL;
  3581. break;
  3582. }
  3583. perf_enabled = true;
  3584. }
  3585. atomic_inc(&data->ioctl_count);
  3586. ret = qseecom_send_modfd_cmd(data, argp);
  3587. if (qseecom.support_bus_scaling)
  3588. __qseecom_add_bw_scale_down_timer(
  3589. QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
  3590. if (perf_enabled) {
  3591. qsee_disable_clock_vote(data, CLK_DFAB);
  3592. qsee_disable_clock_vote(data, CLK_SFPB);
  3593. }
  3594. atomic_dec(&data->ioctl_count);
  3595. wake_up_all(&data->abort_wq);
  3596. mutex_unlock(&app_access_lock);
  3597. if (ret)
  3598. pr_err("failed qseecom_send_cmd: %d\n", ret);
  3599. break;
  3600. }
  3601. case QSEECOM_IOCTL_RECEIVE_REQ: {
  3602. if ((data->listener.id == 0) ||
  3603. (data->type != QSEECOM_LISTENER_SERVICE)) {
  3604. pr_err("receive req: invalid handle (%d), lid(%d)\n",
  3605. data->type, data->listener.id);
  3606. ret = -EINVAL;
  3607. break;
  3608. }
  3609. atomic_inc(&data->ioctl_count);
  3610. ret = qseecom_receive_req(data);
  3611. atomic_dec(&data->ioctl_count);
  3612. wake_up_all(&data->abort_wq);
  3613. if (ret && (ret != -ERESTARTSYS))
  3614. pr_err("failed qseecom_receive_req: %d\n", ret);
  3615. break;
  3616. }
  3617. case QSEECOM_IOCTL_SEND_RESP_REQ: {
  3618. if ((data->listener.id == 0) ||
  3619. (data->type != QSEECOM_LISTENER_SERVICE)) {
  3620. pr_err("send resp req: invalid handle (%d), lid(%d)\n",
  3621. data->type, data->listener.id);
  3622. ret = -EINVAL;
  3623. break;
  3624. }
  3625. atomic_inc(&data->ioctl_count);
  3626. ret = qseecom_send_resp();
  3627. atomic_dec(&data->ioctl_count);
  3628. wake_up_all(&data->abort_wq);
  3629. if (ret)
  3630. pr_err("failed qseecom_send_resp: %d\n", ret);
  3631. break;
  3632. }
  3633. case QSEECOM_IOCTL_SET_MEM_PARAM_REQ: {
  3634. if ((data->type != QSEECOM_CLIENT_APP) &&
  3635. (data->type != QSEECOM_GENERIC) &&
  3636. (data->type != QSEECOM_SECURE_SERVICE)) {
  3637. pr_err("set mem param req: invalid handle (%d)\n",
  3638. data->type);
  3639. ret = -EINVAL;
  3640. break;
  3641. }
  3642. pr_debug("SET_MEM_PARAM: qseecom addr = 0x%x\n", (u32)data);
  3643. mutex_lock(&app_access_lock);
  3644. atomic_inc(&data->ioctl_count);
  3645. ret = qseecom_set_client_mem_param(data, argp);
  3646. atomic_dec(&data->ioctl_count);
  3647. mutex_unlock(&app_access_lock);
  3648. if (ret)
  3649. pr_err("failed Qqseecom_set_mem_param request: %d\n",
  3650. ret);
  3651. break;
  3652. }
  3653. case QSEECOM_IOCTL_LOAD_APP_REQ: {
  3654. if ((data->type != QSEECOM_GENERIC) &&
  3655. (data->type != QSEECOM_CLIENT_APP)) {
  3656. pr_err("load app req: invalid handle (%d)\n",
  3657. data->type);
  3658. ret = -EINVAL;
  3659. break;
  3660. }
  3661. data->type = QSEECOM_CLIENT_APP;
  3662. pr_debug("LOAD_APP_REQ: qseecom_addr = 0x%x\n", (u32)data);
  3663. mutex_lock(&app_access_lock);
  3664. atomic_inc(&data->ioctl_count);
  3665. if (qseecom.qsee_version > QSEEE_VERSION_00) {
  3666. if (qseecom.commonlib_loaded == false) {
  3667. ret = qseecom_load_commonlib_image(data);
  3668. if (ret == 0)
  3669. qseecom.commonlib_loaded = true;
  3670. }
  3671. }
  3672. if (ret == 0)
  3673. ret = qseecom_load_app(data, argp);
  3674. atomic_dec(&data->ioctl_count);
  3675. mutex_unlock(&app_access_lock);
  3676. if (ret)
  3677. pr_err("failed load_app request: %d\n", ret);
  3678. break;
  3679. }
  3680. case QSEECOM_IOCTL_UNLOAD_APP_REQ: {
  3681. if ((data->client.app_id == 0) ||
  3682. (data->type != QSEECOM_CLIENT_APP)) {
  3683. pr_err("unload app req:invalid handle(%d) app_id(%d)\n",
  3684. data->type, data->client.app_id);
  3685. ret = -EINVAL;
  3686. break;
  3687. }
  3688. pr_debug("UNLOAD_APP: qseecom_addr = 0x%x\n", (u32)data);
  3689. mutex_lock(&app_access_lock);
  3690. atomic_inc(&data->ioctl_count);
  3691. ret = qseecom_unload_app(data, false);
  3692. atomic_dec(&data->ioctl_count);
  3693. mutex_unlock(&app_access_lock);
  3694. if (ret)
  3695. pr_err("failed unload_app request: %d\n", ret);
  3696. break;
  3697. }
  3698. case QSEECOM_IOCTL_GET_QSEOS_VERSION_REQ: {
  3699. atomic_inc(&data->ioctl_count);
  3700. ret = qseecom_get_qseos_version(data, argp);
  3701. if (ret)
  3702. pr_err("qseecom_get_qseos_version: %d\n", ret);
  3703. atomic_dec(&data->ioctl_count);
  3704. break;
  3705. }
  3706. case QSEECOM_IOCTL_PERF_ENABLE_REQ:{
  3707. if ((data->type != QSEECOM_GENERIC) &&
  3708. (data->type != QSEECOM_CLIENT_APP)) {
  3709. pr_err("perf enable req: invalid handle (%d)\n",
  3710. data->type);
  3711. ret = -EINVAL;
  3712. break;
  3713. }
  3714. if ((data->type == QSEECOM_CLIENT_APP) &&
  3715. (data->client.app_id == 0)) {
  3716. pr_err("perf enable req:invalid handle(%d) appid(%d)\n",
  3717. data->type, data->client.app_id);
  3718. ret = -EINVAL;
  3719. break;
  3720. }
  3721. pr_debug("%s : Perf Enable ioctl (Process:%s PID:%d)\n", __func__, \
  3722. current->comm, current->pid);
  3723. atomic_inc(&data->ioctl_count);
  3724. if (qseecom.support_bus_scaling) {
  3725. mutex_lock(&qsee_bw_mutex);
  3726. __qseecom_register_bus_bandwidth_needs(data, HIGH);
  3727. mutex_unlock(&qsee_bw_mutex);
  3728. } else {
  3729. ret = qseecom_perf_enable(data);
  3730. if (ret)
  3731. pr_err("Fail to vote for clocks %d\n", ret);
  3732. }
  3733. atomic_dec(&data->ioctl_count);
  3734. break;
  3735. }
  3736. case QSEECOM_IOCTL_PERF_DISABLE_REQ:{
  3737. if ((data->type != QSEECOM_SECURE_SERVICE) &&
  3738. (data->type != QSEECOM_CLIENT_APP)) {
  3739. pr_err("perf disable req: invalid handle (%d)\n",
  3740. data->type);
  3741. ret = -EINVAL;
  3742. break;
  3743. }
  3744. if ((data->type == QSEECOM_CLIENT_APP) &&
  3745. (data->client.app_id == 0)) {
  3746. pr_err("perf disable: invalid handle (%d)app_id(%d)\n",
  3747. data->type, data->client.app_id);
  3748. ret = -EINVAL;
  3749. break;
  3750. }
  3751. pr_debug("%s : Perf Disable ioctl (Process:%s PID:%d)\n", __func__, \
  3752. current->comm, current->pid);
  3753. atomic_inc(&data->ioctl_count);
  3754. if (!qseecom.support_bus_scaling) {
  3755. qsee_disable_clock_vote(data, CLK_DFAB);
  3756. qsee_disable_clock_vote(data, CLK_SFPB);
  3757. } else {
  3758. mutex_lock(&qsee_bw_mutex);
  3759. qseecom_unregister_bus_bandwidth_needs(data);
  3760. mutex_unlock(&qsee_bw_mutex);
  3761. }
  3762. atomic_dec(&data->ioctl_count);
  3763. break;
  3764. }
  3765. case QSEECOM_IOCTL_SET_BUS_SCALING_REQ: {
  3766. if ((data->client.app_id == 0) ||
  3767. (data->type != QSEECOM_CLIENT_APP)) {
  3768. pr_err("set bus scale: invalid handle (%d) appid(%d)\n",
  3769. data->type, data->client.app_id);
  3770. ret = -EINVAL;
  3771. break;
  3772. }
  3773. atomic_inc(&data->ioctl_count);
  3774. ret = qseecom_scale_bus_bandwidth(data, argp);
  3775. atomic_dec(&data->ioctl_count);
  3776. break;
  3777. }
  3778. case QSEECOM_IOCTL_LOAD_EXTERNAL_ELF_REQ: {
  3779. if (data->type != QSEECOM_GENERIC) {
  3780. pr_err("load ext elf req: invalid client handle (%d)\n",
  3781. data->type);
  3782. ret = -EINVAL;
  3783. break;
  3784. }
  3785. data->type = QSEECOM_UNAVAILABLE_CLIENT_APP;
  3786. data->released = true;
  3787. mutex_lock(&app_access_lock);
  3788. atomic_inc(&data->ioctl_count);
  3789. ret = qseecom_load_external_elf(data, argp);
  3790. atomic_dec(&data->ioctl_count);
  3791. mutex_unlock(&app_access_lock);
  3792. if (ret)
  3793. pr_err("failed load_external_elf request: %d\n", ret);
  3794. break;
  3795. }
  3796. case QSEECOM_IOCTL_UNLOAD_EXTERNAL_ELF_REQ: {
  3797. if (data->type != QSEECOM_UNAVAILABLE_CLIENT_APP) {
  3798. pr_err("unload ext elf req: invalid handle (%d)\n",
  3799. data->type);
  3800. ret = -EINVAL;
  3801. break;
  3802. }
  3803. data->released = true;
  3804. mutex_lock(&app_access_lock);
  3805. atomic_inc(&data->ioctl_count);
  3806. ret = qseecom_unload_external_elf(data);
  3807. atomic_dec(&data->ioctl_count);
  3808. mutex_unlock(&app_access_lock);
  3809. if (ret)
  3810. pr_err("failed unload_app request: %d\n", ret);
  3811. break;
  3812. }
  3813. case QSEECOM_IOCTL_APP_LOADED_QUERY_REQ: {
  3814. if ((data->type != QSEECOM_GENERIC) &&
  3815. (data->type != QSEECOM_CLIENT_APP)) {
  3816. pr_err("app loaded query req: invalid handle (%d)\n",
  3817. data->type);
  3818. ret = -EINVAL;
  3819. break;
  3820. }
  3821. data->type = QSEECOM_CLIENT_APP;
  3822. pr_debug("APP_LOAD_QUERY: qseecom_addr = 0x%x\n", (u32)data);
  3823. mutex_lock(&app_access_lock);
  3824. atomic_inc(&data->ioctl_count);
  3825. pr_debug("APP_LOAD_QUERY: qseecom_addr = 0x%x\n", (u32)data);
  3826. ret = qseecom_query_app_loaded(data, argp);
  3827. atomic_dec(&data->ioctl_count);
  3828. mutex_unlock(&app_access_lock);
  3829. break;
  3830. }
  3831. case QSEECOM_IOCTL_SEND_CMD_SERVICE_REQ: {
  3832. if (data->type != QSEECOM_GENERIC) {
  3833. pr_err("send cmd svc req: invalid handle (%d)\n",
  3834. data->type);
  3835. ret = -EINVAL;
  3836. break;
  3837. }
  3838. data->type = QSEECOM_SECURE_SERVICE;
  3839. if (qseecom.qsee_version < QSEE_VERSION_03) {
  3840. pr_err("SEND_CMD_SERVICE_REQ: Invalid qsee ver %u\n",
  3841. qseecom.qsee_version);
  3842. return -EINVAL;
  3843. }
  3844. mutex_lock(&app_access_lock);
  3845. atomic_inc(&data->ioctl_count);
  3846. ret = qseecom_send_service_cmd(data, argp);
  3847. atomic_dec(&data->ioctl_count);
  3848. mutex_unlock(&app_access_lock);
  3849. break;
  3850. }
  3851. case QSEECOM_IOCTL_CREATE_KEY_REQ: {
  3852. if (!(qseecom.support_pfe || qseecom.support_fde))
  3853. pr_err("Features requiring key init not supported\n");
  3854. if (data->type != QSEECOM_GENERIC) {
  3855. pr_err("create key req: invalid handle (%d)\n",
  3856. data->type);
  3857. ret = -EINVAL;
  3858. break;
  3859. }
  3860. if (qseecom.qsee_version < QSEE_VERSION_05) {
  3861. pr_err("Create Key feature unsupported: qsee ver %u\n",
  3862. qseecom.qsee_version);
  3863. return -EINVAL;
  3864. }
  3865. data->released = true;
  3866. atomic_inc(&data->ioctl_count);
  3867. ret = qseecom_create_key(data, argp);
  3868. if (ret)
  3869. pr_err("failed to create encryption key: %d\n", ret);
  3870. atomic_dec(&data->ioctl_count);
  3871. break;
  3872. }
  3873. case QSEECOM_IOCTL_WIPE_KEY_REQ: {
  3874. if (!(qseecom.support_pfe || qseecom.support_fde))
  3875. pr_err("Features requiring key init not supported\n");
  3876. if (data->type != QSEECOM_GENERIC) {
  3877. pr_err("wipe key req: invalid handle (%d)\n",
  3878. data->type);
  3879. ret = -EINVAL;
  3880. break;
  3881. }
  3882. if (qseecom.qsee_version < QSEE_VERSION_05) {
  3883. pr_err("Wipe Key feature unsupported in qsee ver %u\n",
  3884. qseecom.qsee_version);
  3885. return -EINVAL;
  3886. }
  3887. data->released = true;
  3888. atomic_inc(&data->ioctl_count);
  3889. ret = qseecom_wipe_key(data, argp);
  3890. if (ret)
  3891. pr_err("failed to wipe encryption key: %d\n", ret);
  3892. atomic_dec(&data->ioctl_count);
  3893. break;
  3894. }
  3895. case QSEECOM_IOCTL_UPDATE_KEY_USER_INFO_REQ: {
  3896. if (!(qseecom.support_pfe || qseecom.support_fde))
  3897. pr_err("Features requiring key init not supported\n");
  3898. if (data->type != QSEECOM_GENERIC) {
  3899. pr_err("update key req: invalid handle (%d)\n",
  3900. data->type);
  3901. ret = -EINVAL;
  3902. break;
  3903. }
  3904. if (qseecom.qsee_version < QSEE_VERSION_05) {
  3905. pr_err("Update Key feature unsupported in qsee ver %u\n",
  3906. qseecom.qsee_version);
  3907. return -EINVAL;
  3908. }
  3909. data->released = true;
  3910. atomic_inc(&data->ioctl_count);
  3911. ret = qseecom_update_key_user_info(data, argp);
  3912. if (ret)
  3913. pr_err("failed to update key user info: %d\n", ret);
  3914. atomic_dec(&data->ioctl_count);
  3915. break;
  3916. }
  3917. case QSEECOM_IOCTL_SAVE_PARTITION_HASH_REQ: {
  3918. if (data->type != QSEECOM_GENERIC) {
  3919. pr_err("save part hash req: invalid handle (%d)\n",
  3920. data->type);
  3921. ret = -EINVAL;
  3922. break;
  3923. }
  3924. data->released = true;
  3925. mutex_lock(&app_access_lock);
  3926. atomic_inc(&data->ioctl_count);
  3927. ret = qseecom_save_partition_hash(argp);
  3928. atomic_dec(&data->ioctl_count);
  3929. mutex_unlock(&app_access_lock);
  3930. break;
  3931. }
  3932. case QSEECOM_IOCTL_IS_ES_ACTIVATED_REQ: {
  3933. if (data->type != QSEECOM_GENERIC) {
  3934. pr_err("ES activated req: invalid handle (%d)\n",
  3935. data->type);
  3936. ret = -EINVAL;
  3937. break;
  3938. }
  3939. data->released = true;
  3940. mutex_lock(&app_access_lock);
  3941. atomic_inc(&data->ioctl_count);
  3942. ret = qseecom_is_es_activated(argp);
  3943. atomic_dec(&data->ioctl_count);
  3944. mutex_unlock(&app_access_lock);
  3945. break;
  3946. }
  3947. case QSEECOM_IOCTL_SEND_MODFD_RESP: {
  3948. if ((data->listener.id == 0) ||
  3949. (data->type != QSEECOM_LISTENER_SERVICE)) {
  3950. pr_err("receive req: invalid handle (%d), lid(%d)\n",
  3951. data->type, data->listener.id);
  3952. ret = -EINVAL;
  3953. break;
  3954. }
  3955. /* Only one client allowed here at a time */
  3956. atomic_inc(&data->ioctl_count);
  3957. ret = qseecom_send_modfd_resp(data, argp);
  3958. atomic_dec(&data->ioctl_count);
  3959. wake_up_all(&data->abort_wq);
  3960. if (ret)
  3961. pr_err("failed qseecom_send_mod_resp: %d\n", ret);
  3962. break;
  3963. }
  3964. default:
  3965. pr_err("Invalid IOCTL: %d\n", cmd);
  3966. return -EINVAL;
  3967. }
  3968. return ret;
  3969. }
  3970. static int qseecom_open(struct inode *inode, struct file *file)
  3971. {
  3972. int ret = 0;
  3973. struct qseecom_dev_handle *data;
  3974. data = kzalloc(sizeof(*data), GFP_KERNEL);
  3975. if (!data) {
  3976. pr_err("kmalloc failed\n");
  3977. return -ENOMEM;
  3978. }
  3979. file->private_data = data;
  3980. data->abort = 0;
  3981. data->type = QSEECOM_GENERIC;
  3982. data->released = false;
  3983. memset((void *)data->client.app_name, 0, MAX_APP_NAME_SIZE);
  3984. data->mode = INACTIVE;
  3985. init_waitqueue_head(&data->abort_wq);
  3986. atomic_set(&data->ioctl_count, 0);
  3987. return ret;
  3988. }
  3989. static int qseecom_release(struct inode *inode, struct file *file)
  3990. {
  3991. struct qseecom_dev_handle *data = file->private_data;
  3992. int ret = 0;
  3993. if (data->released == false) {
  3994. pr_debug("data: released = false, type = %d, data = 0x%x\n",
  3995. data->type, (u32)data);
  3996. switch (data->type) {
  3997. case QSEECOM_LISTENER_SERVICE:
  3998. ret = qseecom_unregister_listener(data);
  3999. break;
  4000. case QSEECOM_CLIENT_APP:
  4001. mutex_lock(&app_access_lock);
  4002. ret = qseecom_unload_app(data, true);
  4003. mutex_unlock(&app_access_lock);
  4004. break;
  4005. case QSEECOM_SECURE_SERVICE:
  4006. case QSEECOM_GENERIC:
  4007. ret = qseecom_unmap_ion_allocated_memory(data);
  4008. if (ret)
  4009. pr_err("Ion Unmap failed\n");
  4010. break;
  4011. case QSEECOM_UNAVAILABLE_CLIENT_APP:
  4012. break;
  4013. default:
  4014. pr_err("Unsupported clnt_handle_type %d",
  4015. data->type);
  4016. break;
  4017. }
  4018. }
  4019. if (qseecom.support_bus_scaling) {
  4020. mutex_lock(&qsee_bw_mutex);
  4021. if (data->mode != INACTIVE) {
  4022. qseecom_unregister_bus_bandwidth_needs(data);
  4023. if (qseecom.cumulative_mode == INACTIVE) {
  4024. ret = __qseecom_set_msm_bus_request(INACTIVE);
  4025. if (ret)
  4026. pr_err("Fail to scale down bus\n");
  4027. }
  4028. }
  4029. mutex_unlock(&qsee_bw_mutex);
  4030. } else {
  4031. if (data->fast_load_enabled == true)
  4032. qsee_disable_clock_vote(data, CLK_SFPB);
  4033. if (data->perf_enabled == true)
  4034. qsee_disable_clock_vote(data, CLK_DFAB);
  4035. }
  4036. kfree(data);
  4037. return ret;
  4038. }
  4039. static const struct file_operations qseecom_fops = {
  4040. .owner = THIS_MODULE,
  4041. .unlocked_ioctl = qseecom_ioctl,
  4042. .open = qseecom_open,
  4043. .release = qseecom_release
  4044. };
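/*
 * __qseecom_init_clk() - look up the CE clocks for one HW instance.  The
 * clock names differ only by the ce_drv_ prefix between CLK_QSEE and
 * CLK_CE_DRV.  The core src clk is optional: if absent it is left NULL
 * and callers skip rate control, but the core, iface and bus clks are
 * mandatory.
 */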
  4045. static int __qseecom_init_clk(enum qseecom_ce_hw_instance ce)
  4046. {
  4047. int rc = 0;
  4048. struct device *pdev;
  4049. struct qseecom_clk *qclk;
  4050. char *core_clk_src = NULL;
  4051. char *core_clk = NULL;
  4052. char *iface_clk = NULL;
  4053. char *bus_clk = NULL;
  4054. switch (ce) {
  4055. case CLK_QSEE: {
  4056. core_clk_src = "core_clk_src";
  4057. core_clk = "core_clk";
  4058. iface_clk = "iface_clk";
  4059. bus_clk = "bus_clk";
  4060. qclk = &qseecom.qsee;
  4061. qclk->instance = CLK_QSEE;
  4062. break;
4063. }
  4064. case CLK_CE_DRV: {
  4065. core_clk_src = "ce_drv_core_clk_src";
  4066. core_clk = "ce_drv_core_clk";
  4067. iface_clk = "ce_drv_iface_clk";
  4068. bus_clk = "ce_drv_bus_clk";
  4069. qclk = &qseecom.ce_drv;
  4070. qclk->instance = CLK_CE_DRV;
  4071. break;
4072. }
  4073. default:
  4074. pr_err("Invalid ce hw instance: %d!\n", ce);
  4075. return -EIO;
  4076. }
  4077. pdev = qseecom.pdev;
  4078. /* Get CE3 src core clk. */
  4079. qclk->ce_core_src_clk = clk_get(pdev, core_clk_src);
  4080. if (!IS_ERR(qclk->ce_core_src_clk)) {
  4081. /* Set the core src clk @100Mhz */
  4082. rc = clk_set_rate(qclk->ce_core_src_clk, QSEE_CE_CLK_100MHZ);
  4083. if (rc) {
  4084. clk_put(qclk->ce_core_src_clk);
  4085. pr_err("Unable to set the core src clk @100Mhz.\n");
  4086. return -EIO;
  4087. }
  4088. } else {
  4089. pr_warn("Unable to get CE core src clk, set to NULL\n");
  4090. qclk->ce_core_src_clk = NULL;
  4091. }
  4092. /* Get CE core clk */
  4093. qclk->ce_core_clk = clk_get(pdev, core_clk);
  4094. if (IS_ERR(qclk->ce_core_clk)) {
  4095. rc = PTR_ERR(qclk->ce_core_clk);
  4096. pr_err("Unable to get CE core clk\n");
  4097. if (qclk->ce_core_src_clk != NULL)
  4098. clk_put(qclk->ce_core_src_clk);
  4099. return -EIO;
  4100. }
  4101. /* Get CE Interface clk */
  4102. qclk->ce_clk = clk_get(pdev, iface_clk);
  4103. if (IS_ERR(qclk->ce_clk)) {
  4104. rc = PTR_ERR(qclk->ce_clk);
  4105. pr_err("Unable to get CE interface clk\n");
  4106. if (qclk->ce_core_src_clk != NULL)
  4107. clk_put(qclk->ce_core_src_clk);
  4108. clk_put(qclk->ce_core_clk);
  4109. return -EIO;
  4110. }
  4111. /* Get CE AXI clk */
  4112. qclk->ce_bus_clk = clk_get(pdev, bus_clk);
  4113. if (IS_ERR(qclk->ce_bus_clk)) {
  4114. rc = PTR_ERR(qclk->ce_bus_clk);
  4115. pr_err("Unable to get CE BUS interface clk\n");
  4116. if (qclk->ce_core_src_clk != NULL)
  4117. clk_put(qclk->ce_core_src_clk);
  4118. clk_put(qclk->ce_core_clk);
  4119. clk_put(qclk->ce_clk);
  4120. return -EIO;
  4121. }
  4122. return rc;
  4123. }
  4124. static void __qseecom_deinit_clk(enum qseecom_ce_hw_instance ce)
  4125. {
  4126. struct qseecom_clk *qclk;
  4127. if (ce == CLK_QSEE)
  4128. qclk = &qseecom.qsee;
  4129. else
  4130. qclk = &qseecom.ce_drv;
  4131. if (qclk->ce_clk != NULL) {
  4132. clk_put(qclk->ce_clk);
  4133. qclk->ce_clk = NULL;
  4134. }
  4135. if (qclk->ce_core_clk != NULL) {
  4136. clk_put(qclk->ce_core_clk);
  4137. qclk->ce_clk = NULL;
  4138. }
  4139. if (qclk->ce_bus_clk != NULL) {
  4140. clk_put(qclk->ce_bus_clk);
  4141. qclk->ce_clk = NULL;
  4142. }
  4143. if (qclk->ce_core_src_clk != NULL) {
  4144. clk_put(qclk->ce_core_src_clk);
  4145. qclk->ce_core_src_clk = NULL;
  4146. }
  4147. }
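/*
 * qseecom_probe() - bring-up order: char device and class, listener/app/
 * kclient lists, QSEOS version handshake, ION client, device-tree parsing
 * (bus scaling, FDE/PFE pipes, CE instances), clock init, secure app
 * region notification to TZ, then bus-scale client registration.
 */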
  4148. static int __devinit qseecom_probe(struct platform_device *pdev)
  4149. {
  4150. int rc;
  4151. int ret = 0;
  4152. struct device *class_dev;
  4153. char qsee_not_legacy = 0;
  4154. struct msm_bus_scale_pdata *qseecom_platform_support = NULL;
  4155. uint32_t system_call_id = QSEOS_CHECK_VERSION_CMD;
  4156. qseecom.qsee_bw_count = 0;
  4157. qseecom.qsee_perf_client = 0;
  4158. qseecom.qsee_sfpb_bw_count = 0;
  4159. qseecom.qsee.ce_core_clk = NULL;
  4160. qseecom.qsee.ce_clk = NULL;
  4161. qseecom.qsee.ce_core_src_clk = NULL;
  4162. qseecom.qsee.ce_bus_clk = NULL;
  4163. qseecom.cumulative_mode = 0;
  4164. qseecom.current_mode = INACTIVE;
  4165. qseecom.support_bus_scaling = false;
  4166. qseecom.support_fde = false;
  4167. qseecom.support_pfe = false;
  4168. qseecom.ce_drv.ce_core_clk = NULL;
  4169. qseecom.ce_drv.ce_clk = NULL;
  4170. qseecom.ce_drv.ce_core_src_clk = NULL;
  4171. qseecom.ce_drv.ce_bus_clk = NULL;
  4172. rc = alloc_chrdev_region(&qseecom_device_no, 0, 1, QSEECOM_DEV);
  4173. if (rc < 0) {
  4174. pr_err("alloc_chrdev_region failed %d\n", rc);
  4175. return rc;
  4176. }
  4177. driver_class = class_create(THIS_MODULE, QSEECOM_DEV);
  4178. if (IS_ERR(driver_class)) {
  4179. rc = -ENOMEM;
  4180. pr_err("class_create failed %d\n", rc);
  4181. goto exit_unreg_chrdev_region;
  4182. }
  4183. class_dev = device_create(driver_class, NULL, qseecom_device_no, NULL,
  4184. QSEECOM_DEV);
4185. if (IS_ERR(class_dev)) { /* device_create() returns ERR_PTR, not NULL */
  4186. pr_err("class_device_create failed %d\n", rc);
  4187. rc = -ENOMEM;
  4188. goto exit_destroy_class;
  4189. }
  4190. cdev_init(&qseecom.cdev, &qseecom_fops);
  4191. qseecom.cdev.owner = THIS_MODULE;
  4192. rc = cdev_add(&qseecom.cdev, MKDEV(MAJOR(qseecom_device_no), 0), 1);
  4193. if (rc < 0) {
  4194. pr_err("cdev_add failed %d\n", rc);
  4195. goto exit_destroy_device;
  4196. }
  4197. INIT_LIST_HEAD(&qseecom.registered_listener_list_head);
  4198. spin_lock_init(&qseecom.registered_listener_list_lock);
  4199. INIT_LIST_HEAD(&qseecom.registered_app_list_head);
  4200. spin_lock_init(&qseecom.registered_app_list_lock);
  4201. INIT_LIST_HEAD(&qseecom.registered_kclient_list_head);
  4202. spin_lock_init(&qseecom.registered_kclient_list_lock);
  4203. init_waitqueue_head(&qseecom.send_resp_wq);
  4204. qseecom.send_resp_flag = 0;
  4205. rc = scm_call(6, 1, &system_call_id, sizeof(system_call_id),
  4206. &qsee_not_legacy, sizeof(qsee_not_legacy));
  4207. if (rc) {
  4208. pr_err("Failed to retrieve QSEOS version information %d\n", rc);
  4209. goto exit_del_cdev;
  4210. }
  4211. if (qsee_not_legacy) {
  4212. uint32_t feature = 10;
  4213. qseecom.qsee_version = QSEEE_VERSION_00;
  4214. rc = scm_call(6, 3, &feature, sizeof(feature),
  4215. &qseecom.qsee_version, sizeof(qseecom.qsee_version));
  4216. if (rc) {
  4217. pr_err("Failed to get QSEE version info %d\n", rc);
  4218. goto exit_del_cdev;
  4219. }
  4220. qseecom.qseos_version = QSEOS_VERSION_14;
  4221. } else {
  4222. pr_err("QSEE legacy version is not supported:");
  4223. pr_err("Support for TZ1.3 and earlier is deprecated\n");
  4224. rc = -EINVAL;
  4225. goto exit_del_cdev;
  4226. }
  4227. qseecom.commonlib_loaded = false;
  4228. qseecom.pdev = class_dev;
  4229. /* Create ION msm client */
  4230. qseecom.ion_clnt = msm_ion_client_create(-1, "qseecom-kernel");
  4231. if (qseecom.ion_clnt == NULL) {
  4232. pr_err("Ion client cannot be created\n");
  4233. rc = -ENOMEM;
  4234. goto exit_del_cdev;
  4235. }
  4236. /* register client for bus scaling */
  4237. if (pdev->dev.of_node) {
  4238. qseecom.pdev->of_node = pdev->dev.of_node;
  4239. qseecom.support_bus_scaling =
  4240. of_property_read_bool((&pdev->dev)->of_node,
  4241. "qcom,support-bus-scaling");
  4242. pr_warn("support_bus_scaling=0x%x",
  4243. qseecom.support_bus_scaling);
  4244. qseecom.support_fde =
  4245. of_property_read_bool((&pdev->dev)->of_node,
  4246. "qcom,support-fde");
  4247. if (qseecom.support_fde) {
  4248. if (of_property_read_u32((&pdev->dev)->of_node,
  4249. "qcom,disk-encrypt-pipe-pair",
  4250. &qseecom.ce_info.disk_encrypt_pipe)) {
  4251. pr_err("Fail to get FDE pipe information.\n");
  4252. rc = -EINVAL;
  4253. goto exit_destroy_ion_client;
  4254. } else {
  4255. pr_warn("disk-encrypt-pipe-pair=0x%x",
  4256. qseecom.ce_info.disk_encrypt_pipe);
  4257. }
  4258. } else {
  4259. pr_warn("Device does not support FDE");
  4260. qseecom.ce_info.disk_encrypt_pipe = 0xff;
  4261. }
  4262. qseecom.support_pfe =
  4263. of_property_read_bool((&pdev->dev)->of_node,
  4264. "qcom,support-pfe");
  4265. if (qseecom.support_pfe) {
  4266. if (of_property_read_u32((&pdev->dev)->of_node,
  4267. "qcom,file-encrypt-pipe-pair",
  4268. &qseecom.ce_info.disk_encrypt_pipe)) {
  4269. pr_err("Fail to get PFE pipe information.\n");
  4270. rc = -EINVAL;
  4271. goto exit_destroy_ion_client;
  4272. } else {
  4273. pr_warn("file-encrypt-pipe-pair=0x%x",
  4274. qseecom.ce_info.file_encrypt_pipe);
  4275. }
  4276. } else {
  4277. pr_warn("Device does not support PFE");
  4278. qseecom.ce_info.file_encrypt_pipe = 0xff;
  4279. }
		if (qseecom.support_pfe || qseecom.support_fde) {
			if (of_property_read_u32((&pdev->dev)->of_node,
					"qcom,hlos-ce-hw-instance",
					&qseecom.ce_info.hlos_ce_hw_instance)) {
				pr_err("Fail: get hlos ce hw instance info\n");
				rc = -EINVAL;
				goto exit_destroy_ion_client;
			} else {
				pr_warn("hlos-ce-hw-instance=0x%x",
					qseecom.ce_info.hlos_ce_hw_instance);
			}
		} else {
			pr_warn("Device does not support PFE/FDE");
			qseecom.ce_info.hlos_ce_hw_instance = 0xff;
		}
		if (of_property_read_u32((&pdev->dev)->of_node,
				"qcom,qsee-ce-hw-instance",
				&qseecom.ce_info.qsee_ce_hw_instance)) {
			pr_err("Fail to get qsee ce hw instance information.\n");
			rc = -EINVAL;
			goto exit_destroy_ion_client;
		} else {
			pr_warn("qsee-ce-hw-instance=0x%x",
				qseecom.ce_info.qsee_ce_hw_instance);
		}
		qseecom.appsbl_qseecom_support =
			of_property_read_bool((&pdev->dev)->of_node,
					"qcom,appsbl-qseecom-support");
		pr_info("qseecom.appsbl_qseecom_support = 0x%x",
				qseecom.appsbl_qseecom_support);
		qseecom.qsee.instance = qseecom.ce_info.qsee_ce_hw_instance;
		qseecom.ce_drv.instance = qseecom.ce_info.hlos_ce_hw_instance;
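
		/*
		 * Bring up the QSEE clocks; when QSEE and the HLOS CE
		 * driver share the same CE hardware instance, reuse the
		 * QSEE clock handles instead of setting up a second set.
		 */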
		ret = __qseecom_init_clk(CLK_QSEE);
		if (ret) {
			rc = ret;
			goto exit_destroy_ion_client;
		}

		if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
				(qseecom.support_pfe || qseecom.support_fde)) {
			ret = __qseecom_init_clk(CLK_CE_DRV);
			if (ret) {
				rc = ret;
				__qseecom_deinit_clk(CLK_QSEE);
				goto exit_destroy_ion_client;
			}
		} else {
			struct qseecom_clk *qclk;

			qclk = &qseecom.qsee;
			qseecom.ce_drv.ce_core_clk = qclk->ce_core_clk;
			qseecom.ce_drv.ce_clk = qclk->ce_clk;
			qseecom.ce_drv.ce_core_src_clk = qclk->ce_core_src_clk;
			qseecom.ce_drv.ce_bus_clk = qclk->ce_bus_clk;
		}
		qseecom_platform_support = (struct msm_bus_scale_pdata *)
						msm_bus_cl_get_pdata(pdev);
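
		/*
		 * Notify TZ of the memory region reserved for secure
		 * applications, unless the apps bootloader already
		 * provides qseecom support.
		 */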
		if ((qseecom.qsee_version >= QSEE_VERSION_02) &&
				!qseecom.appsbl_qseecom_support) {
			struct resource *resource = NULL;
			struct qsee_apps_region_info_ireq req;
			struct qseecom_command_scm_resp resp;

			resource = platform_get_resource_byname(pdev,
					IORESOURCE_MEM, "secapp-region");
			if (resource) {
				req.qsee_cmd_id = QSEOS_APP_REGION_NOTIFICATION;
				req.addr = resource->start;
				req.size = resource_size(resource);
				pr_warn("secure app region addr=0x%x size=0x%x",
					req.addr, req.size);
#ifdef CONFIG_SEC_DEBUG
				sec_debug_secure_app_addr_size(req.addr,
					req.size);
#endif
			} else {
				pr_err("Fail to get secure app region info\n");
				rc = -EINVAL;
				goto exit_destroy_ion_client;
			}
			rc = scm_call(SCM_SVC_TZSCHEDULER, 1, &req, sizeof(req),
					&resp, sizeof(resp));
			if (rc || (resp.result != QSEOS_RESULT_SUCCESS)) {
				pr_err("send secapp reg fail %d resp.res %d\n",
					rc, resp.result);
				rc = -EINVAL;
				goto exit_destroy_ion_client;
			}
		}
	} else {
		qseecom_platform_support = (struct msm_bus_scale_pdata *)
						pdev->dev.platform_data;
	}
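
	/*
	 * The timer and work item below scale the bus vote back down
	 * after a period of inactivity.
	 */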
	if (qseecom.support_bus_scaling) {
		init_timer(&(qseecom.bw_scale_down_timer));
		INIT_WORK(&qseecom.bw_inactive_req_ws,
				qseecom_bw_inactive_req_work);
		qseecom.bw_scale_down_timer.function =
				qseecom_scale_bus_bandwidth_timer_callback;
	}
	qseecom.timer_running = false;
	qseecom.qsee_perf_client = msm_bus_scale_register_client(
					qseecom_platform_support);
	if (!qseecom.qsee_perf_client)
		pr_err("Unable to register bus client\n");
	return 0;
exit_destroy_ion_client:
	ion_client_destroy(qseecom.ion_clnt);
exit_del_cdev:
	cdev_del(&qseecom.cdev);
exit_destroy_device:
	device_destroy(driver_class, qseecom_device_no);
exit_destroy_class:
	class_destroy(driver_class);
exit_unreg_chrdev_region:
	unregister_chrdev_region(qseecom_device_no, 1);
	return rc;
}
static int qseecom_remove(struct platform_device *pdev)
{
	struct qseecom_registered_kclient_list *kclient = NULL;
	struct qseecom_registered_kclient_list *kclient_tmp = NULL;
	unsigned long flags = 0;
	int ret = 0;
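
	/* Unload and free every app registered through the kernel client API */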
	spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
	list_for_each_entry_safe(kclient, kclient_tmp,
			&qseecom.registered_kclient_list_head, list) {
		/* Break the loop if client handle is NULL */
		if (!kclient->handle)
			goto exit_free_kclient;

		if (list_empty(&kclient->list))
			goto exit_free_kc_handle;

		list_del(&kclient->list);
		ret = qseecom_unload_app(kclient->handle->dev, false);
		if (!ret) {
			kzfree(kclient->handle->dev);
			kzfree(kclient->handle);
			kzfree(kclient);
		}
	}
	/* Skip the error labels when the whole list was walked cleanly */
	goto exit_irqrestore;

exit_free_kc_handle:
	kzfree(kclient->handle);
exit_free_kclient:
	kzfree(kclient);
exit_irqrestore:
	spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
	if (qseecom.qseos_version > QSEEE_VERSION_00)
		qseecom_unload_commonlib_image();

	if (qseecom.qsee_perf_client)
		msm_bus_scale_client_update_request(qseecom.qsee_perf_client,
									0);
	if (pdev->dev.platform_data != NULL)
		msm_bus_scale_unregister_client(qseecom.qsee_perf_client);

	if (qseecom.support_bus_scaling) {
		cancel_work_sync(&qseecom.bw_inactive_req_ws);
		del_timer_sync(&qseecom.bw_scale_down_timer);
	}
	/* De-initialize the clocks set up during probe */
	if (pdev->dev.of_node) {
		__qseecom_deinit_clk(CLK_QSEE);
		if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
				(qseecom.support_pfe || qseecom.support_fde))
			__qseecom_deinit_clk(CLK_CE_DRV);
	}
	ion_client_destroy(qseecom.ion_clnt);
	cdev_del(&qseecom.cdev);
	device_destroy(driver_class, qseecom_device_no);
	class_destroy(driver_class);
	unregister_chrdev_region(qseecom_device_no, 1);
	return ret;
}
static int qseecom_suspend(struct platform_device *pdev, pm_message_t state)
{
	int ret = 0;
	struct qseecom_clk *qclk;

	qclk = &qseecom.qsee;
	mutex_lock(&qsee_bw_mutex);
	mutex_lock(&clk_access_lock);
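
	/* Drop the bus bandwidth vote while the device is suspended */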
	if (qseecom.current_mode != INACTIVE) {
		ret = msm_bus_scale_client_update_request(
				qseecom.qsee_perf_client, INACTIVE);
		if (ret)
			pr_err("Fail to scale down bus\n");
		else
			qseecom.current_mode = INACTIVE;
	}
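
	/* Gate any CE clocks that are still held by active clients */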
	if (qclk->clk_access_cnt) {
		if (qclk->ce_clk != NULL)
			clk_disable_unprepare(qclk->ce_clk);
		if (qclk->ce_core_clk != NULL)
			clk_disable_unprepare(qclk->ce_core_clk);
		if (qclk->ce_bus_clk != NULL)
			clk_disable_unprepare(qclk->ce_bus_clk);
	}

	del_timer_sync(&(qseecom.bw_scale_down_timer));
	qseecom.timer_running = false;

	mutex_unlock(&clk_access_lock);
	mutex_unlock(&qsee_bw_mutex);
	return 0;
}
static int qseecom_resume(struct platform_device *pdev)
{
	int mode = 0;
	int ret = 0;
	struct qseecom_clk *qclk;

	qclk = &qseecom.qsee;
	mutex_lock(&qsee_bw_mutex);
	mutex_lock(&clk_access_lock);
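
	/* Restore the accumulated bandwidth vote, capped at HIGH */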
	if (qseecom.cumulative_mode >= HIGH)
		mode = HIGH;
	else
		mode = qseecom.cumulative_mode;

	if (qseecom.cumulative_mode != INACTIVE) {
		ret = msm_bus_scale_client_update_request(
				qseecom.qsee_perf_client, mode);
		if (ret)
			pr_err("Fail to scale up bus to %d\n", mode);
		else
			qseecom.current_mode = mode;
	}
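
	/* Re-enable the CE clocks that were gated during suspend */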
	if (qclk->clk_access_cnt) {
		ret = clk_prepare_enable(qclk->ce_core_clk);
		if (ret) {
			pr_err("Unable to enable/prepare CE core clk\n");
			qclk->clk_access_cnt = 0;
			goto err;
		}
		ret = clk_prepare_enable(qclk->ce_clk);
		if (ret) {
			pr_err("Unable to enable/prepare CE iface clk\n");
			qclk->clk_access_cnt = 0;
			goto ce_clk_err;
		}
		ret = clk_prepare_enable(qclk->ce_bus_clk);
		if (ret) {
			pr_err("Unable to enable/prepare CE bus clk\n");
			qclk->clk_access_cnt = 0;
			goto ce_bus_clk_err;
		}
	}
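
	/* Rearm the inactivity timer so the restored vote can scale down */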
	if (qclk->clk_access_cnt || qseecom.cumulative_mode) {
		qseecom.bw_scale_down_timer.expires = jiffies +
			msecs_to_jiffies(QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
		mod_timer(&(qseecom.bw_scale_down_timer),
				qseecom.bw_scale_down_timer.expires);
		qseecom.timer_running = true;
	}

	mutex_unlock(&clk_access_lock);
	mutex_unlock(&qsee_bw_mutex);
	return 0;

ce_bus_clk_err:
	clk_disable_unprepare(qclk->ce_clk);
ce_clk_err:
	clk_disable_unprepare(qclk->ce_core_clk);
err:
	mutex_unlock(&clk_access_lock);
	mutex_unlock(&qsee_bw_mutex);
	return -EIO;
}
static const struct of_device_id qseecom_match[] = {
	{
		.compatible = "qcom,qseecom",
	},
	{}
};
static struct platform_driver qseecom_plat_driver = {
	.probe = qseecom_probe,
	.remove = qseecom_remove,
	.suspend = qseecom_suspend,
	.resume = qseecom_resume,
	.driver = {
		.name = "qseecom",
		.owner = THIS_MODULE,
		.of_match_table = qseecom_match,
	},
};
static int __init qseecom_init(void)
{
	return platform_driver_register(&qseecom_plat_driver);
}

static void __exit qseecom_exit(void)
{
	platform_driver_unregister(&qseecom_plat_driver);
}
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Qualcomm Secure Execution Environment Communicator");

module_init(qseecom_init);
module_exit(qseecom_exit);