adreno.c

  1. /* Copyright (c) 2002,2007-2014, The Linux Foundation. All rights reserved.
  2. *
  3. * This program is free software; you can redistribute it and/or modify
  4. * it under the terms of the GNU General Public License version 2 and
  5. * only version 2 as published by the Free Software Foundation.
  6. *
  7. * This program is distributed in the hope that it will be useful,
  8. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  9. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  10. * GNU General Public License for more details.
  11. *
  12. */
  13. #include <linux/module.h>
  14. #include <linux/uaccess.h>
  15. #include <linux/vmalloc.h>
  16. #include <linux/ioctl.h>
  17. #include <linux/sched.h>
  18. #include <linux/of.h>
  19. #include <linux/of_device.h>
  20. #include <linux/delay.h>
  21. #include <linux/input.h>
  22. #include <mach/socinfo.h>
  23. #include <mach/msm_bus_board.h>
  24. #include <mach/msm_bus.h>
  25. #include "kgsl.h"
  26. #include "kgsl_pwrscale.h"
  27. #include "kgsl_cffdump.h"
  28. #include "kgsl_sharedmem.h"
  29. #include "kgsl_iommu.h"
  30. #include "adreno.h"
  31. #include "adreno_pm4types.h"
  32. #include "adreno_trace.h"
  33. #include "a2xx_reg.h"
  34. #include "a3xx_reg.h"
  35. #define DRIVER_VERSION_MAJOR 3
  36. #define DRIVER_VERSION_MINOR 1
  37. /* Number of times to try hard reset */
  38. #define NUM_TIMES_RESET_RETRY 5
  39. /* Adreno MH arbiter config*/
  40. #define ADRENO_CFG_MHARB \
  41. (0x10 \
  42. | (0 << MH_ARBITER_CONFIG__SAME_PAGE_GRANULARITY__SHIFT) \
  43. | (1 << MH_ARBITER_CONFIG__L1_ARB_ENABLE__SHIFT) \
  44. | (1 << MH_ARBITER_CONFIG__L1_ARB_HOLD_ENABLE__SHIFT) \
  45. | (0 << MH_ARBITER_CONFIG__L2_ARB_CONTROL__SHIFT) \
  46. | (1 << MH_ARBITER_CONFIG__PAGE_SIZE__SHIFT) \
  47. | (1 << MH_ARBITER_CONFIG__TC_REORDER_ENABLE__SHIFT) \
  48. | (1 << MH_ARBITER_CONFIG__TC_ARB_HOLD_ENABLE__SHIFT) \
  49. | (0 << MH_ARBITER_CONFIG__IN_FLIGHT_LIMIT_ENABLE__SHIFT) \
  50. | (0x8 << MH_ARBITER_CONFIG__IN_FLIGHT_LIMIT__SHIFT) \
  51. | (1 << MH_ARBITER_CONFIG__CP_CLNT_ENABLE__SHIFT) \
  52. | (1 << MH_ARBITER_CONFIG__VGT_CLNT_ENABLE__SHIFT) \
  53. | (1 << MH_ARBITER_CONFIG__TC_CLNT_ENABLE__SHIFT) \
  54. | (1 << MH_ARBITER_CONFIG__RB_CLNT_ENABLE__SHIFT) \
  55. | (1 << MH_ARBITER_CONFIG__PA_CLNT_ENABLE__SHIFT))
  56. #define ADRENO_MMU_CONFIG \
  57. (0x01 \
  58. | (MMU_CONFIG << MH_MMU_CONFIG__RB_W_CLNT_BEHAVIOR__SHIFT) \
  59. | (MMU_CONFIG << MH_MMU_CONFIG__CP_W_CLNT_BEHAVIOR__SHIFT) \
  60. | (MMU_CONFIG << MH_MMU_CONFIG__CP_R0_CLNT_BEHAVIOR__SHIFT) \
  61. | (MMU_CONFIG << MH_MMU_CONFIG__CP_R1_CLNT_BEHAVIOR__SHIFT) \
  62. | (MMU_CONFIG << MH_MMU_CONFIG__CP_R2_CLNT_BEHAVIOR__SHIFT) \
  63. | (MMU_CONFIG << MH_MMU_CONFIG__CP_R3_CLNT_BEHAVIOR__SHIFT) \
  64. | (MMU_CONFIG << MH_MMU_CONFIG__CP_R4_CLNT_BEHAVIOR__SHIFT) \
  65. | (MMU_CONFIG << MH_MMU_CONFIG__VGT_R0_CLNT_BEHAVIOR__SHIFT) \
  66. | (MMU_CONFIG << MH_MMU_CONFIG__VGT_R1_CLNT_BEHAVIOR__SHIFT) \
  67. | (MMU_CONFIG << MH_MMU_CONFIG__TC_R_CLNT_BEHAVIOR__SHIFT) \
  68. | (MMU_CONFIG << MH_MMU_CONFIG__PA_W_CLNT_BEHAVIOR__SHIFT))
  69. #define KGSL_LOG_LEVEL_DEFAULT 3
  70. static void adreno_input_work(struct work_struct *work);
  71. /*
  72. * The default values for the simple_ondemand governor are 90 and 5;
  73. * we use different values here.
  74. * They still need to be tuned and compared against the tz governor.
  75. */
  76. static struct devfreq_simple_ondemand_data adreno_ondemand_data = {
  77. .upthreshold = 80,
  78. .downdifferential = 20,
  79. };
  80. static struct devfreq_msm_adreno_tz_data adreno_tz_data = {
  81. .bus = {
  82. .max = 450,
  83. },
  84. .device_id = KGSL_DEVICE_3D0,
  85. };
  86. static const struct devfreq_governor_data adreno_governors[] = {
  87. { .name = "simple_ondemand", .data = &adreno_ondemand_data },
  88. { .name = "msm-adreno-tz", .data = &adreno_tz_data },
  89. };
  90. static const struct kgsl_functable adreno_functable;
  91. static struct adreno_device device_3d0 = {
  92. .dev = {
  93. KGSL_DEVICE_COMMON_INIT(device_3d0.dev),
  94. .pwrscale = KGSL_PWRSCALE_INIT(adreno_governors,
  95. ARRAY_SIZE(adreno_governors)),
  96. .name = DEVICE_3D0_NAME,
  97. .id = KGSL_DEVICE_3D0,
  98. .mh = {
  99. .mharb = ADRENO_CFG_MHARB,
  100. /* Remove 1k boundary check in z470 to avoid a GPU
  101. * hang. Notice that this solution won't work if
  102. * both EBI and SMI are used
  103. */
  104. .mh_intf_cfg1 = 0x00032f07,
  105. /* turn off memory protection unit by setting
  106. acceptable physical address range to include
  107. all pages. */
  108. .mpu_base = 0x00000000,
  109. .mpu_range = 0xFFFFF000,
  110. },
  111. .mmu = {
  112. .config = ADRENO_MMU_CONFIG,
  113. },
  114. .pwrctrl = {
  115. .irq_name = KGSL_3D0_IRQ,
  116. },
  117. .iomemname = KGSL_3D0_REG_MEMORY,
  118. .shadermemname = KGSL_3D0_SHADER_MEMORY,
  119. .ftbl = &adreno_functable,
  120. .cmd_log = KGSL_LOG_LEVEL_DEFAULT,
  121. .ctxt_log = KGSL_LOG_LEVEL_DEFAULT,
  122. .drv_log = KGSL_LOG_LEVEL_DEFAULT,
  123. .mem_log = KGSL_LOG_LEVEL_DEFAULT,
  124. .pwr_log = KGSL_LOG_LEVEL_DEFAULT,
  125. .pm_dump_enable = 0,
  126. },
  127. .gmem_base = 0,
  128. .gmem_size = SZ_256K,
  129. .pfp_fw = NULL,
  130. .pm4_fw = NULL,
  131. .wait_timeout = 0, /* in milliseconds, 0 means disabled */
  132. .ib_check_level = 0,
  133. .ft_policy = KGSL_FT_DEFAULT_POLICY,
  134. .ft_pf_policy = KGSL_FT_PAGEFAULT_DEFAULT_POLICY,
  135. .fast_hang_detect = 1,
  136. .long_ib_detect = 1,
  137. .input_work = __WORK_INITIALIZER(device_3d0.input_work,
  138. adreno_input_work),
  139. };
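/*
 * device_3d0 above is the single statically allocated GPU device. The
 * ADRENO_DEVICE() macro used throughout this file presumably just converts
 * a struct kgsl_device pointer back into this wrapper, roughly:
 *
 *	container_of(device, struct adreno_device, dev)
 */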
  140. unsigned int ft_detect_regs[FT_DETECT_REGS_COUNT];
  141. /*
  142. * This is the master list of all GPU cores that are supported by this
  143. * driver.
  144. */
  145. #define ANY_ID (~0)
  146. #define NO_VER (~0)
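/*
 * ANY_ID is a wildcard for the core/major/minor/patchid fields in the list
 * below. NO_VER marks a microcode feature version (e.g. sync_lock support)
 * that no firmware image will ever satisfy, i.e. the feature is treated as
 * unsupported on that core.
 */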
  147. static const struct {
  148. enum adreno_gpurev gpurev;
  149. unsigned int core, major, minor, patchid;
  150. const char *pm4fw;
  151. const char *pfpfw;
  152. struct adreno_gpudev *gpudev;
  153. unsigned int istore_size;
  154. unsigned int pix_shader_start;
  155. /* Size of an instruction in dwords */
  156. unsigned int instruction_size;
  157. /* size of gmem for gpu */
  158. unsigned int gmem_size;
  159. /* version of pm4 microcode that supports sync_lock
  160. between CPU and GPU for IOMMU-v0 programming */
  161. unsigned int sync_lock_pm4_ver;
  162. /* version of pfp microcode that supports sync_lock
  163. between CPU and GPU for IOMMU-v0 programming */
  164. unsigned int sync_lock_pfp_ver;
  165. /* PM4 jump table index */
  166. unsigned int pm4_jt_idx;
  167. /* PM4 jump table load addr */
  168. unsigned int pm4_jt_addr;
  169. /* PFP jump table index */
  170. unsigned int pfp_jt_idx;
  171. /* PFP jump table load addr */
  172. unsigned int pfp_jt_addr;
  173. /* PM4 bootstrap loader size */
  174. unsigned int pm4_bstrp_size;
  175. /* PFP bootstrap loader size */
  176. unsigned int pfp_bstrp_size;
  177. /* PFP bootstrap loader supported version */
  178. unsigned int pfp_bstrp_ver;
  179. } adreno_gpulist[] = {
  180. { ADRENO_REV_A200, 0, 2, ANY_ID, ANY_ID,
  181. "yamato_pm4.fw", "yamato_pfp.fw", &adreno_a2xx_gpudev,
  182. 512, 384, 3, SZ_256K, NO_VER, NO_VER },
  183. { ADRENO_REV_A203, 0, 1, 1, ANY_ID,
  184. "yamato_pm4.fw", "yamato_pfp.fw", &adreno_a2xx_gpudev,
  185. 512, 384, 3, SZ_256K, NO_VER, NO_VER },
  186. { ADRENO_REV_A205, 0, 1, 0, ANY_ID,
  187. "yamato_pm4.fw", "yamato_pfp.fw", &adreno_a2xx_gpudev,
  188. 512, 384, 3, SZ_256K, NO_VER, NO_VER },
  189. { ADRENO_REV_A220, 2, 1, ANY_ID, ANY_ID,
  190. "leia_pm4_470.fw", "leia_pfp_470.fw", &adreno_a2xx_gpudev,
  191. 512, 384, 3, SZ_512K, NO_VER, NO_VER },
  192. /*
  193. * patchlevel 5 (8960v2) needs special pm4 firmware to work around
  194. * a hardware problem.
  195. */
  196. { ADRENO_REV_A225, 2, 2, 0, 5,
  197. "a225p5_pm4.fw", "a225_pfp.fw", &adreno_a2xx_gpudev,
  198. 1536, 768, 3, SZ_512K, NO_VER, NO_VER },
  199. { ADRENO_REV_A225, 2, 2, 0, 6,
  200. "a225_pm4.fw", "a225_pfp.fw", &adreno_a2xx_gpudev,
  201. 1536, 768, 3, SZ_512K, 0x225011, 0x225002 },
  202. { ADRENO_REV_A225, 2, 2, ANY_ID, ANY_ID,
  203. "a225_pm4.fw", "a225_pfp.fw", &adreno_a2xx_gpudev,
  204. 1536, 768, 3, SZ_512K, 0x225011, 0x225002 },
  205. /* A3XX doesn't use the pix_shader_start */
  206. { ADRENO_REV_A305, 3, 0, 5, 0,
  207. "a300_pm4.fw", "a300_pfp.fw", &adreno_a3xx_gpudev,
  208. 512, 0, 2, SZ_256K, 0x3FF037, 0x3FF016 },
  209. /* A3XX doesn't use the pix_shader_start */
  210. { ADRENO_REV_A320, 3, 2, ANY_ID, ANY_ID,
  211. "a300_pm4.fw", "a300_pfp.fw", &adreno_a3xx_gpudev,
  212. 512, 0, 2, SZ_512K, 0x3FF037, 0x3FF016 },
  213. { ADRENO_REV_A330, 3, 3, 0, ANY_ID,
  214. "a330_pm4.fw", "a330_pfp.fw", &adreno_a3xx_gpudev,
  215. 512, 0, 2, SZ_1M, NO_VER, NO_VER, 0x8AD, 0x2E4, 0x201, 0x200,
  216. 0x6, 0x20, 0x330020 },
  217. { ADRENO_REV_A305B, 3, 0, 5, 0x10,
  218. "a330_pm4.fw", "a330_pfp.fw", &adreno_a3xx_gpudev,
  219. 512, 0, 2, SZ_128K, NO_VER, NO_VER, 0x8AD, 0x2E4,
  220. 0x201, 0x200 },
  221. /* 8226v2 */
  222. { ADRENO_REV_A305B, 3, 0, 5, 0x12,
  223. "a330_pm4.fw", "a330_pfp.fw", &adreno_a3xx_gpudev,
  224. 512, 0, 2, SZ_128K, NO_VER, NO_VER, 0x8AD, 0x2E4,
  225. 0x201, 0x200 },
  226. { ADRENO_REV_A305C, 3, 0, 5, 0x20,
  227. "a300_pm4.fw", "a300_pfp.fw", &adreno_a3xx_gpudev,
  228. 512, 0, 2, SZ_128K, 0x3FF037, 0x3FF016 },
  229. };
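/*
 * A minimal sketch (assuming the usual chip-id matching done later in this
 * file, e.g. by adreno_identify_gpu()) of how an entry above is selected;
 * ANY_ID acts as a wildcard for each field:
 *
 *	for (i = 0; i < ARRAY_SIZE(adreno_gpulist); i++) {
 *		if (core == adreno_gpulist[i].core &&
 *		    _rev_match(major, adreno_gpulist[i].major) &&
 *		    _rev_match(minor, adreno_gpulist[i].minor) &&
 *		    _rev_match(patchid, adreno_gpulist[i].patchid))
 *			break;
 *	}
 *
 * where _rev_match() is a hypothetical helper that treats ANY_ID as
 * "matches anything".
 */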
  230. /* Nice level for the higher priority GPU start thread */
  231. static int _wake_nice = -7;
  232. /* Number of milliseconds to stay active after a wake on touch */
  233. static unsigned int _wake_timeout = 100;
  234. /*
  235. * A workqueue callback responsible for actually turning on the GPU after a
  236. * touch event. kgsl_pwrctrl_wake() is used without any active_count protection
  237. * to avoid the need to maintain state. Either somebody will start using the
  238. * GPU or the idle timer will fire and put the GPU back into slumber
  239. */
  240. static void adreno_input_work(struct work_struct *work)
  241. {
  242. struct adreno_device *adreno_dev = container_of(work,
  243. struct adreno_device, input_work);
  244. struct kgsl_device *device = &adreno_dev->dev;
  245. kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
  246. device->flags |= KGSL_FLAG_WAKE_ON_TOUCH;
  247. /*
  248. * Don't schedule adreno_start in a high priority workqueue; we are
  249. * already in a workqueue, which should be sufficient
  250. */
  251. kgsl_pwrctrl_wake(device, 0);
  252. /*
  253. * When waking up from a touch event we want to stay active long enough
  254. * for the user to send a draw command. The default idle timer timeout
  255. * is shorter than we want so go ahead and push the idle timer out
  256. * further for this special case
  257. */
  258. mod_timer(&device->idle_timer,
  259. jiffies + msecs_to_jiffies(_wake_timeout));
  260. kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
  261. }
  262. /*
  263. * Process input events and schedule work if needed. At this point we are only
  264. * interested in grokking EV_ABS touchscreen events
  265. */
  266. static void adreno_input_event(struct input_handle *handle, unsigned int type,
  267. unsigned int code, int value)
  268. {
  269. struct kgsl_device *device = handle->handler->private;
  270. struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
  271. /*
  272. * Only queue the work under certain circumstances: we have to be in
  273. * slumber, the event has to be EV_ABS and we had to have processed an
  274. * IB since the last time we called wake on touch.
  275. */
  276. if ((type == EV_ABS) &&
  277. !(device->flags & KGSL_FLAG_WAKE_ON_TOUCH) &&
  278. (device->state == KGSL_STATE_SLUMBER))
  279. schedule_work(&adreno_dev->input_work);
  280. }
  281. static int adreno_input_connect(struct input_handler *handler,
  282. struct input_dev *dev, const struct input_device_id *id)
  283. {
  284. struct input_handle *handle;
  285. int ret;
  286. handle = kzalloc(sizeof(*handle), GFP_KERNEL);
  287. if (handle == NULL)
  288. return -ENOMEM;
  289. handle->dev = dev;
  290. handle->handler = handler;
  291. handle->name = handler->name;
  292. ret = input_register_handle(handle);
  293. if (ret) {
  294. kfree(handle);
  295. return ret;
  296. }
  297. ret = input_open_device(handle);
  298. if (ret) {
  299. input_unregister_handle(handle);
  300. kfree(handle);
  301. }
  302. return ret;
  303. }
  304. static void adreno_input_disconnect(struct input_handle *handle)
  305. {
  306. input_close_device(handle);
  307. input_unregister_handle(handle);
  308. kfree(handle);
  309. }
  310. /*
  311. * We are only interested in EV_ABS events so only register handlers for those
  312. * input devices that have EV_ABS events
  313. */
  314. static const struct input_device_id adreno_input_ids[] = {
  315. {
  316. .flags = INPUT_DEVICE_ID_MATCH_EVBIT,
  317. .evbit = { BIT_MASK(EV_ABS) },
  318. /* assumption: MT_.._X & MT_.._Y are in the same long */
  319. .absbit = { [BIT_WORD(ABS_MT_POSITION_X)] =
  320. BIT_MASK(ABS_MT_POSITION_X) |
  321. BIT_MASK(ABS_MT_POSITION_Y) |
  322. BIT_MASK(ABS_MT_TRACKING_ID) },
  323. },
  324. { },
  325. };
  326. static struct input_handler adreno_input_handler = {
  327. .event = adreno_input_event,
  328. .connect = adreno_input_connect,
  329. .disconnect = adreno_input_disconnect,
  330. .name = "kgsl",
  331. .id_table = adreno_input_ids,
  332. };
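/*
 * Rough sketch of how the handler above is expected to be wired up; the
 * actual registration presumably happens later, around probe/start time:
 *
 *	adreno_input_handler.private = device;
 *	ret = input_register_handler(&adreno_input_handler);
 *	if (ret)
 *		KGSL_DRV_ERR(device, "Unable to register the input handler\n");
 */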
  333. /**
  334. * adreno_perfcounter_init: Reserve kernel performance counters
  335. * @device: device to configure
  336. *
  337. * The kernel needs/wants a certain group of performance counters for
  338. * its own activities. Reserve these performance counters at init time
  339. * to ensure that they are always reserved for the kernel. The performance
  340. * counters used by the kernel can be obtained by the user, but these
  341. * performance counters will remain active as long as the device is alive.
  342. */
  343. static int adreno_perfcounter_init(struct kgsl_device *device)
  344. {
  345. struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
  346. if (adreno_dev->gpudev->perfcounter_init)
  347. return adreno_dev->gpudev->perfcounter_init(adreno_dev);
  348. return 0;
  349. };
  350. /**
  351. * adreno_perfcounter_close: Release counters initialized by
  352. * adreno_perfcounter_init
  353. * @device: device to release counters for
  354. *
  355. */
  356. static void adreno_perfcounter_close(struct kgsl_device *device)
  357. {
  358. struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
  359. if (adreno_dev->gpudev->perfcounter_close)
  360. return adreno_dev->gpudev->perfcounter_close(adreno_dev);
  361. }
  362. /**
  363. * adreno_perfcounter_start: Enable performance counters
  364. * @adreno_dev: Adreno device to configure
  365. *
  366. * Ensure that all allocated performance counters are enabled. Since
  367. * the device was most likely stopped, we can't trust that the counters
  368. * are still valid, so re-enable them all.
  369. * Returns 0 on success, or an error code otherwise.
  370. */
  371. static int adreno_perfcounter_start(struct adreno_device *adreno_dev)
  372. {
  373. struct adreno_perfcounters *counters = adreno_dev->gpudev->perfcounters;
  374. struct adreno_perfcount_group *group;
  375. unsigned int i, j;
  376. int ret = 0;
  377. if (NULL == counters)
  378. return 0;
  379. /* group id iter */
  380. for (i = 0; i < counters->group_count; i++) {
  381. group = &(counters->groups[i]);
  382. /* countable iter */
  383. for (j = 0; j < group->reg_count; j++) {
  384. if (group->regs[j].countable ==
  385. KGSL_PERFCOUNTER_NOT_USED ||
  386. group->regs[j].countable ==
  387. KGSL_PERFCOUNTER_BROKEN)
  388. continue;
  389. if (adreno_dev->gpudev->perfcounter_enable)
  390. ret = adreno_dev->gpudev->perfcounter_enable(
  391. adreno_dev, i, j,
  392. group->regs[j].countable);
  393. if (ret)
  394. goto done;
  395. }
  396. }
  397. done:
  398. return ret;
  399. }
  400. /**
  401. * adreno_perfcounter_read_group() - Read counters for groupid/countable pairs
  402. * @adreno_dev: Adreno device to configure
  403. * @reads: List of kgsl_perfcounter_read_groups
  404. * @count: Length of list
  405. *
  406. * Read the performance counters for the groupid/countable pairs and return
  407. * the 64 bit result for each pair
  408. */
  409. int adreno_perfcounter_read_group(struct adreno_device *adreno_dev,
  410. struct kgsl_perfcounter_read_group __user *reads, unsigned int count)
  411. {
  412. struct adreno_perfcounters *counters = adreno_dev->gpudev->perfcounters;
  413. struct kgsl_device *device = &adreno_dev->dev;
  414. struct adreno_perfcount_group *group;
  415. struct kgsl_perfcounter_read_group *list = NULL;
  416. unsigned int i, j;
  417. int ret = 0;
  418. if (NULL == counters)
  419. return -EINVAL;
  420. /* make sure the read callback exists before we use it below */
  421. if (!adreno_dev->gpudev->perfcounter_read)
  422. return -EINVAL;
  423. /* sanity check params passed in */
  424. if (reads == NULL || count == 0 || count > 100)
  425. return -EINVAL;
  426. list = kmalloc(sizeof(struct kgsl_perfcounter_read_group) * count,
  427. GFP_KERNEL);
  428. if (!list)
  429. return -ENOMEM;
  430. if (copy_from_user(list, reads,
  431. sizeof(struct kgsl_perfcounter_read_group) * count)) {
  432. ret = -EFAULT;
  433. goto done;
  434. }
  435. kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
  436. ret = kgsl_active_count_get(device);
  437. if (ret) {
  438. kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
  439. goto done;
  440. }
  441. /* list iterator */
  442. for (j = 0; j < count; j++) {
  443. list[j].value = 0;
  444. /* Verify that the group ID is within range */
  445. if (list[j].groupid >= counters->group_count) {
  446. ret = -EINVAL;
  447. break;
  448. }
  449. group = &(counters->groups[list[j].groupid]);
  450. /* group/counter iterator */
  451. for (i = 0; i < group->reg_count; i++) {
  452. if (group->regs[i].countable == list[j].countable) {
  453. list[j].value =
  454. adreno_dev->gpudev->perfcounter_read(
  455. adreno_dev, list[j].groupid, i);
  456. break;
  457. }
  458. }
  459. }
  460. kgsl_active_count_put(device);
  461. kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
  462. /* write the data */
  463. if (ret == 0)
  464. ret = copy_to_user(reads, list,
  465. sizeof(struct kgsl_perfcounter_read_group) * count);
  466. done:
  467. kfree(list);
  468. return ret;
  469. }
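/*
 * Hypothetical caller-side sketch for the function above; the real ioctl
 * plumbing lives outside this file and this is illustrative only:
 *
 *	struct kgsl_perfcounter_read *param = data;
 *
 *	result = adreno_perfcounter_read_group(adreno_dev,
 *			param->reads, param->count);
 */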
  470. /**
  471. * adreno_perfcounter_get_groupid() - Get the performance counter group ID
  472. * @adreno_dev: Adreno device
  473. * @name: Performance counter group name string
  474. *
  475. * Get the groupid based on the name and return this ID
  476. */
  477. int adreno_perfcounter_get_groupid(struct adreno_device *adreno_dev,
  478. const char *name)
  479. {
  480. struct adreno_perfcounters *counters = adreno_dev->gpudev->perfcounters;
  481. struct adreno_perfcount_group *group;
  482. int i;
  483. if (name == NULL)
  484. return -EINVAL;
  485. if (NULL == counters)
  486. return -EINVAL;
  487. for (i = 0; i < counters->group_count; ++i) {
  488. group = &(counters->groups[i]);
  489. if (!strcmp(group->name, name))
  490. return i;
  491. }
  492. return -EINVAL;
  493. }
  494. /**
  495. * adreno_perfcounter_get_name() - Get the group name
  496. * @adreno_dev: Adreno device
  497. * @groupid: Desired performance counter groupid
  498. *
  499. * Get the name based on the groupid and return it
  500. */
  501. const char *adreno_perfcounter_get_name(struct adreno_device *adreno_dev,
  502. unsigned int groupid)
  503. {
  504. struct adreno_perfcounters *counters = adreno_dev->gpudev->perfcounters;
  505. if (NULL == counters)
  506. return NULL;
  507. if (groupid >= counters->group_count)
  508. return NULL;
  509. return counters->groups[groupid].name;
  510. }
  511. /**
  512. * adreno_perfcounter_query_group: Determine which countables are in counters
  513. * @adreno_dev: Adreno device to configure
  514. * @groupid: Desired performance counter group
  515. * @countables: Return list of all countables in the group's counters
  516. * @count: Max length of the array
  517. * @max_counters: max counters for the groupid
  518. *
  519. * Query the current state of counters for the group.
  520. */
  521. int adreno_perfcounter_query_group(struct adreno_device *adreno_dev,
  522. unsigned int groupid, unsigned int *countables, unsigned int count,
  523. unsigned int *max_counters)
  524. {
  525. struct adreno_perfcounters *counters = adreno_dev->gpudev->perfcounters;
  526. struct kgsl_device *device = &adreno_dev->dev;
  527. struct adreno_perfcount_group *group;
  528. unsigned int i, t;
  529. int ret;
  530. unsigned int *buf;
  531. *max_counters = 0;
  532. if (NULL == counters)
  533. return -EINVAL;
  534. if (groupid >= counters->group_count)
  535. return -EINVAL;
  536. kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
  537. group = &(counters->groups[groupid]);
  538. *max_counters = group->reg_count;
  539. /*
  540. * If countables is NULL or count is zero, return the max reg_count in
  541. * *max_counters and return success
  542. */
  543. if (countables == NULL || count == 0) {
  544. kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
  545. return 0;
  546. }
  547. t = min_t(unsigned int, group->reg_count, count);
  548. buf = kmalloc(t * sizeof(unsigned int), GFP_KERNEL);
  549. if (buf == NULL) {
  550. kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
  551. return -ENOMEM;
  552. }
  553. for (i = 0; i < t; i++)
  554. buf[i] = group->regs[i].countable;
  555. kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
  556. ret = copy_to_user(countables, buf, sizeof(unsigned int) * t);
  557. kfree(buf);
  558. return ret;
  559. }
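/*
 * Typical (hypothetical) two-step use of the function above: query the
 * group size first, then fetch the countables into a buffer of that size:
 *
 *	unsigned int max = 0;
 *
 *	adreno_perfcounter_query_group(adreno_dev, groupid, NULL, 0, &max);
 *	(allocate a user buffer with room for 'max' entries, then)
 *	adreno_perfcounter_query_group(adreno_dev, groupid, buf, max, &max);
 */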
  560. static inline void refcount_group(struct adreno_perfcount_group *group,
  561. unsigned int reg, unsigned int flags,
  562. unsigned int *lo, unsigned int *hi)
  563. {
  564. if (flags & PERFCOUNTER_FLAG_KERNEL)
  565. group->regs[reg].kernelcount++;
  566. else
  567. group->regs[reg].usercount++;
  568. if (lo)
  569. *lo = group->regs[reg].offset;
  570. if (hi)
  571. *hi = group->regs[reg].offset_hi;
  572. }
  573. /**
  574. * adreno_perfcounter_get: Try to put a countable in an available counter
  575. * @adreno_dev: Adreno device to configure
  576. * @groupid: Desired performance counter group
  577. * @countable: Countable desired to be in a counter
  578. * @offset: Return offset of the LO counter assigned
  579. * @offset_hi: Return offset of the HI counter assigned
  580. * @flags: Used to setup kernel perf counters
  581. *
  582. * Try to place a countable in an available counter. If the countable is
  583. * already in a counter, reference count the counter/countable pair resource
  584. * and return success
  585. */
  586. int adreno_perfcounter_get(struct adreno_device *adreno_dev,
  587. unsigned int groupid, unsigned int countable, unsigned int *offset,
  588. unsigned int *offset_hi, unsigned int flags)
  589. {
  590. struct adreno_perfcounters *counters = adreno_dev->gpudev->perfcounters;
  591. struct adreno_perfcount_group *group;
  592. unsigned int empty = -1;
  593. int ret = 0;
  594. /* always clear return variables */
  595. if (offset)
  596. *offset = 0;
  597. if (offset_hi)
  598. *offset_hi = 0;
  599. if (NULL == counters)
  600. return -EINVAL;
  601. if (groupid >= counters->group_count)
  602. return -EINVAL;
  603. group = &(counters->groups[groupid]);
  604. if (group->flags & ADRENO_PERFCOUNTER_GROUP_FIXED) {
  605. /*
  606. * In fixed groups the countable equals the fixed register the
  607. * user wants. First make sure it is in range
  608. */
  609. if (countable >= group->reg_count)
  610. return -EINVAL;
  611. /* If it is already reserved, just increase the refcounts */
  612. if ((group->regs[countable].kernelcount != 0) ||
  613. (group->regs[countable].usercount != 0)) {
  614. refcount_group(group, countable, flags,
  615. offset, offset_hi);
  616. return 0;
  617. }
  618. empty = countable;
  619. } else {
  620. unsigned int i;
  621. /*
  622. * Check if the countable is already associated with a counter.
  623. * Refcount and return the offset, otherwise, try and find an
  624. * empty counter and assign the countable to it.
  625. */
  626. for (i = 0; i < group->reg_count; i++) {
  627. if (group->regs[i].countable == countable) {
  628. refcount_group(group, i, flags,
  629. offset, offset_hi);
  630. return 0;
  631. } else if (group->regs[i].countable ==
  632. KGSL_PERFCOUNTER_NOT_USED) {
  633. /* keep track of unused counter */
  634. empty = i;
  635. }
  636. }
  637. }
  638. /* no available counters, so do nothing else */
  639. if (empty == -1)
  640. return -EBUSY;
  641. /* enable the new counter */
  642. ret = adreno_dev->gpudev->perfcounter_enable(adreno_dev, groupid, empty,
  643. countable);
  644. if (ret)
  645. return ret;
  646. /* initialize the new counter */
  647. group->regs[empty].countable = countable;
  648. /* set initial kernel and user count */
  649. if (flags & PERFCOUNTER_FLAG_KERNEL) {
  650. group->regs[empty].kernelcount = 1;
  651. group->regs[empty].usercount = 0;
  652. } else {
  653. group->regs[empty].kernelcount = 0;
  654. group->regs[empty].usercount = 1;
  655. }
  656. if (offset)
  657. *offset = group->regs[empty].offset;
  658. if (offset_hi)
  659. *offset_hi = group->regs[empty].offset_hi;
  660. return ret;
  661. }
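/*
 * Example (hypothetical group name and countable) of reserving a counter,
 * roughly how a gpudev perfcounter_init() implementation might combine
 * adreno_perfcounter_get_groupid() and adreno_perfcounter_get():
 *
 *	unsigned int lo = 0, hi = 0;
 *	int group = adreno_perfcounter_get_groupid(adreno_dev, "SP");
 *
 *	if (group >= 0)
 *		ret = adreno_perfcounter_get(adreno_dev, group,
 *				SP_ALU_ACTIVE_CYCLES, &lo, &hi,
 *				PERFCOUNTER_FLAG_KERNEL);
 */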
  662. /**
  663. * adreno_perfcounter_put: Release a countable from counter resource
  664. * @adreno_dev: Adreno device to configure
  665. * @groupid: Desired performance counter group
  666. * @countable: Countable desired to be freed from a counter
  667. * @flags: Flag to determine if kernel or user space request
  668. *
  669. * Put a performance counter/countable pair that was previously received. If
  670. * no one else is using the countable, free up the counter for others.
  671. */
  672. int adreno_perfcounter_put(struct adreno_device *adreno_dev,
  673. unsigned int groupid, unsigned int countable, unsigned int flags)
  674. {
  675. struct adreno_perfcounters *counters = adreno_dev->gpudev->perfcounters;
  676. struct adreno_perfcount_group *group;
  677. unsigned int i;
  678. if (NULL == counters)
  679. return -EINVAL;
  680. if (groupid >= counters->group_count)
  681. return -EINVAL;
  682. group = &(counters->groups[groupid]);
  683. /*
  684. * Find if the counter/countable pair is used currently.
  685. * Start cycling through registers in the bank.
  686. */
  687. for (i = 0; i < group->reg_count; i++) {
  688. /* check if countable assigned is what we are looking for */
  689. if (group->regs[i].countable == countable) {
  690. /* found pair, book keep count based on request type */
  691. if (flags & PERFCOUNTER_FLAG_KERNEL &&
  692. group->regs[i].kernelcount > 0)
  693. group->regs[i].kernelcount--;
  694. else if (group->regs[i].usercount > 0)
  695. group->regs[i].usercount--;
  696. else
  697. break;
  698. /* mark available if not used anymore */
  699. if (group->regs[i].kernelcount == 0 &&
  700. group->regs[i].usercount == 0)
  701. group->regs[i].countable =
  702. KGSL_PERFCOUNTER_NOT_USED;
  703. return 0;
  704. }
  705. }
  706. return -EINVAL;
  707. }
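/*
 * Callers are expected to balance every successful adreno_perfcounter_get()
 * with a matching put using the same groupid/countable and flags, e.g.
 * (hypothetical values):
 *
 *	adreno_perfcounter_put(adreno_dev, group, SP_ALU_ACTIVE_CYCLES,
 *			PERFCOUNTER_FLAG_KERNEL);
 */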
  708. /**
  709. * adreno_perfcounter_restore() - Restore performance counters
  710. * @adreno_dev: adreno device to configure
  711. *
  712. * Load the physical performance counters with the 64 bit values that
  713. * were saved on GPU power collapse.
  714. */
  715. static inline void adreno_perfcounter_restore(struct adreno_device *adreno_dev)
  716. {
  717. if (adreno_dev->gpudev->perfcounter_restore)
  718. adreno_dev->gpudev->perfcounter_restore(adreno_dev);
  719. }
  720. /**
  721. * adreno_perfcounter_save() - Save performance counters
  722. * @adreno_dev: adreno device to configure
  723. *
  724. * Save the performance counter values before GPU power collapse.
  725. * The saved values are restored on restart.
  726. * This ensures physical counters are coherent across power-collapse.
  727. */
  728. static inline void adreno_perfcounter_save(struct adreno_device *adreno_dev)
  729. {
  730. if (adreno_dev->gpudev->perfcounter_save)
  731. adreno_dev->gpudev->perfcounter_save(adreno_dev);
  732. }
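/*
 * Top-level IRQ handler: fan the interrupt out to the core-specific
 * (a2xx or a3xx) handler.
 */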
  733. static irqreturn_t adreno_irq_handler(struct kgsl_device *device)
  734. {
  735. struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
  736. return adreno_dev->gpudev->irq_handler(adreno_dev);
  737. }
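/*
 * adreno_cleanup_pt() - inverse of adreno_setup_pt() below: unmap the
 * ringbuffer, memstore, power-on fixup, MMU setstate memory and profiling
 * buffer from the given pagetable.
 */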
  738. static void adreno_cleanup_pt(struct kgsl_device *device,
  739. struct kgsl_pagetable *pagetable)
  740. {
  741. struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
  742. struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
  743. kgsl_mmu_unmap(pagetable, &rb->buffer_desc);
  744. kgsl_mmu_unmap(pagetable, &device->memstore);
  745. kgsl_mmu_unmap(pagetable, &adreno_dev->pwron_fixup);
  746. kgsl_mmu_unmap(pagetable, &device->mmu.setstate_memory);
  747. kgsl_mmu_unmap(pagetable, &adreno_dev->profile.shared_buffer);
  748. }
  749. static int adreno_setup_pt(struct kgsl_device *device,
  750. struct kgsl_pagetable *pagetable)
  751. {
  752. int result;
  753. struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
  754. struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
  755. result = kgsl_mmu_map_global(pagetable, &rb->buffer_desc);
  756. /*
  757. * ALERT: The order of these mappings is important: keep the most
  758. * used entries, like memstore and the mmu setstate memory, adjacent
  759. * so the TLB prefetcher covers them.
  760. */
  761. if (!result)
  762. result = kgsl_mmu_map_global(pagetable, &device->memstore);
  763. if (!result)
  764. result = kgsl_mmu_map_global(pagetable,
  765. &adreno_dev->pwron_fixup);
  766. if (!result)
  767. result = kgsl_mmu_map_global(pagetable,
  768. &device->mmu.setstate_memory);
  769. if (!result)
  770. result = kgsl_mmu_map_global(pagetable,
  771. &adreno_dev->profile.shared_buffer);
  772. if (result) {
  773. /* On error clean up what we have wrought */
  774. adreno_cleanup_pt(device, pagetable);
  775. return result;
  776. }
  777. /*
  778. * Set the mpu end to the last "normal" global memory we use.
  779. * For the IOMMU, this will be used to restrict access to the
  780. * mapped registers.
  781. */
  782. device->mh.mpu_range = adreno_dev->profile.shared_buffer.gpuaddr +
  783. adreno_dev->profile.shared_buffer.size;
  784. return 0;
  785. }
  786. static unsigned int _adreno_iommu_setstate_v0(struct kgsl_device *device,
  787. unsigned int *cmds_orig,
  788. phys_addr_t pt_val,
  789. int num_iommu_units, uint32_t flags)
  790. {
  791. phys_addr_t reg_pt_val;
  792. unsigned int *cmds = cmds_orig;
  793. struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
  794. int i;
  795. if (cpu_is_msm8960())
  796. cmds += adreno_add_change_mh_phys_limit_cmds(cmds, 0xFFFFF000,
  797. device->mmu.setstate_memory.gpuaddr +
  798. KGSL_IOMMU_SETSTATE_NOP_OFFSET);
  799. else
  800. cmds += adreno_add_bank_change_cmds(cmds,
  801. KGSL_IOMMU_CONTEXT_USER,
  802. device->mmu.setstate_memory.gpuaddr +
  803. KGSL_IOMMU_SETSTATE_NOP_OFFSET);
  804. cmds += adreno_add_idle_cmds(adreno_dev, cmds);
  805. /* Acquire GPU-CPU sync Lock here */
  806. cmds += kgsl_mmu_sync_lock(&device->mmu, cmds);
  807. if (flags & KGSL_MMUFLAGS_PTUPDATE) {
  808. /*
  809. * We need to perform the following operations for all
  810. * IOMMU units
  811. */
  812. for (i = 0; i < num_iommu_units; i++) {
  813. reg_pt_val = kgsl_mmu_get_default_ttbr0(&device->mmu,
  814. i, KGSL_IOMMU_CONTEXT_USER);
  815. reg_pt_val &= ~KGSL_IOMMU_CTX_TTBR0_ADDR_MASK;
  816. reg_pt_val |= (pt_val & KGSL_IOMMU_CTX_TTBR0_ADDR_MASK);
  817. /*
  818. * Set address of the new pagetable by writing to IOMMU
  819. * TTBR0 register
  820. */
  821. *cmds++ = cp_type3_packet(CP_MEM_WRITE, 2);
  822. *cmds++ = kgsl_mmu_get_reg_gpuaddr(&device->mmu, i,
  823. KGSL_IOMMU_CONTEXT_USER, KGSL_IOMMU_CTX_TTBR0);
  824. *cmds++ = reg_pt_val;
  825. *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
  826. *cmds++ = 0x00000000;
  827. /*
  828. * Read back the ttbr0 register as a barrier to ensure
  829. * above writes have completed
  830. */
  831. cmds += adreno_add_read_cmds(device, cmds,
  832. kgsl_mmu_get_reg_gpuaddr(&device->mmu, i,
  833. KGSL_IOMMU_CONTEXT_USER, KGSL_IOMMU_CTX_TTBR0),
  834. reg_pt_val,
  835. device->mmu.setstate_memory.gpuaddr +
  836. KGSL_IOMMU_SETSTATE_NOP_OFFSET);
  837. }
  838. }
  839. if (flags & KGSL_MMUFLAGS_TLBFLUSH) {
  840. /*
  841. * tlb flush
  842. */
  843. for (i = 0; i < num_iommu_units; i++) {
  844. reg_pt_val = (pt_val + kgsl_mmu_get_default_ttbr0(
  845. &device->mmu,
  846. i, KGSL_IOMMU_CONTEXT_USER));
  847. reg_pt_val &= ~KGSL_IOMMU_CTX_TTBR0_ADDR_MASK;
  848. reg_pt_val |= (pt_val & KGSL_IOMMU_CTX_TTBR0_ADDR_MASK);
  849. *cmds++ = cp_type3_packet(CP_MEM_WRITE, 2);
  850. *cmds++ = kgsl_mmu_get_reg_gpuaddr(&device->mmu, i,
  851. KGSL_IOMMU_CONTEXT_USER,
  852. KGSL_IOMMU_CTX_TLBIALL);
  853. *cmds++ = 1;
  854. cmds += __adreno_add_idle_indirect_cmds(cmds,
  855. device->mmu.setstate_memory.gpuaddr +
  856. KGSL_IOMMU_SETSTATE_NOP_OFFSET);
  857. cmds += adreno_add_read_cmds(device, cmds,
  858. kgsl_mmu_get_reg_gpuaddr(&device->mmu, i,
  859. KGSL_IOMMU_CONTEXT_USER,
  860. KGSL_IOMMU_CTX_TTBR0),
  861. reg_pt_val,
  862. device->mmu.setstate_memory.gpuaddr +
  863. KGSL_IOMMU_SETSTATE_NOP_OFFSET);
  864. }
  865. }
  866. /* Release GPU-CPU sync Lock here */
  867. cmds += kgsl_mmu_sync_unlock(&device->mmu, cmds);
  868. if (cpu_is_msm8960())
  869. cmds += adreno_add_change_mh_phys_limit_cmds(cmds,
  870. kgsl_mmu_get_reg_gpuaddr(&device->mmu, 0,
  871. 0, KGSL_IOMMU_GLOBAL_BASE),
  872. device->mmu.setstate_memory.gpuaddr +
  873. KGSL_IOMMU_SETSTATE_NOP_OFFSET);
  874. else
  875. cmds += adreno_add_bank_change_cmds(cmds,
  876. KGSL_IOMMU_CONTEXT_PRIV,
  877. device->mmu.setstate_memory.gpuaddr +
  878. KGSL_IOMMU_SETSTATE_NOP_OFFSET);
  879. cmds += adreno_add_idle_cmds(adreno_dev, cmds);
  880. return cmds - cmds_orig;
  881. }
  882. static unsigned int _adreno_iommu_setstate_v1(struct kgsl_device *device,
  883. unsigned int *cmds_orig,
  884. phys_addr_t pt_val,
  885. int num_iommu_units, uint32_t flags)
  886. {
  887. struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
  888. phys_addr_t ttbr0_val;
  889. unsigned int reg_pt_val;
  890. unsigned int *cmds = cmds_orig;
  891. int i;
  892. unsigned int ttbr0, tlbiall, tlbstatus, tlbsync, mmu_ctrl;
  893. cmds += adreno_add_idle_cmds(adreno_dev, cmds);
  894. for (i = 0; i < num_iommu_units; i++) {
  895. ttbr0_val = kgsl_mmu_get_default_ttbr0(&device->mmu,
  896. i, KGSL_IOMMU_CONTEXT_USER);
  897. ttbr0_val &= ~KGSL_IOMMU_CTX_TTBR0_ADDR_MASK;
  898. ttbr0_val |= (pt_val & KGSL_IOMMU_CTX_TTBR0_ADDR_MASK);
  899. if (flags & KGSL_MMUFLAGS_PTUPDATE) {
  900. mmu_ctrl = kgsl_mmu_get_reg_ahbaddr(
  901. &device->mmu, i,
  902. KGSL_IOMMU_CONTEXT_USER,
  903. KGSL_IOMMU_IMPLDEF_MICRO_MMU_CTRL) >> 2;
  904. ttbr0 = kgsl_mmu_get_reg_ahbaddr(&device->mmu, i,
  905. KGSL_IOMMU_CONTEXT_USER,
  906. KGSL_IOMMU_CTX_TTBR0) >> 2;
  907. if (kgsl_mmu_hw_halt_supported(&device->mmu, i)) {
  908. *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
  909. *cmds++ = 0;
  910. /*
  911. * glue commands together until next
  912. * WAIT_FOR_ME
  913. */
  914. cmds += adreno_wait_reg_eq(cmds,
  915. adreno_getreg(adreno_dev,
  916. ADRENO_REG_CP_WFI_PEND_CTR),
  917. 1, 0xFFFFFFFF, 0xF);
  918. /* set the iommu lock bit */
  919. *cmds++ = cp_type3_packet(CP_REG_RMW, 3);
  920. *cmds++ = mmu_ctrl;
  921. /* AND to unmask the lock bit */
  922. *cmds++ =
  923. ~(KGSL_IOMMU_IMPLDEF_MICRO_MMU_CTRL_HALT);
  924. /* OR to set the IOMMU lock bit */
  925. *cmds++ =
  926. KGSL_IOMMU_IMPLDEF_MICRO_MMU_CTRL_HALT;
  927. /* wait for smmu to lock */
  928. cmds += adreno_wait_reg_eq(cmds, mmu_ctrl,
  929. KGSL_IOMMU_IMPLDEF_MICRO_MMU_CTRL_IDLE,
  930. KGSL_IOMMU_IMPLDEF_MICRO_MMU_CTRL_IDLE, 0xF);
  931. }
  932. /* set ttbr0 */
  933. if (sizeof(phys_addr_t) > sizeof(unsigned long)) {
  934. reg_pt_val = ttbr0_val & 0xFFFFFFFF;
  935. *cmds++ = cp_type0_packet(ttbr0, 1);
  936. *cmds++ = reg_pt_val;
  937. reg_pt_val = (unsigned int)
  938. ((ttbr0_val & 0xFFFFFFFF00000000ULL) >> 32);
  939. *cmds++ = cp_type0_packet(ttbr0 + 1, 1);
  940. *cmds++ = reg_pt_val;
  941. } else {
  942. reg_pt_val = ttbr0_val;
  943. *cmds++ = cp_type0_packet(ttbr0, 1);
  944. *cmds++ = reg_pt_val;
  945. }
  946. if (kgsl_mmu_hw_halt_supported(&device->mmu, i)) {
  947. /* unlock the IOMMU lock */
  948. *cmds++ = cp_type3_packet(CP_REG_RMW, 3);
  949. *cmds++ = mmu_ctrl;
  950. /* AND to unmask the lock bit */
  951. *cmds++ =
  952. ~(KGSL_IOMMU_IMPLDEF_MICRO_MMU_CTRL_HALT);
  953. /* OR with 0 so lock bit is unset */
  954. *cmds++ = 0;
  955. /* release all commands with wait_for_me */
  956. *cmds++ = cp_type3_packet(CP_WAIT_FOR_ME, 1);
  957. *cmds++ = 0;
  958. }
  959. }
  960. if (flags & KGSL_MMUFLAGS_TLBFLUSH) {
  961. tlbiall = kgsl_mmu_get_reg_ahbaddr(&device->mmu, i,
  962. KGSL_IOMMU_CONTEXT_USER,
  963. KGSL_IOMMU_CTX_TLBIALL) >> 2;
  964. *cmds++ = cp_type0_packet(tlbiall, 1);
  965. *cmds++ = 1;
  966. tlbsync = kgsl_mmu_get_reg_ahbaddr(&device->mmu, i,
  967. KGSL_IOMMU_CONTEXT_USER,
  968. KGSL_IOMMU_CTX_TLBSYNC) >> 2;
  969. *cmds++ = cp_type0_packet(tlbsync, 1);
  970. *cmds++ = 0;
  971. tlbstatus = kgsl_mmu_get_reg_ahbaddr(&device->mmu, i,
  972. KGSL_IOMMU_CONTEXT_USER,
  973. KGSL_IOMMU_CTX_TLBSTATUS) >> 2;
  974. cmds += adreno_wait_reg_eq(cmds, tlbstatus, 0,
  975. KGSL_IOMMU_CTX_TLBSTATUS_SACTIVE, 0xF);
  976. /* release all commands with wait_for_me */
  977. *cmds++ = cp_type3_packet(CP_WAIT_FOR_ME, 1);
  978. *cmds++ = 0;
  979. }
  980. }
  981. cmds += adreno_add_idle_cmds(adreno_dev, cmds);
  982. return cmds - cmds_orig;
  983. }
  984. /**
  985. * adreno_use_default_setstate() - Use CPU instead of the GPU to manage the mmu?
  986. * @adreno_dev: the device
  987. *
  988. * In many cases it is preferable to poke the iommu or gpummu directly rather
  989. * than using the GPU command stream. If we are idle or trying to go to a low
  990. * power state, using the command stream will be slower and asynchronous, which
  991. * needlessly complicates the power state transitions. Additionally,
  992. * the hardware simulators do not support command stream MMU operations so
  993. * the command stream can never be used if we are capturing CFF data.
  994. *
  995. */
  996. static bool adreno_use_default_setstate(struct adreno_device *adreno_dev)
  997. {
  998. return (adreno_isidle(&adreno_dev->dev) ||
  999. KGSL_STATE_ACTIVE != adreno_dev->dev.state ||
  1000. atomic_read(&adreno_dev->dev.active_cnt) == 0 ||
  1001. adreno_dev->dev.cff_dump_enable);
  1002. }
  1003. static int adreno_iommu_setstate(struct kgsl_device *device,
  1004. unsigned int context_id,
  1005. uint32_t flags)
  1006. {
  1007. phys_addr_t pt_val;
  1008. unsigned int *link = NULL, *cmds;
  1009. struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
  1010. int num_iommu_units;
  1011. struct kgsl_context *context;
  1012. struct adreno_context *adreno_ctx = NULL;
  1013. struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
  1014. unsigned int result;
  1015. if (adreno_use_default_setstate(adreno_dev)) {
  1016. kgsl_mmu_device_setstate(&device->mmu, flags);
  1017. return 0;
  1018. }
  1019. num_iommu_units = kgsl_mmu_get_num_iommu_units(&device->mmu);
  1020. context = kgsl_context_get(device, context_id);
  1021. if (!context) {
  1022. kgsl_mmu_device_setstate(&device->mmu, flags);
  1023. return 0;
  1024. }
  1025. adreno_ctx = ADRENO_CONTEXT(context);
  1026. link = kmalloc(PAGE_SIZE, GFP_KERNEL);
  1027. if (link == NULL) {
  1028. result = -ENOMEM;
  1029. goto done;
  1030. }
  1031. cmds = link;
  1032. result = kgsl_mmu_enable_clk(&device->mmu, KGSL_IOMMU_CONTEXT_USER);
  1033. if (result)
  1034. goto done;
  1035. pt_val = kgsl_mmu_get_pt_base_addr(&device->mmu,
  1036. device->mmu.hwpagetable);
  1037. cmds += __adreno_add_idle_indirect_cmds(cmds,
  1038. device->mmu.setstate_memory.gpuaddr +
  1039. KGSL_IOMMU_SETSTATE_NOP_OFFSET);
  1040. if (msm_soc_version_supports_iommu_v0())
  1041. cmds += _adreno_iommu_setstate_v0(device, cmds, pt_val,
  1042. num_iommu_units, flags);
  1043. else
  1044. cmds += _adreno_iommu_setstate_v1(device, cmds, pt_val,
  1045. num_iommu_units, flags);
  1046. /* invalidate all base pointers */
  1047. *cmds++ = cp_type3_packet(CP_INVALIDATE_STATE, 1);
  1048. *cmds++ = 0x7fff;
  1049. if ((unsigned int) (cmds - link) > (PAGE_SIZE / sizeof(unsigned int))) {
  1050. KGSL_DRV_ERR(device, "Temp command buffer overflow\n");
  1051. BUG();
  1052. }
  1053. /*
  1054. * This returns the per context timestamp but we need to
  1055. * use the global timestamp for iommu clock disablement
  1056. */
  1057. result = adreno_ringbuffer_issuecmds(device, adreno_ctx,
  1058. KGSL_CMD_FLAGS_PMODE, link,
  1059. (unsigned int)(cmds - link));
  1060. /*
  1061. * On error disable the IOMMU clock right away otherwise turn it off
  1062. * after the command has been retired
  1063. */
  1064. if (result)
  1065. kgsl_mmu_disable_clk(&device->mmu,
  1066. KGSL_IOMMU_CONTEXT_USER);
  1067. else
  1068. kgsl_mmu_disable_clk_on_ts(&device->mmu, rb->global_ts,
  1069. KGSL_IOMMU_CONTEXT_USER);
  1070. done:
  1071. kfree(link);
  1072. kgsl_context_put(context);
  1073. return result;
  1074. }
  1075. static int adreno_gpummu_setstate(struct kgsl_device *device,
  1076. unsigned int context_id,
  1077. uint32_t flags)
  1078. {
  1079. struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
  1080. unsigned int link[32];
  1081. unsigned int *cmds = &link[0];
  1082. int sizedwords = 0;
  1083. unsigned int mh_mmu_invalidate = 0x00000003; /*invalidate all and tc */
  1084. struct kgsl_context *context;
  1085. struct adreno_context *adreno_ctx = NULL;
  1086. int ret = 0;
  1087. /*
  1088. * Fix target freeze issue by adding TLB flush for each submit
  1089. * on A20X based targets.
  1090. */
  1091. if (adreno_is_a20x(adreno_dev))
  1092. flags |= KGSL_MMUFLAGS_TLBFLUSH;
  1093. /*
  1094. * If possible, then set the state via the command stream to avoid
  1095. * a CPU idle. Otherwise, use the default setstate which uses register
  1096. * writes. For CFF dump we must idle and use the registers so that it is
  1097. * easier to filter out the mmu accesses from the dump
  1098. */
  1099. if (!adreno_use_default_setstate(adreno_dev)) {
  1100. context = kgsl_context_get(device, context_id);
  1101. if (context == NULL)
  1102. return -EINVAL;
  1103. adreno_ctx = ADRENO_CONTEXT(context);
  1104. if (flags & KGSL_MMUFLAGS_PTUPDATE) {
  1105. /* wait for graphics pipe to be idle */
  1106. *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
  1107. *cmds++ = 0x00000000;
  1108. /* set page table base */
  1109. *cmds++ = cp_type0_packet(MH_MMU_PT_BASE, 1);
  1110. *cmds++ = kgsl_mmu_get_pt_base_addr(&device->mmu,
  1111. device->mmu.hwpagetable);
  1112. sizedwords += 4;
  1113. }
  1114. if (flags & KGSL_MMUFLAGS_TLBFLUSH) {
  1115. if (!(flags & KGSL_MMUFLAGS_PTUPDATE)) {
  1116. *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE,
  1117. 1);
  1118. *cmds++ = 0x00000000;
  1119. sizedwords += 2;
  1120. }
  1121. *cmds++ = cp_type0_packet(MH_MMU_INVALIDATE, 1);
  1122. *cmds++ = mh_mmu_invalidate;
  1123. sizedwords += 2;
  1124. }
  1125. if (flags & KGSL_MMUFLAGS_PTUPDATE &&
  1126. adreno_is_a20x(adreno_dev)) {
  1127. /* HW workaround: to resolve MMU page fault interrupts
1128. * caused by the VGT. It prevents the CP PFP from filling
1129. * the VGT DMA request fifo too early, thereby ensuring
  1130. * that the VGT will not fetch vertex/bin data until
  1131. * after the page table base register has been updated.
  1132. *
  1133. * Two null DRAW_INDX_BIN packets are inserted right
  1134. * after the page table base update, followed by a
  1135. * wait for idle. The null packets will fill up the
  1136. * VGT DMA request fifo and prevent any further
  1137. * vertex/bin updates from occurring until the wait
  1138. * has finished. */
  1139. *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
  1140. *cmds++ = (0x4 << 16) |
  1141. (REG_PA_SU_SC_MODE_CNTL - 0x2000);
  1142. *cmds++ = 0; /* disable faceness generation */
  1143. *cmds++ = cp_type3_packet(CP_SET_BIN_BASE_OFFSET, 1);
  1144. *cmds++ = device->mmu.setstate_memory.gpuaddr;
  1145. *cmds++ = cp_type3_packet(CP_DRAW_INDX_BIN, 6);
  1146. *cmds++ = 0; /* viz query info */
  1147. *cmds++ = 0x0003C004; /* draw indicator */
  1148. *cmds++ = 0; /* bin base */
  1149. *cmds++ = 3; /* bin size */
  1150. *cmds++ =
  1151. device->mmu.setstate_memory.gpuaddr; /* dma base */
  1152. *cmds++ = 6; /* dma size */
  1153. *cmds++ = cp_type3_packet(CP_DRAW_INDX_BIN, 6);
  1154. *cmds++ = 0; /* viz query info */
  1155. *cmds++ = 0x0003C004; /* draw indicator */
  1156. *cmds++ = 0; /* bin base */
  1157. *cmds++ = 3; /* bin size */
  1158. /* dma base */
  1159. *cmds++ = device->mmu.setstate_memory.gpuaddr;
  1160. *cmds++ = 6; /* dma size */
  1161. *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
  1162. *cmds++ = 0x00000000;
  1163. sizedwords += 21;
  1164. }
  1165. if (flags & (KGSL_MMUFLAGS_PTUPDATE | KGSL_MMUFLAGS_TLBFLUSH)) {
  1166. *cmds++ = cp_type3_packet(CP_INVALIDATE_STATE, 1);
  1167. *cmds++ = 0x7fff; /* invalidate all base pointers */
  1168. sizedwords += 2;
  1169. }
  1170. ret = adreno_ringbuffer_issuecmds(device, adreno_ctx,
  1171. KGSL_CMD_FLAGS_PMODE,
  1172. &link[0], sizedwords);
  1173. kgsl_context_put(context);
  1174. } else {
  1175. kgsl_mmu_device_setstate(&device->mmu, flags);
  1176. }
  1177. return ret;
  1178. }
  1179. static int adreno_setstate(struct kgsl_device *device,
  1180. unsigned int context_id,
  1181. uint32_t flags)
  1182. {
  1183. /* call the mmu specific handler */
  1184. if (KGSL_MMU_TYPE_GPU == kgsl_mmu_get_mmutype())
  1185. return adreno_gpummu_setstate(device, context_id, flags);
  1186. else if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype())
  1187. return adreno_iommu_setstate(device, context_id, flags);
  1188. return 0;
  1189. }
  1190. static unsigned int
  1191. a3xx_getchipid(struct kgsl_device *device)
  1192. {
  1193. struct kgsl_device_platform_data *pdata =
  1194. kgsl_device_get_drvdata(device);
  1195. /*
  1196. * All current A3XX chipids are detected at the SOC level. Leave this
  1197. * function here to support any future GPUs that have working
  1198. * chip ID registers
  1199. */
  1200. return pdata->chipid;
  1201. }
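/*
 * a2xx_getchipid - Derive the chip ID for A2XX parts. If the platform data
 * does not supply one, the ID is assembled from the RBBM peripheral and
 * patch-release registers (core, major, minor, patch), with the known quirks
 * for 8x60, 8x50 and 8x25 targets applied.
 */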
  1202. static unsigned int
  1203. a2xx_getchipid(struct kgsl_device *device)
  1204. {
  1205. unsigned int chipid = 0;
  1206. unsigned int coreid, majorid, minorid, patchid, revid;
  1207. struct kgsl_device_platform_data *pdata =
  1208. kgsl_device_get_drvdata(device);
  1209. /* If the chip id is set at the platform level, then just use that */
  1210. if (pdata->chipid != 0)
  1211. return pdata->chipid;
  1212. kgsl_regread(device, REG_RBBM_PERIPHID1, &coreid);
  1213. kgsl_regread(device, REG_RBBM_PERIPHID2, &majorid);
  1214. kgsl_regread(device, REG_RBBM_PATCH_RELEASE, &revid);
  1215. /*
  1216. * adreno 22x gpus are indicated by coreid 2,
  1217. * but REG_RBBM_PERIPHID1 always contains 0 for this field
  1218. */
  1219. if (cpu_is_msm8x60())
  1220. chipid = 2 << 24;
  1221. else
  1222. chipid = (coreid & 0xF) << 24;
  1223. chipid |= ((majorid >> 4) & 0xF) << 16;
  1224. minorid = ((revid >> 0) & 0xFF);
  1225. patchid = ((revid >> 16) & 0xFF);
  1226. /* 8x50 returns 0 for patch release, but it should be 1 */
  1227. /* 8x25 returns 0 for minor id, but it should be 1 */
  1228. if (cpu_is_qsd8x50())
  1229. patchid = 1;
  1230. else if ((cpu_is_msm8625() || cpu_is_msm8625q()) && minorid == 0)
  1231. minorid = 1;
  1232. chipid |= (minorid << 8) | patchid;
  1233. return chipid;
  1234. }
  1235. static unsigned int
  1236. adreno_getchipid(struct kgsl_device *device)
  1237. {
  1238. struct kgsl_device_platform_data *pdata =
  1239. kgsl_device_get_drvdata(device);
  1240. /*
  1241. * All A3XX chipsets will have pdata set, so assume !pdata->chipid is
  1242. * an A2XX processor
  1243. */
  1244. if (pdata->chipid == 0 || ADRENO_CHIPID_MAJOR(pdata->chipid) == 2)
  1245. return a2xx_getchipid(device);
  1246. else
  1247. return a3xx_getchipid(device);
  1248. }
  1249. static inline bool _rev_match(unsigned int id, unsigned int entry)
  1250. {
  1251. return (entry == ANY_ID || entry == id);
  1252. }
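/*
 * adreno_identify_gpu - Read the chip ID, find the matching entry in
 * adreno_gpulist (ANY_ID acts as a wildcard) and copy the per-core
 * configuration (firmware names, GMEM size, jump table offsets, etc.) into
 * the adreno device. Unmatched chips are marked ADRENO_REV_UNKNOWN.
 */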
  1253. static void
  1254. adreno_identify_gpu(struct adreno_device *adreno_dev)
  1255. {
  1256. unsigned int i, core, major, minor, patchid;
  1257. adreno_dev->chip_id = adreno_getchipid(&adreno_dev->dev);
  1258. core = ADRENO_CHIPID_CORE(adreno_dev->chip_id);
  1259. major = ADRENO_CHIPID_MAJOR(adreno_dev->chip_id);
  1260. minor = ADRENO_CHIPID_MINOR(adreno_dev->chip_id);
  1261. patchid = ADRENO_CHIPID_PATCH(adreno_dev->chip_id);
  1262. for (i = 0; i < ARRAY_SIZE(adreno_gpulist); i++) {
  1263. if (core == adreno_gpulist[i].core &&
  1264. _rev_match(major, adreno_gpulist[i].major) &&
  1265. _rev_match(minor, adreno_gpulist[i].minor) &&
  1266. _rev_match(patchid, adreno_gpulist[i].patchid))
  1267. break;
  1268. }
  1269. if (i == ARRAY_SIZE(adreno_gpulist)) {
  1270. adreno_dev->gpurev = ADRENO_REV_UNKNOWN;
  1271. return;
  1272. }
  1273. adreno_dev->gpurev = adreno_gpulist[i].gpurev;
  1274. adreno_dev->gpudev = adreno_gpulist[i].gpudev;
  1275. adreno_dev->pfp_fwfile = adreno_gpulist[i].pfpfw;
  1276. adreno_dev->pm4_fwfile = adreno_gpulist[i].pm4fw;
  1277. adreno_dev->istore_size = adreno_gpulist[i].istore_size;
  1278. adreno_dev->pix_shader_start = adreno_gpulist[i].pix_shader_start;
  1279. adreno_dev->instruction_size = adreno_gpulist[i].instruction_size;
  1280. adreno_dev->gmem_size = adreno_gpulist[i].gmem_size;
  1281. adreno_dev->pm4_jt_idx = adreno_gpulist[i].pm4_jt_idx;
  1282. adreno_dev->pm4_jt_addr = adreno_gpulist[i].pm4_jt_addr;
  1283. adreno_dev->pm4_bstrp_size = adreno_gpulist[i].pm4_bstrp_size;
  1284. adreno_dev->pfp_jt_idx = adreno_gpulist[i].pfp_jt_idx;
  1285. adreno_dev->pfp_jt_addr = adreno_gpulist[i].pfp_jt_addr;
  1286. adreno_dev->pfp_bstrp_size = adreno_gpulist[i].pfp_bstrp_size;
  1287. adreno_dev->pfp_bstrp_ver = adreno_gpulist[i].pfp_bstrp_ver;
  1288. adreno_dev->gpulist_index = i;
  1289. /*
1290. * Initialize uninitialized GPU register offsets; this only needs to be
1291. * done once. Set all offsets that were not initialized to ADRENO_REG_UNUSED.
  1292. */
  1293. for (i = 0; i < ADRENO_REG_REGISTER_MAX; i++) {
  1294. if (adreno_dev->gpudev->reg_offsets->offset_0 != i &&
  1295. !adreno_dev->gpudev->reg_offsets->offsets[i]) {
  1296. adreno_dev->gpudev->reg_offsets->offsets[i] =
  1297. ADRENO_REG_UNUSED;
  1298. }
  1299. }
  1300. }
  1301. static struct platform_device_id adreno_id_table[] = {
  1302. { DEVICE_3D0_NAME, (kernel_ulong_t)&device_3d0.dev, },
  1303. {},
  1304. };
  1305. MODULE_DEVICE_TABLE(platform, adreno_id_table);
  1306. static struct of_device_id adreno_match_table[] = {
  1307. { .compatible = "qcom,kgsl-3d0", },
  1308. {}
  1309. };
  1310. static inline int adreno_of_read_property(struct device_node *node,
  1311. const char *prop, unsigned int *ptr)
  1312. {
  1313. int ret = of_property_read_u32(node, prop, ptr);
  1314. if (ret)
  1315. KGSL_CORE_ERR("Unable to read '%s'\n", prop);
  1316. return ret;
  1317. }
  1318. static struct device_node *adreno_of_find_subnode(struct device_node *parent,
  1319. const char *name)
  1320. {
  1321. struct device_node *child;
  1322. for_each_child_of_node(parent, child) {
  1323. if (of_device_is_compatible(child, name))
  1324. return child;
  1325. }
  1326. return NULL;
  1327. }
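/*
 * adreno_of_get_pwrlevels - Parse the 'qcom,gpu-pwrlevels' subnode. Each
 * child supplies one power level (GPU frequency, bus frequency, IO fraction)
 * indexed by its 'reg' property; the initial level falls back to 1 if
 * 'qcom,initial-pwrlevel' is missing or out of range.
 */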
  1328. static int adreno_of_get_pwrlevels(struct device_node *parent,
  1329. struct kgsl_device_platform_data *pdata)
  1330. {
  1331. struct device_node *node, *child;
  1332. int ret = -EINVAL;
  1333. node = adreno_of_find_subnode(parent, "qcom,gpu-pwrlevels");
  1334. if (node == NULL) {
  1335. KGSL_CORE_ERR("Unable to find 'qcom,gpu-pwrlevels'\n");
  1336. return -EINVAL;
  1337. }
  1338. pdata->num_levels = 0;
  1339. for_each_child_of_node(node, child) {
  1340. unsigned int index;
  1341. struct kgsl_pwrlevel *level;
  1342. if (adreno_of_read_property(child, "reg", &index))
  1343. goto done;
  1344. if (index >= KGSL_MAX_PWRLEVELS) {
  1345. KGSL_CORE_ERR("Pwrlevel index %d is out of range\n",
  1346. index);
  1347. continue;
  1348. }
  1349. if (index >= pdata->num_levels)
  1350. pdata->num_levels = index + 1;
  1351. level = &pdata->pwrlevel[index];
  1352. if (adreno_of_read_property(child, "qcom,gpu-freq",
  1353. &level->gpu_freq))
  1354. goto done;
  1355. if (adreno_of_read_property(child, "qcom,bus-freq",
  1356. &level->bus_freq))
  1357. goto done;
  1358. if (adreno_of_read_property(child, "qcom,io-fraction",
  1359. &level->io_fraction))
  1360. level->io_fraction = 0;
  1361. }
  1362. if (adreno_of_read_property(parent, "qcom,initial-pwrlevel",
  1363. &pdata->init_level))
  1364. pdata->init_level = 1;
  1365. if (pdata->init_level < 0 || pdata->init_level > pdata->num_levels) {
  1366. KGSL_CORE_ERR("Initial power level out of range\n");
  1367. pdata->init_level = 1;
  1368. }
  1369. ret = 0;
  1370. done:
  1371. return ret;
  1372. }
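/*
 * adreno_of_get_iommu - Build the IOMMU platform data from the device tree.
 * Follows the 'iommu' phandle, records the register window and the halt
 * property, and maps each child context label (gfx3d_user / gfx3d_priv /
 * gfx3d_spare) to its context ID.
 */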
  1373. static int adreno_of_get_iommu(struct device_node *parent,
  1374. struct kgsl_device_platform_data *pdata)
  1375. {
  1376. struct device_node *node, *child;
  1377. struct kgsl_device_iommu_data *data = NULL;
  1378. struct kgsl_iommu_ctx *ctxs = NULL;
  1379. u32 reg_val[2];
  1380. int ctx_index = 0;
  1381. node = of_parse_phandle(parent, "iommu", 0);
  1382. if (node == NULL)
  1383. return -EINVAL;
  1384. data = kzalloc(sizeof(*data), GFP_KERNEL);
  1385. if (data == NULL) {
1386. KGSL_CORE_ERR("kzalloc(%zu) failed\n", sizeof(*data));
  1387. goto err;
  1388. }
  1389. if (of_property_read_u32_array(node, "reg", reg_val, 2))
  1390. goto err;
  1391. data->physstart = reg_val[0];
  1392. data->physend = data->physstart + reg_val[1] - 1;
  1393. data->iommu_halt_enable = of_property_read_bool(node,
  1394. "qcom,iommu-enable-halt");
  1395. data->iommu_ctx_count = 0;
  1396. for_each_child_of_node(node, child)
  1397. data->iommu_ctx_count++;
  1398. ctxs = kzalloc(data->iommu_ctx_count * sizeof(struct kgsl_iommu_ctx),
  1399. GFP_KERNEL);
  1400. if (ctxs == NULL) {
1401. KGSL_CORE_ERR("kzalloc(%zu) failed\n",
  1402. data->iommu_ctx_count * sizeof(struct kgsl_iommu_ctx));
  1403. goto err;
  1404. }
  1405. for_each_child_of_node(node, child) {
  1406. int ret = of_property_read_string(child, "label",
  1407. &ctxs[ctx_index].iommu_ctx_name);
  1408. if (ret) {
  1409. KGSL_CORE_ERR("Unable to read KGSL IOMMU 'label'\n");
  1410. goto err;
  1411. }
  1412. if (!strcmp("gfx3d_user", ctxs[ctx_index].iommu_ctx_name)) {
  1413. ctxs[ctx_index].ctx_id = 0;
  1414. } else if (!strcmp("gfx3d_priv",
  1415. ctxs[ctx_index].iommu_ctx_name)) {
  1416. ctxs[ctx_index].ctx_id = 1;
  1417. } else if (!strcmp("gfx3d_spare",
  1418. ctxs[ctx_index].iommu_ctx_name)) {
  1419. ctxs[ctx_index].ctx_id = 2;
  1420. } else {
  1421. KGSL_CORE_ERR("dt: IOMMU context %s is invalid\n",
  1422. ctxs[ctx_index].iommu_ctx_name);
  1423. goto err;
  1424. }
  1425. ctx_index++;
  1426. }
  1427. data->iommu_ctxs = ctxs;
  1428. pdata->iommu_data = data;
  1429. pdata->iommu_count = 1;
  1430. return 0;
  1431. err:
  1432. kfree(ctxs);
  1433. kfree(data);
  1434. return -EINVAL;
  1435. }
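/*
 * adreno_of_get_pdata - Construct platform data from the device tree when the
 * board file did not provide any: chip ID, power levels, timeouts, clock map,
 * bus scale table and IOMMU data are all read from the GPU node.
 */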
  1436. static int adreno_of_get_pdata(struct platform_device *pdev)
  1437. {
  1438. struct kgsl_device_platform_data *pdata = NULL;
  1439. struct kgsl_device *device;
  1440. int ret = -EINVAL;
  1441. pdev->id_entry = adreno_id_table;
  1442. pdata = pdev->dev.platform_data;
  1443. if (pdata)
  1444. return 0;
  1445. if (of_property_read_string(pdev->dev.of_node, "label", &pdev->name)) {
  1446. KGSL_CORE_ERR("Unable to read 'label'\n");
  1447. goto err;
  1448. }
  1449. if (adreno_of_read_property(pdev->dev.of_node, "qcom,id", &pdev->id))
  1450. goto err;
  1451. pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
  1452. if (pdata == NULL) {
1453. KGSL_CORE_ERR("kzalloc(%zu) failed\n", sizeof(*pdata));
  1454. ret = -ENOMEM;
  1455. goto err;
  1456. }
  1457. if (adreno_of_read_property(pdev->dev.of_node, "qcom,chipid",
  1458. &pdata->chipid))
  1459. goto err;
  1460. /* pwrlevel Data */
  1461. ret = adreno_of_get_pwrlevels(pdev->dev.of_node, pdata);
  1462. if (ret)
  1463. goto err;
  1464. /* get pm-qos-latency from target, set it to default if not found */
  1465. if (adreno_of_read_property(pdev->dev.of_node, "qcom,pm-qos-latency",
  1466. &pdata->pm_qos_latency))
  1467. pdata->pm_qos_latency = 501;
  1468. if (adreno_of_read_property(pdev->dev.of_node, "qcom,idle-timeout",
  1469. &pdata->idle_timeout))
  1470. pdata->idle_timeout = 80;
  1471. pdata->strtstp_sleepwake = of_property_read_bool(pdev->dev.of_node,
  1472. "qcom,strtstp-sleepwake");
  1473. pdata->bus_control = of_property_read_bool(pdev->dev.of_node,
  1474. "qcom,bus-control");
  1475. if (adreno_of_read_property(pdev->dev.of_node, "qcom,clk-map",
  1476. &pdata->clk_map))
  1477. goto err;
  1478. device = (struct kgsl_device *)pdev->id_entry->driver_data;
  1479. if (device->id != KGSL_DEVICE_3D0)
  1480. goto err;
  1481. /* Bus Scale Data */
  1482. pdata->bus_scale_table = msm_bus_cl_get_pdata(pdev);
  1483. if (IS_ERR_OR_NULL(pdata->bus_scale_table)) {
  1484. ret = PTR_ERR(pdata->bus_scale_table);
  1485. if (!ret)
  1486. ret = -EINVAL;
  1487. goto err;
  1488. }
  1489. ret = adreno_of_get_iommu(pdev->dev.of_node, pdata);
  1490. if (ret)
  1491. goto err;
  1492. pdev->dev.platform_data = pdata;
  1493. return 0;
  1494. err:
  1495. if (pdata) {
  1496. if (pdata->iommu_data)
  1497. kfree(pdata->iommu_data->iommu_ctxs);
  1498. kfree(pdata->iommu_data);
  1499. }
  1500. kfree(pdata);
  1501. return ret;
  1502. }
  1503. #ifdef CONFIG_MSM_OCMEM
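/*
 * On A330 and A305B targets GMEM is carved out of on-chip memory (OCMEM), so
 * the backing region is allocated once at start and released at stop.
 */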
  1504. static int
  1505. adreno_ocmem_gmem_malloc(struct adreno_device *adreno_dev)
  1506. {
  1507. if (!(adreno_is_a330(adreno_dev) ||
  1508. adreno_is_a305b(adreno_dev)))
  1509. return 0;
1510. /* OCMEM is only allocated once; consecutive allocations are not supported */
  1511. if (adreno_dev->ocmem_hdl != NULL)
  1512. return 0;
  1513. adreno_dev->ocmem_hdl =
  1514. ocmem_allocate(OCMEM_GRAPHICS, adreno_dev->gmem_size);
  1515. if (adreno_dev->ocmem_hdl == NULL)
  1516. return -ENOMEM;
  1517. adreno_dev->gmem_size = adreno_dev->ocmem_hdl->len;
  1518. adreno_dev->ocmem_base = adreno_dev->ocmem_hdl->addr;
  1519. return 0;
  1520. }
  1521. static void
  1522. adreno_ocmem_gmem_free(struct adreno_device *adreno_dev)
  1523. {
  1524. if (!(adreno_is_a330(adreno_dev) ||
  1525. adreno_is_a305b(adreno_dev)))
  1526. return;
  1527. if (adreno_dev->ocmem_hdl == NULL)
  1528. return;
  1529. ocmem_free(OCMEM_GRAPHICS, adreno_dev->ocmem_hdl);
  1530. adreno_dev->ocmem_hdl = NULL;
  1531. }
  1532. #else
  1533. static int
  1534. adreno_ocmem_gmem_malloc(struct adreno_device *adreno_dev)
  1535. {
  1536. return 0;
  1537. }
  1538. static void
  1539. adreno_ocmem_gmem_free(struct adreno_device *adreno_dev)
  1540. {
  1541. }
  1542. #endif
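/*
 * adreno_probe - Platform probe for the adreno GPU: gather platform data
 * (from the device tree when present), set up the ringbuffer, dispatcher,
 * debugfs/profiling/fault-tolerance nodes and power scaling, and register the
 * input handler (failure to register the handler is not fatal).
 */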
  1543. static int __devinit
  1544. adreno_probe(struct platform_device *pdev)
  1545. {
  1546. struct kgsl_device *device;
  1547. struct kgsl_device_platform_data *pdata = NULL;
  1548. struct adreno_device *adreno_dev;
  1549. int status = -EINVAL;
  1550. bool is_dt;
  1551. is_dt = of_match_device(adreno_match_table, &pdev->dev);
  1552. if (is_dt && pdev->dev.of_node) {
  1553. status = adreno_of_get_pdata(pdev);
  1554. if (status)
  1555. goto error_return;
  1556. }
  1557. device = (struct kgsl_device *)pdev->id_entry->driver_data;
  1558. adreno_dev = ADRENO_DEVICE(device);
  1559. device->parentdev = &pdev->dev;
  1560. status = adreno_ringbuffer_init(device);
  1561. if (status != 0)
  1562. goto error;
  1563. status = kgsl_device_platform_probe(device);
  1564. if (status)
  1565. goto error_close_rb;
  1566. status = adreno_dispatcher_init(adreno_dev);
  1567. if (status)
  1568. goto error_close_device;
  1569. adreno_debugfs_init(device);
  1570. adreno_profile_init(device);
  1571. adreno_ft_init_sysfs(device);
  1572. kgsl_pwrscale_init(&pdev->dev, CONFIG_MSM_ADRENO_DEFAULT_GOVERNOR);
  1573. device->flags &= ~KGSL_FLAGS_SOFT_RESET;
  1574. pdata = kgsl_device_get_drvdata(device);
  1575. adreno_input_handler.private = device;
  1576. /*
  1577. * It isn't fatal if we cannot register the input handler. Sad,
  1578. * perhaps, but not fatal
  1579. */
  1580. if (input_register_handler(&adreno_input_handler))
  1581. KGSL_DRV_ERR(device, "Unable to register the input handler\n");
  1582. return 0;
  1583. error_close_device:
  1584. kgsl_device_platform_remove(device);
  1585. error_close_rb:
  1586. adreno_ringbuffer_close(&adreno_dev->ringbuffer);
  1587. error:
  1588. device->parentdev = NULL;
  1589. error_return:
  1590. return status;
  1591. }
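/*
 * adreno_remove - Platform remove callback; tear everything down in roughly
 * the reverse order of adreno_probe().
 */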
  1592. static int __devexit adreno_remove(struct platform_device *pdev)
  1593. {
  1594. struct kgsl_device *device;
  1595. struct adreno_device *adreno_dev;
  1596. device = (struct kgsl_device *)pdev->id_entry->driver_data;
  1597. adreno_dev = ADRENO_DEVICE(device);
  1598. input_unregister_handler(&adreno_input_handler);
  1599. adreno_profile_close(device);
  1600. kgsl_pwrscale_close(device);
  1601. adreno_dispatcher_close(adreno_dev);
  1602. adreno_ringbuffer_close(&adreno_dev->ringbuffer);
  1603. adreno_perfcounter_close(device);
  1604. kgsl_device_platform_remove(device);
  1605. clear_bit(ADRENO_DEVICE_INITIALIZED, &adreno_dev->priv);
  1606. return 0;
  1607. }
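/*
 * adreno_init - One-time initialization: identify the GPU, load the PM4/PFP
 * microcode, check whether the firmware supports the IOMMU sync lock packets,
 * program the fault detection register list and apply target specific
 * one-time setup such as the A3XX power-on shader fixup.
 */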
  1608. static int adreno_init(struct kgsl_device *device)
  1609. {
  1610. struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
  1611. int i;
  1612. int ret;
  1613. kgsl_pwrctrl_set_state(device, KGSL_STATE_INIT);
  1614. /*
1615. * Initialization only needs to be done once, until the
1616. * device is shut down
  1617. */
  1618. if (test_bit(ADRENO_DEVICE_INITIALIZED, &adreno_dev->priv))
  1619. return 0;
  1620. /* Power up the device */
  1621. kgsl_pwrctrl_enable(device);
  1622. /* Identify the specific GPU */
  1623. adreno_identify_gpu(adreno_dev);
  1624. if (adreno_ringbuffer_read_pm4_ucode(device)) {
  1625. KGSL_DRV_ERR(device, "Reading pm4 microcode failed %s\n",
  1626. adreno_dev->pm4_fwfile);
  1627. BUG_ON(1);
  1628. }
  1629. if (adreno_ringbuffer_read_pfp_ucode(device)) {
  1630. KGSL_DRV_ERR(device, "Reading pfp microcode failed %s\n",
  1631. adreno_dev->pfp_fwfile);
  1632. BUG_ON(1);
  1633. }
  1634. if (adreno_dev->gpurev == ADRENO_REV_UNKNOWN) {
  1635. KGSL_DRV_ERR(device, "Unknown chip ID %x\n",
  1636. adreno_dev->chip_id);
  1637. BUG_ON(1);
  1638. }
  1639. kgsl_pwrctrl_set_state(device, KGSL_STATE_INIT);
  1640. /*
  1641. * Check if firmware supports the sync lock PM4 packets needed
  1642. * for IOMMUv1
  1643. */
  1644. if ((adreno_dev->pm4_fw_version >=
  1645. adreno_gpulist[adreno_dev->gpulist_index].sync_lock_pm4_ver) &&
  1646. (adreno_dev->pfp_fw_version >=
  1647. adreno_gpulist[adreno_dev->gpulist_index].sync_lock_pfp_ver))
  1648. device->mmu.flags |= KGSL_MMU_FLAGS_IOMMU_SYNC;
  1649. /* Initialize ft detection register offsets */
  1650. ft_detect_regs[0] = adreno_getreg(adreno_dev,
  1651. ADRENO_REG_RBBM_STATUS);
  1652. ft_detect_regs[1] = adreno_getreg(adreno_dev,
  1653. ADRENO_REG_CP_RB_RPTR);
  1654. ft_detect_regs[2] = adreno_getreg(adreno_dev,
  1655. ADRENO_REG_CP_IB1_BASE);
  1656. ft_detect_regs[3] = adreno_getreg(adreno_dev,
  1657. ADRENO_REG_CP_IB1_BUFSZ);
  1658. ft_detect_regs[4] = adreno_getreg(adreno_dev,
  1659. ADRENO_REG_CP_IB2_BASE);
  1660. ft_detect_regs[5] = adreno_getreg(adreno_dev,
  1661. ADRENO_REG_CP_IB2_BUFSZ);
  1662. for (i = 6; i < FT_DETECT_REGS_COUNT; i++)
  1663. ft_detect_regs[i] = 0;
  1664. /* turn on hang interrupt for a330v2 by default */
  1665. if (adreno_is_a330v2(adreno_dev))
  1666. set_bit(ADRENO_DEVICE_HANG_INTR, &adreno_dev->priv);
  1667. ret = adreno_perfcounter_init(device);
  1668. if (ret)
  1669. goto done;
  1670. /* Power down the device */
  1671. kgsl_pwrctrl_disable(device);
  1672. /* Enable the power on shader corruption fix for all A3XX targets */
  1673. if (adreno_is_a3xx(adreno_dev))
  1674. adreno_a3xx_pwron_fixup_init(adreno_dev);
  1675. set_bit(ADRENO_DEVICE_INITIALIZED, &adreno_dev->priv);
  1676. done:
  1677. return ret;
  1678. }
  1679. /**
  1680. * _adreno_start - Power up the GPU and prepare to accept commands
  1681. * @adreno_dev: Pointer to an adreno_device structure
  1682. *
1683. * The core function that powers up and initializes the GPU. This function is
  1684. * called at init and after coming out of SLUMBER
  1685. */
  1686. static int _adreno_start(struct adreno_device *adreno_dev)
  1687. {
  1688. struct kgsl_device *device = &adreno_dev->dev;
  1689. int status = -EINVAL;
  1690. unsigned int state = device->state;
  1691. unsigned int regulator_left_on = 0;
  1692. kgsl_cffdump_open(device);
  1693. kgsl_pwrctrl_set_state(device, KGSL_STATE_INIT);
  1694. regulator_left_on = (regulator_is_enabled(device->pwrctrl.gpu_reg) ||
  1695. (device->pwrctrl.gpu_cx &&
  1696. regulator_is_enabled(device->pwrctrl.gpu_cx)));
  1697. /* Clear any GPU faults that might have been left over */
  1698. adreno_clear_gpu_fault(adreno_dev);
  1699. /* Power up the device */
  1700. kgsl_pwrctrl_enable(device);
  1701. /* Set the bit to indicate that we've just powered on */
  1702. set_bit(ADRENO_DEVICE_PWRON, &adreno_dev->priv);
  1703. /* Set up a2xx special case */
  1704. if (adreno_is_a2xx(adreno_dev)) {
  1705. /*
  1706. * the MH_CLNT_INTF_CTRL_CONFIG registers aren't present
  1707. * on older gpus
  1708. */
  1709. if (adreno_is_a20x(adreno_dev)) {
  1710. device->mh.mh_intf_cfg1 = 0;
  1711. device->mh.mh_intf_cfg2 = 0;
  1712. }
  1713. kgsl_mh_start(device);
  1714. }
  1715. status = kgsl_mmu_start(device);
  1716. if (status)
  1717. goto error_clk_off;
  1718. status = adreno_ocmem_gmem_malloc(adreno_dev);
  1719. if (status) {
  1720. KGSL_DRV_ERR(device, "OCMEM malloc failed\n");
  1721. goto error_mmu_off;
  1722. }
  1723. if (regulator_left_on && adreno_dev->gpudev->soft_reset) {
  1724. /*
  1725. * Reset the GPU for A3xx. A2xx does a soft reset in
  1726. * the start function.
  1727. */
  1728. adreno_dev->gpudev->soft_reset(adreno_dev);
  1729. }
  1730. /* Restore performance counter registers with saved values */
  1731. adreno_perfcounter_restore(adreno_dev);
  1732. /* Start the GPU */
  1733. adreno_dev->gpudev->start(adreno_dev);
  1734. kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);
  1735. device->ftbl->irqctrl(device, 1);
  1736. status = adreno_ringbuffer_cold_start(&adreno_dev->ringbuffer);
  1737. if (status)
  1738. goto error_irq_off;
  1739. status = adreno_perfcounter_start(adreno_dev);
  1740. if (status)
  1741. goto error_rb_stop;
  1742. /* Start the dispatcher */
  1743. adreno_dispatcher_start(device);
  1744. device->reset_counter++;
  1745. set_bit(ADRENO_DEVICE_STARTED, &adreno_dev->priv);
  1746. return 0;
  1747. error_rb_stop:
  1748. adreno_ringbuffer_stop(&adreno_dev->ringbuffer);
  1749. error_irq_off:
  1750. kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
  1751. error_mmu_off:
  1752. kgsl_mmu_stop(&device->mmu);
  1753. error_clk_off:
  1754. kgsl_pwrctrl_disable(device);
  1755. /* set the state back to original state */
  1756. kgsl_pwrctrl_set_state(device, state);
  1757. return status;
  1758. }
  1759. /**
  1760. * adreno_start() - Power up and initialize the GPU
  1761. * @device: Pointer to the KGSL device to power up
1762. * @priority: Boolean flag to specify whether the start should be run at
1763. * elevated (low latency) thread priority
  1764. *
  1765. * Power up the GPU and initialize it. If priority is specified then elevate
  1766. * the thread priority for the duration of the start operation
  1767. */
  1768. static int adreno_start(struct kgsl_device *device, int priority)
  1769. {
  1770. struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
  1771. int nice = task_nice(current);
  1772. int ret;
  1773. if (priority && (_wake_nice < nice))
  1774. set_user_nice(current, _wake_nice);
  1775. ret = _adreno_start(adreno_dev);
  1776. if (priority)
  1777. set_user_nice(current, nice);
  1778. return ret;
  1779. }
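/*
 * adreno_stop - Power down the GPU: release the active draw context, stop the
 * dispatcher, ringbuffer and MMU, mask interrupts, save the performance
 * counters and remove power.
 */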
  1780. static int adreno_stop(struct kgsl_device *device)
  1781. {
  1782. struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
  1783. if (adreno_dev->drawctxt_active)
  1784. kgsl_context_put(&adreno_dev->drawctxt_active->base);
  1785. adreno_dev->drawctxt_active = NULL;
  1786. adreno_dispatcher_stop(adreno_dev);
  1787. adreno_ringbuffer_stop(&adreno_dev->ringbuffer);
  1788. kgsl_mmu_stop(&device->mmu);
  1789. device->ftbl->irqctrl(device, 0);
  1790. kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
  1791. del_timer_sync(&device->idle_timer);
  1792. adreno_ocmem_gmem_free(adreno_dev);
1793. /* Save physical performance counter values before GPU power down */
  1794. adreno_perfcounter_save(adreno_dev);
  1795. /* Power down the device */
  1796. kgsl_pwrctrl_disable(device);
  1797. kgsl_cffdump_close(device);
  1798. clear_bit(ADRENO_DEVICE_STARTED, &adreno_dev->priv);
  1799. return 0;
  1800. }
  1801. /**
  1802. * adreno_reset() - Helper function to reset the GPU
  1803. * @device: Pointer to the KGSL device structure for the GPU
  1804. *
  1805. * Try to reset the GPU to recover from a fault. First, try to do a low latency
  1806. * soft reset. If the soft reset fails for some reason, then bring out the big
  1807. * guns and toggle the footswitch.
  1808. */
  1809. int adreno_reset(struct kgsl_device *device)
  1810. {
  1811. int ret = -EINVAL;
  1812. struct kgsl_mmu *mmu = &device->mmu;
  1813. int i = 0;
  1814. /* Try soft reset first, for non mmu fault case only */
  1815. if (!atomic_read(&mmu->fault)) {
  1816. ret = adreno_soft_reset(device);
  1817. if (ret)
  1818. KGSL_DEV_ERR_ONCE(device, "Device soft reset failed\n");
  1819. }
  1820. if (ret) {
  1821. /* If soft reset failed/skipped, then pull the power */
  1822. adreno_stop(device);
  1823. /* Keep trying to start the device until it works */
  1824. for (i = 0; i < NUM_TIMES_RESET_RETRY; i++) {
  1825. ret = adreno_start(device, 0);
  1826. if (!ret)
  1827. break;
  1828. msleep(20);
  1829. }
  1830. }
  1831. if (ret)
  1832. return ret;
  1833. if (0 != i)
1834. KGSL_DRV_WARN(device, "Device hard reset tried %d times\n", i);
  1835. /*
  1836. * If active_cnt is non-zero then the system was active before
  1837. * going into a reset - put it back in that state
  1838. */
  1839. if (atomic_read(&device->active_cnt))
  1840. kgsl_pwrctrl_set_state(device, KGSL_STATE_ACTIVE);
  1841. /* Set the page table back to the default page table */
  1842. kgsl_mmu_setstate(&device->mmu, device->mmu.defaultpagetable,
  1843. KGSL_MEMSTORE_GLOBAL);
  1844. return ret;
  1845. }
  1846. /**
  1847. * _ft_sysfs_store() - Common routine to write to FT sysfs files
  1848. * @buf: value to write
  1849. * @count: size of the value to write
1850. * @ptr: Pointer to the FT configuration value to update
  1851. *
  1852. * This is a common routine to write to FT sysfs files.
  1853. */
  1854. static int _ft_sysfs_store(const char *buf, size_t count, unsigned int *ptr)
  1855. {
  1856. char temp[20];
  1857. unsigned long val;
  1858. int rc;
  1859. snprintf(temp, sizeof(temp), "%.*s",
  1860. (int)min(count, sizeof(temp) - 1), buf);
  1861. rc = kstrtoul(temp, 0, &val);
  1862. if (rc)
  1863. return rc;
  1864. *ptr = val;
  1865. return count;
  1866. }
  1867. /**
  1868. * _get_adreno_dev() - Routine to get a pointer to adreno dev
1869. * @dev: device ptr
1870. *
1871. * Returns the adreno device behind @dev, or NULL if there is none.
  1873. */
  1874. struct adreno_device *_get_adreno_dev(struct device *dev)
  1875. {
  1876. struct kgsl_device *device = kgsl_device_from_dev(dev);
  1877. return device ? ADRENO_DEVICE(device) : NULL;
  1878. }
  1879. /**
  1880. * _ft_policy_store() - Routine to configure FT policy
  1881. * @dev: device ptr
  1882. * @attr: Device attribute
  1883. * @buf: value to write
  1884. * @count: size of the value to write
  1885. *
  1886. * FT policy can be set to any of the options below.
  1887. * KGSL_FT_DISABLE -> BIT(0) Set to disable FT
  1888. * KGSL_FT_REPLAY -> BIT(1) Set to enable replay
  1889. * KGSL_FT_SKIPIB -> BIT(2) Set to skip IB
  1890. * KGSL_FT_SKIPFRAME -> BIT(3) Set to skip frame
1891. * By default the FT policy is set to KGSL_FT_DEFAULT_POLICY.
  1892. */
  1893. static int _ft_policy_store(struct device *dev,
  1894. struct device_attribute *attr,
  1895. const char *buf, size_t count)
  1896. {
  1897. struct adreno_device *adreno_dev = _get_adreno_dev(dev);
  1898. int ret;
  1899. if (adreno_dev == NULL)
  1900. return 0;
  1901. mutex_lock(&adreno_dev->dev.mutex);
  1902. ret = _ft_sysfs_store(buf, count, &adreno_dev->ft_policy);
  1903. mutex_unlock(&adreno_dev->dev.mutex);
  1904. return ret;
  1905. }
  1906. /**
  1907. * _ft_policy_show() - Routine to read FT policy
  1908. * @dev: device ptr
  1909. * @attr: Device attribute
  1910. * @buf: value read
  1911. *
  1912. * This is a routine to read current FT policy
  1913. */
  1914. static int _ft_policy_show(struct device *dev,
  1915. struct device_attribute *attr,
  1916. char *buf)
  1917. {
  1918. struct adreno_device *adreno_dev = _get_adreno_dev(dev);
  1919. if (adreno_dev == NULL)
  1920. return 0;
  1921. return snprintf(buf, PAGE_SIZE, "0x%X\n", adreno_dev->ft_policy);
  1922. }
  1923. /**
  1924. * _ft_pagefault_policy_store() - Routine to configure FT
  1925. * pagefault policy
  1926. * @dev: device ptr
  1927. * @attr: Device attribute
  1928. * @buf: value to write
  1929. * @count: size of the value to write
  1930. *
  1931. * FT pagefault policy can be set to any of the options below.
  1932. * KGSL_FT_PAGEFAULT_INT_ENABLE -> BIT(0) set to enable pagefault INT
  1933. * KGSL_FT_PAGEFAULT_GPUHALT_ENABLE -> BIT(1) Set to enable GPU HALT on
  1934. * pagefaults. This stalls the GPU on a pagefault on IOMMU v1 HW.
  1935. * KGSL_FT_PAGEFAULT_LOG_ONE_PER_PAGE -> BIT(2) Set to log only one
  1936. * pagefault per page.
  1937. * KGSL_FT_PAGEFAULT_LOG_ONE_PER_INT -> BIT(3) Set to log only one
  1938. * pagefault per INT.
  1939. */
  1940. static int _ft_pagefault_policy_store(struct device *dev,
  1941. struct device_attribute *attr,
  1942. const char *buf, size_t count)
  1943. {
  1944. struct adreno_device *adreno_dev = _get_adreno_dev(dev);
  1945. int ret = 0;
  1946. unsigned int policy = 0;
  1947. if (adreno_dev == NULL)
  1948. return 0;
  1949. mutex_lock(&adreno_dev->dev.mutex);
1950. /* Parse the new value and, if it is valid, apply the pagefault policy to the MMU */
  1951. if (count != _ft_sysfs_store(buf, count, &policy))
  1952. ret = -EINVAL;
  1953. if (!ret) {
  1954. policy &= (KGSL_FT_PAGEFAULT_INT_ENABLE |
  1955. KGSL_FT_PAGEFAULT_GPUHALT_ENABLE |
  1956. KGSL_FT_PAGEFAULT_LOG_ONE_PER_PAGE |
  1957. KGSL_FT_PAGEFAULT_LOG_ONE_PER_INT);
  1958. ret = kgsl_mmu_set_pagefault_policy(&(adreno_dev->dev.mmu),
  1959. adreno_dev->ft_pf_policy);
  1960. if (!ret)
  1961. adreno_dev->ft_pf_policy = policy;
  1962. }
  1963. mutex_unlock(&adreno_dev->dev.mutex);
  1964. if (!ret)
  1965. return count;
  1966. else
  1967. return 0;
  1968. }
  1969. /**
  1970. * _ft_pagefault_policy_show() - Routine to read FT pagefault
  1971. * policy
  1972. * @dev: device ptr
  1973. * @attr: Device attribute
  1974. * @buf: value read
  1975. *
  1976. * This is a routine to read current FT pagefault policy
  1977. */
  1978. static int _ft_pagefault_policy_show(struct device *dev,
  1979. struct device_attribute *attr,
  1980. char *buf)
  1981. {
  1982. struct adreno_device *adreno_dev = _get_adreno_dev(dev);
  1983. if (adreno_dev == NULL)
  1984. return 0;
  1985. return snprintf(buf, PAGE_SIZE, "0x%X\n", adreno_dev->ft_pf_policy);
  1986. }
  1987. /**
  1988. * _ft_fast_hang_detect_store() - Routine to configure FT fast
  1989. * hang detect policy
  1990. * @dev: device ptr
  1991. * @attr: Device attribute
  1992. * @buf: value to write
  1993. * @count: size of the value to write
  1994. *
  1995. * 0x1 - Enable fast hang detection
  1996. * 0x0 - Disable fast hang detection
  1997. */
  1998. static int _ft_fast_hang_detect_store(struct device *dev,
  1999. struct device_attribute *attr,
  2000. const char *buf, size_t count)
  2001. {
  2002. struct adreno_device *adreno_dev = _get_adreno_dev(dev);
  2003. int ret, tmp;
  2004. if (adreno_dev == NULL)
  2005. return 0;
  2006. mutex_lock(&adreno_dev->dev.mutex);
  2007. tmp = adreno_dev->fast_hang_detect;
  2008. ret = _ft_sysfs_store(buf, count, &adreno_dev->fast_hang_detect);
  2009. if (tmp != adreno_dev->fast_hang_detect) {
  2010. if (adreno_dev->fast_hang_detect) {
  2011. if (adreno_dev->gpudev->fault_detect_start &&
  2012. !kgsl_active_count_get(&adreno_dev->dev)) {
  2013. adreno_dev->gpudev->fault_detect_start(
  2014. adreno_dev);
  2015. kgsl_active_count_put(&adreno_dev->dev);
  2016. }
  2017. } else {
  2018. if (adreno_dev->gpudev->fault_detect_stop)
  2019. adreno_dev->gpudev->fault_detect_stop(
  2020. adreno_dev);
  2021. }
  2022. }
  2023. mutex_unlock(&adreno_dev->dev.mutex);
  2024. return ret;
  2025. }
  2026. /**
  2027. * _ft_fast_hang_detect_show() - Routine to read FT fast
  2028. * hang detect policy
  2029. * @dev: device ptr
  2030. * @attr: Device attribute
  2031. * @buf: value read
  2032. */
  2033. static int _ft_fast_hang_detect_show(struct device *dev,
  2034. struct device_attribute *attr,
  2035. char *buf)
  2036. {
  2037. struct adreno_device *adreno_dev = _get_adreno_dev(dev);
  2038. if (adreno_dev == NULL)
  2039. return 0;
  2040. return snprintf(buf, PAGE_SIZE, "%d\n",
  2041. (adreno_dev->fast_hang_detect ? 1 : 0));
  2042. }
  2043. /**
  2044. * _ft_long_ib_detect_store() - Routine to configure FT long IB
  2045. * detect policy
  2046. * @dev: device ptr
  2047. * @attr: Device attribute
  2048. * @buf: value to write
  2049. * @count: size of the value to write
  2050. *
2051. * 0x1 - Enable long IB detection
2052. * 0x0 - Disable long IB detection
  2053. */
  2054. static int _ft_long_ib_detect_store(struct device *dev,
  2055. struct device_attribute *attr,
  2056. const char *buf, size_t count)
  2057. {
  2058. struct adreno_device *adreno_dev = _get_adreno_dev(dev);
  2059. int ret;
  2060. if (adreno_dev == NULL)
  2061. return 0;
  2062. mutex_lock(&adreno_dev->dev.mutex);
  2063. ret = _ft_sysfs_store(buf, count, &adreno_dev->long_ib_detect);
  2064. mutex_unlock(&adreno_dev->dev.mutex);
  2065. return ret;
  2066. }
  2067. /**
  2068. * _ft_long_ib_detect_show() - Routine to read FT long IB
  2069. * detect policy
  2070. * @dev: device ptr
  2071. * @attr: Device attribute
  2072. * @buf: value read
  2073. */
  2074. static int _ft_long_ib_detect_show(struct device *dev,
  2075. struct device_attribute *attr,
  2076. char *buf)
  2077. {
  2078. struct adreno_device *adreno_dev = _get_adreno_dev(dev);
  2079. if (adreno_dev == NULL)
  2080. return 0;
  2081. return snprintf(buf, PAGE_SIZE, "%d\n",
  2082. (adreno_dev->long_ib_detect ? 1 : 0));
  2083. }
  2084. /**
  2085. * _wake_timeout_store() - Store the amount of time to extend idle check after
  2086. * wake on touch
  2087. * @dev: device ptr
  2088. * @attr: Device attribute
  2089. * @buf: value to write
  2090. * @count: size of the value to write
  2091. *
  2092. */
  2093. static ssize_t _wake_timeout_store(struct device *dev,
  2094. struct device_attribute *attr,
  2095. const char *buf, size_t count)
  2096. {
  2097. return _ft_sysfs_store(buf, count, &_wake_timeout);
  2098. }
  2099. /**
  2100. * _wake_timeout_show() - Show the amount of time idle check gets extended
  2101. * after wake on touch
  2103. * @dev: device ptr
  2104. * @attr: Device attribute
  2105. * @buf: value read
  2106. */
  2107. static ssize_t _wake_timeout_show(struct device *dev,
  2108. struct device_attribute *attr,
  2109. char *buf)
  2110. {
  2111. return snprintf(buf, PAGE_SIZE, "%d\n", _wake_timeout);
  2112. }
  2113. /**
  2114. * _ft_hang_intr_status_store - Routine to enable/disable h/w hang interrupt
  2115. * @dev: device ptr
  2116. * @attr: Device attribute
  2117. * @buf: value to write
  2118. * @count: size of the value to write
  2119. */
  2120. static ssize_t _ft_hang_intr_status_store(struct device *dev,
  2121. struct device_attribute *attr,
  2122. const char *buf, size_t count)
  2123. {
  2124. unsigned int new_setting, old_setting;
  2125. struct kgsl_device *device = kgsl_device_from_dev(dev);
  2126. struct adreno_device *adreno_dev;
  2127. int ret;
  2128. if (device == NULL)
  2129. return 0;
  2130. adreno_dev = ADRENO_DEVICE(device);
  2131. mutex_lock(&device->mutex);
  2132. ret = _ft_sysfs_store(buf, count, &new_setting);
  2133. if (ret != count)
  2134. goto done;
  2135. if (new_setting)
  2136. new_setting = 1;
  2137. old_setting =
  2138. (test_bit(ADRENO_DEVICE_HANG_INTR, &adreno_dev->priv) ? 1 : 0);
  2139. if (new_setting != old_setting) {
  2140. if (new_setting)
  2141. set_bit(ADRENO_DEVICE_HANG_INTR, &adreno_dev->priv);
  2142. else
  2143. clear_bit(ADRENO_DEVICE_HANG_INTR, &adreno_dev->priv);
  2144. /* Set the new setting based on device state */
  2145. switch (device->state) {
  2146. case KGSL_STATE_NAP:
  2147. case KGSL_STATE_SLEEP:
  2148. kgsl_pwrctrl_wake(device, 0);
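/* fall through - the device is awake now, apply the new interrupt setting */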
  2149. case KGSL_STATE_ACTIVE:
  2150. adreno_dev->gpudev->irq_control(adreno_dev, 1);
  2151. /*
2152. * For the following states the setting will be picked up on device
2153. * start. They still need to be listed here to differentiate them
2154. * from the default case.
  2155. */
  2156. case KGSL_STATE_SLUMBER:
  2157. case KGSL_STATE_SUSPEND:
  2158. break;
  2159. default:
  2160. ret = -EACCES;
  2161. /* reset back to old setting on error */
  2162. if (new_setting)
  2163. clear_bit(ADRENO_DEVICE_HANG_INTR,
  2164. &adreno_dev->priv);
  2165. else
  2166. set_bit(ADRENO_DEVICE_HANG_INTR,
  2167. &adreno_dev->priv);
  2168. goto done;
  2169. }
  2170. }
  2171. done:
  2172. mutex_unlock(&device->mutex);
  2173. return ret;
  2174. }
  2175. /**
  2176. * _ft_hang_intr_status_show() - Routine to read hardware hang interrupt
  2177. * enablement
  2178. * @dev: device ptr
  2179. * @attr: Device attribute
  2180. * @buf: value read
  2181. */
  2182. static ssize_t _ft_hang_intr_status_show(struct device *dev,
  2183. struct device_attribute *attr,
  2184. char *buf)
  2185. {
  2186. struct adreno_device *adreno_dev = _get_adreno_dev(dev);
  2187. if (adreno_dev == NULL)
  2188. return 0;
  2189. return snprintf(buf, PAGE_SIZE, "%d\n",
  2190. test_bit(ADRENO_DEVICE_HANG_INTR, &adreno_dev->priv) ? 1 : 0);
  2191. }
  2192. #define FT_DEVICE_ATTR(name) \
  2193. DEVICE_ATTR(name, 0644, _ ## name ## _show, _ ## name ## _store);
  2194. FT_DEVICE_ATTR(ft_policy);
  2195. FT_DEVICE_ATTR(ft_pagefault_policy);
  2196. FT_DEVICE_ATTR(ft_fast_hang_detect);
  2197. FT_DEVICE_ATTR(ft_long_ib_detect);
  2198. FT_DEVICE_ATTR(ft_hang_intr_status);
  2199. static DEVICE_INT_ATTR(wake_nice, 0644, _wake_nice);
  2200. static FT_DEVICE_ATTR(wake_timeout);
  2201. const struct device_attribute *ft_attr_list[] = {
  2202. &dev_attr_ft_policy,
  2203. &dev_attr_ft_pagefault_policy,
  2204. &dev_attr_ft_fast_hang_detect,
  2205. &dev_attr_ft_long_ib_detect,
  2206. &dev_attr_wake_nice.attr,
  2207. &dev_attr_wake_timeout,
  2208. &dev_attr_ft_hang_intr_status,
  2209. NULL,
  2210. };
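/*
 * These attributes are created under the KGSL device sysfs directory by
 * adreno_ft_init_sysfs() below. As an illustration (the exact path depends on
 * the device name registered with the kgsl class):
 *   echo 1 > /sys/class/kgsl/kgsl-3d0/ft_fast_hang_detect
 *   cat /sys/class/kgsl/kgsl-3d0/ft_policy
 */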
  2211. int adreno_ft_init_sysfs(struct kgsl_device *device)
  2212. {
  2213. return kgsl_create_device_sysfs_files(device->dev, ft_attr_list);
  2214. }
  2215. void adreno_ft_uninit_sysfs(struct kgsl_device *device)
  2216. {
  2217. kgsl_remove_device_sysfs_files(device->dev, ft_attr_list);
  2218. }
  2219. static int adreno_getproperty(struct kgsl_device *device,
  2220. enum kgsl_property_type type,
  2221. void *value,
  2222. unsigned int sizebytes)
  2223. {
  2224. int status = -EINVAL;
  2225. struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
  2226. switch (type) {
  2227. case KGSL_PROP_DEVICE_INFO:
  2228. {
  2229. struct kgsl_devinfo devinfo;
  2230. if (sizebytes != sizeof(devinfo)) {
  2231. status = -EINVAL;
  2232. break;
  2233. }
  2234. memset(&devinfo, 0, sizeof(devinfo));
  2235. devinfo.device_id = device->id+1;
  2236. devinfo.chip_id = adreno_dev->chip_id;
  2237. devinfo.mmu_enabled = kgsl_mmu_enabled();
  2238. devinfo.gpu_id = adreno_dev->gpurev;
  2239. devinfo.gmem_gpubaseaddr = adreno_dev->gmem_base;
  2240. devinfo.gmem_sizebytes = adreno_dev->gmem_size;
  2241. if (copy_to_user(value, &devinfo, sizeof(devinfo)) !=
  2242. 0) {
  2243. status = -EFAULT;
  2244. break;
  2245. }
  2246. status = 0;
  2247. }
  2248. break;
  2249. case KGSL_PROP_DEVICE_SHADOW:
  2250. {
  2251. struct kgsl_shadowprop shadowprop;
  2252. if (sizebytes != sizeof(shadowprop)) {
  2253. status = -EINVAL;
  2254. break;
  2255. }
  2256. memset(&shadowprop, 0, sizeof(shadowprop));
  2257. if (device->memstore.hostptr) {
  2258. /*NOTE: with mmu enabled, gpuaddr doesn't mean
  2259. * anything to mmap().
  2260. */
  2261. shadowprop.gpuaddr = device->memstore.gpuaddr;
  2262. shadowprop.size = device->memstore.size;
  2263. /* GSL needs this to be set, even if it
  2264. appears to be meaningless */
  2265. shadowprop.flags = KGSL_FLAGS_INITIALIZED |
  2266. KGSL_FLAGS_PER_CONTEXT_TIMESTAMPS;
  2267. }
  2268. if (copy_to_user(value, &shadowprop,
  2269. sizeof(shadowprop))) {
  2270. status = -EFAULT;
  2271. break;
  2272. }
  2273. status = 0;
  2274. }
  2275. break;
  2276. case KGSL_PROP_MMU_ENABLE:
  2277. {
  2278. int mmu_prop = kgsl_mmu_enabled();
  2279. if (sizebytes != sizeof(int)) {
  2280. status = -EINVAL;
  2281. break;
  2282. }
  2283. if (copy_to_user(value, &mmu_prop, sizeof(mmu_prop))) {
  2284. status = -EFAULT;
  2285. break;
  2286. }
  2287. status = 0;
  2288. }
  2289. break;
  2290. case KGSL_PROP_INTERRUPT_WAITS:
  2291. {
  2292. int int_waits = 1;
  2293. if (sizebytes != sizeof(int)) {
  2294. status = -EINVAL;
  2295. break;
  2296. }
  2297. if (copy_to_user(value, &int_waits, sizeof(int))) {
  2298. status = -EFAULT;
  2299. break;
  2300. }
  2301. status = 0;
  2302. }
  2303. break;
  2304. default:
  2305. status = -EINVAL;
  2306. }
  2307. return status;
  2308. }
  2309. static int adreno_set_constraint(struct kgsl_device *device,
  2310. struct kgsl_context *context,
  2311. struct kgsl_device_constraint *constraint)
  2312. {
  2313. int status = 0;
  2314. switch (constraint->type) {
  2315. case KGSL_CONSTRAINT_PWRLEVEL: {
  2316. struct kgsl_device_constraint_pwrlevel pwr;
  2317. if (constraint->size != sizeof(pwr)) {
  2318. status = -EINVAL;
  2319. break;
  2320. }
  2321. if (copy_from_user(&pwr,
  2322. (void __user *)constraint->data,
  2323. sizeof(pwr))) {
  2324. status = -EFAULT;
  2325. break;
  2326. }
  2327. if (pwr.level >= KGSL_CONSTRAINT_PWR_MAXLEVELS) {
  2328. status = -EINVAL;
  2329. break;
  2330. }
  2331. context->pwr_constraint.type =
  2332. KGSL_CONSTRAINT_PWRLEVEL;
  2333. context->pwr_constraint.sub_type = pwr.level;
  2334. }
  2335. break;
  2336. case KGSL_CONSTRAINT_NONE:
  2337. context->pwr_constraint.type = KGSL_CONSTRAINT_NONE;
  2338. break;
  2339. default:
  2340. status = -EINVAL;
  2341. break;
  2342. }
  2343. return status;
  2344. }
  2345. static int adreno_setproperty(struct kgsl_device_private *dev_priv,
  2346. enum kgsl_property_type type,
  2347. void *value,
  2348. unsigned int sizebytes)
  2349. {
  2350. int status = -EINVAL;
  2351. struct kgsl_device *device = dev_priv->device;
  2352. struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
  2353. switch (type) {
  2354. case KGSL_PROP_PWRCTRL: {
  2355. unsigned int enable;
  2356. if (sizebytes != sizeof(enable))
  2357. break;
  2358. if (copy_from_user(&enable, (void __user *) value,
  2359. sizeof(enable))) {
  2360. status = -EFAULT;
  2361. break;
  2362. }
  2363. kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
  2364. if (enable) {
  2365. device->pwrctrl.ctrl_flags = 0;
  2366. adreno_dev->fast_hang_detect = 1;
  2367. if (adreno_dev->gpudev->fault_detect_start)
  2368. adreno_dev->gpudev->fault_detect_start(
  2369. adreno_dev);
  2370. kgsl_pwrscale_enable(device);
  2371. } else {
  2372. kgsl_pwrctrl_wake(device, 0);
  2373. device->pwrctrl.ctrl_flags = KGSL_PWR_ON;
  2374. adreno_dev->fast_hang_detect = 0;
  2375. if (adreno_dev->gpudev->fault_detect_stop)
  2376. adreno_dev->gpudev->fault_detect_stop(
  2377. adreno_dev);
  2378. kgsl_pwrscale_disable(device);
  2379. }
  2380. kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
  2381. status = 0;
  2382. }
  2383. break;
  2384. case KGSL_PROP_PWR_CONSTRAINT: {
  2385. struct kgsl_device_constraint constraint;
  2386. struct kgsl_context *context;
  2387. if (sizebytes != sizeof(constraint))
  2388. break;
  2389. if (copy_from_user(&constraint, value,
  2390. sizeof(constraint))) {
  2391. status = -EFAULT;
  2392. break;
  2393. }
  2394. context = kgsl_context_get_owner(dev_priv,
  2395. constraint.context_id);
  2396. if (context == NULL)
  2397. break;
  2398. status = adreno_set_constraint(device, context,
  2399. &constraint);
  2400. kgsl_context_put(context);
  2401. }
  2402. break;
  2403. default:
  2404. break;
  2405. }
  2406. return status;
  2407. }
  2408. /**
  2409. * adreno_hw_isidle() - Check if the GPU core is idle
  2410. * @device: Pointer to the KGSL device structure for the GPU
  2411. *
  2412. * Return true if the RBBM status register for the GPU type indicates that the
  2413. * hardware is idle
  2414. */
  2415. bool adreno_hw_isidle(struct kgsl_device *device)
  2416. {
  2417. unsigned int reg_rbbm_status;
  2418. struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
  2419. /* Don't consider ourselves idle if there is an IRQ pending */
  2420. if (adreno_dev->gpudev->irq_pending(adreno_dev))
  2421. return false;
  2422. adreno_readreg(adreno_dev, ADRENO_REG_RBBM_STATUS,
  2423. &reg_rbbm_status);
  2424. if (adreno_is_a2xx(adreno_dev)) {
  2425. if (reg_rbbm_status == 0x110)
  2426. return true;
  2427. } else if (adreno_is_a3xx(adreno_dev)) {
  2428. if (!(reg_rbbm_status & 0x80000000))
  2429. return true;
  2430. }
  2431. return false;
  2432. }
  2433. /**
  2434. * adreno_soft_reset() - Do a soft reset of the GPU hardware
  2435. * @device: KGSL device to soft reset
  2436. *
  2437. * "soft reset" the GPU hardware - this is a fast path GPU reset
  2438. * The GPU hardware is reset but we never pull power so we can skip
  2439. * a lot of the standard adreno_stop/adreno_start sequence
  2440. */
  2441. int adreno_soft_reset(struct kgsl_device *device)
  2442. {
  2443. struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
  2444. int ret;
  2445. if (!adreno_dev->gpudev->soft_reset) {
  2446. dev_WARN_ONCE(device->dev, 1, "Soft reset not supported");
  2447. return -EINVAL;
  2448. }
  2449. if (adreno_dev->drawctxt_active)
  2450. kgsl_context_put(&adreno_dev->drawctxt_active->base);
  2451. adreno_dev->drawctxt_active = NULL;
  2452. /* Stop the ringbuffer */
  2453. adreno_ringbuffer_stop(&adreno_dev->ringbuffer);
  2454. if (kgsl_pwrctrl_isenabled(device))
  2455. device->ftbl->irqctrl(device, 0);
  2456. kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
  2457. adreno_clear_gpu_fault(adreno_dev);
  2458. /* Delete the idle timer */
  2459. del_timer_sync(&device->idle_timer);
  2460. /* Make sure we are totally awake */
  2461. kgsl_pwrctrl_enable(device);
  2462. /* save physical performance counter values before GPU soft reset */
  2463. adreno_perfcounter_save(adreno_dev);
  2464. /* Reset the GPU */
  2465. adreno_dev->gpudev->soft_reset(adreno_dev);
  2466. /* Restore physical performance counter values after soft reset */
  2467. adreno_perfcounter_restore(adreno_dev);
  2468. /* Reinitialize the GPU */
  2469. adreno_dev->gpudev->start(adreno_dev);
  2470. /* Enable IRQ */
  2471. kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);
  2472. device->ftbl->irqctrl(device, 1);
  2473. /*
  2474. * If we have offsets for the jump tables we can try to do a warm start,
  2475. * otherwise do a full ringbuffer restart
  2476. */
  2477. if (adreno_dev->pm4_jt_idx)
  2478. ret = adreno_ringbuffer_warm_start(&adreno_dev->ringbuffer);
  2479. else
  2480. ret = adreno_ringbuffer_cold_start(&adreno_dev->ringbuffer);
  2481. if (ret)
  2482. return ret;
  2483. device->reset_counter++;
  2484. return 0;
  2485. }
  2486. /*
  2487. * adreno_isidle() - return true if the GPU hardware is idle
  2488. * @device: Pointer to the KGSL device structure for the GPU
  2489. *
  2490. * Return true if the GPU hardware is idle and there are no commands pending in
  2491. * the ringbuffer
  2492. */
  2493. bool adreno_isidle(struct kgsl_device *device)
  2494. {
  2495. struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
  2496. unsigned int rptr;
  2497. if (!kgsl_pwrctrl_isenabled(device))
  2498. return true;
  2499. rptr = adreno_get_rptr(&adreno_dev->ringbuffer);
  2500. /*
  2501. * wptr is updated when we add commands to ringbuffer, add a barrier
  2502. * to make sure updated wptr is compared to rptr
  2503. */
  2504. smp_mb();
  2505. if (rptr == adreno_dev->ringbuffer.wptr)
  2506. return adreno_hw_isidle(device);
  2507. return false;
  2508. }
  2509. /**
  2510. * adreno_idle() - wait for the GPU hardware to go idle
  2511. * @device: Pointer to the KGSL device structure for the GPU
  2512. *
  2513. * Wait up to ADRENO_IDLE_TIMEOUT milliseconds for the GPU hardware to go quiet.
  2514. */
  2515. int adreno_idle(struct kgsl_device *device)
  2516. {
  2517. struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
  2518. unsigned long wait = jiffies + msecs_to_jiffies(ADRENO_IDLE_TIMEOUT);
  2519. /*
  2520. * Make sure the device mutex is held so the dispatcher can't send any
  2521. * more commands to the hardware
  2522. */
  2523. BUG_ON(!mutex_is_locked(&device->mutex));
  2524. if (adreno_is_a3xx(adreno_dev))
  2525. kgsl_cffdump_regpoll(device,
  2526. adreno_getreg(adreno_dev, ADRENO_REG_RBBM_STATUS) << 2,
  2527. 0x00000000, 0x80000000);
  2528. else
  2529. kgsl_cffdump_regpoll(device,
  2530. adreno_getreg(adreno_dev, ADRENO_REG_RBBM_STATUS) << 2,
  2531. 0x110, 0x110);
  2532. while (time_before(jiffies, wait)) {
  2533. /*
  2534. * If we fault, stop waiting and return an error. The dispatcher
  2535. * will clean up the fault from the work queue, but we need to
  2536. * make sure we don't block it by waiting for an idle that
  2537. * will never come.
  2538. */
  2539. if (adreno_gpu_fault(adreno_dev) != 0)
  2540. return -EDEADLK;
  2541. if (adreno_isidle(device))
  2542. return 0;
  2543. }
  2544. return -ETIMEDOUT;
  2545. }
  2546. /**
  2547. * adreno_drain() - Drain the dispatch queue
  2548. * @device: Pointer to the KGSL device structure for the GPU
  2549. *
  2550. * Drain the dispatcher of existing command batches. This halts
  2551. * additional commands from being issued until the gate is completed.
  2552. */
  2553. static int adreno_drain(struct kgsl_device *device)
  2554. {
  2555. INIT_COMPLETION(device->cmdbatch_gate);
  2556. return 0;
  2557. }
  2558. /* Caller must hold the device mutex. */
  2559. static int adreno_suspend_context(struct kgsl_device *device)
  2560. {
  2561. int status = 0;
  2562. struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
  2563. /* process any profiling results that are available */
  2564. adreno_profile_process_results(device);
  2565. /* switch to NULL ctxt */
  2566. if (adreno_dev->drawctxt_active != NULL) {
  2567. adreno_drawctxt_switch(adreno_dev, NULL, 0);
  2568. status = adreno_idle(device);
  2569. }
  2570. return status;
  2571. }
  2572. /* Find a memory structure attached to an adreno context */
  2573. struct kgsl_memdesc *adreno_find_ctxtmem(struct kgsl_device *device,
  2574. phys_addr_t pt_base, unsigned int gpuaddr, unsigned int size)
  2575. {
  2576. struct kgsl_context *context;
  2577. int next = 0;
  2578. struct kgsl_memdesc *desc = NULL;
  2579. read_lock(&device->context_lock);
  2580. while (1) {
  2581. context = idr_get_next(&device->context_idr, &next);
  2582. if (context == NULL)
  2583. break;
  2584. if (kgsl_mmu_pt_equal(&device->mmu,
  2585. context->proc_priv->pagetable,
  2586. pt_base)) {
  2587. struct adreno_context *adreno_context;
  2588. adreno_context = ADRENO_CONTEXT(context);
  2589. desc = &adreno_context->gpustate;
  2590. if (kgsl_gpuaddr_in_memdesc(desc, gpuaddr, size))
  2591. break;
  2592. desc = &adreno_context->context_gmem_shadow.gmemshadow;
  2593. if (kgsl_gpuaddr_in_memdesc(desc, gpuaddr, size))
  2594. break;
  2595. }
  2596. next = next + 1;
  2597. desc = NULL;
  2598. }
  2599. read_unlock(&device->context_lock);
  2600. return desc;
  2601. }
  2602. /*
  2603. * adreno_find_region() - Find corresponding allocation for a given address
  2604. * @device: Device on which address operates
  2605. * @pt_base: The pagetable in which address is mapped
  2606. * @gpuaddr: The gpu address
  2607. * @size: Size in bytes of the address
  2608. * @entry: If the allocation is part of user space allocation then the mem
  2609. * entry is returned in this parameter. Caller is supposed to decrement
  2610. * refcount on this entry after its done using it.
  2611. *
  2612. * Finds an allocation descriptor for a given gpu address range
  2613. *
  2614. * Returns the descriptor on success else NULL
  2615. */
  2616. struct kgsl_memdesc *adreno_find_region(struct kgsl_device *device,
  2617. phys_addr_t pt_base,
  2618. unsigned int gpuaddr,
  2619. unsigned int size,
  2620. struct kgsl_mem_entry **entry)
  2621. {
  2622. struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
  2623. struct adreno_ringbuffer *ringbuffer = &adreno_dev->ringbuffer;
  2624. *entry = NULL;
  2625. if (kgsl_gpuaddr_in_memdesc(&ringbuffer->buffer_desc, gpuaddr, size))
  2626. return &ringbuffer->buffer_desc;
  2627. if (kgsl_gpuaddr_in_memdesc(&device->memstore, gpuaddr, size))
  2628. return &device->memstore;
  2629. if (kgsl_gpuaddr_in_memdesc(&adreno_dev->pwron_fixup, gpuaddr, size))
  2630. return &adreno_dev->pwron_fixup;
  2631. if (kgsl_gpuaddr_in_memdesc(&device->mmu.setstate_memory, gpuaddr,
  2632. size))
  2633. return &device->mmu.setstate_memory;
  2634. *entry = kgsl_get_mem_entry(device, pt_base, gpuaddr, size);
  2635. if (*entry)
  2636. return &((*entry)->memdesc);
  2637. return adreno_find_ctxtmem(device, pt_base, gpuaddr, size);
  2638. }
  2639. /*
  2640. * adreno_convertaddr() - Convert a gpu address to kernel mapped address
  2641. * @device: Device on which the address operates
  2642. * @pt_base: The pagetable in which address is mapped
  2643. * @gpuaddr: The start address
  2644. * @size: The length of address range
  2645. * @entry: If the allocation is part of user space allocation then the mem
  2646. * entry is returned in this parameter. Caller is supposed to decrement
  2647. * refcount on this entry after its done using it.
  2648. *
  2649. * Returns the converted host pointer on success else NULL
  2650. */
  2651. uint8_t *adreno_convertaddr(struct kgsl_device *device, phys_addr_t pt_base,
  2652. unsigned int gpuaddr, unsigned int size,
  2653. struct kgsl_mem_entry **entry)
  2654. {
  2655. struct kgsl_memdesc *memdesc;
  2656. memdesc = adreno_find_region(device, pt_base, gpuaddr, size, entry);
  2657. return memdesc ? kgsl_gpuaddr_to_vaddr(memdesc, gpuaddr) : NULL;
  2658. }
/**
 * adreno_read - General read function to read adreno device memory
 * @device - Pointer to the GPU device struct (for adreno device)
 * @base - Base address (kernel virtual) where the device memory is mapped
 * @offsetwords - Offset in words from the base address of the memory that
 * is to be read
 * @value - Pointer in which to return the value read from device memory
 * @mem_len - Length of the device memory mapped to the kernel
 */
static void adreno_read(struct kgsl_device *device, void *base,
		unsigned int offsetwords, unsigned int *value,
		unsigned int mem_len)
{
	unsigned int *reg;

	BUG_ON(offsetwords * sizeof(uint32_t) >= mem_len);
	reg = (unsigned int *)(base + (offsetwords << 2));

	if (!in_interrupt())
		kgsl_pre_hwaccess(device);

	/*
	 * Ensure this read finishes before the next one, i.e. act like a
	 * normal readl().
	 */
	*value = __raw_readl(reg);
	rmb();
}
/**
 * adreno_regread - Used to read adreno device registers
 * @device - Device whose register is to be read
 * @offsetwords - Word (4 byte) offset of the register to be read
 * @value - Pointer in which to return the value read from the register
 */
static void adreno_regread(struct kgsl_device *device, unsigned int offsetwords,
				unsigned int *value)
{
	adreno_read(device, device->reg_virt, offsetwords, value,
		device->reg_len);
}
/**
 * adreno_shadermem_regread - Used to read GPU (adreno) shader memory
 * @device - GPU device whose shader memory is to be read
 * @offsetwords - Offset in words of the shader memory address to be read
 * @value - Pointer to where the read shader mem value is to be stored
 */
void adreno_shadermem_regread(struct kgsl_device *device,
	unsigned int offsetwords, unsigned int *value)
{
	adreno_read(device, device->shader_mem_virt, offsetwords, value,
		device->shader_mem_len);
}
static void adreno_regwrite(struct kgsl_device *device,
				unsigned int offsetwords,
				unsigned int value)
{
	unsigned int *reg;

	BUG_ON(offsetwords * sizeof(uint32_t) >= device->reg_len);

	if (!in_interrupt())
		kgsl_pre_hwaccess(device);

	kgsl_trace_regwrite(device, offsetwords, value);

	kgsl_cffdump_regwrite(device, offsetwords << 2, value);
	reg = (unsigned int *)(device->reg_virt + (offsetwords << 2));

	/*
	 * Ensure previous writes post before this one, i.e. act like a
	 * normal writel().
	 */
	wmb();
	__raw_writel(value, reg);
}
/**
 * adreno_waittimestamp - sleep while waiting for the specified timestamp
 * @device - pointer to a KGSL device structure
 * @context - pointer to the active kgsl context
 * @timestamp - GPU timestamp to wait for
 * @msecs - amount of time to wait (in milliseconds)
 *
 * Wait up to 'msecs' milliseconds for the specified timestamp to expire.
 */
static int adreno_waittimestamp(struct kgsl_device *device,
		struct kgsl_context *context,
		unsigned int timestamp,
		unsigned int msecs)
{
	int ret;
	struct adreno_context *drawctxt;

	if (context == NULL) {
		/* If somebody is still using the global wait, complain once */
		dev_WARN_ONCE(device->dev, 1,
			"IOCTL_KGSL_DEVICE_WAITTIMESTAMP is deprecated\n");
		return -ENOTTY;
	}

	/* Return -EINVAL if the context has been detached */
	if (kgsl_context_detached(context))
		return -EINVAL;

	ret = adreno_drawctxt_wait(ADRENO_DEVICE(device), context,
		timestamp, msecs);

	/* If the context got invalidated then return a specific error */
	drawctxt = ADRENO_CONTEXT(context);

	if (drawctxt->state == ADRENO_CONTEXT_STATE_INVALID)
		ret = -EDEADLK;

	/*
	 * Return -EPROTO if the device has faulted since the last time we
	 * checked. Userspace uses this as a marker for performing post
	 * fault activities.
	 */
	if (!ret && test_and_clear_bit(ADRENO_CONTEXT_FAULT, &drawctxt->priv))
		ret = -EPROTO;

	return ret;
}
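
/*
 * Illustrative sketch (not part of the driver): how a caller might classify
 * the errors returned by adreno_waittimestamp(). The helper name and the
 * 5000 ms timeout are hypothetical; the error meanings follow the comments
 * in the function above.
 */
static int example_wait_and_classify(struct kgsl_device *device,
		struct kgsl_context *context, unsigned int timestamp)
{
	int ret = adreno_waittimestamp(device, context, timestamp, 5000);

	switch (ret) {
	case 0:
		/* The timestamp retired within the timeout */
		break;
	case -EDEADLK:
		/* The context was invalidated, e.g. by a GPU fault */
		break;
	case -EPROTO:
		/* The device faulted since the last check */
		break;
	case -EINVAL:
		/* The context was already detached */
		break;
	default:
		/* Timeout or another error from the underlying wait */
		break;
	}

	return ret;
}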
static unsigned int adreno_readtimestamp(struct kgsl_device *device,
		struct kgsl_context *context, enum kgsl_timestamp_type type)
{
	unsigned int timestamp = 0;
	unsigned int id = context ? context->id : KGSL_MEMSTORE_GLOBAL;

	switch (type) {
	case KGSL_TIMESTAMP_QUEUED: {
		struct adreno_device *adreno_dev = ADRENO_DEVICE(device);

		timestamp = adreno_context_timestamp(context,
				&adreno_dev->ringbuffer);
		break;
	}
	case KGSL_TIMESTAMP_CONSUMED:
		kgsl_sharedmem_readl(&device->memstore, &timestamp,
			KGSL_MEMSTORE_OFFSET(id, soptimestamp));
		break;
	case KGSL_TIMESTAMP_RETIRED:
		kgsl_sharedmem_readl(&device->memstore, &timestamp,
			KGSL_MEMSTORE_OFFSET(id, eoptimestamp));
		break;
	}

	return timestamp;
}
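
/*
 * Illustrative sketch (not part of the driver): the three timestamp types
 * read above can be compared to gauge progress on a context. A context has
 * no outstanding work once its RETIRED timestamp catches up with its QUEUED
 * timestamp. The helper name is hypothetical and timestamp_cmp() is assumed
 * to be the usual wrap-safe comparison helper from the KGSL core.
 */
static bool example_context_caught_up(struct kgsl_device *device,
		struct kgsl_context *context)
{
	unsigned int queued = adreno_readtimestamp(device, context,
					KGSL_TIMESTAMP_QUEUED);
	unsigned int retired = adreno_readtimestamp(device, context,
					KGSL_TIMESTAMP_RETIRED);

	/* Timestamps wrap, so use a wrap-safe comparison */
	return timestamp_cmp(retired, queued) >= 0;
}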
static long adreno_ioctl(struct kgsl_device_private *dev_priv,
			      unsigned int cmd, void *data)
{
	struct kgsl_device *device = dev_priv->device;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	int result = 0;

	switch (cmd) {
	case IOCTL_KGSL_DRAWCTXT_SET_BIN_BASE_OFFSET: {
		struct kgsl_drawctxt_set_bin_base_offset *binbase = data;
		struct kgsl_context *context;

		context = kgsl_context_get_owner(dev_priv,
			binbase->drawctxt_id);
		if (context) {
			adreno_drawctxt_set_bin_base_offset(
				device, context, binbase->offset);
		} else {
			result = -EINVAL;
			KGSL_DRV_ERR(device,
				"invalid drawctxt drawctxt_id %d device_id=%d\n",
				binbase->drawctxt_id, device->id);
		}

		kgsl_context_put(context);
		break;
	}
	case IOCTL_KGSL_PERFCOUNTER_GET: {
		struct kgsl_perfcounter_get *get = data;

		kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
		/*
		 * adreno_perfcounter_get() is called by kernel clients
		 * during start(), so it is not safe to take an
		 * active count inside this function.
		 */
		result = kgsl_active_count_get(device);
		if (result == 0) {
			result = adreno_perfcounter_get(adreno_dev,
				get->groupid, get->countable, &get->offset,
				&get->offset_hi, PERFCOUNTER_FLAG_NONE);
			kgsl_active_count_put(device);
		}
		kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
		break;
	}
	case IOCTL_KGSL_PERFCOUNTER_PUT: {
		struct kgsl_perfcounter_put *put = data;

		kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
		result = adreno_perfcounter_put(adreno_dev, put->groupid,
			put->countable, PERFCOUNTER_FLAG_NONE);
		kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
		break;
	}
	case IOCTL_KGSL_PERFCOUNTER_QUERY: {
		struct kgsl_perfcounter_query *query = data;

		result = adreno_perfcounter_query_group(adreno_dev,
			query->groupid, query->countables,
			query->count, &query->max_counters);
		break;
	}
	case IOCTL_KGSL_PERFCOUNTER_READ: {
		struct kgsl_perfcounter_read *read = data;

		result = adreno_perfcounter_read_group(adreno_dev,
			read->reads, read->count);
		break;
	}
	default:
		KGSL_DRV_INFO(dev_priv->device,
			"invalid ioctl code %08x\n", cmd);
		result = -ENOIOCTLCMD;
		break;
	}
	return result;
}
static inline s64 adreno_ticks_to_us(u32 ticks, u32 freq)
{
	/* freq is the core clock in Hz and is assumed to be at least 1 MHz */
	freq /= 1000000;
	return ticks / freq;
}
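
/*
 * Worked example (illustrative): with the GPU clocked at 200 MHz,
 * freq / 1000000 == 200 ticks per microsecond, so 1,000,000 busy ticks
 * convert to 1,000,000 / 200 == 5,000 us of busy time.
 */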
static void adreno_power_stats(struct kgsl_device *device,
				struct kgsl_power_stats *stats)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	struct adreno_busy_data busy_data;

	memset(stats, 0, sizeof(*stats));

	/*
	 * If we're not currently active, there shouldn't have been
	 * any cycles since the last time this function was called.
	 */
	if (device->state != KGSL_STATE_ACTIVE)
		return;

	/* Get the busy cycles counted since the counter was last reset */
	adreno_dev->gpudev->busy_cycles(adreno_dev, &busy_data);

	stats->busy_time = adreno_ticks_to_us(busy_data.gpu_busy,
					      kgsl_pwrctrl_active_freq(pwr));
	stats->ram_time = busy_data.vbif_ram_cycles;
	stats->ram_wait = busy_data.vbif_starved_ram;
}
void adreno_irqctrl(struct kgsl_device *device, int state)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	adreno_dev->gpudev->irq_control(adreno_dev, state);
}
static unsigned int adreno_gpuid(struct kgsl_device *device,
	unsigned int *chipid)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);

	/*
	 * Some applications need to know the chip ID too, so pass
	 * that as a parameter.
	 */
	if (chipid != NULL)
		*chipid = adreno_dev->chip_id;

	/*
	 * Standard KGSL gpuid format:
	 * top word is 0x0002 for 2D or 0x0003 for 3D
	 * bottom word is a core specific identifier
	 */
	return (0x0003 << 16) | ((int) adreno_dev->gpurev);
}
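
/*
 * Worked example (illustrative): for a 3D core whose gpurev value is 330
 * (a hypothetical A330 part), the reported gpuid would be
 * (0x0003 << 16) | 330 == 0x0003014a.
 */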
static const struct kgsl_functable adreno_functable = {
	/* Mandatory functions */
	.regread = adreno_regread,
	.regwrite = adreno_regwrite,
	.idle = adreno_idle,
	.isidle = adreno_isidle,
	.suspend_context = adreno_suspend_context,
	.init = adreno_init,
	.start = adreno_start,
	.stop = adreno_stop,
	.getproperty = adreno_getproperty,
	.waittimestamp = adreno_waittimestamp,
	.readtimestamp = adreno_readtimestamp,
	.issueibcmds = adreno_ringbuffer_issueibcmds,
	.ioctl = adreno_ioctl,
	.setup_pt = adreno_setup_pt,
	.cleanup_pt = adreno_cleanup_pt,
	.power_stats = adreno_power_stats,
	.irqctrl = adreno_irqctrl,
	.gpuid = adreno_gpuid,
	.snapshot = adreno_snapshot,
	.irq_handler = adreno_irq_handler,
	.drain = adreno_drain,
	/* Optional functions */
	.setstate = adreno_setstate,
	.drawctxt_create = adreno_drawctxt_create,
	.drawctxt_detach = adreno_drawctxt_detach,
	.drawctxt_destroy = adreno_drawctxt_destroy,
	.drawctxt_dump = adreno_drawctxt_dump,
	.setproperty = adreno_setproperty,
	.postmortem_dump = adreno_dump,
	.drawctxt_sched = adreno_drawctxt_sched,
	.resume = adreno_dispatcher_start,
};
static struct platform_driver adreno_platform_driver = {
	.probe = adreno_probe,
	.remove = __devexit_p(adreno_remove),
	.suspend = kgsl_suspend_driver,
	.resume = kgsl_resume_driver,
	.id_table = adreno_id_table,
	.driver = {
		.owner = THIS_MODULE,
		.name = DEVICE_3D_NAME,
		.pm = &kgsl_pm_ops,
		.of_match_table = adreno_match_table,
	}
};
static int __init kgsl_3d_init(void)
{
	return platform_driver_register(&adreno_platform_driver);
}

static void __exit kgsl_3d_exit(void)
{
	platform_driver_unregister(&adreno_platform_driver);
}

module_init(kgsl_3d_init);
module_exit(kgsl_3d_exit);

MODULE_DESCRIPTION("3D Graphics driver");
MODULE_VERSION("1.2");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:kgsl_3d");