/* mdss_mdp_pp.c — MDSS MDP picture-processing (PP) driver. */
/*
 * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
  14. #define pr_fmt(fmt) "%s: " fmt, __func__
  15. #include "mdss_fb.h"
  16. #include "mdss_mdp.h"
  17. #include <linux/uaccess.h>
  18. #include <linux/spinlock.h>
  19. #include <linux/delay.h>
  20. #include <mach/msm_bus.h>
  21. #include <mach/msm_bus_board.h>
  22. #ifdef CONFIG_FB_MSM_CAMERA_CSC
  23. struct mdp_csc_cfg mdp_csc_convert_wideband = {
  24. 0,
  25. {
  26. 0x0200, 0x0000, 0x02CD,
  27. 0x0200, 0xFF4F, 0xFE91,
  28. 0x0200, 0x038B, 0x0000,
  29. },
  30. { 0x0, 0xFF80, 0xFF80,},
  31. { 0x0, 0x0, 0x0,},
  32. { 0x0, 0xFF, 0x0, 0xFF, 0x0, 0xFF,},
  33. { 0x0, 0xFF, 0x0, 0xFF, 0x0, 0xFF,},
  34. };
  35. #endif
  36. struct mdp_csc_cfg mdp_csc_convert[MDSS_MDP_MAX_CSC] = {
  37. [MDSS_MDP_CSC_RGB2RGB] = {
  38. 0,
  39. {
  40. 0x0200, 0x0000, 0x0000,
  41. 0x0000, 0x0200, 0x0000,
  42. 0x0000, 0x0000, 0x0200,
  43. },
  44. { 0x0, 0x0, 0x0,},
  45. { 0x0, 0x0, 0x0,},
  46. { 0x0, 0xff, 0x0, 0xff, 0x0, 0xff,},
  47. { 0x0, 0xff, 0x0, 0xff, 0x0, 0xff,},
  48. },
  49. [MDSS_MDP_CSC_YUV2RGB] = {
  50. 0,
  51. {
  52. 0x0254, 0x0000, 0x0331,
  53. 0x0254, 0xff37, 0xfe60,
  54. 0x0254, 0x0409, 0x0000,
  55. },
  56. { 0xfff0, 0xff80, 0xff80,},
  57. { 0x0, 0x0, 0x0,},
  58. { 0x0, 0xff, 0x0, 0xff, 0x0, 0xff,},
  59. { 0x0, 0xff, 0x0, 0xff, 0x0, 0xff,},
  60. },
  61. [MDSS_MDP_CSC_RGB2YUV] = {
  62. 0,
  63. {
  64. 0x0083, 0x0102, 0x0032,
  65. 0x1fb5, 0x1f6c, 0x00e1,
  66. 0x00e1, 0x1f45, 0x1fdc
  67. },
  68. { 0x0, 0x0, 0x0,},
  69. { 0x0010, 0x0080, 0x0080,},
  70. { 0x0, 0xff, 0x0, 0xff, 0x0, 0xff,},
  71. { 0x0010, 0x00eb, 0x0010, 0x00f0, 0x0010, 0x00f0,},
  72. },
  73. [MDSS_MDP_CSC_YUV2YUV] = {
  74. 0,
  75. {
  76. 0x0200, 0x0000, 0x0000,
  77. 0x0000, 0x0200, 0x0000,
  78. 0x0000, 0x0000, 0x0200,
  79. },
  80. { 0x0, 0x0, 0x0,},
  81. { 0x0, 0x0, 0x0,},
  82. { 0x0, 0xff, 0x0, 0xff, 0x0, 0xff,},
  83. { 0x0, 0xff, 0x0, 0xff, 0x0, 0xff,},
  84. },
  85. };
  86. /*
  87. * To program a linear LUT we need to make the slope to be 1/16 to enable
  88. * conversion from 12bit to 8bit. Also in cases where post blend values might
  89. * cross 255, we need to cap them now to 255. The offset of the final segment
  90. * would be programmed in such a case and we set the value to 32460 which is
  91. * 255 in U8.7.
  92. */
  93. static struct mdp_ar_gc_lut_data lin_gc_data[GC_LUT_SEGMENTS] = {
  94. { 0, 256, 0}, {4095, 0, 0},
  95. {4095, 0, 0}, {4095, 0, 0},
  96. {4095, 0, 0}, {4095, 0, 0},
  97. {4095, 0, 0}, {4095, 0, 0},
  98. {4095, 0, 0}, {4095, 0, 0},
  99. {4095, 0, 0}, {4095, 0, 0},
  100. {4095, 0, 0}, {4095, 0, 0},
  101. {4095, 0, 0}, {4095, 0, 32640}
  102. };
  103. #if defined(CONFIG_MDNIE_TFT_MSM8X26) || defined (CONFIG_FB_MSM_MDSS_S6E8AA0A_HD_PANEL) || defined(CONFIG_MDNIE_VIDEO_ENHANCED)
  104. struct mdp_pcc_cfg_data pcc_reverse = {
  105. .block = MDP_LOGICAL_BLOCK_DISP_0,
  106. .ops = MDP_PP_OPS_WRITE | MDP_PP_OPS_ENABLE,
  107. .r = { 0x00007ff8, 0xffff8000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
  108. 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
  109. .g = { 0x00007ff8, 0x00000000, 0xffff8000, 0x00000000, 0x00000000, 0x00000000,
  110. 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
  111. .b = { 0x00007ff8, 0x00000000, 0x00000000, 0xffff8000, 0x00000000, 0x00000000,
  112. 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
  113. };
  114. struct mdp_pcc_cfg_data pcc_normal = {
  115. .block = MDP_LOGICAL_BLOCK_DISP_0,
  116. .ops = MDP_PP_OPS_WRITE | MDP_PP_OPS_DISABLE,
  117. .r = { 0x00000000, 0x00008000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
  118. 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
  119. .g = { 0x00000000, 0x00000000, 0x00008000, 0x00000000, 0x00000000, 0x00000000,
  120. 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
  121. .b = { 0x00000000, 0x00000000, 0x00000000, 0x00008000, 0x00000000, 0x00000000,
  122. 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
  123. };
  124. #endif
  125. #define CSC_MV_OFF 0x0
  126. #define CSC_BV_OFF 0x2C
  127. #define CSC_LV_OFF 0x14
  128. #define CSC_POST_OFF 0xC
  129. #define MDSS_BLOCK_DISP_NUM (MDP_BLOCK_MAX - MDP_LOGICAL_BLOCK_DISP_0)
  130. #define MDSS_MAX_MIXER_DISP_NUM (MDSS_BLOCK_DISP_NUM + \
  131. MDSS_MDP_WB_MAX_LAYERMIXER)
  132. #define HIST_WAIT_TIMEOUT(frame) ((75 * HZ * (frame)) / 1000)
  133. #define HIST_KICKOFF_WAIT_FRACTION 4
  134. /* hist collect state */
  135. enum {
  136. HIST_UNKNOWN,
  137. HIST_IDLE,
  138. HIST_RESET,
  139. HIST_START,
  140. HIST_READY,
  141. };
  142. static u32 dither_matrix[16] = {
  143. 15, 7, 13, 5, 3, 11, 1, 9, 12, 4, 14, 6, 0, 8, 2, 10};
  144. static u32 dither_depth_map[9] = {
  145. 0, 0, 0, 0, 0, 1, 2, 3, 3};
  146. static u32 igc_limited[IGC_LUT_ENTRIES] = {
  147. 16777472, 17826064, 18874656, 19923248,
  148. 19923248, 20971840, 22020432, 23069024,
  149. 24117616, 25166208, 26214800, 26214800,
  150. 27263392, 28311984, 29360576, 30409168,
  151. 31457760, 32506352, 32506352, 33554944,
  152. 34603536, 35652128, 36700720, 37749312,
  153. 38797904, 38797904, 39846496, 40895088,
  154. 41943680, 42992272, 44040864, 45089456,
  155. 45089456, 46138048, 47186640, 48235232,
  156. 49283824, 50332416, 51381008, 51381008,
  157. 52429600, 53478192, 54526784, 55575376,
  158. 56623968, 57672560, 58721152, 58721152,
  159. 59769744, 60818336, 61866928, 62915520,
  160. 63964112, 65012704, 65012704, 66061296,
  161. 67109888, 68158480, 69207072, 70255664,
  162. 71304256, 71304256, 72352848, 73401440,
  163. 74450032, 75498624, 76547216, 77595808,
  164. 77595808, 78644400, 79692992, 80741584,
  165. 81790176, 82838768, 83887360, 83887360,
  166. 84935952, 85984544, 87033136, 88081728,
  167. 89130320, 90178912, 90178912, 91227504,
  168. 92276096, 93324688, 94373280, 95421872,
  169. 96470464, 96470464, 97519056, 98567648,
  170. 99616240, 100664832, 101713424, 102762016,
  171. 102762016, 103810608, 104859200, 105907792,
  172. 106956384, 108004976, 109053568, 109053568,
  173. 110102160, 111150752, 112199344, 113247936,
  174. 114296528, 115345120, 115345120, 116393712,
  175. 117442304, 118490896, 119539488, 120588080,
  176. 121636672, 121636672, 122685264, 123733856,
  177. 124782448, 125831040, 126879632, 127928224,
  178. 127928224, 128976816, 130025408, 131074000,
  179. 132122592, 133171184, 134219776, 135268368,
  180. 135268368, 136316960, 137365552, 138414144,
  181. 139462736, 140511328, 141559920, 141559920,
  182. 142608512, 143657104, 144705696, 145754288,
  183. 146802880, 147851472, 147851472, 148900064,
  184. 149948656, 150997248, 152045840, 153094432,
  185. 154143024, 154143024, 155191616, 156240208,
  186. 157288800, 158337392, 159385984, 160434576,
  187. 160434576, 161483168, 162531760, 163580352,
  188. 164628944, 165677536, 166726128, 166726128,
  189. 167774720, 168823312, 169871904, 170920496,
  190. 171969088, 173017680, 173017680, 174066272,
  191. 175114864, 176163456, 177212048, 178260640,
  192. 179309232, 179309232, 180357824, 181406416,
  193. 182455008, 183503600, 184552192, 185600784,
  194. 185600784, 186649376, 187697968, 188746560,
  195. 189795152, 190843744, 191892336, 191892336,
  196. 192940928, 193989520, 195038112, 196086704,
  197. 197135296, 198183888, 198183888, 199232480,
  198. 200281072, 201329664, 202378256, 203426848,
  199. 204475440, 204475440, 205524032, 206572624,
  200. 207621216, 208669808, 209718400, 210766992,
  201. 211815584, 211815584, 212864176, 213912768,
  202. 214961360, 216009952, 217058544, 218107136,
  203. 218107136, 219155728, 220204320, 221252912,
  204. 222301504, 223350096, 224398688, 224398688,
  205. 225447280, 226495872, 227544464, 228593056,
  206. 229641648, 230690240, 230690240, 231738832,
  207. 232787424, 233836016, 234884608, 235933200,
  208. 236981792, 236981792, 238030384, 239078976,
  209. 240127568, 241176160, 242224752, 243273344,
  210. 243273344, 244321936, 245370528, 246419120};
  211. #define GAMUT_T0_SIZE 125
  212. #define GAMUT_T1_SIZE 100
  213. #define GAMUT_T2_SIZE 80
  214. #define GAMUT_T3_SIZE 100
  215. #define GAMUT_T4_SIZE 100
  216. #define GAMUT_T5_SIZE 80
  217. #define GAMUT_T6_SIZE 64
  218. #define GAMUT_T7_SIZE 80
  219. #define GAMUT_TOTAL_TABLE_SIZE (GAMUT_T0_SIZE + GAMUT_T1_SIZE + \
  220. GAMUT_T2_SIZE + GAMUT_T3_SIZE + GAMUT_T4_SIZE + \
  221. GAMUT_T5_SIZE + GAMUT_T6_SIZE + GAMUT_T7_SIZE)
  222. #define MDSS_MDP_PA_SIZE 0xC
  223. #define MDSS_MDP_SIX_ZONE_SIZE 0xC
  224. #define MDSS_MDP_MEM_COL_SIZE 0x3C
  225. #define MDSS_MDP_GC_SIZE 0x28
  226. #define MDSS_MDP_PCC_SIZE 0xB8
  227. #define MDSS_MDP_GAMUT_SIZE 0x5C
  228. #define MDSS_MDP_IGC_DSPP_SIZE 0x28
  229. #define MDSS_MDP_IGC_SSPP_SIZE 0x88
  230. #define MDSS_MDP_VIG_QSEED2_SHARP_SIZE 0x0C
  231. #define TOTAL_BLEND_STAGES 0x4
  232. #define PP_FLAGS_DIRTY_PA 0x1
  233. #define PP_FLAGS_DIRTY_PCC 0x2
  234. #define PP_FLAGS_DIRTY_IGC 0x4
  235. #define PP_FLAGS_DIRTY_ARGC 0x8
  236. #define PP_FLAGS_DIRTY_ENHIST 0x10
  237. #define PP_FLAGS_DIRTY_DITHER 0x20
  238. #define PP_FLAGS_DIRTY_GAMUT 0x40
  239. #define PP_FLAGS_DIRTY_HIST_COL 0x80
  240. #define PP_FLAGS_DIRTY_PGC 0x100
  241. #define PP_FLAGS_DIRTY_SHARP 0x200
  242. #define PP_SSPP 0
  243. #define PP_DSPP 1
  244. #define PP_STS_ENABLE 0x1
  245. #define PP_STS_GAMUT_FIRST 0x2
  246. #define PP_STS_PA_HUE_MASK 0x2
  247. #define PP_STS_PA_SAT_MASK 0x4
  248. #define PP_STS_PA_VAL_MASK 0x8
  249. #define PP_STS_PA_CONT_MASK 0x10
  250. #define PP_STS_PA_MEM_PROTECT_EN 0x20
  251. #define PP_STS_PA_MEM_COL_SKIN_MASK 0x40
  252. #define PP_STS_PA_MEM_COL_FOL_MASK 0x80
  253. #define PP_STS_PA_MEM_COL_SKY_MASK 0x100
  254. #define PP_STS_PA_SIX_ZONE_HUE_MASK 0x200
  255. #define PP_STS_PA_SIX_ZONE_SAT_MASK 0x400
  256. #define PP_STS_PA_SIX_ZONE_VAL_MASK 0x800
  257. #define PP_STS_PA_SAT_ZERO_EXP_EN 0x1000
  258. #define PP_AD_BAD_HW_NUM 255
  259. #define MDSS_SIDE_NONE 0
  260. #define MDSS_SIDE_LEFT 1
  261. #define MDSS_SIDE_RIGHT 2
  262. #define PP_AD_STATE_INIT 0x2
  263. #define PP_AD_STATE_CFG 0x4
  264. #define PP_AD_STATE_DATA 0x8
  265. #define PP_AD_STATE_RUN 0x10
  266. #define PP_AD_STATE_VSYNC 0x20
  267. #define PP_AD_STATE_BL_LIN 0x40
  268. #define PP_AD_STATE_IS_INITCFG(st) (((st) & PP_AD_STATE_INIT) &&\
  269. ((st) & PP_AD_STATE_CFG))
  270. #define PP_AD_STATE_IS_READY(st) (((st) & PP_AD_STATE_INIT) &&\
  271. ((st) & PP_AD_STATE_CFG) &&\
  272. ((st) & PP_AD_STATE_DATA))
  273. #define PP_AD_STS_DIRTY_INIT 0x2
  274. #define PP_AD_STS_DIRTY_CFG 0x4
  275. #define PP_AD_STS_DIRTY_DATA 0x8
  276. #define PP_AD_STS_DIRTY_VSYNC 0x10
  277. #define PP_AD_STS_DIRTY_ENABLE 0x20
  278. #define PP_AD_STS_IS_DIRTY(sts) (((sts) & PP_AD_STS_DIRTY_INIT) ||\
  279. ((sts) & PP_AD_STS_DIRTY_CFG))
  280. /* Bits 0 and 1 */
  281. #define MDSS_AD_INPUT_AMBIENT (0x03)
  282. /* Bits 3 and 7 */
  283. #define MDSS_AD_INPUT_STRENGTH (0x88)
  284. /*
  285. * Check data by shifting by mode to see if it matches to the
  286. * MDSS_AD_INPUT_* bitfields
  287. */
  288. #define MDSS_AD_MODE_DATA_MATCH(mode, data) ((1 << (mode)) & (data))
  289. #define MDSS_AD_RUNNING_AUTO_BL(ad) (((ad)->state & PP_AD_STATE_RUN) &&\
  290. ((ad)->cfg.mode == MDSS_AD_MODE_AUTO_BL))
  291. #define MDSS_AD_RUNNING_AUTO_STR(ad) (((ad)->state & PP_AD_STATE_RUN) &&\
  292. ((ad)->cfg.mode == MDSS_AD_MODE_AUTO_STR))
  293. #define SHARP_STRENGTH_DEFAULT 32
  294. #define SHARP_EDGE_THR_DEFAULT 112
  295. #define SHARP_SMOOTH_THR_DEFAULT 8
  296. #define SHARP_NOISE_THR_DEFAULT 2
  297. #define MDP_PP_BUS_VECTOR_ENTRY(ab_val, ib_val) \
  298. { \
  299. .src = MSM_BUS_MASTER_SPDM, \
  300. .dst = MSM_BUS_SLAVE_IMEM_CFG, \
  301. .ab = (ab_val), \
  302. .ib = (ib_val), \
  303. }
  304. #define SZ_37_5M (37500000 * 8)
  305. static struct msm_bus_vectors mdp_pp_bus_vectors[] = {
  306. MDP_PP_BUS_VECTOR_ENTRY(0, 0),
  307. MDP_PP_BUS_VECTOR_ENTRY(0, SZ_37_5M),
  308. };
  309. static struct msm_bus_paths mdp_pp_bus_usecases[ARRAY_SIZE(mdp_pp_bus_vectors)];
  310. static struct msm_bus_scale_pdata mdp_pp_bus_scale_table = {
  311. .usecase = mdp_pp_bus_usecases,
  312. .num_usecases = ARRAY_SIZE(mdp_pp_bus_usecases),
  313. .name = "mdss_pp",
  314. };
/*
 * struct mdss_pp_res_type - driver-private shadow of post-processing state.
 *
 * The single instance pointed to by mdss_pp_res caches the userspace
 * supplied PP configuration per display/mixer so that it can be
 * (re)programmed into hardware on a later flush.  Array bounds:
 * MDSS_BLOCK_DISP_NUM for per-display blocks, MDSS_MAX_MIXER_DISP_NUM
 * for per-mixer blocks, MDSS_MDP_MAX_DSPP for physical DSPPs.
 */
struct mdss_pp_res_type {
	/* logical info */
	u32 pp_disp_flags[MDSS_MAX_MIXER_DISP_NUM];	/* PP_FLAGS_DIRTY_* bits */
	u32 igc_lut_c0c1[MDSS_BLOCK_DISP_NUM][IGC_LUT_ENTRIES];	/* IGC LUT, c0/c1 */
	u32 igc_lut_c2[MDSS_BLOCK_DISP_NUM][IGC_LUT_ENTRIES];	/* IGC LUT, c2 */
	/* per-channel ARGC (gamma correction) segment tables */
	struct mdp_ar_gc_lut_data
		gc_lut_r[MDSS_MAX_MIXER_DISP_NUM][GC_LUT_SEGMENTS];
	struct mdp_ar_gc_lut_data
		gc_lut_g[MDSS_MAX_MIXER_DISP_NUM][GC_LUT_SEGMENTS];
	struct mdp_ar_gc_lut_data
		gc_lut_b[MDSS_MAX_MIXER_DISP_NUM][GC_LUT_SEGMENTS];
	u32 enhist_lut[MDSS_BLOCK_DISP_NUM][ENHIST_LUT_ENTRIES];	/* hist LUT */
	struct mdp_pa_cfg pa_disp_cfg[MDSS_BLOCK_DISP_NUM];	/* legacy PA */
	struct mdp_pa_v2_data pa_v2_disp_cfg[MDSS_BLOCK_DISP_NUM];	/* PA v2 */
	u32 six_zone_lut_curve_p0[MDSS_BLOCK_DISP_NUM][MDP_SIX_ZONE_LUT_SIZE];
	u32 six_zone_lut_curve_p1[MDSS_BLOCK_DISP_NUM][MDP_SIX_ZONE_LUT_SIZE];
	struct mdp_pcc_cfg_data pcc_disp_cfg[MDSS_BLOCK_DISP_NUM];
	struct mdp_igc_lut_data igc_disp_cfg[MDSS_BLOCK_DISP_NUM];
	struct mdp_pgc_lut_data argc_disp_cfg[MDSS_MAX_MIXER_DISP_NUM];
	struct mdp_pgc_lut_data pgc_disp_cfg[MDSS_BLOCK_DISP_NUM];
	struct mdp_hist_lut_data enhist_disp_cfg[MDSS_BLOCK_DISP_NUM];
	struct mdp_dither_cfg_data dither_disp_cfg[MDSS_BLOCK_DISP_NUM];
	struct mdp_gamut_cfg_data gamut_disp_cfg[MDSS_BLOCK_DISP_NUM];
	/* r/g/b gamut tables stored back to back, hence the * 3 */
	uint16_t gamut_tbl[MDSS_BLOCK_DISP_NUM][GAMUT_TOTAL_TABLE_SIZE * 3];
	u32 hist_data[MDSS_BLOCK_DISP_NUM][HIST_V_SIZE];	/* histogram readback */
	struct pp_sts_type pp_disp_sts[MDSS_MAX_MIXER_DISP_NUM];	/* HW enable state */
	/* physical info */
	struct pp_hist_col_info dspp_hist[MDSS_MDP_MAX_DSPP];	/* per-DSPP hist */
};
  344. static DEFINE_MUTEX(mdss_pp_mutex);
  345. static struct mdss_pp_res_type *mdss_pp_res;
  346. static u32 pp_hist_read(char __iomem *v_addr,
  347. struct pp_hist_col_info *hist_info);
  348. static int pp_histogram_setup(u32 *op, u32 block, struct mdss_mdp_mixer *mix);
  349. static int pp_histogram_disable(struct pp_hist_col_info *hist_info,
  350. u32 done_bit, char __iomem *ctl_base);
  351. static void pp_update_pcc_regs(char __iomem *addr,
  352. struct mdp_pcc_cfg_data *cfg_ptr);
  353. static void pp_update_igc_lut(struct mdp_igc_lut_data *cfg,
  354. char __iomem *addr, u32 blk_idx);
  355. static void pp_update_gc_one_lut(char __iomem *addr,
  356. struct mdp_ar_gc_lut_data *lut_data,
  357. uint8_t num_stages);
  358. static void pp_update_argc_lut(char __iomem *addr,
  359. struct mdp_pgc_lut_data *config);
  360. static void pp_update_hist_lut(char __iomem *base,
  361. struct mdp_hist_lut_data *cfg);
  362. static int pp_gm_has_invalid_lut_size(struct mdp_gamut_cfg_data *config);
  363. static void pp_gamut_config(struct mdp_gamut_cfg_data *gamut_cfg,
  364. char __iomem *base, struct pp_sts_type *pp_sts);
  365. static void pp_pa_config(unsigned long flags, char __iomem *addr,
  366. struct pp_sts_type *pp_sts,
  367. struct mdp_pa_cfg *pa_config);
  368. static void pp_pa_v2_config(unsigned long flags, char __iomem *addr,
  369. struct pp_sts_type *pp_sts,
  370. struct mdp_pa_v2_data *pa_v2_config,
  371. int mdp_location);
  372. static void pp_pcc_config(unsigned long flags, char __iomem *addr,
  373. struct pp_sts_type *pp_sts,
  374. struct mdp_pcc_cfg_data *pcc_config);
  375. static void pp_igc_config(unsigned long flags, char __iomem *addr,
  376. struct pp_sts_type *pp_sts,
  377. struct mdp_igc_lut_data *igc_config,
  378. u32 pipe_num);
  379. static void pp_enhist_config(unsigned long flags, char __iomem *addr,
  380. struct pp_sts_type *pp_sts,
  381. struct mdp_hist_lut_data *enhist_cfg);
  382. static void pp_dither_config(char __iomem *addr,
  383. struct pp_sts_type *pp_sts,
  384. struct mdp_dither_cfg_data *dither_cfg);
  385. static void pp_dspp_opmode_config(struct mdss_mdp_ctl *ctl, u32 num,
  386. struct pp_sts_type *pp_sts, int mdp_rev,
  387. u32 *opmode);
  388. static void pp_sharp_config(char __iomem *addr,
  389. struct pp_sts_type *pp_sts,
  390. struct mdp_sharp_cfg *sharp_config);
  391. static void pp_update_pa_v2_vig_opmode(struct pp_sts_type *pp_sts,
  392. u32 *opmode);
  393. static int pp_copy_pa_six_zone_lut(struct mdp_pa_v2_cfg_data *pa_v2_config,
  394. u32 disp_num);
  395. static void pp_update_pa_v2_global_adj_regs(char __iomem *addr,
  396. struct mdp_pa_v2_data *pa_config);
  397. static void pp_update_pa_v2_mem_col(char __iomem *addr,
  398. struct mdp_pa_v2_data *pa_v2_config);
  399. static void pp_update_pa_v2_mem_col_regs(char __iomem *addr,
  400. struct mdp_pa_mem_col_cfg *cfg);
  401. static void pp_update_pa_v2_six_zone_regs(char __iomem *addr,
  402. struct mdp_pa_v2_data *pa_v2_config);
  403. static void pp_update_pa_v2_sts(struct pp_sts_type *pp_sts,
  404. struct mdp_pa_v2_data *pa_v2_config);
  405. static int pp_read_pa_v2_regs(char __iomem *addr,
  406. struct mdp_pa_v2_data *pa_v2_config,
  407. u32 disp_num);
  408. static void pp_read_pa_mem_col_regs(char __iomem *addr,
  409. struct mdp_pa_mem_col_cfg *mem_col_cfg);
  410. static int mdss_ad_init_checks(struct msm_fb_data_type *mfd);
  411. static int mdss_mdp_get_ad(struct msm_fb_data_type *mfd,
  412. struct mdss_ad_info **ad);
  413. static int pp_ad_invalidate_input(struct msm_fb_data_type *mfd);
  414. static void pp_ad_vsync_handler(struct mdss_mdp_ctl *ctl, ktime_t t);
  415. static void pp_ad_cfg_write(struct mdss_mdp_ad *ad_hw,
  416. struct mdss_ad_info *ad);
  417. static void pp_ad_init_write(struct mdss_mdp_ad *ad_hw,
  418. struct mdss_ad_info *ad, struct mdss_mdp_ctl *ctl);
  419. static void pp_ad_input_write(struct mdss_mdp_ad *ad_hw,
  420. struct mdss_ad_info *ad);
  421. static int pp_ad_setup_hw_nums(struct msm_fb_data_type *mfd,
  422. struct mdss_ad_info *ad);
  423. static void pp_ad_bypass_config(struct mdss_ad_info *ad,
  424. struct mdss_mdp_ctl *ctl, u32 num, u32 *opmode);
  425. static int mdss_mdp_ad_setup(struct msm_fb_data_type *mfd);
  426. static void pp_ad_cfg_lut(char __iomem *addr, u32 *data);
  427. static int pp_ad_attenuate_bl(struct mdss_ad_info *ad, u32 bl, u32 *bl_out);
  428. static int pp_ad_linearize_bl(struct mdss_ad_info *ad, u32 bl, u32 *bl_out,
  429. int inv);
  430. static int pp_ad_calc_bl(struct msm_fb_data_type *mfd, int bl_in, int *bl_out,
  431. bool *bl_out_notify);
  432. static struct msm_fb_data_type *mdss_get_mfd_from_index(int index);
  433. static int pp_ad_shutdown_cleanup(struct msm_fb_data_type *mfd);
  434. static int pp_num_to_side(struct mdss_mdp_ctl *ctl, u32 num);
  435. static inline bool pp_sts_is_enabled(u32 sts, int side);
  436. static inline void pp_sts_set_split_bits(u32 *sts, u32 bits);
  437. static u32 last_sts, last_state;
  438. inline int linear_map(int in, int *out, int in_max, int out_max)
  439. {
  440. if (in < 0 || !out || in_max <= 0 || out_max <= 0)
  441. return -EINVAL;
  442. *out = ((in * out_max) / in_max);
  443. pr_debug("in = %d, out = %d, in_max = %d, out_max = %d\n",
  444. in, *out, in_max, out_max);
  445. if ((in > 0) && (*out == 0))
  446. *out = 1;
  447. return 0;
  448. }
/*
 * mdss_mdp_csc_setup_data() - program one CSC (color space conversion)
 * matrix into an SSPP VIG pipe or a writeback (WB) block.
 * @block:   MDSS_MDP_BLOCK_SSPP or MDSS_MDP_BLOCK_WB
 * @blk_idx: pipe index (SSPP) or ctl index (WB)
 * @tbl_idx: CSC table select; for SSPP, 1 selects CSC_1, else CSC_0
 * @data:    matrix coefficients, pre/post bias and limit vectors
 *
 * Returns 0 on success, -EINVAL for a NULL config, unknown block id or
 * out-of-range index.
 */
int mdss_mdp_csc_setup_data(u32 block, u32 blk_idx, u32 tbl_idx,
				   struct mdp_csc_cfg *data)
{
	int i, ret = 0;
	char __iomem *base, *addr;
	u32 val = 0;
	struct mdss_data_type *mdata;
	struct mdss_mdp_pipe *pipe;
	struct mdss_mdp_ctl *ctl;

	if (data == NULL) {
		pr_err("no csc matrix specified\n");
		return -EINVAL;
	}

	/* Resolve the register base for the requested block. */
	mdata = mdss_mdp_get_mdata();
	switch (block) {
	case MDSS_MDP_BLOCK_SSPP:
		if (blk_idx < mdata->nvig_pipes) {
			pipe = mdata->vig_pipes + blk_idx;
			base = pipe->base;
			if (tbl_idx == 1)
				base += MDSS_MDP_REG_VIG_CSC_1_BASE;
			else
				base += MDSS_MDP_REG_VIG_CSC_0_BASE;
		} else {
			ret = -EINVAL;
		}
		break;
	case MDSS_MDP_BLOCK_WB:
		if (blk_idx < mdata->nctl) {
			ctl = mdata->ctl_off + blk_idx;
			base = ctl->wb_base + MDSS_MDP_REG_WB_CSC_BASE;
		} else {
			ret = -EINVAL;
		}
		break;
	default:
		ret = -EINVAL;
		break;
	}
	if (ret != 0) {
		pr_err("unsupported block id for csc\n");
		return ret;
	}

	/*
	 * Matrix coefficients: two 16-bit coefficients are packed per
	 * 32-bit register (odd index goes to the high half-word), so a
	 * register is written on every odd i; the trailing write pushes
	 * the lone ninth coefficient (COEFF_33).
	 *
	 * NOTE(review): "addr += sizeof(u32 *)" strides by pointer size
	 * (4 on 32-bit ARM, matching the 4-byte register pitch, but 8 on
	 * a 64-bit build) -- presumably intended as sizeof(u32); confirm
	 * before building this for a 64-bit target.
	 */
	addr = base + CSC_MV_OFF;
	for (i = 0; i < 9; i++) {
		if (i & 0x1) {
			val |= data->csc_mv[i] << 16;
			writel_relaxed(val, addr);
			addr += sizeof(u32 *);
		} else {
			val = data->csc_mv[i];
		}
	}
	writel_relaxed(val, addr); /* COEFF_33 */

	/* Pre/post bias vectors; post registers sit CSC_POST_OFF above. */
	addr = base + CSC_BV_OFF;
	for (i = 0; i < 3; i++) {
		writel_relaxed(data->csc_pre_bv[i], addr);
		writel_relaxed(data->csc_post_bv[i], addr + CSC_POST_OFF);
		addr += sizeof(u32 *);
	}

	/* Limit vectors: a low/high pair packed into each register. */
	addr = base + CSC_LV_OFF;
	for (i = 0; i < 6; i += 2) {
		val = (data->csc_pre_lv[i] << 8) | data->csc_pre_lv[i+1];
		writel_relaxed(val, addr);
		val = (data->csc_post_lv[i] << 8) | data->csc_post_lv[i+1];
		writel_relaxed(val, addr + CSC_POST_OFF);
		addr += sizeof(u32 *);
	}

	return ret;
}
/*
 * mdss_mdp_csc_setup() - select one of the predefined CSC matrices and
 * program it via mdss_mdp_csc_setup_data().
 * @csc_type: index into the mdp_csc_convert[] table (MDSS_MDP_CSC_*)
 *
 * Returns -ERANGE for an out-of-range @csc_type, otherwise the result
 * of mdss_mdp_csc_setup_data().
 */
int mdss_mdp_csc_setup(u32 block, u32 blk_idx, u32 tbl_idx, u32 csc_type)
{
	struct mdp_csc_cfg *data;

	if (csc_type >= MDSS_MDP_MAX_CSC) {
		pr_err("invalid csc matrix index %d\n", csc_type);
		return -ERANGE;
	}
	pr_debug("csc type=%d blk=%d idx=%d tbl=%d\n", csc_type,
		 block, blk_idx, tbl_idx);
#ifdef CONFIG_FB_MSM_CAMERA_CSC
	/*
	 * Camera CSC builds can substitute a wide-band YUV->RGB matrix
	 * unless a runtime update was requested.  NOTE(review): the
	 * csc_update flag is defined elsewhere in this file/driver --
	 * its exact semantics cannot be confirmed from this chunk.
	 */
	if (csc_type == MDSS_MDP_CSC_YUV2RGB && !csc_update)
	{
		data = &mdp_csc_convert_wideband;
		pr_debug("will do mdp_csc_convert (wide band)\n");
	}
	else
	{
		data = &mdp_csc_convert[csc_type];
		pr_debug("will do mdp_csc_convert (narrow band)\n");
	}
#else
	data = &mdp_csc_convert[csc_type];
#endif
	return mdss_mdp_csc_setup_data(block, blk_idx, tbl_idx, data);
}
/*
 * pp_gamut_config() - program the DSPP gamut-mapping LUTs and update
 * the shadow status bits.
 *
 * Each of the r/g/b groups iterates over MDP_GAMUT_TABLE_NUM tables;
 * every entry of one table is written to the SAME register address and
 * addr only advances by 4 per table -- presumably the gamut LUT
 * register auto-increments an internal index on each write (TODO:
 * confirm against the MDP HW programming guide; do not "fix" the loop
 * without doing so).
 */
static void pp_gamut_config(struct mdp_gamut_cfg_data *gamut_cfg,
			char __iomem *base, struct pp_sts_type *pp_sts)
{
	char __iomem *addr;
	int i, j;

	if (gamut_cfg->flags & MDP_PP_OPS_WRITE) {
		addr = base + MDSS_MDP_REG_DSPP_GAMUT_BASE;
		/* red channel tables */
		for (i = 0; i < MDP_GAMUT_TABLE_NUM; i++) {
			for (j = 0; j < gamut_cfg->tbl_size[i]; j++)
				writel_relaxed((u32)gamut_cfg->r_tbl[i][j],
						addr);
			addr += 4;
		}
		/* green channel tables */
		for (i = 0; i < MDP_GAMUT_TABLE_NUM; i++) {
			for (j = 0; j < gamut_cfg->tbl_size[i]; j++)
				writel_relaxed((u32)gamut_cfg->g_tbl[i][j],
						addr);
			addr += 4;
		}
		/* blue channel tables */
		for (i = 0; i < MDP_GAMUT_TABLE_NUM; i++) {
			for (j = 0; j < gamut_cfg->tbl_size[i]; j++)
				writel_relaxed((u32)gamut_cfg->b_tbl[i][j],
						addr);
			addr += 4;
		}
		/* apply gamut before (rather than after) gamma correction */
		if (gamut_cfg->gamut_first)
			pp_sts->gamut_sts |= PP_STS_GAMUT_FIRST;
	}
	if (gamut_cfg->flags & MDP_PP_OPS_DISABLE)
		pp_sts->gamut_sts &= ~PP_STS_ENABLE;
	else if (gamut_cfg->flags & MDP_PP_OPS_ENABLE)
		pp_sts->gamut_sts |= PP_STS_ENABLE;
	pp_sts_set_split_bits(&pp_sts->gamut_sts, gamut_cfg->flags);
}
  578. static void pp_pa_config(unsigned long flags, char __iomem *addr,
  579. struct pp_sts_type *pp_sts,
  580. struct mdp_pa_cfg *pa_config)
  581. {
  582. if (flags & PP_FLAGS_DIRTY_PA) {
  583. if (pa_config->flags & MDP_PP_OPS_WRITE) {
  584. writel_relaxed(pa_config->hue_adj, addr);
  585. addr += 4;
  586. writel_relaxed(pa_config->sat_adj, addr);
  587. addr += 4;
  588. writel_relaxed(pa_config->val_adj, addr);
  589. addr += 4;
  590. writel_relaxed(pa_config->cont_adj, addr);
  591. }
  592. if (pa_config->flags & MDP_PP_OPS_DISABLE)
  593. pp_sts->pa_sts &= ~PP_STS_ENABLE;
  594. else if (pa_config->flags & MDP_PP_OPS_ENABLE)
  595. pp_sts->pa_sts |= PP_STS_ENABLE;
  596. }
  597. }
  598. static void pp_pa_v2_config(unsigned long flags, char __iomem *addr,
  599. struct pp_sts_type *pp_sts,
  600. struct mdp_pa_v2_data *pa_v2_config,
  601. int mdp_location)
  602. {
  603. if ((flags & PP_FLAGS_DIRTY_PA) &&
  604. (pa_v2_config->flags & MDP_PP_OPS_WRITE)) {
  605. pp_update_pa_v2_global_adj_regs(addr,
  606. pa_v2_config);
  607. /* Update PA DSPP Regs */
  608. if (mdp_location == PP_DSPP) {
  609. addr += 0x10;
  610. pp_update_pa_v2_six_zone_regs(addr, pa_v2_config);
  611. addr += 0xC;
  612. pp_update_pa_v2_mem_col(addr, pa_v2_config);
  613. } else if (mdp_location == PP_SSPP) { /* Update PA SSPP Regs */
  614. addr -= MDSS_MDP_REG_VIG_PA_BASE;
  615. addr += MDSS_MDP_REG_VIG_MEM_COL_BASE;
  616. pp_update_pa_v2_mem_col(addr, pa_v2_config);
  617. }
  618. pp_update_pa_v2_sts(pp_sts, pa_v2_config);
  619. }
  620. }
  621. static void pp_update_pa_v2_global_adj_regs(char __iomem *addr,
  622. struct mdp_pa_v2_data *pa_v2_config)
  623. {
  624. if (pa_v2_config->flags & MDP_PP_PA_HUE_ENABLE)
  625. writel_relaxed(pa_v2_config->global_hue_adj, addr);
  626. addr += 4;
  627. if (pa_v2_config->flags & MDP_PP_PA_SAT_ENABLE)
  628. /* Sat Global Adjust reg includes Sat Threshold */
  629. writel_relaxed(pa_v2_config->global_sat_adj, addr);
  630. addr += 4;
  631. if (pa_v2_config->flags & MDP_PP_PA_VAL_ENABLE)
  632. writel_relaxed(pa_v2_config->global_val_adj, addr);
  633. addr += 4;
  634. if (pa_v2_config->flags & MDP_PP_PA_CONT_ENABLE)
  635. writel_relaxed(pa_v2_config->global_cont_adj, addr);
  636. }
  637. static void pp_update_pa_v2_mem_col(char __iomem *addr,
  638. struct mdp_pa_v2_data *pa_v2_config)
  639. {
  640. /* Update skin zone memory color registers */
  641. if (pa_v2_config->flags & MDP_PP_PA_SKIN_ENABLE)
  642. pp_update_pa_v2_mem_col_regs(addr, &pa_v2_config->skin_cfg);
  643. addr += 0x14;
  644. /* Update sky zone memory color registers */
  645. if (pa_v2_config->flags & MDP_PP_PA_SKY_ENABLE)
  646. pp_update_pa_v2_mem_col_regs(addr, &pa_v2_config->sky_cfg);
  647. addr += 0x14;
  648. /* Update foliage zone memory color registers */
  649. if (pa_v2_config->flags & MDP_PP_PA_FOL_ENABLE)
  650. pp_update_pa_v2_mem_col_regs(addr, &pa_v2_config->fol_cfg);
  651. }
  652. static void pp_update_pa_v2_mem_col_regs(char __iomem *addr,
  653. struct mdp_pa_mem_col_cfg *cfg)
  654. {
  655. pr_debug("ADDR: 0x%x, P0: 0x%x\n", (u32)addr, cfg->color_adjust_p0);
  656. writel_relaxed(cfg->color_adjust_p0, addr);
  657. addr += 4;
  658. pr_debug("ADDR: 0x%x, P1: 0x%x\n", (u32)addr, cfg->color_adjust_p1);
  659. writel_relaxed(cfg->color_adjust_p1, addr);
  660. addr += 4;
  661. pr_debug("ADDR: 0x%x, HUE REGION: 0x%x\n", (u32)addr, cfg->hue_region);
  662. writel_relaxed(cfg->hue_region, addr);
  663. addr += 4;
  664. pr_debug("ADDR: 0x%x, SAT REGION: 0x%x\n", (u32)addr, cfg->sat_region);
  665. writel_relaxed(cfg->sat_region, addr);
  666. addr += 4;
  667. pr_debug("ADDR: 0x%x, VAL REGION: 0x%x\n", (u32)addr, cfg->val_region);
  668. writel_relaxed(cfg->val_region, addr);
  669. }
/*
 * pp_update_pa_v2_six_zone_regs() - load the six-zone LUT and threshold.
 *
 * The LUT is fed through a two-register window at @addr: the p1 word is
 * written at addr+4 first, then the p0 word at addr.  The first p0
 * write carries an "index update" bit (1 << 26) that resets the
 * hardware's internal LUT index and enables auto-increment for the
 * subsequent entries, so the write ORDER here is hardware-mandated --
 * do not reorder.
 */
static void pp_update_pa_v2_six_zone_regs(char __iomem *addr,
			struct mdp_pa_v2_data *pa_v2_config)
{
	int i;
	u32 data;
	/* Update six zone memory color registers */
	if (pa_v2_config->flags & MDP_PP_PA_SIX_ZONE_ENABLE) {
		addr += 4;
		writel_relaxed(pa_v2_config->six_zone_curve_p1[0], addr);
		addr -= 4;
		/* Index Update to trigger auto-incrementing LUT accesses */
		data = (1 << 26);
		writel_relaxed((pa_v2_config->six_zone_curve_p0[0] & 0xFFF) |
				data, addr);
		/* Remove Index Update */
		for (i = 1; i < MDP_SIX_ZONE_LUT_SIZE; i++) {
			addr += 4;
			writel_relaxed(pa_v2_config->six_zone_curve_p1[i],
					addr);
			addr -= 4;
			writel_relaxed(pa_v2_config->six_zone_curve_p0[i] &
					0xFFF, addr);
		}
		/* threshold register sits two words past the LUT window */
		addr += 8;
		writel_relaxed(pa_v2_config->six_zone_thresh, addr);
	}
}
  697. static void pp_update_pa_v2_sts(struct pp_sts_type *pp_sts,
  698. struct mdp_pa_v2_data *pa_v2_config)
  699. {
  700. pp_sts->pa_sts = 0;
  701. /* PA STS update */
  702. if (pa_v2_config->flags & MDP_PP_OPS_ENABLE)
  703. pp_sts->pa_sts |= PP_STS_ENABLE;
  704. else
  705. pp_sts->pa_sts &= ~PP_STS_ENABLE;
  706. /* Global HSV STS update */
  707. if (pa_v2_config->flags & MDP_PP_PA_HUE_MASK)
  708. pp_sts->pa_sts |= PP_STS_PA_HUE_MASK;
  709. if (pa_v2_config->flags & MDP_PP_PA_SAT_MASK)
  710. pp_sts->pa_sts |= PP_STS_PA_SAT_MASK;
  711. if (pa_v2_config->flags & MDP_PP_PA_VAL_MASK)
  712. pp_sts->pa_sts |= PP_STS_PA_VAL_MASK;
  713. if (pa_v2_config->flags & MDP_PP_PA_CONT_MASK)
  714. pp_sts->pa_sts |= PP_STS_PA_CONT_MASK;
  715. if (pa_v2_config->flags & MDP_PP_PA_MEM_PROTECT_EN)
  716. pp_sts->pa_sts |= PP_STS_PA_MEM_PROTECT_EN;
  717. if (pa_v2_config->flags & MDP_PP_PA_SAT_ZERO_EXP_EN)
  718. pp_sts->pa_sts |= PP_STS_PA_SAT_ZERO_EXP_EN;
  719. /* Memory Color STS update */
  720. if (pa_v2_config->flags & MDP_PP_PA_MEM_COL_SKIN_MASK)
  721. pp_sts->pa_sts |= PP_STS_PA_MEM_COL_SKIN_MASK;
  722. if (pa_v2_config->flags & MDP_PP_PA_MEM_COL_SKY_MASK)
  723. pp_sts->pa_sts |= PP_STS_PA_MEM_COL_SKY_MASK;
  724. if (pa_v2_config->flags & MDP_PP_PA_MEM_COL_FOL_MASK)
  725. pp_sts->pa_sts |= PP_STS_PA_MEM_COL_FOL_MASK;
  726. /* Six Zone STS update */
  727. if (pa_v2_config->flags & MDP_PP_PA_SIX_ZONE_HUE_MASK)
  728. pp_sts->pa_sts |= PP_STS_PA_SIX_ZONE_HUE_MASK;
  729. if (pa_v2_config->flags & MDP_PP_PA_SIX_ZONE_SAT_MASK)
  730. pp_sts->pa_sts |= PP_STS_PA_SIX_ZONE_SAT_MASK;
  731. if (pa_v2_config->flags & MDP_PP_PA_SIX_ZONE_VAL_MASK)
  732. pp_sts->pa_sts |= PP_STS_PA_SIX_ZONE_VAL_MASK;
  733. pp_sts_set_split_bits(&pp_sts->pa_sts, pa_v2_config->flags);
  734. }
  735. static void pp_pcc_config(unsigned long flags, char __iomem *addr,
  736. struct pp_sts_type *pp_sts,
  737. struct mdp_pcc_cfg_data *pcc_config)
  738. {
  739. if (flags & PP_FLAGS_DIRTY_PCC) {
  740. if (pcc_config->ops & MDP_PP_OPS_WRITE)
  741. pp_update_pcc_regs(addr, pcc_config);
  742. if (pcc_config->ops & MDP_PP_OPS_DISABLE)
  743. pp_sts->pcc_sts &= ~PP_STS_ENABLE;
  744. else if (pcc_config->ops & MDP_PP_OPS_ENABLE)
  745. pp_sts->pcc_sts |= PP_STS_ENABLE;
  746. pp_sts_set_split_bits(&pp_sts->pcc_sts, pcc_config->ops);
  747. }
  748. }
/*
 * pp_igc_config() - program the inverse gamma correction (IGC) LUT,
 * select the ROM table index, and track enable/split state.
 *
 * tbl_idx: 0 = user-supplied LUT, 1 = ROM0, 2 = ROM1.
 *
 * NOTE(review): the ROM0/ROM1 branches set PP_STS_ENABLE on pcc_sts,
 * not igc_sts -- this matches the shipped driver but looks like a
 * copy/paste slip (everything else in this function operates on
 * igc_sts).  Confirm intent before changing.
 */
static void pp_igc_config(unsigned long flags, char __iomem *addr,
			struct pp_sts_type *pp_sts,
			struct mdp_igc_lut_data *igc_config,
			u32 pipe_num)
{
	u32 tbl_idx;
	if (flags & PP_FLAGS_DIRTY_IGC) {
		if (igc_config->ops & MDP_PP_OPS_WRITE)
			pp_update_igc_lut(igc_config, addr, pipe_num);
		if (igc_config->ops & MDP_PP_IGC_FLAG_ROM0) {
			pp_sts->pcc_sts |= PP_STS_ENABLE;
			tbl_idx = 1;
		} else if (igc_config->ops & MDP_PP_IGC_FLAG_ROM1) {
			pp_sts->pcc_sts |= PP_STS_ENABLE;
			tbl_idx = 2;
		} else {
			tbl_idx = 0;
		}
		pp_sts->igc_tbl_idx = tbl_idx;
		if (igc_config->ops & MDP_PP_OPS_DISABLE)
			pp_sts->igc_sts &= ~PP_STS_ENABLE;
		else if (igc_config->ops & MDP_PP_OPS_ENABLE)
			pp_sts->igc_sts |= PP_STS_ENABLE;
		pp_sts_set_split_bits(&pp_sts->igc_sts, igc_config->ops);
	}
}
  775. static void pp_enhist_config(unsigned long flags, char __iomem *addr,
  776. struct pp_sts_type *pp_sts,
  777. struct mdp_hist_lut_data *enhist_cfg)
  778. {
  779. if (flags & PP_FLAGS_DIRTY_ENHIST) {
  780. if (enhist_cfg->ops & MDP_PP_OPS_WRITE)
  781. pp_update_hist_lut(addr, enhist_cfg);
  782. if (enhist_cfg->ops & MDP_PP_OPS_DISABLE)
  783. pp_sts->enhist_sts &= ~PP_STS_ENABLE;
  784. else if (enhist_cfg->ops & MDP_PP_OPS_ENABLE)
  785. pp_sts->enhist_sts |= PP_STS_ENABLE;
  786. }
  787. }
  788. /*the below function doesn't do error checking on the input params*/
  789. static void pp_sharp_config(char __iomem *addr,
  790. struct pp_sts_type *pp_sts,
  791. struct mdp_sharp_cfg *sharp_config)
  792. {
  793. if (sharp_config->flags & MDP_PP_OPS_WRITE) {
  794. writel_relaxed(sharp_config->strength, addr);
  795. addr += 4;
  796. writel_relaxed(sharp_config->edge_thr, addr);
  797. addr += 4;
  798. writel_relaxed(sharp_config->smooth_thr, addr);
  799. addr += 4;
  800. writel_relaxed(sharp_config->noise_thr, addr);
  801. }
  802. if (sharp_config->flags & MDP_PP_OPS_DISABLE)
  803. pp_sts->sharp_sts &= ~PP_STS_ENABLE;
  804. else if (sharp_config->flags & MDP_PP_OPS_ENABLE)
  805. pp_sts->sharp_sts |= PP_STS_ENABLE;
  806. }
/*
 * pp_vig_pipe_setup() - compute the VIG pipe's post-processing opmode
 * and program its CSC / PA / histogram-LUT blocks.
 * @pipe: VIG pipe being staged
 * @op:   opmode accumulator; computed bits are OR-ed into it
 *
 * Opmode bits used here: 17 = CSC_1_EN, 18 = SRC_DATA is YUV,
 * 19 = DST_DATA is YUV (per the flag-to-bit shifts below);
 * bit 4 enables PA and bit 10 the histogram LUT.
 *
 * In display-tuning mode (DTM_ENTER) the CSC bits of the live register
 * are preserved and the function returns early, skipping PA/enhist
 * programming.  Always returns 0.
 */
static int pp_vig_pipe_setup(struct mdss_mdp_pipe *pipe, u32 *op)
{
	u32 opmode = 0;
	unsigned long flags = 0;
	char __iomem *offset;
	struct mdss_data_type *mdata;
	u32 current_opmode;
	u32 csc_reset;
	u32 dcm_state = DCM_UNINIT;

	pr_debug("pnum=%x\n", pipe->num);

	/* fetch the DCM (display tuning) state if the chain is attached */
	if (pipe->mixer && pipe->mixer->ctl && pipe->mixer->ctl->mfd)
		dcm_state = pipe->mixer->ctl->mfd->dcm_state;

	mdata = mdss_mdp_get_mdata();
	if ((pipe->flags & MDP_OVERLAY_PP_CFG_EN) &&
		(pipe->pp_cfg.config_ops & MDP_OVERLAY_PP_CSC_CFG)) {
		/* user-supplied CSC: translate flags into opmode bits */
		opmode |= !!(pipe->pp_cfg.csc_cfg.flags &
				MDP_CSC_FLAG_ENABLE) << 17;
		opmode |= !!(pipe->pp_cfg.csc_cfg.flags &
				MDP_CSC_FLAG_YUV_IN) << 18;
		opmode |= !!(pipe->pp_cfg.csc_cfg.flags &
				MDP_CSC_FLAG_YUV_OUT) << 19;
		/*
		 * TODO: Allow pipe to be programmed whenever new CSC is
		 * applied (i.e. dirty bit)
		 */
		mdss_mdp_csc_setup_data(MDSS_MDP_BLOCK_SSPP, pipe->num,
				1, &pipe->pp_cfg.csc_cfg);
	} else {
		/* default: YUV sources get the stock YUV->RGB matrix */
		if (pipe->src_fmt->is_yuv) {
			opmode |= (0 << 19) | /* DST_DATA=RGB */
				(1 << 18) | /* SRC_DATA=YCBCR */
				(1 << 17); /* CSC_1_EN */
			/*
			 * TODO: Needs to be part of dirty bit logic: if there
			 * is a previously configured pipe need to re-configure
			 * CSC matrix
			 */
			mdss_mdp_csc_setup(MDSS_MDP_BLOCK_SSPP, pipe->num, 1,
					MDSS_MDP_CSC_YUV2RGB);
		}
	}

	pp_histogram_setup(&opmode, MDSS_PP_SSPP_CFG | pipe->num, pipe->mixer);

	/* Update CSC state only if tuning mode is enable */
	if (dcm_state == DTM_ENTER) {
		/* Reset bit 16 to 19 for CSC_STATE in VIG_OP_MODE */
		csc_reset = 0xFFF0FFFF;
		current_opmode = readl_relaxed(pipe->base +
						MDSS_MDP_REG_VIG_OP_MODE);
		*op |= ((current_opmode & csc_reset) | opmode);
		return 0;
	}

	if (pipe->flags & MDP_OVERLAY_PP_CFG_EN) {
		/* legacy PA only on pre-1.3 hardware */
		if ((pipe->pp_cfg.config_ops & MDP_OVERLAY_PP_PA_CFG) &&
				(mdata->mdp_rev < MDSS_MDP_HW_REV_103)) {
			flags = PP_FLAGS_DIRTY_PA;
			pp_pa_config(flags,
					pipe->base + MDSS_MDP_REG_VIG_PA_BASE,
					&pipe->pp_res.pp_sts,
					&pipe->pp_cfg.pa_cfg);
			if (pipe->pp_res.pp_sts.pa_sts & PP_STS_ENABLE)
				opmode |= MDSS_MDP_VIG_OP_PA_EN;
		}
		/* PA v2 on 1.3+ hardware */
		if ((pipe->pp_cfg.config_ops & MDP_OVERLAY_PP_PA_V2_CFG) &&
				(mdata->mdp_rev >= MDSS_MDP_HW_REV_103)) {
			flags = PP_FLAGS_DIRTY_PA;
			pp_pa_v2_config(flags,
					pipe->base + MDSS_MDP_REG_VIG_PA_BASE,
					&pipe->pp_res.pp_sts,
					&pipe->pp_cfg.pa_v2_cfg,
					PP_SSPP);
			pp_update_pa_v2_vig_opmode(&pipe->pp_res.pp_sts,
					&opmode);
			if (pipe->pp_res.pp_sts.pa_sts & PP_STS_ENABLE)
				opmode |= MDSS_MDP_VIG_OP_PA_EN;
		}
		if (pipe->pp_cfg.config_ops & MDP_OVERLAY_PP_HIST_LUT_CFG) {
			pp_enhist_config(PP_FLAGS_DIRTY_ENHIST,
					pipe->base +
					MDSS_MDP_REG_VIG_HIST_LUT_BASE,
					&pipe->pp_res.pp_sts,
					&pipe->pp_cfg.hist_lut_cfg);
		}
	}

	if (pipe->pp_res.pp_sts.enhist_sts & PP_STS_ENABLE) {
		/* Enable HistLUT and PA */
		opmode |= BIT(10) | BIT(4);
		if (!(pipe->pp_res.pp_sts.pa_sts & PP_STS_ENABLE)) {
			/* Program default value */
			offset = pipe->base + MDSS_MDP_REG_VIG_PA_BASE;
			writel_relaxed(0, offset);
			writel_relaxed(0, offset + 4);
			writel_relaxed(0, offset + 8);
			writel_relaxed(0, offset + 12);
		}
	}

	*op |= opmode;
	return 0;
}
  904. static void pp_update_pa_v2_vig_opmode(struct pp_sts_type *pp_sts,
  905. u32 *opmode)
  906. {
  907. if (pp_sts->pa_sts & PP_STS_PA_HUE_MASK)
  908. *opmode |= MDSS_MDP_VIG_OP_PA_HUE_MASK;
  909. if (pp_sts->pa_sts & PP_STS_PA_SAT_MASK)
  910. *opmode |= MDSS_MDP_VIG_OP_PA_SAT_MASK;
  911. if (pp_sts->pa_sts & PP_STS_PA_VAL_MASK)
  912. *opmode |= MDSS_MDP_VIG_OP_PA_VAL_MASK;
  913. if (pp_sts->pa_sts & PP_STS_PA_CONT_MASK)
  914. *opmode |= MDSS_MDP_VIG_OP_PA_CONT_MASK;
  915. if (pp_sts->pa_sts & PP_STS_PA_MEM_PROTECT_EN)
  916. *opmode |= MDSS_MDP_VIG_OP_PA_MEM_PROTECT_EN;
  917. if (pp_sts->pa_sts & PP_STS_PA_SAT_ZERO_EXP_EN)
  918. *opmode |= MDSS_MDP_VIG_OP_PA_SAT_ZERO_EXP_EN;
  919. if (pp_sts->pa_sts & PP_STS_PA_MEM_COL_SKIN_MASK)
  920. *opmode |= MDSS_MDP_VIG_OP_PA_MEM_COL_SKIN_MASK;
  921. if (pp_sts->pa_sts & PP_STS_PA_MEM_COL_SKY_MASK)
  922. *opmode |= MDSS_MDP_VIG_OP_PA_MEM_COL_SKY_MASK;
  923. if (pp_sts->pa_sts & PP_STS_PA_MEM_COL_FOL_MASK)
  924. *opmode |= MDSS_MDP_VIG_OP_PA_MEM_COL_FOL_MASK;
  925. }
  926. static int mdss_mdp_scale_setup(struct mdss_mdp_pipe *pipe)
  927. {
  928. u32 scale_config = 0;
  929. int init_phasex = 0, init_phasey = 0;
  930. int phasex_step = 0, phasey_step = 0;
  931. u32 chroma_sample;
  932. u32 filter_mode;
  933. struct mdss_data_type *mdata;
  934. u32 src_w, src_h;
  935. u32 dcm_state = DCM_UNINIT;
  936. u32 chroma_shift_x = 0, chroma_shift_y = 0;
  937. pr_debug("pipe=%d, change pxl ext=%d\n", pipe->num,
  938. pipe->scale.enable_pxl_ext);
  939. mdata = mdss_mdp_get_mdata();
  940. if (pipe->mixer && pipe->mixer->ctl && pipe->mixer->ctl->mfd)
  941. dcm_state = pipe->mixer->ctl->mfd->dcm_state;
  942. if (mdata->mdp_rev >= MDSS_MDP_HW_REV_102 && pipe->src_fmt->is_yuv)
  943. filter_mode = MDSS_MDP_SCALE_FILTER_CA;
  944. else
  945. filter_mode = MDSS_MDP_SCALE_FILTER_BIL;
  946. if (pipe->type == MDSS_MDP_PIPE_TYPE_DMA) {
  947. if (pipe->dst.h != pipe->src.h || pipe->dst.w != pipe->src.w) {
  948. pr_err("no scaling supported on dma pipe\n");
  949. return -EINVAL;
  950. } else {
  951. return 0;
  952. }
  953. }
  954. src_w = DECIMATED_DIMENSION(pipe->src.w, pipe->horz_deci);
  955. src_h = DECIMATED_DIMENSION(pipe->src.h, pipe->vert_deci);
  956. chroma_sample = pipe->src_fmt->chroma_sample;
  957. if (pipe->flags & MDP_SOURCE_ROTATED_90) {
  958. if (chroma_sample == MDSS_MDP_CHROMA_H1V2)
  959. chroma_sample = MDSS_MDP_CHROMA_H2V1;
  960. else if (chroma_sample == MDSS_MDP_CHROMA_H2V1)
  961. chroma_sample = MDSS_MDP_CHROMA_H1V2;
  962. }
  963. if (!(pipe->pp_cfg.config_ops & MDP_OVERLAY_PP_SHARP_CFG)) {
  964. pipe->pp_cfg.sharp_cfg.flags = MDP_PP_OPS_ENABLE |
  965. MDP_PP_OPS_WRITE;
  966. pipe->pp_cfg.sharp_cfg.strength = SHARP_STRENGTH_DEFAULT;
  967. pipe->pp_cfg.sharp_cfg.edge_thr = SHARP_EDGE_THR_DEFAULT;
  968. pipe->pp_cfg.sharp_cfg.smooth_thr = SHARP_SMOOTH_THR_DEFAULT;
  969. pipe->pp_cfg.sharp_cfg.noise_thr = SHARP_NOISE_THR_DEFAULT;
  970. }
  971. if (dcm_state != DTM_ENTER &&
  972. ((pipe->src_fmt->is_yuv) &&
  973. !((pipe->dst.w < src_w) || (pipe->dst.h < src_h)))) {
  974. pp_sharp_config(pipe->base +
  975. MDSS_MDP_REG_VIG_QSEED2_SHARP,
  976. &pipe->pp_res.pp_sts,
  977. &pipe->pp_cfg.sharp_cfg);
  978. }
  979. if ((src_h != pipe->dst.h) ||
  980. (pipe->pp_res.pp_sts.sharp_sts & PP_STS_ENABLE) ||
  981. (chroma_sample == MDSS_MDP_CHROMA_420) ||
  982. (chroma_sample == MDSS_MDP_CHROMA_H1V2) ||
  983. (pipe->scale.enable_pxl_ext && (src_h != pipe->dst.h))) {
  984. pr_debug("scale y - src_h=%d dst_h=%d\n", src_h, pipe->dst.h);
  985. if ((src_h / MAX_DOWNSCALE_RATIO) > pipe->dst.h) {
  986. pr_err("too much downscaling height=%d->%d",
  987. src_h, pipe->dst.h);
  988. return -EINVAL;
  989. }
  990. scale_config |= MDSS_MDP_SCALEY_EN;
  991. phasey_step = pipe->scale.phase_step_y[0];
  992. init_phasey = pipe->scale.init_phase_y[0];
  993. if (pipe->type == MDSS_MDP_PIPE_TYPE_VIG) {
  994. if (!pipe->vert_deci &&
  995. ((chroma_sample == MDSS_MDP_CHROMA_420) ||
  996. (chroma_sample == MDSS_MDP_CHROMA_H1V2)))
  997. chroma_shift_y = 1; /* 2x upsample chroma */
  998. if (src_h <= pipe->dst.h) {
  999. scale_config |= /* G/Y, A */
  1000. (filter_mode << 10) |
  1001. (MDSS_MDP_SCALE_FILTER_BIL << 18);
  1002. } else
  1003. scale_config |= /* G/Y, A */
  1004. (MDSS_MDP_SCALE_FILTER_PCMN << 10) |
  1005. (MDSS_MDP_SCALE_FILTER_PCMN << 18);
  1006. if ((src_h >> chroma_shift_y) <= pipe->dst.h)
  1007. scale_config |= /* CrCb */
  1008. (MDSS_MDP_SCALE_FILTER_BIL << 14);
  1009. else
  1010. scale_config |= /* CrCb */
  1011. (MDSS_MDP_SCALE_FILTER_PCMN << 14);
  1012. writel_relaxed(init_phasey, pipe->base +
  1013. MDSS_MDP_REG_VIG_QSEED2_C12_INIT_PHASEY);
  1014. writel_relaxed(phasey_step >> chroma_shift_y,
  1015. pipe->base +
  1016. MDSS_MDP_REG_VIG_QSEED2_C12_PHASESTEPY);
  1017. } else {
  1018. if (src_h <= pipe->dst.h)
  1019. scale_config |= /* RGB, A */
  1020. (MDSS_MDP_SCALE_FILTER_BIL << 10) |
  1021. (MDSS_MDP_SCALE_FILTER_BIL << 18);
  1022. else
  1023. scale_config |= /* RGB, A */
  1024. (MDSS_MDP_SCALE_FILTER_PCMN << 10) |
  1025. (MDSS_MDP_SCALE_FILTER_PCMN << 18);
  1026. }
  1027. }
  1028. if ((src_w != pipe->dst.w) ||
  1029. (pipe->pp_res.pp_sts.sharp_sts & PP_STS_ENABLE) ||
  1030. (chroma_sample == MDSS_MDP_CHROMA_420) ||
  1031. (chroma_sample == MDSS_MDP_CHROMA_H2V1) ||
  1032. (pipe->scale.enable_pxl_ext && (src_w != pipe->dst.w))) {
  1033. pr_debug("scale x - src_w=%d dst_w=%d\n", src_w, pipe->dst.w);
  1034. if ((src_w / MAX_DOWNSCALE_RATIO) > pipe->dst.w) {
  1035. pr_err("too much downscaling width=%d->%d",
  1036. src_w, pipe->dst.w);
  1037. return -EINVAL;
  1038. }
  1039. scale_config |= MDSS_MDP_SCALEX_EN;
  1040. init_phasex = pipe->scale.init_phase_x[0];
  1041. phasex_step = pipe->scale.phase_step_x[0];
  1042. if (pipe->type == MDSS_MDP_PIPE_TYPE_VIG) {
  1043. if (!pipe->horz_deci &&
  1044. ((chroma_sample == MDSS_MDP_CHROMA_420) ||
  1045. (chroma_sample == MDSS_MDP_CHROMA_H2V1)))
  1046. chroma_shift_x = 1; /* 2x upsample chroma */
  1047. if (src_w <= pipe->dst.w)
  1048. scale_config |= /* G/Y, A */
  1049. (filter_mode << 8) |
  1050. (MDSS_MDP_SCALE_FILTER_BIL << 16);
  1051. else
  1052. scale_config |= /* G/Y, A */
  1053. (MDSS_MDP_SCALE_FILTER_PCMN << 8) |
  1054. (MDSS_MDP_SCALE_FILTER_PCMN << 16);
  1055. if ((src_w >> chroma_shift_x) <= pipe->dst.w)
  1056. scale_config |= /* CrCb */
  1057. (MDSS_MDP_SCALE_FILTER_BIL << 12);
  1058. else
  1059. scale_config |= /* CrCb */
  1060. (MDSS_MDP_SCALE_FILTER_PCMN << 12);
  1061. writel_relaxed(init_phasex, pipe->base +
  1062. MDSS_MDP_REG_VIG_QSEED2_C12_INIT_PHASEX);
  1063. writel_relaxed(phasex_step >> chroma_shift_x,
  1064. pipe->base +
  1065. MDSS_MDP_REG_VIG_QSEED2_C12_PHASESTEPX);
  1066. } else {
  1067. if (src_w <= pipe->dst.w)
  1068. scale_config |= /* RGB, A */
  1069. (MDSS_MDP_SCALE_FILTER_BIL << 8) |
  1070. (MDSS_MDP_SCALE_FILTER_BIL << 16);
  1071. else
  1072. scale_config |= /* RGB, A */
  1073. (MDSS_MDP_SCALE_FILTER_PCMN << 8) |
  1074. (MDSS_MDP_SCALE_FILTER_PCMN << 16);
  1075. }
  1076. }
  1077. if (pipe->scale.enable_pxl_ext) {
  1078. if (pipe->type == MDSS_MDP_PIPE_TYPE_VIG) {
  1079. /*program x,y initial phase and phase step*/
  1080. writel_relaxed(pipe->scale.init_phase_x[0],
  1081. pipe->base +
  1082. MDSS_MDP_REG_VIG_QSEED2_C03_INIT_PHASEX);
  1083. writel_relaxed(pipe->scale.phase_step_x[0],
  1084. pipe->base +
  1085. MDSS_MDP_REG_VIG_QSEED2_C03_PHASESTEPX);
  1086. writel_relaxed(pipe->scale.init_phase_x[1],
  1087. pipe->base +
  1088. MDSS_MDP_REG_VIG_QSEED2_C12_INIT_PHASEX);
  1089. writel_relaxed(pipe->scale.phase_step_x[1],
  1090. pipe->base +
  1091. MDSS_MDP_REG_VIG_QSEED2_C12_PHASESTEPX);
  1092. writel_relaxed(pipe->scale.init_phase_y[0],
  1093. pipe->base +
  1094. MDSS_MDP_REG_VIG_QSEED2_C03_INIT_PHASEY);
  1095. writel_relaxed(pipe->scale.phase_step_y[0],
  1096. pipe->base +
  1097. MDSS_MDP_REG_VIG_QSEED2_C03_PHASESTEPY);
  1098. writel_relaxed(pipe->scale.init_phase_y[1],
  1099. pipe->base +
  1100. MDSS_MDP_REG_VIG_QSEED2_C12_INIT_PHASEY);
  1101. writel_relaxed(pipe->scale.phase_step_y[1],
  1102. pipe->base +
  1103. MDSS_MDP_REG_VIG_QSEED2_C12_PHASESTEPY);
  1104. } else {
  1105. writel_relaxed(pipe->scale.phase_step_x[0],
  1106. pipe->base +
  1107. MDSS_MDP_REG_SCALE_PHASE_STEP_X);
  1108. writel_relaxed(pipe->scale.phase_step_y[0],
  1109. pipe->base +
  1110. MDSS_MDP_REG_SCALE_PHASE_STEP_Y);
  1111. writel_relaxed(pipe->scale.init_phase_x[0],
  1112. pipe->base +
  1113. MDSS_MDP_REG_SCALE_INIT_PHASE_X);
  1114. writel_relaxed(pipe->scale.init_phase_y[0],
  1115. pipe->base +
  1116. MDSS_MDP_REG_SCALE_INIT_PHASE_Y);
  1117. }
  1118. /*program pixel extn values for the SSPP*/
  1119. mdss_mdp_pipe_program_pixel_extn(pipe);
  1120. } else {
  1121. if (pipe->type == MDSS_MDP_PIPE_TYPE_VIG) {
  1122. /*program x,y initial phase and phase step*/
  1123. writel_relaxed(0,
  1124. pipe->base +
  1125. MDSS_MDP_REG_VIG_QSEED2_C03_INIT_PHASEX);
  1126. writel_relaxed(init_phasex,
  1127. pipe->base +
  1128. MDSS_MDP_REG_VIG_QSEED2_C12_INIT_PHASEX);
  1129. writel_relaxed(phasex_step,
  1130. pipe->base +
  1131. MDSS_MDP_REG_VIG_QSEED2_C03_PHASESTEPX);
  1132. writel_relaxed(phasex_step >> chroma_shift_x,
  1133. pipe->base +
  1134. MDSS_MDP_REG_VIG_QSEED2_C12_PHASESTEPX);
  1135. writel_relaxed(0,
  1136. pipe->base +
  1137. MDSS_MDP_REG_VIG_QSEED2_C03_INIT_PHASEY);
  1138. writel_relaxed(init_phasey,
  1139. pipe->base +
  1140. MDSS_MDP_REG_VIG_QSEED2_C12_INIT_PHASEY);
  1141. writel_relaxed(phasey_step,
  1142. pipe->base +
  1143. MDSS_MDP_REG_VIG_QSEED2_C03_PHASESTEPY);
  1144. writel_relaxed(phasey_step >> chroma_shift_y,
  1145. pipe->base +
  1146. MDSS_MDP_REG_VIG_QSEED2_C12_PHASESTEPY);
  1147. } else {
  1148. writel_relaxed(phasex_step,
  1149. pipe->base +
  1150. MDSS_MDP_REG_SCALE_PHASE_STEP_X);
  1151. writel_relaxed(phasey_step,
  1152. pipe->base +
  1153. MDSS_MDP_REG_SCALE_PHASE_STEP_Y);
  1154. writel_relaxed(0,
  1155. pipe->base +
  1156. MDSS_MDP_REG_SCALE_INIT_PHASE_X);
  1157. writel_relaxed(0,
  1158. pipe->base +
  1159. MDSS_MDP_REG_SCALE_INIT_PHASE_Y);
  1160. }
  1161. }
  1162. writel_relaxed(scale_config, pipe->base +
  1163. MDSS_MDP_REG_SCALE_CONFIG);
  1164. return 0;
  1165. }
  1166. int mdss_mdp_pipe_pp_setup(struct mdss_mdp_pipe *pipe, u32 *op)
  1167. {
  1168. int ret = 0;
  1169. if (!pipe)
  1170. return -ENODEV;
  1171. ret = mdss_mdp_scale_setup(pipe);
  1172. if (ret)
  1173. return -EINVAL;
  1174. if (pipe->type == MDSS_MDP_PIPE_TYPE_VIG)
  1175. ret = pp_vig_pipe_setup(pipe, op);
  1176. return ret;
  1177. }
  1178. void mdss_mdp_pipe_sspp_term(struct mdss_mdp_pipe *pipe)
  1179. {
  1180. u32 done_bit;
  1181. struct pp_hist_col_info *hist_info;
  1182. char __iomem *ctl_base;
  1183. if (pipe) {
  1184. if (pipe->pp_res.hist.col_en) {
  1185. done_bit = 3 << (pipe->num * 4);
  1186. hist_info = &pipe->pp_res.hist;
  1187. ctl_base = pipe->base +
  1188. MDSS_MDP_REG_VIG_HIST_CTL_BASE;
  1189. pp_histogram_disable(hist_info, done_bit, ctl_base);
  1190. }
  1191. memset(&pipe->pp_cfg, 0, sizeof(struct mdp_overlay_pp_params));
  1192. memset(&pipe->pp_res, 0, sizeof(struct mdss_pipe_pp_res));
  1193. }
  1194. }
  1195. int mdss_mdp_pipe_sspp_setup(struct mdss_mdp_pipe *pipe, u32 *op)
  1196. {
  1197. int ret = 0;
  1198. unsigned long flags = 0;
  1199. char __iomem *pipe_base;
  1200. u32 pipe_num;
  1201. struct mdss_data_type *mdata = mdss_mdp_get_mdata();
  1202. u32 current_opmode;
  1203. u32 dcm_state = DCM_UNINIT;
  1204. if (pipe == NULL)
  1205. return -EINVAL;
  1206. if (pipe->mixer && pipe->mixer->ctl && pipe->mixer->ctl->mfd)
  1207. dcm_state = pipe->mixer->ctl->mfd->dcm_state;
  1208. /* Read IGC state and update the same if tuning mode is enable */
  1209. if (dcm_state == DTM_ENTER) {
  1210. current_opmode = readl_relaxed(pipe->base +
  1211. MDSS_MDP_REG_SSPP_SRC_OP_MODE);
  1212. *op |= (current_opmode & BIT(16));
  1213. return ret;
  1214. }
  1215. /*
  1216. * TODO: should this function be responsible for masking multiple
  1217. * pipes to be written in dual pipe case?
  1218. * if so, requires rework of update_igc_lut
  1219. */
  1220. switch (pipe->type) {
  1221. case MDSS_MDP_PIPE_TYPE_VIG:
  1222. pipe_base = mdata->mdp_base + MDSS_MDP_REG_IGC_VIG_BASE;
  1223. pipe_num = pipe->num - MDSS_MDP_SSPP_VIG0;
  1224. break;
  1225. case MDSS_MDP_PIPE_TYPE_RGB:
  1226. pipe_base = mdata->mdp_base + MDSS_MDP_REG_IGC_RGB_BASE;
  1227. pipe_num = pipe->num - MDSS_MDP_SSPP_RGB0;
  1228. break;
  1229. case MDSS_MDP_PIPE_TYPE_DMA:
  1230. pipe_base = mdata->mdp_base + MDSS_MDP_REG_IGC_DMA_BASE;
  1231. pipe_num = pipe->num - MDSS_MDP_SSPP_DMA0;
  1232. break;
  1233. default:
  1234. return -EINVAL;
  1235. }
  1236. if (pipe->pp_cfg.config_ops & MDP_OVERLAY_PP_IGC_CFG) {
  1237. flags |= PP_FLAGS_DIRTY_IGC;
  1238. pp_igc_config(flags, pipe_base, &pipe->pp_res.pp_sts,
  1239. &pipe->pp_cfg.igc_cfg, pipe_num);
  1240. }
  1241. if (pipe->pp_res.pp_sts.igc_sts & PP_STS_ENABLE)
  1242. *op |= (1 << 16); /* IGC_LUT_EN */
  1243. return ret;
  1244. }
  1245. static int pp_mixer_setup(u32 disp_num,
  1246. struct mdss_mdp_mixer *mixer)
  1247. {
  1248. u32 flags, mixer_num, opmode = 0, lm_bitmask = 0;
  1249. struct mdp_pgc_lut_data *pgc_config;
  1250. struct pp_sts_type *pp_sts;
  1251. struct mdss_mdp_ctl *ctl;
  1252. char __iomem *addr;
  1253. struct mdss_data_type *mdata = mdss_mdp_get_mdata();
  1254. if (!mixer || !mixer->ctl || !mdata)
  1255. return -EINVAL;
  1256. mixer_num = mixer->num;
  1257. ctl = mixer->ctl;
  1258. lm_bitmask = (BIT(6) << mixer_num);
  1259. /* Assign appropriate flags after mixer index validation */
  1260. if (mixer->type == MDSS_MDP_MIXER_TYPE_INTF) {
  1261. if (mixer_num >= mdata->nmixers_intf) {
  1262. pr_err("bad intf mixer index = %d total = %d\n",
  1263. mixer_num, mdata->nmixers_intf);
  1264. return 0;
  1265. }
  1266. if (mixer_num == MDSS_MDP_DSPP3)
  1267. lm_bitmask = BIT(20);
  1268. } else if (mixer->type == MDSS_MDP_MIXER_TYPE_WRITEBACK) {
  1269. if (mixer_num >= mdata->nmixers_wb +
  1270. mdata->nmixers_intf) {
  1271. pr_err("bad wb mixer index = %d total = %d\n",
  1272. mixer_num,
  1273. mdata->nmixers_intf + mdata->nmixers_wb);
  1274. return 0;
  1275. }
  1276. } else {
  1277. return 0;
  1278. }
  1279. flags = mdss_pp_res->pp_disp_flags[disp_num];
  1280. pp_sts = &mdss_pp_res->pp_disp_sts[disp_num];
  1281. /* GC_LUT is in layer mixer */
  1282. if (flags & PP_FLAGS_DIRTY_ARGC) {
  1283. pgc_config = &mdss_pp_res->argc_disp_cfg[disp_num];
  1284. addr = mixer->base + MDSS_MDP_REG_LM_GC_LUT_BASE;
  1285. /*
  1286. * ARGC will always be enabled. When user setting is
  1287. * disabled we program the linear ARGC data to enable
  1288. * rounding in HW.
  1289. */
  1290. pp_sts->argc_sts |= PP_STS_ENABLE;
  1291. if (pgc_config->flags & MDP_PP_OPS_WRITE)
  1292. pp_update_argc_lut(addr, pgc_config);
  1293. if (pgc_config->flags & MDP_PP_OPS_DISABLE) {
  1294. pgc_config->r_data = &lin_gc_data[0];
  1295. pgc_config->g_data = &lin_gc_data[0];
  1296. pgc_config->b_data = &lin_gc_data[0];
  1297. pgc_config->num_r_stages = GC_LUT_SEGMENTS;
  1298. pgc_config->num_g_stages = GC_LUT_SEGMENTS;
  1299. pgc_config->num_b_stages = GC_LUT_SEGMENTS;
  1300. pp_update_argc_lut(addr, pgc_config);
  1301. }
  1302. ctl->flush_bits |= lm_bitmask;
  1303. }
  1304. /* update LM opmode if LM needs flush */
  1305. if (flags & PP_FLAGS_DIRTY_ARGC) {
  1306. addr = mixer->base + MDSS_MDP_REG_LM_OP_MODE;
  1307. opmode = readl_relaxed(addr);
  1308. opmode |= (1 << 0); /* GC_LUT_EN */
  1309. writel_relaxed(opmode, addr);
  1310. }
  1311. return 0;
  1312. }
  1313. static char __iomem *mdss_mdp_get_mixer_addr_off(u32 dspp_num)
  1314. {
  1315. struct mdss_data_type *mdata;
  1316. struct mdss_mdp_mixer *mixer;
  1317. mdata = mdss_mdp_get_mdata();
  1318. if (mdata->nmixers_intf <= dspp_num) {
  1319. pr_err("Invalid dspp_num=%d", dspp_num);
  1320. return ERR_PTR(-EINVAL);
  1321. }
  1322. mixer = mdata->mixer_intf + dspp_num;
  1323. return mixer->base;
  1324. }
  1325. static char __iomem *mdss_mdp_get_dspp_addr_off(u32 dspp_num)
  1326. {
  1327. struct mdss_data_type *mdata;
  1328. struct mdss_mdp_mixer *mixer;
  1329. mdata = mdss_mdp_get_mdata();
  1330. if (mdata->nmixers_intf <= dspp_num) {
  1331. pr_err("Invalid dspp_num=%d", dspp_num);
  1332. return ERR_PTR(-EINVAL);
  1333. }
  1334. mixer = mdata->mixer_intf + dspp_num;
  1335. return mixer->dspp_base;
  1336. }
/* Assumes that function will be called from within clock enabled space*/
/*
 * pp_histogram_setup() - arm histogram collection for either a DSPP
 * (@mix, MDSS_PP_DSPP_CFG) or an SSPP VIG pipe (MDSS_PP_SSPP_CFG).
 * When collection is enabled, sets the HIST_EN/AUTO_CLEAR bits in *@op
 * and, if the collector is idle, kicks off a new collection cycle.
 *
 * Returns 0 on success, -ENODEV if the pipe does not exist, -EINVAL for
 * an invalid location encoded in @block.
 */
static int pp_histogram_setup(u32 *op, u32 block, struct mdss_mdp_mixer *mix)
{
	int ret = -EINVAL;
	char __iomem *base;
	u32 op_flags, kick_base, col_state;
	struct mdss_data_type *mdata;
	struct mdss_mdp_pipe *pipe;
	struct pp_hist_col_info *hist_info;
	unsigned long flag;

	if (mix && (PP_LOCAT(block) == MDSS_PP_DSPP_CFG)) {
		/* HIST_EN & AUTO_CLEAR */
		op_flags = BIT(16) | BIT(17);
		hist_info = &mdss_pp_res->dspp_hist[mix->num];
		/* NOTE(review): may return ERR_PTR on a bad index and is
		 * used below without an IS_ERR check -- confirm callers
		 * always pass a valid DSPP number. */
		base = mdss_mdp_get_dspp_addr_off(PP_BLOCK(block));
		kick_base = MDSS_MDP_REG_DSPP_HIST_CTL_BASE;
	} else if (PP_LOCAT(block) == MDSS_PP_SSPP_CFG) {
		mdata = mdss_mdp_get_mdata();
		pipe = mdss_mdp_pipe_get(mdata, BIT(PP_BLOCK(block)));
		if (IS_ERR_OR_NULL(pipe)) {
			pr_debug("pipe DNE (%d)", (u32) BIT(PP_BLOCK(block)));
			ret = -ENODEV;
			goto error;
		}
		/* HIST_EN & AUTO_CLEAR (SSPP bit positions) */
		op_flags = BIT(8) + BIT(9);
		hist_info = &pipe->pp_res.hist;
		base = pipe->base;
		kick_base = MDSS_MDP_REG_VIG_HIST_CTL_BASE;
		/* NOTE(review): hist_info/base are used after the pipe is
		 * unmapped here -- presumably unmap only drops a refcount
		 * while the pipe stays valid; verify. */
		mdss_mdp_pipe_unmap(pipe);
	} else {
		pr_warn("invalid histogram location (%d)", block);
		goto error;
	}

	/* hist_mutex orders against hist enable/disable; hist_lock
	 * protects col_state against the collection ISR. */
	mutex_lock(&hist_info->hist_mutex);
	spin_lock_irqsave(&hist_info->hist_lock, flag);
	if (hist_info->col_en) {
		*op |= op_flags;
		col_state = hist_info->col_state;
		if (col_state == HIST_IDLE) {
			/* Kick off collection */
			writel_relaxed(1, base + kick_base);
			hist_info->col_state = HIST_START;
			complete(&hist_info->first_kick);
		}
	}
	spin_unlock_irqrestore(&hist_info->hist_lock, flag);
	mutex_unlock(&hist_info->hist_mutex);
	ret = 0;
error:
	return ret;
}
  1389. static void pp_dither_config(char __iomem *addr,
  1390. struct pp_sts_type *pp_sts,
  1391. struct mdp_dither_cfg_data *dither_cfg)
  1392. {
  1393. u32 data;
  1394. int i;
  1395. if (dither_cfg->flags & MDP_PP_OPS_WRITE) {
  1396. data = dither_depth_map[dither_cfg->g_y_depth];
  1397. data |= dither_depth_map[dither_cfg->b_cb_depth] << 2;
  1398. data |= dither_depth_map[dither_cfg->r_cr_depth] << 4;
  1399. writel_relaxed(data, addr);
  1400. addr += 0x14;
  1401. for (i = 0; i < 16; i += 4) {
  1402. data = dither_matrix[i] |
  1403. (dither_matrix[i + 1] << 4) |
  1404. (dither_matrix[i + 2] << 8) |
  1405. (dither_matrix[i + 3] << 12);
  1406. writel_relaxed(data, addr);
  1407. addr += 4;
  1408. }
  1409. }
  1410. if (dither_cfg->flags & MDP_PP_OPS_DISABLE)
  1411. pp_sts->dither_sts &= ~PP_STS_ENABLE;
  1412. else if (dither_cfg->flags & MDP_PP_OPS_ENABLE)
  1413. pp_sts->dither_sts |= PP_STS_ENABLE;
  1414. pp_sts_set_split_bits(&pp_sts->dither_sts, dither_cfg->flags);
  1415. }
  1416. static void pp_dspp_opmode_config(struct mdss_mdp_ctl *ctl, u32 num,
  1417. struct pp_sts_type *pp_sts, int mdp_rev,
  1418. u32 *opmode)
  1419. {
  1420. int side;
  1421. side = pp_num_to_side(ctl, num);
  1422. if (side < 0)
  1423. return;
  1424. if (pp_sts_is_enabled(pp_sts->pa_sts, side))
  1425. *opmode |= MDSS_MDP_DSPP_OP_PA_EN; /* PA_EN */
  1426. if (mdp_rev >= MDSS_MDP_HW_REV_103) {
  1427. if (pp_sts->pa_sts & PP_STS_PA_HUE_MASK)
  1428. *opmode |= MDSS_MDP_DSPP_OP_PA_HUE_MASK;
  1429. if (pp_sts->pa_sts & PP_STS_PA_SAT_MASK)
  1430. *opmode |= MDSS_MDP_DSPP_OP_PA_SAT_MASK;
  1431. if (pp_sts->pa_sts & PP_STS_PA_VAL_MASK)
  1432. *opmode |= MDSS_MDP_DSPP_OP_PA_VAL_MASK;
  1433. if (pp_sts->pa_sts & PP_STS_PA_CONT_MASK)
  1434. *opmode |= MDSS_MDP_DSPP_OP_PA_CONT_MASK;
  1435. if (pp_sts->pa_sts & PP_STS_PA_MEM_PROTECT_EN)
  1436. *opmode |= MDSS_MDP_DSPP_OP_PA_MEM_PROTECT_EN;
  1437. if (pp_sts->pa_sts & PP_STS_PA_SAT_ZERO_EXP_EN)
  1438. *opmode |= MDSS_MDP_DSPP_OP_PA_SAT_ZERO_EXP_EN;
  1439. if (pp_sts->pa_sts & PP_STS_PA_MEM_COL_SKIN_MASK)
  1440. *opmode |= MDSS_MDP_DSPP_OP_PA_MEM_COL_SKIN_MASK;
  1441. if (pp_sts->pa_sts & PP_STS_PA_MEM_COL_FOL_MASK)
  1442. *opmode |= MDSS_MDP_DSPP_OP_PA_MEM_COL_FOL_MASK;
  1443. if (pp_sts->pa_sts & PP_STS_PA_MEM_COL_SKY_MASK)
  1444. *opmode |= MDSS_MDP_DSPP_OP_PA_MEM_COL_SKY_MASK;
  1445. if (pp_sts->pa_sts & PP_STS_PA_SIX_ZONE_HUE_MASK)
  1446. *opmode |= MDSS_MDP_DSPP_OP_PA_SIX_ZONE_HUE_MASK;
  1447. if (pp_sts->pa_sts & PP_STS_PA_SIX_ZONE_SAT_MASK)
  1448. *opmode |= MDSS_MDP_DSPP_OP_PA_SIX_ZONE_SAT_MASK;
  1449. if (pp_sts->pa_sts & PP_STS_PA_SIX_ZONE_VAL_MASK)
  1450. *opmode |= MDSS_MDP_DSPP_OP_PA_SIX_ZONE_VAL_MASK;
  1451. }
  1452. if (pp_sts_is_enabled(pp_sts->pcc_sts, side))
  1453. *opmode |= MDSS_MDP_DSPP_OP_PCC_EN; /* PCC_EN */
  1454. if (pp_sts_is_enabled(pp_sts->igc_sts, side)) {
  1455. *opmode |= MDSS_MDP_DSPP_OP_IGC_LUT_EN | /* IGC_LUT_EN */
  1456. (pp_sts->igc_tbl_idx << 1);
  1457. }
  1458. if (pp_sts->enhist_sts & PP_STS_ENABLE) {
  1459. *opmode |= MDSS_MDP_DSPP_OP_HIST_LUTV_EN | /* HIST_LUT_EN */
  1460. MDSS_MDP_DSPP_OP_PA_EN; /* PA_EN */
  1461. }
  1462. if (pp_sts_is_enabled(pp_sts->dither_sts, side))
  1463. *opmode |= MDSS_MDP_DSPP_OP_DST_DITHER_EN; /* DITHER_EN */
  1464. if (pp_sts_is_enabled(pp_sts->gamut_sts, side)) {
  1465. *opmode |= MDSS_MDP_DSPP_OP_GAMUT_EN; /* GAMUT_EN */
  1466. if (pp_sts->gamut_sts & PP_STS_GAMUT_FIRST)
  1467. *opmode |= MDSS_MDP_DSPP_OP_GAMUT_PCC_ORDER;
  1468. }
  1469. if (pp_sts_is_enabled(pp_sts->pgc_sts, side))
  1470. *opmode |= MDSS_MDP_DSPP_OP_ARGC_LUT_EN;
  1471. }
/*
 * pp_dspp_setup() - program every DSPP post-processing feature for the
 * DSPP attached to @mixer (histogram, PA/PA-v2, PCC, IGC, hist LUT,
 * dither, gamut, panel GC) plus assertive display, then write the final
 * DSPP opmode and set the DSPP flush bit on the CTL.
 *
 * Must be called with the PP mutex held (see mdss_mdp_pp_setup_locked);
 * handles its own MDP clock vote. Returns 0 on success or a negative
 * errno from validation/histogram setup.
 */
static int pp_dspp_setup(u32 disp_num, struct mdss_mdp_mixer *mixer)
{
	u32 ad_flags, flags, dspp_num, opmode = 0, ad_bypass;
	struct mdp_pgc_lut_data *pgc_config;
	struct pp_sts_type *pp_sts;
	char __iomem *base, *addr;
	int ret = 0;
	struct mdss_data_type *mdata;
	struct mdss_ad_info *ad = NULL;
	struct mdss_mdp_ad *ad_hw = NULL;
	struct mdss_mdp_ctl *ctl;
	u32 mixer_cnt;
	u32 mixer_id[MDSS_MDP_INTF_MAX_LAYERMIXER];

	if (!mixer || !mixer->ctl || !mixer->ctl->mdata)
		return -EINVAL;
	ctl = mixer->ctl;
	mdata = ctl->mdata;
	dspp_num = mixer->num;
	/* no corresponding dspp */
	if ((mixer->type != MDSS_MDP_MIXER_TYPE_INTF) ||
		(dspp_num >= MDSS_MDP_MAX_DSPP))
		return -EINVAL;
	/* NOTE(review): return value not checked for IS_ERR -- the range
	 * check above presumably guarantees a valid index; verify. */
	base = mdss_mdp_get_dspp_addr_off(dspp_num);

	/* Register access below requires clocks on */
	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);

	ret = pp_histogram_setup(&opmode, MDSS_PP_DSPP_CFG | dspp_num, mixer);
	if (ret)
		goto dspp_exit;

	if (disp_num < MDSS_BLOCK_DISP_NUM)
		flags = mdss_pp_res->pp_disp_flags[disp_num];
	else
		flags = 0;

	mixer_cnt = mdss_mdp_get_ctl_mixers(disp_num, mixer_id);
	/* Pick up AD state only when this DSPP/display has AD hardware
	 * and the mixer count fits the concurrent-AD limit. */
	if (dspp_num < mdata->nad_cfgs && disp_num < mdata->nad_cfgs &&
				(mixer_cnt <= mdata->nmax_concurrent_ad_hw)) {
		ad = &mdata->ad_cfgs[disp_num];
		ad_flags = ad->reg_sts;
		ad_hw = &mdata->ad_off[dspp_num];
	} else {
		ad_flags = 0;
	}

	/* call calibration specific processing here */
	if (ctl->mfd->calib_mode)
		goto flush_exit;

	/* nothing to update */
	if ((!flags) && (!(opmode)) && (!ad_flags))
		goto dspp_exit;

	pp_sts = &mdss_pp_res->pp_disp_sts[disp_num];

	if (!flags) {
		pr_debug("skip configuring dspp features\n");
		goto opmode_config;
	}

	if (disp_num < MDSS_BLOCK_DISP_NUM) {
		/* PA v2 from HW rev 103, legacy PA before that */
		if (mdata->mdp_rev >= MDSS_MDP_HW_REV_103) {
			pp_pa_v2_config(flags, base + MDSS_MDP_REG_DSPP_PA_BASE, pp_sts,
					&mdss_pp_res->pa_v2_disp_cfg[disp_num],
					PP_DSPP);
		} else
			pp_pa_config(flags, base + MDSS_MDP_REG_DSPP_PA_BASE, pp_sts,
					&mdss_pp_res->pa_disp_cfg[disp_num]);

		pp_pcc_config(flags, base + MDSS_MDP_REG_DSPP_PCC_BASE, pp_sts,
					&mdss_pp_res->pcc_disp_cfg[disp_num]);

		pp_igc_config(flags, mdata->mdp_base + MDSS_MDP_REG_IGC_DSPP_BASE,
				pp_sts, &mdss_pp_res->igc_disp_cfg[disp_num],
				dspp_num);

		pp_enhist_config(flags, base + MDSS_MDP_REG_DSPP_HIST_LUT_BASE,
				pp_sts, &mdss_pp_res->enhist_disp_cfg[disp_num]);
	}

	/* Hist LUT forces PA on; give PA neutral defaults if unconfigured */
	if (pp_sts->enhist_sts & PP_STS_ENABLE &&
			!(pp_sts->pa_sts & PP_STS_ENABLE)) {
		/* Program default value */
		addr = base + MDSS_MDP_REG_DSPP_PA_BASE;
		writel_relaxed(0, addr);
		writel_relaxed(0, addr + 4);
		writel_relaxed(0, addr + 8);
		writel_relaxed(0, addr + 12);
	}

	if (disp_num < MDSS_BLOCK_DISP_NUM) {
		if (flags & PP_FLAGS_DIRTY_DITHER) {
			addr = base + MDSS_MDP_REG_DSPP_DITHER_DEPTH;
			pp_dither_config(addr, pp_sts,
				&mdss_pp_res->dither_disp_cfg[disp_num]);
		}
		if (flags & PP_FLAGS_DIRTY_GAMUT)
			pp_gamut_config(&mdss_pp_res->gamut_disp_cfg[disp_num], base,
					pp_sts);
		if (flags & PP_FLAGS_DIRTY_PGC) {
			pgc_config = &mdss_pp_res->pgc_disp_cfg[disp_num];
			if (pgc_config->flags & MDP_PP_OPS_WRITE) {
				addr = base + MDSS_MDP_REG_DSPP_GC_BASE;
				pp_update_argc_lut(addr, pgc_config);
			}
			if (pgc_config->flags & MDP_PP_OPS_DISABLE)
				pp_sts->pgc_sts &= ~PP_STS_ENABLE;
			else if (pgc_config->flags & MDP_PP_OPS_ENABLE)
				pp_sts->pgc_sts |= PP_STS_ENABLE;
			pp_sts_set_split_bits(&pp_sts->pgc_sts, pgc_config->flags);
		}
	}
opmode_config:
	pp_dspp_opmode_config(ctl, dspp_num, pp_sts, mdata->mdp_rev, &opmode);
flush_exit:
	/* Flush any dirty AD registers under the AD lock */
	if (ad_hw) {
		mutex_lock(&ad->lock);
		ad_flags = ad->reg_sts;
		if (ad_flags & PP_AD_STS_DIRTY_DATA)
			pp_ad_input_write(ad_hw, ad);
		if (ad_flags & PP_AD_STS_DIRTY_INIT)
			pp_ad_init_write(ad_hw, ad, ctl);
		if (ad_flags & PP_AD_STS_DIRTY_CFG)
			pp_ad_cfg_write(ad_hw, ad);
		pp_ad_bypass_config(ad, ctl, ad_hw->num, &ad_bypass);
		writel_relaxed(ad_bypass, ad_hw->base);
		mutex_unlock(&ad->lock);
	}
	writel_relaxed(opmode, base + MDSS_MDP_REG_DSPP_OP_MODE);
	ctl->flush_bits |= BIT(13 + dspp_num);
	/* ensure register writes land before the flush kicks in */
	wmb();
dspp_exit:
	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
	return ret;
}
  1593. int mdss_mdp_pp_setup(struct mdss_mdp_ctl *ctl)
  1594. {
  1595. int ret = 0;
  1596. if ((!ctl->mfd) || (!mdss_pp_res))
  1597. return -EINVAL;
  1598. /* TODO: have some sort of reader/writer lock to prevent unclocked
  1599. * access while display power is toggled */
  1600. mutex_lock(&ctl->lock);
  1601. if (!ctl->power_on) {
  1602. ret = -EPERM;
  1603. goto error;
  1604. }
  1605. ret = mdss_mdp_pp_setup_locked(ctl);
  1606. error:
  1607. mutex_unlock(&ctl->lock);
  1608. return ret;
  1609. }
/*
 * mdss_mdp_pp_setup_locked() - run all post-processing configuration for
 * the display driven by @ctl: optional assertive-display setup, then
 * mixer (ARGC) and DSPP programming for both mixers, finally clearing
 * the display's dirty flags.
 *
 * Caller must hold ctl->lock (see mdss_mdp_pp_setup).
 * Returns 0 on success or a negative errno.
 */
int mdss_mdp_pp_setup_locked(struct mdss_mdp_ctl *ctl)
{
	struct mdss_data_type *mdata = ctl->mdata;
	int ret = 0;
	u32 mixer_cnt;
	u32 mixer_id[MDSS_MDP_INTF_MAX_LAYERMIXER];
	u32 disp_num;
	int i;
	bool valid_mixers = true;
	bool valid_ad_panel = true;

	if ((!ctl->mfd) || (!mdss_pp_res))
		return -EINVAL;

	/* treat fb_num the same as block logical id*/
	disp_num = ctl->mfd->index;
	if (disp_num >= MDSS_MAX_MIXER_DISP_NUM) {
		pr_warn("Invalid display number found, %u", disp_num);
		return -EINVAL;
	}

	mixer_cnt = mdss_mdp_get_ctl_mixers(disp_num, mixer_id);
	if (!mixer_cnt) {
		valid_mixers = false;
		/* exit if mixer is not writeback */
		if (!ctl->mixer_left ||
			(ctl->mixer_left->type == MDSS_MDP_MIXER_TYPE_INTF)) {
			ret = -EINVAL;
			pr_warn("No mixers for post processing err = %d\n",
				ret);
			goto exit;
		}
	}

	/* AD can only run when every mixer has a matching AD block */
	if (mdata->nad_cfgs == 0)
		valid_mixers = false;
	for (i = 0; i < mixer_cnt && valid_mixers; i++) {
		if (mixer_id[i] >= mdata->nad_cfgs)
			valid_mixers = false;
	}

	/* AD is never run on DTV panels, and on writeback panels only
	 * before HW rev 103. */
	valid_ad_panel = (ctl->mfd->panel_info->type != DTV_PANEL) &&
		(((mdata->mdp_rev < MDSS_MDP_HW_REV_103) &&
			(ctl->mfd->panel_info->type == WRITEBACK_PANEL)) ||
		(ctl->mfd->panel_info->type != WRITEBACK_PANEL));

	if (valid_mixers && (mixer_cnt <= mdata->nmax_concurrent_ad_hw) &&
		valid_ad_panel) {
		ret = mdss_mdp_ad_setup(ctl->mfd);
		if (ret < 0)
			pr_warn("ad_setup(disp%d) returns %d", disp_num, ret);
	}

	/* Mixer/DSPP programming and flag clearing run under the PP mutex */
	mutex_lock(&mdss_pp_mutex);
	if (ctl->mixer_left) {
		pp_mixer_setup(disp_num, ctl->mixer_left);
		pp_dspp_setup(disp_num, ctl->mixer_left);
	}
	if (ctl->mixer_right) {
		pp_mixer_setup(disp_num, ctl->mixer_right);
		pp_dspp_setup(disp_num, ctl->mixer_right);
	}
	/* clear dirty flag */
	if (disp_num < MDSS_MAX_MIXER_DISP_NUM) {
		mdss_pp_res->pp_disp_flags[disp_num] = 0;
		if (disp_num < mdata->nad_cfgs)
			mdata->ad_cfgs[disp_num].reg_sts = 0;
	}
	mutex_unlock(&mdss_pp_mutex);
exit:
	return ret;
}
/*
 * Set dirty and write bits on features that were enabled so they will be
 * reconfigured
 */
/*
 * mdss_mdp_pp_resume() - after a power cycle, re-arm every enabled PP
 * feature on the display (dirty flag + WRITE op unless user-disabled)
 * and restore assertive-display HW state for @dspp_num.
 *
 * Returns 0 on success or a negative errno.
 */
int mdss_mdp_pp_resume(struct mdss_mdp_ctl *ctl, u32 dspp_num)
{
	u32 flags = 0, disp_num, bl, ret = 0;
	struct pp_sts_type pp_sts;
	struct mdss_ad_info *ad;
	struct mdss_data_type *mdata = ctl->mdata;
	struct msm_fb_data_type *bl_mfd;

	if (dspp_num >= MDSS_MDP_MAX_DSPP) {
		pr_warn("invalid dspp_num");
		return -EINVAL;
	}
	/* NOTE(review): disp_num indexes per-display arrays below without
	 * a range check (other paths check MDSS_MAX_MIXER_DISP_NUM) --
	 * confirm callers guarantee a valid fb index. */
	disp_num = ctl->mfd->index;
	pp_sts = mdss_pp_res->pp_disp_sts[disp_num];

	/* One block per feature: if it was enabled, mark dirty and re-arm
	 * WRITE so the next pp setup pass reprograms the hardware. */
	if (pp_sts.pa_sts & PP_STS_ENABLE) {
		flags |= PP_FLAGS_DIRTY_PA;
		/* PA v2 from HW rev 103, legacy PA before that */
		if (mdata->mdp_rev >= MDSS_MDP_HW_REV_103) {
			if (!(mdss_pp_res->pa_v2_disp_cfg[disp_num].flags
						& MDP_PP_OPS_DISABLE))
				mdss_pp_res->pa_v2_disp_cfg[disp_num].flags |=
					MDP_PP_OPS_WRITE;
		} else {
			if (!(mdss_pp_res->pa_disp_cfg[disp_num].flags
						& MDP_PP_OPS_DISABLE))
				mdss_pp_res->pa_disp_cfg[disp_num].flags |=
					MDP_PP_OPS_WRITE;
		}
	}
	if (pp_sts.pcc_sts & PP_STS_ENABLE) {
		flags |= PP_FLAGS_DIRTY_PCC;
		if (!(mdss_pp_res->pcc_disp_cfg[disp_num].ops
					& MDP_PP_OPS_DISABLE))
			mdss_pp_res->pcc_disp_cfg[disp_num].ops |=
				MDP_PP_OPS_WRITE;
	}
	if (pp_sts.igc_sts & PP_STS_ENABLE) {
		flags |= PP_FLAGS_DIRTY_IGC;
		if (!(mdss_pp_res->igc_disp_cfg[disp_num].ops
					& MDP_PP_OPS_DISABLE))
			mdss_pp_res->igc_disp_cfg[disp_num].ops |=
				MDP_PP_OPS_WRITE;
	}
	if (pp_sts.argc_sts & PP_STS_ENABLE) {
		flags |= PP_FLAGS_DIRTY_ARGC;
		if (!(mdss_pp_res->argc_disp_cfg[disp_num].flags
					& MDP_PP_OPS_DISABLE))
			mdss_pp_res->argc_disp_cfg[disp_num].flags |=
				MDP_PP_OPS_WRITE;
	}
	if (pp_sts.enhist_sts & PP_STS_ENABLE) {
		flags |= PP_FLAGS_DIRTY_ENHIST;
		if (!(mdss_pp_res->enhist_disp_cfg[disp_num].ops
					& MDP_PP_OPS_DISABLE))
			mdss_pp_res->enhist_disp_cfg[disp_num].ops |=
				MDP_PP_OPS_WRITE;
	}
	if (pp_sts.dither_sts & PP_STS_ENABLE) {
		flags |= PP_FLAGS_DIRTY_DITHER;
		if (!(mdss_pp_res->dither_disp_cfg[disp_num].flags
					& MDP_PP_OPS_DISABLE))
			mdss_pp_res->dither_disp_cfg[disp_num].flags |=
				MDP_PP_OPS_WRITE;
	}
	if (pp_sts.gamut_sts & PP_STS_ENABLE) {
		flags |= PP_FLAGS_DIRTY_GAMUT;
		if (!(mdss_pp_res->gamut_disp_cfg[disp_num].flags
					& MDP_PP_OPS_DISABLE))
			mdss_pp_res->gamut_disp_cfg[disp_num].flags |=
				MDP_PP_OPS_WRITE;
	}
	if (pp_sts.pgc_sts & PP_STS_ENABLE) {
		flags |= PP_FLAGS_DIRTY_PGC;
		if (!(mdss_pp_res->pgc_disp_cfg[disp_num].flags
					& MDP_PP_OPS_DISABLE))
			mdss_pp_res->pgc_disp_cfg[disp_num].flags |=
				MDP_PP_OPS_WRITE;
	}

	mdss_pp_res->pp_disp_flags[disp_num] |= flags;

	/* Restore assertive display HW state if this DSPP has an AD block */
	if (dspp_num < mdata->nad_cfgs) {
		ret = mdss_mdp_get_ad(ctl->mfd, &ad);
		if (ret) {
			pr_debug("Failed to get AD info, err = %d\n", ret);
			return ret;
		}
		/* For writeback, backlight state lives on the primary fb */
		if (ctl->mfd->panel_info->type == WRITEBACK_PANEL) {
			bl_mfd = mdss_get_mfd_from_index(0);
			if (!bl_mfd) {
				ret = -EINVAL;
				pr_warn("Failed to get primary FB bl handle, err = %d\n",
					ret);
				return ret;
			}
		} else {
			bl_mfd = ctl->mfd;
		}
		mutex_lock(&ad->lock);
		bl = bl_mfd->ad_bl_level;
		if (PP_AD_STATE_CFG & ad->state)
			pp_ad_cfg_write(&mdata->ad_off[dspp_num], ad);
		if (PP_AD_STATE_INIT & ad->state)
			pp_ad_init_write(&mdata->ad_off[dspp_num], ad, ctl);
		if ((PP_AD_STATE_DATA & ad->state) &&
			(ad->sts & PP_STS_ENABLE)) {
			ad->last_bl = bl;
			/* Rescale backlight into the AD input range */
			linear_map(bl, &ad->bl_data,
				bl_mfd->panel_info->bl_max,
				MDSS_MDP_AD_BL_SCALE);
			pp_ad_input_write(&mdata->ad_off[dspp_num], ad);
		}
		if ((PP_AD_STATE_VSYNC & ad->state) && ad->calc_itr)
			ctl->add_vsync_handler(ctl, &ad->handle);
		mutex_unlock(&ad->lock);
	}
	return 0;
}
  1793. int mdss_mdp_pp_init(struct device *dev)
  1794. {
  1795. int i, ret = 0;
  1796. struct mdss_data_type *mdata = mdss_mdp_get_mdata();
  1797. struct mdss_mdp_pipe *vig;
  1798. struct msm_bus_scale_pdata *pp_bus_pdata;
  1799. struct mdp_pgc_lut_data *gc_cfg;
  1800. if (!mdata)
  1801. return -EPERM;
  1802. mutex_lock(&mdss_pp_mutex);
  1803. if (!mdss_pp_res) {
  1804. mdss_pp_res = devm_kzalloc(dev, sizeof(*mdss_pp_res),
  1805. GFP_KERNEL);
  1806. if (mdss_pp_res == NULL) {
  1807. pr_err("%s mdss_pp_res allocation failed!", __func__);
  1808. ret = -ENOMEM;
  1809. } else {
  1810. for (i = 0; i < MDSS_MDP_MAX_DSPP; i++) {
  1811. mutex_init(
  1812. &mdss_pp_res->dspp_hist[i].hist_mutex);
  1813. spin_lock_init(
  1814. &mdss_pp_res->dspp_hist[i].hist_lock);
  1815. init_completion(
  1816. &mdss_pp_res->dspp_hist[i].comp);
  1817. init_completion(
  1818. &mdss_pp_res->dspp_hist[i].first_kick);
  1819. }
  1820. /*
  1821. * Set LM ARGC flags to disable. This would program
  1822. * default GC which would allow for rounding in HW.
  1823. */
  1824. for (i = 0; i < MDSS_MAX_MIXER_DISP_NUM; i++) {
  1825. gc_cfg = &mdss_pp_res->argc_disp_cfg[i];
  1826. gc_cfg->flags = MDP_PP_OPS_DISABLE;
  1827. mdss_pp_res->pp_disp_flags[i] |=
  1828. PP_FLAGS_DIRTY_ARGC;
  1829. }
  1830. }
  1831. }
  1832. if (mdata && mdata->vig_pipes) {
  1833. vig = mdata->vig_pipes;
  1834. for (i = 0; i < mdata->nvig_pipes; i++) {
  1835. mutex_init(&vig[i].pp_res.hist.hist_mutex);
  1836. spin_lock_init(&vig[i].pp_res.hist.hist_lock);
  1837. init_completion(&vig[i].pp_res.hist.comp);
  1838. init_completion(&vig[i].pp_res.hist.first_kick);
  1839. }
  1840. if (!mdata->pp_bus_hdl) {
  1841. pp_bus_pdata = &mdp_pp_bus_scale_table;
  1842. for (i = 0; i < pp_bus_pdata->num_usecases; i++) {
  1843. mdp_pp_bus_usecases[i].num_paths = 1;
  1844. mdp_pp_bus_usecases[i].vectors =
  1845. &mdp_pp_bus_vectors[i];
  1846. }
  1847. mdata->pp_bus_hdl =
  1848. msm_bus_scale_register_client(pp_bus_pdata);
  1849. if (!mdata->pp_bus_hdl) {
  1850. pr_err("not able to register pp_bus_scale\n");
  1851. ret = -ENOMEM;
  1852. }
  1853. pr_debug("register pp_bus_hdl=%x\n", mdata->pp_bus_hdl);
  1854. }
  1855. }
  1856. mutex_unlock(&mdss_pp_mutex);
  1857. return ret;
  1858. }
  1859. void mdss_mdp_pp_term(struct device *dev)
  1860. {
  1861. struct mdss_data_type *mdata = mdss_mdp_get_mdata();
  1862. if (mdata->pp_bus_hdl) {
  1863. msm_bus_scale_unregister_client(mdata->pp_bus_hdl);
  1864. mdata->pp_bus_hdl = 0;
  1865. }
  1866. if (!mdss_pp_res) {
  1867. mutex_lock(&mdss_pp_mutex);
  1868. devm_kfree(dev, mdss_pp_res);
  1869. mdss_pp_res = NULL;
  1870. mutex_unlock(&mdss_pp_mutex);
  1871. }
  1872. }
  1873. int mdss_mdp_pp_overlay_init(struct msm_fb_data_type *mfd)
  1874. {
  1875. struct mdss_data_type *mdata = mdss_mdp_get_mdata();
  1876. if ((!mfd) || (!mdata)) {
  1877. pr_err("Invalid mfd or mdata.\n");
  1878. return -EPERM;
  1879. }
  1880. if (mdata->nad_cfgs) {
  1881. mfd->mdp.ad_calc_bl = pp_ad_calc_bl;
  1882. mfd->mdp.ad_shutdown_cleanup = pp_ad_shutdown_cleanup;
  1883. }
  1884. return 0;
  1885. }
  1886. static int pp_ad_calc_bl(struct msm_fb_data_type *mfd, int bl_in, int *bl_out,
  1887. bool *bl_out_notify)
  1888. {
  1889. int ret = -1;
  1890. int temp = bl_in;
  1891. u32 ad_bl_out = 0;
  1892. struct mdss_ad_info *ad;
  1893. ret = mdss_mdp_get_ad(mfd, &ad);
  1894. if (ret == -ENODEV) {
  1895. pr_debug("AD not supported on device.\n");
  1896. return ret;
  1897. } else if (ret || !ad) {
  1898. pr_err("Failed to get ad info: ret = %d, ad = 0x%pK.\n",
  1899. ret, ad);
  1900. return ret;
  1901. }
  1902. mutex_lock(&ad->lock);
  1903. if (!mfd->ad_bl_level)
  1904. mfd->ad_bl_level = bl_in;
  1905. if (!(ad->state & PP_AD_STATE_RUN)) {
  1906. pr_debug("AD is not running.\n");
  1907. mutex_unlock(&ad->lock);
  1908. return -EPERM;
  1909. }
  1910. if (!ad->bl_mfd || !ad->bl_mfd->panel_info ||
  1911. !ad->bl_att_lut) {
  1912. pr_err("Invalid ad info: bl_mfd = 0x%pK, ad->bl_mfd->panel_info = 0x%pK, bl_att_lut = 0x%pK\n",
  1913. ad->bl_mfd,
  1914. (!ad->bl_mfd) ? NULL : ad->bl_mfd->panel_info,
  1915. ad->bl_att_lut);
  1916. mutex_unlock(&ad->lock);
  1917. return -EINVAL;
  1918. }
  1919. ret = pp_ad_linearize_bl(ad, bl_in, &temp,
  1920. MDP_PP_AD_BL_LINEAR);
  1921. if (ret) {
  1922. pr_err("Failed to linearize BL: %d\n", ret);
  1923. mutex_unlock(&ad->lock);
  1924. return ret;
  1925. }
  1926. ret = pp_ad_attenuate_bl(ad, temp, &temp);
  1927. if (ret) {
  1928. pr_err("Failed to attenuate BL: %d\n", ret);
  1929. mutex_unlock(&ad->lock);
  1930. return ret;
  1931. }
  1932. ad_bl_out = temp;
  1933. ret = pp_ad_linearize_bl(ad, temp, &temp, MDP_PP_AD_BL_LINEAR_INV);
  1934. if (ret) {
  1935. pr_err("Failed to inverse linearize BL: %d\n", ret);
  1936. mutex_unlock(&ad->lock);
  1937. return ret;
  1938. }
  1939. *bl_out = temp;
  1940. if(!mfd->ad_bl_level)
  1941. mfd->ad_bl_level = bl_in;
  1942. if (ad_bl_out != mfd->ad_bl_level) {
  1943. mfd->ad_bl_level = ad_bl_out;
  1944. *bl_out_notify = true;
  1945. }
  1946. pp_ad_invalidate_input(mfd);
  1947. mutex_unlock(&ad->lock);
  1948. return 0;
  1949. }
  1950. static int pp_ad_shutdown_cleanup(struct msm_fb_data_type *mfd)
  1951. {
  1952. struct mdss_data_type *mdata = mdss_mdp_get_mdata();
  1953. struct mdss_mdp_ctl *ctl;
  1954. struct mdss_ad_info *ad;
  1955. bool needs_queue_cleanup = true;
  1956. int i = 0, ret = 0;
  1957. if ((!mdata) || (!mfd))
  1958. return -EPERM;
  1959. if (!mdata->ad_calc_wq)
  1960. return 0;
  1961. ret = mdss_mdp_get_ad(mfd, &ad);
  1962. if (ret) {
  1963. ret = -EINVAL;
  1964. pr_debug("failed to get ad_info, err = %d\n", ret);
  1965. return ret;
  1966. }
  1967. if (!ad->mfd)
  1968. return 0;
  1969. ad->mfd = NULL;
  1970. ctl = mfd_to_ctl(mfd);
  1971. if (ctl && ctl->remove_vsync_handler)
  1972. ctl->remove_vsync_handler(ctl, &ad->handle);
  1973. cancel_work_sync(&ad->calc_work);
  1974. /* Check if any other AD config is active */
  1975. for (i = 0; i < mdata->nad_cfgs; i++) {
  1976. ad = &mdata->ad_cfgs[i];
  1977. if (ad->mfd) {
  1978. needs_queue_cleanup = false;
  1979. break;
  1980. }
  1981. }
  1982. /* Destroy work queue if all AD configs are inactive */
  1983. if (needs_queue_cleanup) {
  1984. destroy_workqueue(mdata->ad_calc_wq);
  1985. mdata->ad_calc_wq = NULL;
  1986. }
  1987. return 0;
  1988. }
  1989. static int pp_get_dspp_num(u32 disp_num, u32 *dspp_num)
  1990. {
  1991. int i;
  1992. u32 mixer_cnt;
  1993. u32 mixer_id[MDSS_MDP_INTF_MAX_LAYERMIXER];
  1994. mixer_cnt = mdss_mdp_get_ctl_mixers(disp_num, mixer_id);
  1995. if (!mixer_cnt)
  1996. return -EPERM;
  1997. /* only read the first mixer */
  1998. for (i = 0; i < mixer_cnt; i++) {
  1999. if (mixer_id[i] < MDSS_MDP_MAX_DSPP)
  2000. break;
  2001. }
  2002. if (i >= mixer_cnt)
  2003. return -EPERM;
  2004. *dspp_num = mixer_id[i];
  2005. return 0;
  2006. }
  2007. int mdss_mdp_pa_config(struct mdp_pa_cfg_data *config,
  2008. u32 *copyback)
  2009. {
  2010. int ret = 0;
  2011. u32 disp_num, dspp_num = 0;
  2012. struct mdss_data_type *mdata = mdss_mdp_get_mdata();
  2013. char __iomem *pa_addr;
  2014. if (mdata->mdp_rev >= MDSS_MDP_HW_REV_103)
  2015. return -EINVAL;
  2016. if ((config->block < MDP_LOGICAL_BLOCK_DISP_0) ||
  2017. (config->block >= MDP_BLOCK_MAX))
  2018. return -EINVAL;
  2019. mutex_lock(&mdss_pp_mutex);
  2020. disp_num = config->block - MDP_LOGICAL_BLOCK_DISP_0;
  2021. if (config->pa_data.flags & MDP_PP_OPS_READ) {
  2022. ret = pp_get_dspp_num(disp_num, &dspp_num);
  2023. if (ret) {
  2024. pr_err("no dspp connects to disp %d",
  2025. disp_num);
  2026. goto pa_config_exit;
  2027. }
  2028. mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
  2029. pa_addr = mdss_mdp_get_dspp_addr_off(dspp_num) +
  2030. MDSS_MDP_REG_DSPP_PA_BASE;
  2031. config->pa_data.hue_adj = readl_relaxed(pa_addr);
  2032. pa_addr += 4;
  2033. config->pa_data.sat_adj = readl_relaxed(pa_addr);
  2034. pa_addr += 4;
  2035. config->pa_data.val_adj = readl_relaxed(pa_addr);
  2036. pa_addr += 4;
  2037. config->pa_data.cont_adj = readl_relaxed(pa_addr);
  2038. *copyback = 1;
  2039. mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
  2040. } else {
  2041. mdss_pp_res->pa_disp_cfg[disp_num] = config->pa_data;
  2042. mdss_pp_res->pp_disp_flags[disp_num] |= PP_FLAGS_DIRTY_PA;
  2043. }
  2044. pa_config_exit:
  2045. mutex_unlock(&mdss_pp_mutex);
  2046. return ret;
  2047. }
  2048. int mdss_mdp_pa_v2_config(struct mdp_pa_v2_cfg_data *config,
  2049. u32 *copyback)
  2050. {
  2051. int ret = 0;
  2052. u32 disp_num, dspp_num = 0;
  2053. char __iomem *pa_addr;
  2054. struct mdss_data_type *mdata = mdss_mdp_get_mdata();
  2055. if (mdata->mdp_rev < MDSS_MDP_HW_REV_103)
  2056. return -EINVAL;
  2057. if ((config->block < MDP_LOGICAL_BLOCK_DISP_0) ||
  2058. (config->block >= MDP_BLOCK_MAX))
  2059. return -EINVAL;
  2060. if ((config->pa_v2_data.flags & MDSS_PP_SPLIT_MASK) ==
  2061. MDSS_PP_SPLIT_MASK) {
  2062. pr_warn("Can't set both split bits\n");
  2063. return -EINVAL;
  2064. }
  2065. mutex_lock(&mdss_pp_mutex);
  2066. disp_num = config->block - MDP_LOGICAL_BLOCK_DISP_0;
  2067. if (config->pa_v2_data.flags & MDP_PP_OPS_READ) {
  2068. ret = pp_get_dspp_num(disp_num, &dspp_num);
  2069. if (ret) {
  2070. pr_err("no dspp connects to disp %d",
  2071. disp_num);
  2072. goto pa_config_exit;
  2073. }
  2074. mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
  2075. pa_addr = mdss_mdp_get_dspp_addr_off(dspp_num);
  2076. if (IS_ERR(pa_addr)) {
  2077. ret = PTR_ERR(pa_addr);
  2078. goto pa_config_exit;
  2079. } else
  2080. pa_addr += MDSS_MDP_REG_DSPP_PA_BASE;
  2081. ret = pp_read_pa_v2_regs(pa_addr,
  2082. &config->pa_v2_data,
  2083. disp_num);
  2084. if (ret)
  2085. goto pa_config_exit;
  2086. *copyback = 1;
  2087. mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
  2088. } else {
  2089. if (config->pa_v2_data.flags & MDP_PP_PA_SIX_ZONE_ENABLE) {
  2090. ret = pp_copy_pa_six_zone_lut(config, disp_num);
  2091. if (ret)
  2092. goto pa_config_exit;
  2093. }
  2094. mdss_pp_res->pa_v2_disp_cfg[disp_num] =
  2095. config->pa_v2_data;
  2096. mdss_pp_res->pa_v2_disp_cfg[disp_num].six_zone_curve_p0 =
  2097. &mdss_pp_res->six_zone_lut_curve_p0[disp_num][0];
  2098. mdss_pp_res->pa_v2_disp_cfg[disp_num].six_zone_curve_p1 =
  2099. &mdss_pp_res->six_zone_lut_curve_p1[disp_num][0];
  2100. mdss_pp_res->pp_disp_flags[disp_num] |= PP_FLAGS_DIRTY_PA;
  2101. }
  2102. pa_config_exit:
  2103. mutex_unlock(&mdss_pp_mutex);
  2104. return ret;
  2105. }
/*
 * pp_read_pa_v2_regs() - read the PA v2 DSPP register block back into
 * @pa_v2_config, honoring the per-feature enable flags.
 * @addr:         ioremapped base of the DSPP PA register block.
 * @pa_v2_config: destination; flags select which fields are read.
 * @disp_num:     display index selecting the cached six-zone LUT arrays.
 *
 * Returns 0 on success, -EINVAL on a bad six-zone LUT length, -EFAULT
 * when copying the six-zone curves to userspace fails.
 */
static int pp_read_pa_v2_regs(char __iomem *addr,
		struct mdp_pa_v2_data *pa_v2_config,
		u32 disp_num)
{
	int i;
	u32 data;

	/* Global hue/sat/val/contrast: one word each. addr always
	 * advances so later offsets stay correct when a feature is off. */
	if (pa_v2_config->flags & MDP_PP_PA_HUE_ENABLE)
		pa_v2_config->global_hue_adj = readl_relaxed(addr);
	addr += 4;
	if (pa_v2_config->flags & MDP_PP_PA_SAT_ENABLE)
		pa_v2_config->global_sat_adj = readl_relaxed(addr);
	addr += 4;
	if (pa_v2_config->flags & MDP_PP_PA_VAL_ENABLE)
		pa_v2_config->global_val_adj = readl_relaxed(addr);
	addr += 4;
	if (pa_v2_config->flags & MDP_PP_PA_CONT_ENABLE)
		pa_v2_config->global_cont_adj = readl_relaxed(addr);
	addr += 4;

	/* Six zone LUT and thresh data */
	if (pa_v2_config->flags & MDP_PP_PA_SIX_ZONE_ENABLE) {
		if (pa_v2_config->six_zone_len != MDP_SIX_ZONE_LUT_SIZE)
			return -EINVAL;

		/* NOTE(review): bits 25-26 appear to arm the LUT index /
		 * auto-increment for the reads below - confirm against the
		 * DSPP hardware spec. */
		data = (3 << 25);
		writel_relaxed(data, addr);

		/* Per entry: word at addr+4 holds curve p1, word at addr
		 * holds curve p0 (low 12 bits). The +4/-4 dance returns
		 * addr to the p0 register for the next iteration. */
		for (i = 0; i < MDP_SIX_ZONE_LUT_SIZE; i++) {
			addr += 4;
			mdss_pp_res->six_zone_lut_curve_p1[disp_num][i] =
				readl_relaxed(addr);
			addr -= 4;
			mdss_pp_res->six_zone_lut_curve_p0[disp_num][i] =
				readl_relaxed(addr) & 0xFFF;
		}

		if (copy_to_user(pa_v2_config->six_zone_curve_p0,
				&mdss_pp_res->six_zone_lut_curve_p0[disp_num][0],
				pa_v2_config->six_zone_len * sizeof(u32))) {
			return -EFAULT;
		}
		if (copy_to_user(pa_v2_config->six_zone_curve_p1,
				&mdss_pp_res->six_zone_lut_curve_p1[disp_num][0],
				pa_v2_config->six_zone_len * sizeof(u32))) {
			return -EFAULT;
		}

		/* Skip the two LUT data registers to the threshold word. */
		addr += 8;
		pa_v2_config->six_zone_thresh = readl_relaxed(addr);
		addr += 4;
	} else {
		/* Keep addr in step with the enabled path (3 words). */
		addr += 12;
	}

	/* Skin memory color config registers */
	if (pa_v2_config->flags & MDP_PP_PA_SKIN_ENABLE)
		pp_read_pa_mem_col_regs(addr, &pa_v2_config->skin_cfg);
	addr += 0x14;

	/* Sky memory color config registers */
	if (pa_v2_config->flags & MDP_PP_PA_SKY_ENABLE)
		pp_read_pa_mem_col_regs(addr, &pa_v2_config->sky_cfg);
	addr += 0x14;

	/* Foliage memory color config registers */
	if (pa_v2_config->flags & MDP_PP_PA_FOL_ENABLE)
		pp_read_pa_mem_col_regs(addr, &pa_v2_config->fol_cfg);

	return 0;
}
  2167. static void pp_read_pa_mem_col_regs(char __iomem *addr,
  2168. struct mdp_pa_mem_col_cfg *mem_col_cfg)
  2169. {
  2170. mem_col_cfg->color_adjust_p0 = readl_relaxed(addr);
  2171. addr += 4;
  2172. mem_col_cfg->color_adjust_p1 = readl_relaxed(addr);
  2173. addr += 4;
  2174. mem_col_cfg->hue_region = readl_relaxed(addr);
  2175. addr += 4;
  2176. mem_col_cfg->sat_region = readl_relaxed(addr);
  2177. addr += 4;
  2178. mem_col_cfg->val_region = readl_relaxed(addr);
  2179. }
  2180. static int pp_copy_pa_six_zone_lut(struct mdp_pa_v2_cfg_data *pa_v2_config,
  2181. u32 disp_num)
  2182. {
  2183. if (pa_v2_config->pa_v2_data.six_zone_len != MDP_SIX_ZONE_LUT_SIZE)
  2184. return -EINVAL;
  2185. if (copy_from_user(&mdss_pp_res->six_zone_lut_curve_p0[disp_num][0],
  2186. pa_v2_config->pa_v2_data.six_zone_curve_p0,
  2187. pa_v2_config->pa_v2_data.six_zone_len * sizeof(u32))) {
  2188. return -EFAULT;
  2189. }
  2190. if (copy_from_user(&mdss_pp_res->six_zone_lut_curve_p1[disp_num][0],
  2191. pa_v2_config->pa_v2_data.six_zone_curve_p1,
  2192. pa_v2_config->pa_v2_data.six_zone_len * sizeof(u32))) {
  2193. return -EFAULT;
  2194. }
  2195. return 0;
  2196. }
/*
 * pp_read_pcc_regs() - read the full PCC (polynomial color correction)
 * coefficient matrix from hardware into @cfg_ptr.
 *
 * Layout: each coefficient group is a 0x10-byte stride; within a group
 * the R, G, B channel values sit at offsets 0, 4 and 8.  Groups follow
 * the order: constant, r, g, b, rr, rg, rb, gg, gb, bb, rgb_0, rgb_1.
 */
static void pp_read_pcc_regs(char __iomem *addr,
				struct mdp_pcc_cfg_data *cfg_ptr)
{
	/* constant term */
	cfg_ptr->r.c = readl_relaxed(addr);
	cfg_ptr->g.c = readl_relaxed(addr + 4);
	cfg_ptr->b.c = readl_relaxed(addr + 8);
	addr += 0x10;

	/* linear terms */
	cfg_ptr->r.r = readl_relaxed(addr);
	cfg_ptr->g.r = readl_relaxed(addr + 4);
	cfg_ptr->b.r = readl_relaxed(addr + 8);
	addr += 0x10;

	cfg_ptr->r.g = readl_relaxed(addr);
	cfg_ptr->g.g = readl_relaxed(addr + 4);
	cfg_ptr->b.g = readl_relaxed(addr + 8);
	addr += 0x10;

	cfg_ptr->r.b = readl_relaxed(addr);
	cfg_ptr->g.b = readl_relaxed(addr + 4);
	cfg_ptr->b.b = readl_relaxed(addr + 8);
	addr += 0x10;

	/* quadratic cross terms */
	cfg_ptr->r.rr = readl_relaxed(addr);
	cfg_ptr->g.rr = readl_relaxed(addr + 4);
	cfg_ptr->b.rr = readl_relaxed(addr + 8);
	addr += 0x10;

	cfg_ptr->r.rg = readl_relaxed(addr);
	cfg_ptr->g.rg = readl_relaxed(addr + 4);
	cfg_ptr->b.rg = readl_relaxed(addr + 8);
	addr += 0x10;

	cfg_ptr->r.rb = readl_relaxed(addr);
	cfg_ptr->g.rb = readl_relaxed(addr + 4);
	cfg_ptr->b.rb = readl_relaxed(addr + 8);
	addr += 0x10;

	cfg_ptr->r.gg = readl_relaxed(addr);
	cfg_ptr->g.gg = readl_relaxed(addr + 4);
	cfg_ptr->b.gg = readl_relaxed(addr + 8);
	addr += 0x10;

	cfg_ptr->r.gb = readl_relaxed(addr);
	cfg_ptr->g.gb = readl_relaxed(addr + 4);
	cfg_ptr->b.gb = readl_relaxed(addr + 8);
	addr += 0x10;

	cfg_ptr->r.bb = readl_relaxed(addr);
	cfg_ptr->g.bb = readl_relaxed(addr + 4);
	cfg_ptr->b.bb = readl_relaxed(addr + 8);
	addr += 0x10;

	/* cubic terms */
	cfg_ptr->r.rgb_0 = readl_relaxed(addr);
	cfg_ptr->g.rgb_0 = readl_relaxed(addr + 4);
	cfg_ptr->b.rgb_0 = readl_relaxed(addr + 8);
	addr += 0x10;

	cfg_ptr->r.rgb_1 = readl_relaxed(addr);
	cfg_ptr->g.rgb_1 = readl_relaxed(addr + 4);
	cfg_ptr->b.rgb_1 = readl_relaxed(addr + 8);
}
/*
 * pp_update_pcc_regs() - program the full PCC coefficient matrix into
 * hardware from @cfg_ptr.
 *
 * Mirror image of pp_read_pcc_regs(): 0x10-byte stride per coefficient
 * group, R/G/B channel values at offsets 0, 4 and 8 inside each group.
 */
static void pp_update_pcc_regs(char __iomem *addr,
				struct mdp_pcc_cfg_data *cfg_ptr)
{
	/* constant term */
	writel_relaxed(cfg_ptr->r.c, addr);
	writel_relaxed(cfg_ptr->g.c, addr + 4);
	writel_relaxed(cfg_ptr->b.c, addr + 8);
	addr += 0x10;

	/* linear terms */
	writel_relaxed(cfg_ptr->r.r, addr);
	writel_relaxed(cfg_ptr->g.r, addr + 4);
	writel_relaxed(cfg_ptr->b.r, addr + 8);
	addr += 0x10;

	writel_relaxed(cfg_ptr->r.g, addr);
	writel_relaxed(cfg_ptr->g.g, addr + 4);
	writel_relaxed(cfg_ptr->b.g, addr + 8);
	addr += 0x10;

	writel_relaxed(cfg_ptr->r.b, addr);
	writel_relaxed(cfg_ptr->g.b, addr + 4);
	writel_relaxed(cfg_ptr->b.b, addr + 8);
	addr += 0x10;

	/* quadratic cross terms */
	writel_relaxed(cfg_ptr->r.rr, addr);
	writel_relaxed(cfg_ptr->g.rr, addr + 4);
	writel_relaxed(cfg_ptr->b.rr, addr + 8);
	addr += 0x10;

	writel_relaxed(cfg_ptr->r.rg, addr);
	writel_relaxed(cfg_ptr->g.rg, addr + 4);
	writel_relaxed(cfg_ptr->b.rg, addr + 8);
	addr += 0x10;

	writel_relaxed(cfg_ptr->r.rb, addr);
	writel_relaxed(cfg_ptr->g.rb, addr + 4);
	writel_relaxed(cfg_ptr->b.rb, addr + 8);
	addr += 0x10;

	writel_relaxed(cfg_ptr->r.gg, addr);
	writel_relaxed(cfg_ptr->g.gg, addr + 4);
	writel_relaxed(cfg_ptr->b.gg, addr + 8);
	addr += 0x10;

	writel_relaxed(cfg_ptr->r.gb, addr);
	writel_relaxed(cfg_ptr->g.gb, addr + 4);
	writel_relaxed(cfg_ptr->b.gb, addr + 8);
	addr += 0x10;

	writel_relaxed(cfg_ptr->r.bb, addr);
	writel_relaxed(cfg_ptr->g.bb, addr + 4);
	writel_relaxed(cfg_ptr->b.bb, addr + 8);
	addr += 0x10;

	/* cubic terms */
	writel_relaxed(cfg_ptr->r.rgb_0, addr);
	writel_relaxed(cfg_ptr->g.rgb_0, addr + 4);
	writel_relaxed(cfg_ptr->b.rgb_0, addr + 8);
	addr += 0x10;

	writel_relaxed(cfg_ptr->r.rgb_1, addr);
	writel_relaxed(cfg_ptr->g.rgb_1, addr + 4);
	writel_relaxed(cfg_ptr->b.rgb_1, addr + 8);
}
  2299. int mdss_mdp_pcc_config(struct mdp_pcc_cfg_data *config,
  2300. u32 *copyback)
  2301. {
  2302. int ret = 0;
  2303. u32 disp_num, dspp_num = 0;
  2304. char __iomem *addr;
  2305. if ((config->block < MDP_LOGICAL_BLOCK_DISP_0) ||
  2306. (config->block >= MDP_BLOCK_MAX))
  2307. return -EINVAL;
  2308. if ((config->ops & MDSS_PP_SPLIT_MASK) == MDSS_PP_SPLIT_MASK) {
  2309. pr_warn("Can't set both split bits\n");
  2310. return -EINVAL;
  2311. }
  2312. mutex_lock(&mdss_pp_mutex);
  2313. disp_num = config->block - MDP_LOGICAL_BLOCK_DISP_0;
  2314. if (config->ops & MDP_PP_OPS_READ) {
  2315. ret = pp_get_dspp_num(disp_num, &dspp_num);
  2316. if (ret) {
  2317. pr_err("%s, no dspp connects to disp %d",
  2318. __func__, disp_num);
  2319. goto pcc_config_exit;
  2320. }
  2321. mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
  2322. addr = mdss_mdp_get_dspp_addr_off(dspp_num) +
  2323. MDSS_MDP_REG_DSPP_PCC_BASE;
  2324. pp_read_pcc_regs(addr, config);
  2325. *copyback = 1;
  2326. mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
  2327. } else {
  2328. mdss_pp_res->pcc_disp_cfg[disp_num] = *config;
  2329. mdss_pp_res->pp_disp_flags[disp_num] |= PP_FLAGS_DIRTY_PCC;
  2330. }
  2331. pcc_config_exit:
  2332. mutex_unlock(&mdss_pp_mutex);
  2333. return ret;
  2334. }
  2335. static void pp_read_igc_lut_cached(struct mdp_igc_lut_data *cfg)
  2336. {
  2337. int i;
  2338. u32 disp_num;
  2339. disp_num = cfg->block - MDP_LOGICAL_BLOCK_DISP_0;
  2340. for (i = 0; i < IGC_LUT_ENTRIES; i++) {
  2341. cfg->c0_c1_data[i] =
  2342. mdss_pp_res->igc_disp_cfg[disp_num].c0_c1_data[i];
  2343. cfg->c2_data[i] =
  2344. mdss_pp_res->igc_disp_cfg[disp_num].c2_data[i];
  2345. }
  2346. }
/*
 * pp_read_igc_lut() - read the inverse-gamma LUT from hardware.
 * @cfg:     destination; c0_c1_data gets c0 in bits 0-11 and c1 in
 *           bits 16-27, c2_data gets c2 in bits 0-11.
 * @addr:    base of the IGC register block (three data registers, one
 *           word apart).
 * @blk_idx: index of this DSPP; the other DSPPs are masked out via the
 *           top nibble of the update word.
 *
 * NOTE(review): repeated reads of the same data register appear to
 * auto-advance an internal LUT index (hence the cfg->len-iteration
 * loops) - confirm against the MDP hardware documentation.
 */
static void pp_read_igc_lut(struct mdp_igc_lut_data *cfg,
				char __iomem *addr, u32 blk_idx)
{
	int i;
	u32 data;

	/* INDEX_UPDATE & VALUE_UPDATEN */
	data = (3 << 24) | (((~(1 << blk_idx)) & 0x7) << 28);
	writel_relaxed(data, addr);
	/* c0 component: low 12 bits of the first data register */
	for (i = 0; i < cfg->len; i++)
		cfg->c0_c1_data[i] = readl_relaxed(addr) & 0xFFF;
	addr += 0x4;
	writel_relaxed(data, addr);
	/* c1 component: packed into the high half of c0_c1_data */
	for (i = 0; i < cfg->len; i++)
		cfg->c0_c1_data[i] |= (readl_relaxed(addr) & 0xFFF) << 16;
	addr += 0x4;
	writel_relaxed(data, addr);
	/* c2 component */
	for (i = 0; i < cfg->len; i++)
		cfg->c2_data[i] = readl_relaxed(addr) & 0xFFF;
}
/*
 * pp_update_igc_lut() - program the inverse-gamma LUT into hardware.
 * @cfg:     source; c0_c1_data packs c0 (bits 0-11) and c1 (bits
 *           16-27), c2_data carries c2 (bits 0-11).
 * @blk_idx: index of this DSPP; other DSPPs are masked out via the top
 *           nibble of the control word.
 *
 * For each of the three data registers, bit 25 (index update) is set
 * only on the first write to reset the internal LUT index, then
 * cleared for the remaining entries.  NOTE(review): subsequent writes
 * appear to auto-advance the LUT index - confirm against the MDP
 * hardware documentation.
 */
static void pp_update_igc_lut(struct mdp_igc_lut_data *cfg,
				char __iomem *addr, u32 blk_idx)
{
	int i;
	u32 data;

	/* INDEX_UPDATE */
	data = (1 << 25) | (((~(1 << blk_idx)) & 0x7) << 28);
	writel_relaxed((cfg->c0_c1_data[0] & 0xFFF) | data, addr);

	/* disable index update */
	data &= ~(1 << 25);
	for (i = 1; i < cfg->len; i++)
		writel_relaxed((cfg->c0_c1_data[i] & 0xFFF) | data, addr);

	addr += 0x4;
	data |= (1 << 25);
	writel_relaxed(((cfg->c0_c1_data[0] >> 16) & 0xFFF) | data, addr);
	data &= ~(1 << 25);
	for (i = 1; i < cfg->len; i++)
		writel_relaxed(((cfg->c0_c1_data[i] >> 16) & 0xFFF) | data,
				addr);

	addr += 0x4;
	data |= (1 << 25);
	writel_relaxed((cfg->c2_data[0] & 0xFFF) | data, addr);
	data &= ~(1 << 25);
	for (i = 1; i < cfg->len; i++)
		writel_relaxed((cfg->c2_data[i] & 0xFFF) | data, addr);
}
  2392. int mdss_mdp_limited_lut_igc_config(struct mdss_mdp_ctl *ctl)
  2393. {
  2394. int ret = 0;
  2395. u32 copyback = 0;
  2396. u32 copy_from_kernel = 1;
  2397. struct mdp_igc_lut_data config;
  2398. if (!ctl)
  2399. return -EINVAL;
  2400. config.len = IGC_LUT_ENTRIES;
  2401. config.ops = MDP_PP_OPS_WRITE | MDP_PP_OPS_ENABLE;
  2402. config.block = (ctl->mfd->index) + MDP_LOGICAL_BLOCK_DISP_0;
  2403. config.c0_c1_data = igc_limited;
  2404. config.c2_data = igc_limited;
  2405. ret = mdss_mdp_igc_lut_config(&config, &copyback,
  2406. copy_from_kernel);
  2407. return ret;
  2408. }
/*
 * mdss_mdp_igc_lut_config() - read back or stage the IGC LUT for a
 * display.
 * @config:           IGC config; ops select read-back vs staging and
 *                    (on read) which ROM table to use.
 * @copyback:         set to 1 when LUT data was read into @config.
 * @copy_from_kernel: nonzero when config data pointers are kernel
 *                    memory (internal callers) rather than userspace.
 *
 * Returns 0 on success or a negative error code.
 */
int mdss_mdp_igc_lut_config(struct mdp_igc_lut_data *config,
					u32 *copyback, u32 copy_from_kernel)
{
	int ret = 0;
	u32 tbl_idx, disp_num, dspp_num = 0;
	struct mdp_igc_lut_data local_cfg;
	char __iomem *igc_addr;
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();

	if ((config->block < MDP_LOGICAL_BLOCK_DISP_0) ||
		(config->block >= MDP_BLOCK_MAX))
		return -EINVAL;

	if (config->len != IGC_LUT_ENTRIES)
		return -EINVAL;

	if ((config->ops & MDSS_PP_SPLIT_MASK) == MDSS_PP_SPLIT_MASK) {
		pr_warn("Can't set both split bits\n");
		return -EINVAL;
	}

	mutex_lock(&mdss_pp_mutex);
	disp_num = config->block - MDP_LOGICAL_BLOCK_DISP_0;

	if (config->ops & MDP_PP_OPS_READ) {
		ret = pp_get_dspp_num(disp_num, &dspp_num);
		if (ret) {
			pr_err("%s, no dspp connects to disp %d",
				__func__, disp_num);
			goto igc_config_exit;
		}
		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
		/* Select which of the three IGC tables to read. */
		if (config->ops & MDP_PP_IGC_FLAG_ROM0)
			tbl_idx = 1;
		else if (config->ops & MDP_PP_IGC_FLAG_ROM1)
			tbl_idx = 2;
		else
			tbl_idx = 0;
		igc_addr = mdata->mdp_base + MDSS_MDP_REG_IGC_DSPP_BASE +
			(0x10 * tbl_idx);
		/* Read into the kernel-side cache, then copy out. */
		local_cfg = *config;
		local_cfg.c0_c1_data =
			&mdss_pp_res->igc_lut_c0c1[disp_num][0];
		local_cfg.c2_data =
			&mdss_pp_res->igc_lut_c2[disp_num][0];
		if (mdata->has_no_lut_read)
			pp_read_igc_lut_cached(&local_cfg);
		else
			pp_read_igc_lut(&local_cfg, igc_addr, dspp_num);
		if (copy_to_user(config->c0_c1_data, local_cfg.c0_c1_data,
			config->len * sizeof(u32))) {
			ret = -EFAULT;
			/* Balance the MDP_BLOCK_POWER_ON above. */
			mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
			goto igc_config_exit;
		}
		if (copy_to_user(config->c2_data, local_cfg.c2_data,
			config->len * sizeof(u32))) {
			ret = -EFAULT;
			mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
			goto igc_config_exit;
		}
		*copyback = 1;
		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
	} else {
		if (copy_from_kernel) {
			memcpy(&mdss_pp_res->igc_lut_c0c1[disp_num][0],
				config->c0_c1_data, config->len * sizeof(u32));
			memcpy(&mdss_pp_res->igc_lut_c2[disp_num][0],
				config->c2_data, config->len * sizeof(u32));
		} else {
			if (copy_from_user(
				&mdss_pp_res->igc_lut_c0c1[disp_num][0],
				config->c0_c1_data,
				config->len * sizeof(u32))) {
				ret = -EFAULT;
				goto igc_config_exit;
			}
			if (copy_from_user(
				&mdss_pp_res->igc_lut_c2[disp_num][0],
				config->c2_data, config->len * sizeof(u32))) {
				ret = -EFAULT;
				goto igc_config_exit;
			}
		}
		/* Stage the config, re-pointing its data at the kernel
		 * copies so no userspace pointers are retained. */
		mdss_pp_res->igc_disp_cfg[disp_num] = *config;
		mdss_pp_res->igc_disp_cfg[disp_num].c0_c1_data =
			&mdss_pp_res->igc_lut_c0c1[disp_num][0];
		mdss_pp_res->igc_disp_cfg[disp_num].c2_data =
			&mdss_pp_res->igc_lut_c2[disp_num][0];
		mdss_pp_res->pp_disp_flags[disp_num] |= PP_FLAGS_DIRTY_IGC;
	}

igc_config_exit:
	mutex_unlock(&mdss_pp_mutex);
	return ret;
}
  2499. static void pp_update_gc_one_lut(char __iomem *addr,
  2500. struct mdp_ar_gc_lut_data *lut_data,
  2501. uint8_t num_stages)
  2502. {
  2503. int i, start_idx, idx;
  2504. start_idx = ((readl_relaxed(addr) >> 16) & 0xF) + 1;
  2505. for (i = start_idx; i < GC_LUT_SEGMENTS; i++) {
  2506. idx = min((uint8_t)i, (uint8_t)(num_stages-1));
  2507. writel_relaxed(lut_data[idx].x_start, addr);
  2508. }
  2509. for (i = 0; i < start_idx; i++) {
  2510. idx = min((uint8_t)i, (uint8_t)(num_stages-1));
  2511. writel_relaxed(lut_data[idx].x_start, addr);
  2512. }
  2513. addr += 4;
  2514. start_idx = ((readl_relaxed(addr) >> 16) & 0xF) + 1;
  2515. for (i = start_idx; i < GC_LUT_SEGMENTS; i++) {
  2516. idx = min((uint8_t)i, (uint8_t)(num_stages-1));
  2517. writel_relaxed(lut_data[idx].slope, addr);
  2518. }
  2519. for (i = 0; i < start_idx; i++) {
  2520. idx = min((uint8_t)i, (uint8_t)(num_stages-1));
  2521. writel_relaxed(lut_data[idx].slope, addr);
  2522. }
  2523. addr += 4;
  2524. start_idx = ((readl_relaxed(addr) >> 16) & 0xF) + 1;
  2525. for (i = start_idx; i < GC_LUT_SEGMENTS; i++) {
  2526. idx = min((uint8_t)i, (uint8_t)(num_stages-1));
  2527. writel_relaxed(lut_data[idx].offset, addr);
  2528. }
  2529. for (i = 0; i < start_idx; i++) {
  2530. idx = min((uint8_t)i, (uint8_t)(num_stages-1));
  2531. writel_relaxed(lut_data[idx].offset, addr);
  2532. }
  2533. }
  2534. static void pp_update_argc_lut(char __iomem *addr,
  2535. struct mdp_pgc_lut_data *config)
  2536. {
  2537. pp_update_gc_one_lut(addr, config->r_data, config->num_r_stages);
  2538. addr += 0x10;
  2539. pp_update_gc_one_lut(addr, config->g_data, config->num_g_stages);
  2540. addr += 0x10;
  2541. pp_update_gc_one_lut(addr, config->b_data, config->num_b_stages);
  2542. }
/*
 * pp_read_gc_one_lut() - read one GC channel LUT, column by column
 * (x_start, slope, offset; registers one word apart).
 *
 * Each read returns the current segment index in bits 16-19 plus that
 * segment's value; the first read fixes the starting segment and the
 * remaining reads wrap around the table.  NOTE(review): repeated reads
 * of the same register appear to auto-advance the segment index -
 * confirm against the MDP hardware documentation.
 */
static void pp_read_gc_one_lut(char __iomem *addr,
		struct mdp_ar_gc_lut_data *gc_data)
{
	int i, start_idx, data;

	/* x_start column: 12-bit values */
	data = readl_relaxed(addr);
	start_idx = (data >> 16) & 0xF;
	gc_data[start_idx].x_start = data & 0xFFF;
	for (i = start_idx + 1; i < GC_LUT_SEGMENTS; i++) {
		data = readl_relaxed(addr);
		gc_data[i].x_start = data & 0xFFF;
	}
	for (i = 0; i < start_idx; i++) {
		data = readl_relaxed(addr);
		gc_data[i].x_start = data & 0xFFF;
	}
	addr += 4;

	/* slope column: 15-bit values */
	data = readl_relaxed(addr);
	start_idx = (data >> 16) & 0xF;
	gc_data[start_idx].slope = data & 0x7FFF;
	for (i = start_idx + 1; i < GC_LUT_SEGMENTS; i++) {
		data = readl_relaxed(addr);
		gc_data[i].slope = data & 0x7FFF;
	}
	for (i = 0; i < start_idx; i++) {
		data = readl_relaxed(addr);
		gc_data[i].slope = data & 0x7FFF;
	}
	addr += 4;

	/* offset column: 15-bit values */
	data = readl_relaxed(addr);
	start_idx = (data >> 16) & 0xF;
	gc_data[start_idx].offset = data & 0x7FFF;
	for (i = start_idx + 1; i < GC_LUT_SEGMENTS; i++) {
		data = readl_relaxed(addr);
		gc_data[i].offset = data & 0x7FFF;
	}
	for (i = 0; i < start_idx; i++) {
		data = readl_relaxed(addr);
		gc_data[i].offset = data & 0x7FFF;
	}
}
  2583. static int pp_read_argc_lut(struct mdp_pgc_lut_data *config, char __iomem *addr)
  2584. {
  2585. int ret = 0;
  2586. pp_read_gc_one_lut(addr, config->r_data);
  2587. addr += 0x10;
  2588. pp_read_gc_one_lut(addr, config->g_data);
  2589. addr += 0x10;
  2590. pp_read_gc_one_lut(addr, config->b_data);
  2591. return ret;
  2592. }
  2593. static int pp_read_argc_lut_cached(struct mdp_pgc_lut_data *config)
  2594. {
  2595. int i;
  2596. u32 disp_num;
  2597. struct mdp_pgc_lut_data *pgc_ptr;
  2598. disp_num = PP_BLOCK(config->block) - MDP_LOGICAL_BLOCK_DISP_0;
  2599. switch (PP_LOCAT(config->block)) {
  2600. case MDSS_PP_LM_CFG:
  2601. pgc_ptr = &mdss_pp_res->argc_disp_cfg[disp_num];
  2602. break;
  2603. case MDSS_PP_DSPP_CFG:
  2604. pgc_ptr = &mdss_pp_res->pgc_disp_cfg[disp_num];
  2605. break;
  2606. default:
  2607. return -EINVAL;
  2608. }
  2609. for (i = 0; i < GC_LUT_SEGMENTS; i++) {
  2610. config->r_data[i].x_start = pgc_ptr->r_data[i].x_start;
  2611. config->r_data[i].slope = pgc_ptr->r_data[i].slope;
  2612. config->r_data[i].offset = pgc_ptr->r_data[i].offset;
  2613. config->g_data[i].x_start = pgc_ptr->g_data[i].x_start;
  2614. config->g_data[i].slope = pgc_ptr->g_data[i].slope;
  2615. config->g_data[i].offset = pgc_ptr->g_data[i].offset;
  2616. config->b_data[i].x_start = pgc_ptr->b_data[i].x_start;
  2617. config->b_data[i].slope = pgc_ptr->b_data[i].slope;
  2618. config->b_data[i].offset = pgc_ptr->b_data[i].offset;
  2619. }
  2620. return 0;
  2621. }
  2622. /* Note: Assumes that its inputs have been checked by calling function */
  2623. static void pp_update_hist_lut(char __iomem *addr,
  2624. struct mdp_hist_lut_data *cfg)
  2625. {
  2626. int i;
  2627. for (i = 0; i < ENHIST_LUT_ENTRIES; i++)
  2628. writel_relaxed(cfg->data[i], addr);
  2629. /* swap */
  2630. if (PP_LOCAT(cfg->block) == MDSS_PP_DSPP_CFG)
  2631. writel_relaxed(1, addr + 4);
  2632. else
  2633. writel_relaxed(1, addr + 16);
  2634. }
  2635. int mdss_mdp_argc_config(struct mdp_pgc_lut_data *config,
  2636. u32 *copyback)
  2637. {
  2638. int ret = 0;
  2639. u32 disp_num, dspp_num = 0;
  2640. struct mdp_pgc_lut_data local_cfg;
  2641. struct mdp_pgc_lut_data *pgc_ptr;
  2642. u32 tbl_size, r_size, g_size, b_size;
  2643. char __iomem *argc_addr = 0;
  2644. struct mdss_data_type *mdata = mdss_mdp_get_mdata();
  2645. if (mdata == NULL)
  2646. return -EPERM;
  2647. if ((PP_BLOCK(config->block) < MDP_LOGICAL_BLOCK_DISP_0) ||
  2648. (PP_BLOCK(config->block) >= MDP_BLOCK_MAX))
  2649. return -EINVAL;
  2650. if ((config->flags & MDSS_PP_SPLIT_MASK) == MDSS_PP_SPLIT_MASK) {
  2651. pr_warn("Can't set both split bits\n");
  2652. return -EINVAL;
  2653. }
  2654. mutex_lock(&mdss_pp_mutex);
  2655. disp_num = PP_BLOCK(config->block) - MDP_LOGICAL_BLOCK_DISP_0;
  2656. ret = pp_get_dspp_num(disp_num, &dspp_num);
  2657. if (ret) {
  2658. pr_err("%s, no dspp connects to disp %d", __func__, disp_num);
  2659. goto argc_config_exit;
  2660. }
  2661. switch (PP_LOCAT(config->block)) {
  2662. case MDSS_PP_LM_CFG:
  2663. argc_addr = mdss_mdp_get_mixer_addr_off(dspp_num) +
  2664. MDSS_MDP_REG_LM_GC_LUT_BASE;
  2665. pgc_ptr = &mdss_pp_res->argc_disp_cfg[disp_num];
  2666. if (config->flags & MDP_PP_OPS_WRITE)
  2667. mdss_pp_res->pp_disp_flags[disp_num] |=
  2668. PP_FLAGS_DIRTY_ARGC;
  2669. break;
  2670. case MDSS_PP_DSPP_CFG:
  2671. argc_addr = mdss_mdp_get_dspp_addr_off(dspp_num) +
  2672. MDSS_MDP_REG_DSPP_GC_BASE;
  2673. pgc_ptr = &mdss_pp_res->pgc_disp_cfg[disp_num];
  2674. if (config->flags & MDP_PP_OPS_WRITE)
  2675. mdss_pp_res->pp_disp_flags[disp_num] |=
  2676. PP_FLAGS_DIRTY_PGC;
  2677. break;
  2678. default:
  2679. goto argc_config_exit;
  2680. break;
  2681. }
  2682. tbl_size = GC_LUT_SEGMENTS * sizeof(struct mdp_ar_gc_lut_data);
  2683. if (config->flags & MDP_PP_OPS_READ) {
  2684. mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
  2685. local_cfg = *config;
  2686. local_cfg.r_data =
  2687. &mdss_pp_res->gc_lut_r[disp_num][0];
  2688. local_cfg.g_data =
  2689. &mdss_pp_res->gc_lut_g[disp_num][0];
  2690. local_cfg.b_data =
  2691. &mdss_pp_res->gc_lut_b[disp_num][0];
  2692. if (mdata->has_no_lut_read)
  2693. pp_read_argc_lut_cached(&local_cfg);
  2694. else
  2695. pp_read_argc_lut(&local_cfg, argc_addr);
  2696. if ((tbl_size != local_cfg.num_r_stages *
  2697. sizeof(struct mdp_ar_gc_lut_data)) ||
  2698. (copy_to_user(config->r_data, local_cfg.r_data,
  2699. tbl_size))) {
  2700. mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
  2701. ret = -EFAULT;
  2702. goto argc_config_exit;
  2703. }
  2704. if ((tbl_size != local_cfg.num_g_stages *
  2705. sizeof(struct mdp_ar_gc_lut_data)) ||
  2706. (copy_to_user(config->g_data, local_cfg.g_data,
  2707. tbl_size))) {
  2708. mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
  2709. ret = -EFAULT;
  2710. goto argc_config_exit;
  2711. }
  2712. if ((tbl_size != local_cfg.num_b_stages *
  2713. sizeof(struct mdp_ar_gc_lut_data)) ||
  2714. (copy_to_user(config->b_data, local_cfg.b_data,
  2715. tbl_size))) {
  2716. mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
  2717. ret = -EFAULT;
  2718. goto argc_config_exit;
  2719. }
  2720. *copyback = 1;
  2721. mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
  2722. } else {
  2723. r_size = config->num_r_stages *
  2724. sizeof(struct mdp_ar_gc_lut_data);
  2725. g_size = config->num_g_stages *
  2726. sizeof(struct mdp_ar_gc_lut_data);
  2727. b_size = config->num_b_stages *
  2728. sizeof(struct mdp_ar_gc_lut_data);
  2729. if (r_size > tbl_size ||
  2730. g_size > tbl_size ||
  2731. b_size > tbl_size ||
  2732. r_size == 0 ||
  2733. g_size == 0 ||
  2734. b_size == 0) {
  2735. ret = -EINVAL;
  2736. pr_warn("%s, number of rgb stages invalid",
  2737. __func__);
  2738. goto argc_config_exit;
  2739. }
  2740. if (copy_from_user(&mdss_pp_res->gc_lut_r[disp_num][0],
  2741. config->r_data, r_size)) {
  2742. ret = -EFAULT;
  2743. goto argc_config_exit;
  2744. }
  2745. if (copy_from_user(&mdss_pp_res->gc_lut_g[disp_num][0],
  2746. config->g_data, g_size)) {
  2747. ret = -EFAULT;
  2748. goto argc_config_exit;
  2749. }
  2750. if (copy_from_user(&mdss_pp_res->gc_lut_b[disp_num][0],
  2751. config->b_data, b_size)) {
  2752. ret = -EFAULT;
  2753. goto argc_config_exit;
  2754. }
  2755. *pgc_ptr = *config;
  2756. pgc_ptr->r_data =
  2757. &mdss_pp_res->gc_lut_r[disp_num][0];
  2758. pgc_ptr->g_data =
  2759. &mdss_pp_res->gc_lut_g[disp_num][0];
  2760. pgc_ptr->b_data =
  2761. &mdss_pp_res->gc_lut_b[disp_num][0];
  2762. }
  2763. argc_config_exit:
  2764. mutex_unlock(&mdss_pp_mutex);
  2765. return ret;
  2766. }
/*
 * mdss_mdp_hist_lut_config() - read back or stage the histogram
 * enhancement LUT for a display.
 * @config:   hist LUT config; ops select read-back vs staging.
 * @copyback: set to 1 when LUT data was read into @config.
 *
 * Returns 0 on success or a negative error code.
 */
int mdss_mdp_hist_lut_config(struct mdp_hist_lut_data *config,
					u32 *copyback)
{
	int i, ret = 0;
	u32 disp_num, dspp_num = 0;
	char __iomem *hist_addr;

	if ((PP_BLOCK(config->block) < MDP_LOGICAL_BLOCK_DISP_0) ||
		(PP_BLOCK(config->block) >= MDP_BLOCK_MAX))
		return -EINVAL;

	mutex_lock(&mdss_pp_mutex);
	disp_num = PP_BLOCK(config->block) - MDP_LOGICAL_BLOCK_DISP_0;

	if (config->ops & MDP_PP_OPS_READ) {
		ret = pp_get_dspp_num(disp_num, &dspp_num);
		if (ret) {
			pr_err("%s, no dspp connects to disp %d",
				__func__, disp_num);
			goto enhist_config_exit;
		}
		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
		hist_addr = mdss_mdp_get_dspp_addr_off(dspp_num) +
			MDSS_MDP_REG_DSPP_HIST_LUT_BASE;
		/* Read into the kernel cache, then copy to userspace.
		 * NOTE(review): all entries are read from the same
		 * register, which appears to auto-advance an internal
		 * index - confirm against the MDP HW documentation. */
		for (i = 0; i < ENHIST_LUT_ENTRIES; i++)
			mdss_pp_res->enhist_lut[disp_num][i] =
				readl_relaxed(hist_addr);
		if (copy_to_user(config->data,
			&mdss_pp_res->enhist_lut[disp_num][0],
			ENHIST_LUT_ENTRIES * sizeof(u32))) {
			/* Balance the MDP_BLOCK_POWER_ON above. */
			mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
			ret = -EFAULT;
			goto enhist_config_exit;
		}
		*copyback = 1;
		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
	} else {
		if (copy_from_user(&mdss_pp_res->enhist_lut[disp_num][0],
			config->data, ENHIST_LUT_ENTRIES * sizeof(u32))) {
			ret = -EFAULT;
			goto enhist_config_exit;
		}
		/* Stage the config, re-pointing its data at the kernel
		 * copy so no userspace pointer is retained. */
		mdss_pp_res->enhist_disp_cfg[disp_num] = *config;
		mdss_pp_res->enhist_disp_cfg[disp_num].data =
			&mdss_pp_res->enhist_lut[disp_num][0];
		mdss_pp_res->pp_disp_flags[disp_num] |= PP_FLAGS_DIRTY_ENHIST;
	}

enhist_config_exit:
	mutex_unlock(&mdss_pp_mutex);
	return ret;
}
  2815. int mdss_mdp_dither_config(struct mdp_dither_cfg_data *config,
  2816. u32 *copyback)
  2817. {
  2818. u32 disp_num;
  2819. if ((config->block < MDP_LOGICAL_BLOCK_DISP_0) ||
  2820. (config->block >= MDP_BLOCK_MAX))
  2821. return -EINVAL;
  2822. if (config->flags & MDP_PP_OPS_READ)
  2823. return -ENOTSUPP;
  2824. if ((config->flags & MDSS_PP_SPLIT_MASK) == MDSS_PP_SPLIT_MASK) {
  2825. pr_warn("Can't set both split bits\n");
  2826. return -EINVAL;
  2827. }
  2828. mutex_lock(&mdss_pp_mutex);
  2829. disp_num = config->block - MDP_LOGICAL_BLOCK_DISP_0;
  2830. mdss_pp_res->dither_disp_cfg[disp_num] = *config;
  2831. mdss_pp_res->pp_disp_flags[disp_num] |= PP_FLAGS_DIRTY_DITHER;
  2832. mutex_unlock(&mdss_pp_mutex);
  2833. return 0;
  2834. }
  2835. static int pp_gm_has_invalid_lut_size(struct mdp_gamut_cfg_data *config)
  2836. {
  2837. if (config->tbl_size[0] != GAMUT_T0_SIZE)
  2838. return -EINVAL;
  2839. if (config->tbl_size[1] != GAMUT_T1_SIZE)
  2840. return -EINVAL;
  2841. if (config->tbl_size[2] != GAMUT_T2_SIZE)
  2842. return -EINVAL;
  2843. if (config->tbl_size[3] != GAMUT_T3_SIZE)
  2844. return -EINVAL;
  2845. if (config->tbl_size[4] != GAMUT_T4_SIZE)
  2846. return -EINVAL;
  2847. if (config->tbl_size[5] != GAMUT_T5_SIZE)
  2848. return -EINVAL;
  2849. if (config->tbl_size[6] != GAMUT_T6_SIZE)
  2850. return -EINVAL;
  2851. if (config->tbl_size[7] != GAMUT_T7_SIZE)
  2852. return -EINVAL;
  2853. return 0;
  2854. }
  2855. int mdss_mdp_gamut_config(struct mdp_gamut_cfg_data *config,
  2856. u32 *copyback)
  2857. {
  2858. int i, j, ret = 0;
  2859. u32 disp_num, dspp_num = 0;
  2860. uint16_t *tbl_off;
  2861. struct mdp_gamut_cfg_data local_cfg;
  2862. uint16_t *r_tbl[MDP_GAMUT_TABLE_NUM];
  2863. uint16_t *g_tbl[MDP_GAMUT_TABLE_NUM];
  2864. uint16_t *b_tbl[MDP_GAMUT_TABLE_NUM];
  2865. char __iomem *addr;
  2866. if ((config->block < MDP_LOGICAL_BLOCK_DISP_0) ||
  2867. (config->block >= MDP_BLOCK_MAX))
  2868. return -EINVAL;
  2869. if (pp_gm_has_invalid_lut_size(config))
  2870. return -EINVAL;
  2871. if ((config->flags & MDSS_PP_SPLIT_MASK) == MDSS_PP_SPLIT_MASK) {
  2872. pr_warn("Can't set both split bits\n");
  2873. return -EINVAL;
  2874. }
  2875. mutex_lock(&mdss_pp_mutex);
  2876. disp_num = config->block - MDP_LOGICAL_BLOCK_DISP_0;
  2877. if (config->flags & MDP_PP_OPS_READ) {
  2878. ret = pp_get_dspp_num(disp_num, &dspp_num);
  2879. if (ret) {
  2880. pr_err("%s, no dspp connects to disp %d",
  2881. __func__, disp_num);
  2882. goto gamut_config_exit;
  2883. }
  2884. mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
  2885. addr = mdss_mdp_get_dspp_addr_off(dspp_num) +
  2886. MDSS_MDP_REG_DSPP_GAMUT_BASE;
  2887. for (i = 0; i < MDP_GAMUT_TABLE_NUM; i++) {
  2888. r_tbl[i] = kzalloc(
  2889. sizeof(uint16_t) * config->tbl_size[i],
  2890. GFP_KERNEL);
  2891. if (!r_tbl[i]) {
  2892. pr_err("%s: alloc failed\n", __func__);
  2893. goto gamut_config_exit;
  2894. }
  2895. for (j = 0; j < config->tbl_size[i]; j++)
  2896. r_tbl[i][j] =
  2897. (u16)readl_relaxed(addr);
  2898. addr += 4;
  2899. ret = copy_to_user(config->r_tbl[i], r_tbl[i],
  2900. sizeof(uint16_t) * config->tbl_size[i]);
  2901. kfree(r_tbl[i]);
  2902. if (ret) {
  2903. pr_err("%s: copy tbl to usr failed\n",
  2904. __func__);
  2905. goto gamut_config_exit;
  2906. }
  2907. }
  2908. for (i = 0; i < MDP_GAMUT_TABLE_NUM; i++) {
  2909. g_tbl[i] = kzalloc(
  2910. sizeof(uint16_t) * config->tbl_size[i],
  2911. GFP_KERNEL);
  2912. if (!g_tbl[i]) {
  2913. pr_err("%s: alloc failed\n", __func__);
  2914. goto gamut_config_exit;
  2915. }
  2916. for (j = 0; j < config->tbl_size[i]; j++)
  2917. g_tbl[i][j] =
  2918. (u16)readl_relaxed(addr);
  2919. addr += 4;
  2920. ret = copy_to_user(config->g_tbl[i], g_tbl[i],
  2921. sizeof(uint16_t) * config->tbl_size[i]);
  2922. kfree(g_tbl[i]);
  2923. if (ret) {
  2924. pr_err("%s: copy tbl to usr failed\n",
  2925. __func__);
  2926. goto gamut_config_exit;
  2927. }
  2928. }
  2929. for (i = 0; i < MDP_GAMUT_TABLE_NUM; i++) {
  2930. b_tbl[i] = kzalloc(
  2931. sizeof(uint16_t) * config->tbl_size[i],
  2932. GFP_KERNEL);
  2933. if (!b_tbl[i]) {
  2934. pr_err("%s: alloc failed\n", __func__);
  2935. goto gamut_config_exit;
  2936. }
  2937. for (j = 0; j < config->tbl_size[i]; j++)
  2938. b_tbl[i][j] =
  2939. (u16)readl_relaxed(addr);
  2940. addr += 4;
  2941. ret = copy_to_user(config->b_tbl[i], b_tbl[i],
  2942. sizeof(uint16_t) * config->tbl_size[i]);
  2943. kfree(b_tbl[i]);
  2944. if (ret) {
  2945. pr_err("%s: copy tbl to usr failed\n",
  2946. __func__);
  2947. goto gamut_config_exit;
  2948. }
  2949. }
  2950. *copyback = 1;
  2951. mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
  2952. } else {
  2953. local_cfg = *config;
  2954. tbl_off = mdss_pp_res->gamut_tbl[disp_num];
  2955. for (i = 0; i < MDP_GAMUT_TABLE_NUM; i++) {
  2956. local_cfg.r_tbl[i] = tbl_off;
  2957. if (copy_from_user(tbl_off, config->r_tbl[i],
  2958. config->tbl_size[i] * sizeof(uint16_t))) {
  2959. ret = -EFAULT;
  2960. goto gamut_config_exit;
  2961. }
  2962. tbl_off += local_cfg.tbl_size[i];
  2963. }
  2964. for (i = 0; i < MDP_GAMUT_TABLE_NUM; i++) {
  2965. local_cfg.g_tbl[i] = tbl_off;
  2966. if (copy_from_user(tbl_off, config->g_tbl[i],
  2967. config->tbl_size[i] * sizeof(uint16_t))) {
  2968. ret = -EFAULT;
  2969. goto gamut_config_exit;
  2970. }
  2971. tbl_off += local_cfg.tbl_size[i];
  2972. }
  2973. for (i = 0; i < MDP_GAMUT_TABLE_NUM; i++) {
  2974. local_cfg.b_tbl[i] = tbl_off;
  2975. if (copy_from_user(tbl_off, config->b_tbl[i],
  2976. config->tbl_size[i] * sizeof(uint16_t))) {
  2977. ret = -EFAULT;
  2978. goto gamut_config_exit;
  2979. }
  2980. tbl_off += local_cfg.tbl_size[i];
  2981. }
  2982. mdss_pp_res->gamut_disp_cfg[disp_num] = local_cfg;
  2983. mdss_pp_res->pp_disp_flags[disp_num] |= PP_FLAGS_DIRTY_GAMUT;
  2984. }
  2985. gamut_config_exit:
  2986. mutex_unlock(&mdss_pp_mutex);
  2987. return ret;
  2988. }
  2989. static u32 pp_hist_read(char __iomem *v_addr,
  2990. struct pp_hist_col_info *hist_info)
  2991. {
  2992. int i, i_start;
  2993. u32 sum = 0;
  2994. u32 data;
  2995. data = readl_relaxed(v_addr);
  2996. i_start = data >> 24;
  2997. hist_info->data[i_start] = data & 0xFFFFFF;
  2998. sum += hist_info->data[i_start];
  2999. for (i = i_start + 1; i < HIST_V_SIZE; i++) {
  3000. hist_info->data[i] = readl_relaxed(v_addr) & 0xFFFFFF;
  3001. sum += hist_info->data[i];
  3002. }
  3003. for (i = 0; i < i_start; i++) {
  3004. hist_info->data[i] = readl_relaxed(v_addr) & 0xFFFFFF;
  3005. sum += hist_info->data[i];
  3006. }
  3007. hist_info->hist_cnt_read++;
  3008. return sum;
  3009. }
  3010. /* Assumes that relevant clocks are enabled */
  3011. static int pp_histogram_enable(struct pp_hist_col_info *hist_info,
  3012. struct mdp_histogram_start_req *req,
  3013. u32 shift_bit, char __iomem *ctl_base)
  3014. {
  3015. unsigned long flag;
  3016. int ret = 0;
  3017. struct mdss_data_type *mdata = mdss_mdp_get_mdata();
  3018. mutex_lock(&hist_info->hist_mutex);
  3019. /* check if it is idle */
  3020. spin_lock_irqsave(&hist_info->hist_lock, flag);
  3021. if (hist_info->col_en) {
  3022. spin_unlock_irqrestore(&hist_info->hist_lock, flag);
  3023. pr_info("%s Hist collection has already been enabled %d",
  3024. __func__, (u32) ctl_base);
  3025. ret = -EINVAL;
  3026. goto exit;
  3027. }
  3028. hist_info->read_request = 0;
  3029. hist_info->col_state = HIST_RESET;
  3030. hist_info->col_en = true;
  3031. spin_unlock_irqrestore(&hist_info->hist_lock, flag);
  3032. hist_info->frame_cnt = req->frame_cnt;
  3033. INIT_COMPLETION(hist_info->comp);
  3034. INIT_COMPLETION(hist_info->first_kick);
  3035. hist_info->hist_cnt_read = 0;
  3036. hist_info->hist_cnt_sent = 0;
  3037. hist_info->hist_cnt_time = 0;
  3038. mdss_mdp_hist_intr_req(&mdata->hist_intr, 3 << shift_bit, true);
  3039. writel_relaxed(req->frame_cnt, ctl_base + 8);
  3040. /* Kick out reset start */
  3041. writel_relaxed(1, ctl_base + 4);
  3042. exit:
  3043. mutex_unlock(&hist_info->hist_mutex);
  3044. return ret;
  3045. }
  3046. #define MDSS_MAX_HIST_BIN_SIZE 16777215
  3047. int mdss_mdp_hist_start(struct mdp_histogram_start_req *req)
  3048. {
  3049. u32 done_shift_bit;
  3050. char __iomem *ctl_base;
  3051. struct pp_hist_col_info *hist_info;
  3052. int i, ret = 0;
  3053. u32 disp_num, dspp_num = 0;
  3054. u32 mixer_cnt, mixer_id[MDSS_MDP_INTF_MAX_LAYERMIXER];
  3055. u32 frame_size;
  3056. struct mdss_mdp_pipe *pipe;
  3057. struct mdss_data_type *mdata = mdss_mdp_get_mdata();
  3058. if (!mdss_is_ready())
  3059. return -EPROBE_DEFER;
  3060. if ((PP_BLOCK(req->block) < MDP_LOGICAL_BLOCK_DISP_0) ||
  3061. (PP_BLOCK(req->block) >= MDP_BLOCK_MAX))
  3062. return -EINVAL;
  3063. disp_num = PP_BLOCK(req->block) - MDP_LOGICAL_BLOCK_DISP_0;
  3064. mixer_cnt = mdss_mdp_get_ctl_mixers(disp_num, mixer_id);
  3065. if (!mixer_cnt) {
  3066. pr_err("%s, no dspp connects to disp %d",
  3067. __func__, disp_num);
  3068. ret = -EPERM;
  3069. goto hist_exit;
  3070. }
  3071. if (mixer_cnt >= MDSS_MDP_MAX_DSPP) {
  3072. pr_err("%s, Too many dspp connects to disp %d",
  3073. __func__, mixer_cnt);
  3074. ret = -EPERM;
  3075. goto hist_exit;
  3076. }
  3077. frame_size = (mdata->ctl_off[mixer_id[0]].width *
  3078. mdata->ctl_off[mixer_id[0]].height);
  3079. if (!frame_size ||
  3080. ((MDSS_MAX_HIST_BIN_SIZE / frame_size) < req->frame_cnt)) {
  3081. pr_err("%s, too many frames for given display size, %d",
  3082. __func__, req->frame_cnt);
  3083. ret = -EINVAL;
  3084. goto hist_exit;
  3085. }
  3086. mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
  3087. if (PP_LOCAT(req->block) == MDSS_PP_SSPP_CFG) {
  3088. i = MDSS_PP_ARG_MASK & req->block;
  3089. if (!i) {
  3090. ret = -EINVAL;
  3091. pr_warn("Must pass pipe arguments, %d", i);
  3092. goto hist_stop_clk;
  3093. }
  3094. for (i = 0; i < MDSS_PP_ARG_NUM; i++) {
  3095. if (!PP_ARG(i, req->block))
  3096. continue;
  3097. pipe = mdss_mdp_pipe_get(mdata, BIT(i));
  3098. if (IS_ERR_OR_NULL(pipe))
  3099. continue;
  3100. if (!pipe || pipe->num > MDSS_MDP_SSPP_VIG2) {
  3101. ret = -EINVAL;
  3102. pr_warn("Invalid Hist pipe (%d)", i);
  3103. goto hist_stop_clk;
  3104. }
  3105. done_shift_bit = (pipe->num * 4);
  3106. hist_info = &pipe->pp_res.hist;
  3107. ctl_base = pipe->base +
  3108. MDSS_MDP_REG_VIG_HIST_CTL_BASE;
  3109. ret = pp_histogram_enable(hist_info, req,
  3110. done_shift_bit, ctl_base);
  3111. mdss_mdp_pipe_unmap(pipe);
  3112. }
  3113. } else if (PP_LOCAT(req->block) == MDSS_PP_DSPP_CFG) {
  3114. for (i = 0; i < mixer_cnt; i++) {
  3115. dspp_num = mixer_id[i];
  3116. done_shift_bit = (dspp_num * 4) + 12;
  3117. hist_info = &mdss_pp_res->dspp_hist[dspp_num];
  3118. ctl_base = mdss_mdp_get_dspp_addr_off(dspp_num) +
  3119. MDSS_MDP_REG_DSPP_HIST_CTL_BASE;
  3120. ret = pp_histogram_enable(hist_info, req,
  3121. done_shift_bit, ctl_base);
  3122. mdss_pp_res->pp_disp_flags[disp_num] |=
  3123. PP_FLAGS_DIRTY_HIST_COL;
  3124. }
  3125. }
  3126. hist_stop_clk:
  3127. mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
  3128. hist_exit:
  3129. return ret;
  3130. }
  3131. static int pp_histogram_disable(struct pp_hist_col_info *hist_info,
  3132. u32 done_bit, char __iomem *ctl_base)
  3133. {
  3134. int ret = 0;
  3135. unsigned long flag;
  3136. struct mdss_data_type *mdata = mdss_mdp_get_mdata();
  3137. mutex_lock(&hist_info->hist_mutex);
  3138. spin_lock_irqsave(&hist_info->hist_lock, flag);
  3139. if (hist_info->col_en == false) {
  3140. spin_unlock_irqrestore(&hist_info->hist_lock, flag);
  3141. pr_debug("Histogram already disabled (%d)", (u32) ctl_base);
  3142. ret = -EINVAL;
  3143. goto exit;
  3144. }
  3145. hist_info->col_en = false;
  3146. hist_info->col_state = HIST_UNKNOWN;
  3147. spin_unlock_irqrestore(&hist_info->hist_lock, flag);
  3148. mdss_mdp_hist_intr_req(&mdata->hist_intr, done_bit, false);
  3149. complete_all(&hist_info->comp);
  3150. complete_all(&hist_info->first_kick);
  3151. writel_relaxed(BIT(1), ctl_base);/* cancel */
  3152. ret = 0;
  3153. exit:
  3154. mutex_unlock(&hist_info->hist_mutex);
  3155. return ret;
  3156. }
  3157. int mdss_mdp_hist_stop(u32 block)
  3158. {
  3159. int i, ret = 0;
  3160. char __iomem *ctl_base;
  3161. u32 dspp_num, disp_num, done_bit;
  3162. struct pp_hist_col_info *hist_info;
  3163. u32 mixer_cnt, mixer_id[MDSS_MDP_INTF_MAX_LAYERMIXER];
  3164. struct mdss_mdp_pipe *pipe;
  3165. struct mdss_data_type *mdata = mdss_mdp_get_mdata();
  3166. if ((PP_BLOCK(block) < MDP_LOGICAL_BLOCK_DISP_0) ||
  3167. (PP_BLOCK(block) >= MDP_BLOCK_MAX))
  3168. return -EINVAL;
  3169. disp_num = PP_BLOCK(block) - MDP_LOGICAL_BLOCK_DISP_0;
  3170. mixer_cnt = mdss_mdp_get_ctl_mixers(disp_num, mixer_id);
  3171. if (!mixer_cnt) {
  3172. pr_err("%s, no dspp connects to disp %d",
  3173. __func__, disp_num);
  3174. ret = -EPERM;
  3175. goto hist_stop_exit;
  3176. }
  3177. if (mixer_cnt >= MDSS_MDP_MAX_DSPP) {
  3178. pr_err("%s, Too many dspp connects to disp %d",
  3179. __func__, mixer_cnt);
  3180. ret = -EPERM;
  3181. goto hist_stop_exit;
  3182. }
  3183. mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
  3184. if (PP_LOCAT(block) == MDSS_PP_SSPP_CFG) {
  3185. i = MDSS_PP_ARG_MASK & block;
  3186. if (!i) {
  3187. pr_warn("Must pass pipe arguments, %d", i);
  3188. goto hist_stop_clk;
  3189. }
  3190. for (i = 0; i < MDSS_PP_ARG_NUM; i++) {
  3191. if (!PP_ARG(i, block))
  3192. continue;
  3193. pipe = mdss_mdp_pipe_get(mdata, BIT(i));
  3194. if (IS_ERR_OR_NULL(pipe) ||
  3195. pipe->num > MDSS_MDP_SSPP_VIG2) {
  3196. pr_warn("Invalid Hist pipe (%d)", i);
  3197. continue;
  3198. }
  3199. done_bit = 3 << (pipe->num * 4);
  3200. hist_info = &pipe->pp_res.hist;
  3201. ctl_base = pipe->base +
  3202. MDSS_MDP_REG_VIG_HIST_CTL_BASE;
  3203. ret = pp_histogram_disable(hist_info, done_bit,
  3204. ctl_base);
  3205. mdss_mdp_pipe_unmap(pipe);
  3206. if (ret)
  3207. goto hist_stop_clk;
  3208. }
  3209. } else if (PP_LOCAT(block) == MDSS_PP_DSPP_CFG) {
  3210. for (i = 0; i < mixer_cnt; i++) {
  3211. dspp_num = mixer_id[i];
  3212. done_bit = 3 << ((dspp_num * 4) + 12);
  3213. hist_info = &mdss_pp_res->dspp_hist[dspp_num];
  3214. ctl_base = mdss_mdp_get_dspp_addr_off(dspp_num) +
  3215. MDSS_MDP_REG_DSPP_HIST_CTL_BASE;
  3216. ret = pp_histogram_disable(hist_info, done_bit,
  3217. ctl_base);
  3218. if (ret)
  3219. goto hist_stop_clk;
  3220. mdss_pp_res->pp_disp_flags[disp_num] |=
  3221. PP_FLAGS_DIRTY_HIST_COL;
  3222. }
  3223. }
  3224. hist_stop_clk:
  3225. mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
  3226. hist_stop_exit:
  3227. return ret;
  3228. }
  3229. /**
  3230. * mdss_mdp_hist_intr_req() - Request changes the histogram interupts
  3231. * @intr: structure containting state of interrupt register
  3232. * @bits: the bits on interrupt register that should be changed
  3233. * @en: true if bits should be set, false if bits should be cleared
  3234. *
  3235. * Adds or removes the bits from the interrupt request.
  3236. *
  3237. * Does not store reference count for each bit. I.e. a bit with multiple
  3238. * enable requests can be disabled with a single disable request.
  3239. *
  3240. * Return: 0 if uneventful, errno on invalid input
  3241. */
  3242. int mdss_mdp_hist_intr_req(struct mdss_intr *intr, u32 bits, bool en)
  3243. {
  3244. unsigned long flag;
  3245. int ret = 0;
  3246. if (!intr) {
  3247. pr_err("NULL addr passed, %pK", intr);
  3248. return -EINVAL;
  3249. }
  3250. spin_lock_irqsave(&intr->lock, flag);
  3251. if (en)
  3252. intr->req |= bits;
  3253. else
  3254. intr->req &= ~bits;
  3255. spin_unlock_irqrestore(&intr->lock, flag);
  3256. mdss_mdp_hist_intr_setup(intr, MDSS_IRQ_REQ);
  3257. return ret;
  3258. }
#define MDSS_INTR_STATE_ACTIVE 1
#define MDSS_INTR_STATE_NULL 0
#define MDSS_INTR_STATE_SUSPEND -1
/**
 * mdss_mdp_hist_intr_setup() - Manage intr and clk depending on requests.
 * @intr: structure containing state of intr reg
 * @type: MDSS_IRQ_SUSPEND if suspend is needed,
 *        MDSS_IRQ_RESUME if resume is needed,
 *        MDSS_IRQ_REQ if neither (i.e. requesting an interrupt)
 *
 * This function acts as a gatekeeper for the interrupt, making sure that the
 * MDP clocks are enabled while the interrupts are enabled to prevent
 * unclocked accesses.
 *
 * To reduce code repetition, 4 state transitions have been encoded here. Each
 * transition updates the interrupt's state structure (mdss_intr) to reflect
 * which bits have been requested (intr->req), are currently enabled
 * (intr->curr), as well as defines which interrupt bits need to be enabled or
 * disabled ('en' and 'dis' respectively). The 4th state is not explicitly
 * coded in the if/else chain, but is for MDSS_IRQ_REQ's when the interrupt
 * is in suspend, in which case, the only change required (intr->req being
 * updated) has already occurred in the calling function.
 *
 * To control the clock, which can't be requested while holding the spinlock,
 * the initial state is compared with the exit state to detect when the
 * interrupt needs a clock.
 *
 * The clock requests surrounding the majority of this function serve to
 * enable the register writes to change the interrupt register, as well as to
 * prevent a race condition that could keep the clocks on (due to mdp_clk_cnt
 * never being decremented below 0) when a enable/disable occurs but the
 * disable requests the clocks disabled before the enable is able to request
 * the clocks enabled.
 *
 * Return: 0 if uneventful, errno on repeated action or invalid input
 */
int mdss_mdp_hist_intr_setup(struct mdss_intr *intr, int type)
{
	unsigned long flag;
	int ret = 0, req_clk = 0;
	u32 en = 0, dis = 0;
	u32 diff, init_curr;
	int init_state;
	if (!intr) {
		WARN(1, "NULL intr pointer");
		return -EINVAL;
	}
	/*
	 * NOTE(review): this early return deliberately short-circuits the
	 * function ("not used" per the original author), so everything
	 * below it is dead code and the HW interrupt mask is never
	 * reconciled here. Confirm whether this stub is still intended
	 * before relying on the logic below.
	 */
	return ret; /* not used. */
	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
	spin_lock_irqsave(&intr->lock, flag);
	/* Snapshot entry state to decide clock voting at exit. */
	init_state = intr->state;
	init_curr = intr->curr;
	if (type == MDSS_IRQ_RESUME) {
		/* resume intrs */
		if (intr->state == MDSS_INTR_STATE_ACTIVE) {
			ret = -EPERM;
			goto exit;
		}
		en = intr->req;
		dis = 0;
		intr->curr = intr->req;
		intr->state = intr->curr ?
				MDSS_INTR_STATE_ACTIVE : MDSS_INTR_STATE_NULL;
	} else if (type == MDSS_IRQ_SUSPEND) {
		/* suspend intrs */
		if (intr->state == MDSS_INTR_STATE_SUSPEND) {
			ret = -EPERM;
			goto exit;
		}
		en = 0;
		dis = intr->curr;
		intr->curr = 0;
		intr->state = MDSS_INTR_STATE_SUSPEND;
	} else if (intr->state != MDSS_IRQ_SUSPEND) {
		/* Not resuming/suspending or in suspend state */
		/* Enable bits newly requested, disable bits no longer requested. */
		diff = intr->req ^ intr->curr;
		en = diff & ~intr->curr;
		dis = diff & ~intr->req;
		intr->curr = intr->req;
		intr->state = intr->curr ?
				MDSS_INTR_STATE_ACTIVE : MDSS_INTR_STATE_NULL;
	}
	if (en)
		mdss_mdp_hist_irq_enable(en);
	if (dis)
		mdss_mdp_hist_irq_disable(dis);
	/* Vote the clock on when transitioning into ACTIVE, off when leaving. */
	if ((init_state != MDSS_INTR_STATE_ACTIVE) &&
				(intr->state == MDSS_INTR_STATE_ACTIVE))
		req_clk = 1;
	else if ((init_state == MDSS_INTR_STATE_ACTIVE) &&
				(intr->state != MDSS_INTR_STATE_ACTIVE))
		req_clk = -1;
exit:
	spin_unlock_irqrestore(&intr->lock, flag);
	if (req_clk < 0)
		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
	else if (req_clk > 0)
		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
	/* Balances the unconditional POWER_ON taken at function entry. */
	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
	return ret;
}
/*
 * pp_hist_collect() - wait for one histogram collection on a block and
 * read the bins into hist_info->data.
 * @hist_info:  per-block collection state.
 * @ctl_base:   histogram control register base (data regs at +0x1C).
 * @expect_sum: expected bin total (0 disables the sanity check).
 *
 * Locking is intricate: hist_mutex is held across the function except
 * while sleeping on the completions, and hist_lock is taken/released on
 * several distinct paths — the unlock points below are deliberate.
 */
static int pp_hist_collect(struct mdp_histogram_data *hist,
				struct pp_hist_col_info *hist_info,
				char __iomem *ctl_base, u32 expect_sum)
{
	/* wait_ret is only read on paths where kick_ret > 0 set it. */
	int kick_ret, wait_ret, ret = 0;
	u32 timeout, sum;
	char __iomem *v_base;
	unsigned long flag;
	struct mdss_pipe_pp_res *res;
	struct mdss_mdp_pipe *pipe;
	mutex_lock(&hist_info->hist_mutex);
	spin_lock_irqsave(&hist_info->hist_lock, flag);
	/* Bail if collection is disabled or HW state is unknown. */
	if ((hist_info->col_en == 0) ||
			(hist_info->col_state == HIST_UNKNOWN)) {
		spin_unlock_irqrestore(&hist_info->hist_lock, flag);
		ret = -EINVAL;
		goto hist_collect_exit;
	}
	/* wait for hist done if cache has no data */
	if (hist_info->col_state != HIST_READY) {
		spin_unlock_irqrestore(&hist_info->hist_lock, flag);
		timeout = HIST_WAIT_TIMEOUT(hist_info->frame_cnt);
		/* Drop the mutex while sleeping so the IRQ path can run. */
		mutex_unlock(&hist_info->hist_mutex);
		if (PP_LOCAT(hist->block) == MDSS_PP_SSPP_CFG) {
			/*
			 * For pipe histograms, bump params_changed so the
			 * next kickoff reprograms the pipe and a first
			 * kick actually happens.
			 */
			res = container_of(hist_info, struct mdss_pipe_pp_res,
					hist);
			pipe = container_of(res, struct mdss_mdp_pipe, pp_res);
			pipe->params_changed++;
		}
		/* First wait (a fraction of the timeout) for the kickoff... */
		kick_ret = wait_for_completion_killable_timeout(
				&(hist_info->first_kick), timeout /
				HIST_KICKOFF_WAIT_FRACTION);
		/* ...then, if kicked, wait for the collection itself. */
		if (kick_ret != 0)
			wait_ret = wait_for_completion_killable_timeout(
					&(hist_info->comp), timeout);
		mutex_lock(&hist_info->hist_mutex);
		spin_lock_irqsave(&hist_info->hist_lock, flag);
		if (kick_ret == 0) {
			/* Kickoff timed out: no data to read. */
			ret = -ENODATA;
			pr_debug("histogram kickoff not done yet");
			spin_unlock_irqrestore(&hist_info->hist_lock, flag);
			goto hist_collect_exit;
		} else if (kick_ret < 0) {
			/* Interrupted by a fatal signal. */
			ret = -EINTR;
			pr_debug("histogram first kickoff interrupted");
			spin_unlock_irqrestore(&hist_info->hist_lock, flag);
			goto hist_collect_exit;
		} else if (wait_ret == 0) {
			ret = -ETIMEDOUT;
			pr_debug("bin collection timedout, state %d",
					hist_info->col_state);
			/*
			 * When the histogram has timed out (usually
			 * underrun) change the SW state back to idle
			 * since histogram hardware will have done the
			 * same. Histogram data also needs to be
			 * cleared in this case, which is done by the
			 * histogram being read (triggered by READY
			 * state, which also moves the histogram SW back
			 * to IDLE).
			 */
			hist_info->hist_cnt_time++;
			hist_info->col_state = HIST_READY;
		} else if (wait_ret < 0) {
			spin_unlock_irqrestore(&hist_info->hist_lock, flag);
			ret = -EINTR;
			pr_debug("%s: bin collection interrupted",
					__func__);
			goto hist_collect_exit;
		}
		/* Force READY so a stale state is drained on the read below. */
		if (hist_info->col_state != HIST_READY &&
				hist_info->col_state != HIST_UNKNOWN) {
			ret = -ENODATA;
			hist_info->col_state = HIST_READY;
			pr_debug("%s: state is not ready: %d",
					__func__, hist_info->col_state);
		}
	}
	if (hist_info->col_state == HIST_READY) {
		/* Consume the data: mark IDLE, then drain the HW bins. */
		hist_info->col_state = HIST_IDLE;
		spin_unlock_irqrestore(&hist_info->hist_lock, flag);
		v_base = ctl_base + 0x1C;
		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
		sum = pp_hist_read(v_base, hist_info);
		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
		/* Sanity-check the total against the expected pixel count. */
		if (expect_sum && sum != expect_sum) {
			pr_debug("hist error: bin sum incorrect! (%d/%d)\n",
				sum, expect_sum);
			ret = -ENODATA;
		}
	} else {
		spin_unlock_irqrestore(&hist_info->hist_lock, flag);
	}
hist_collect_exit:
	mutex_unlock(&hist_info->hist_mutex);
	return ret;
}
/*
 * mdss_mdp_hist_collect() - collect histogram data for a block and copy
 * the bins to the user buffer in hist->c0.
 * @hist: encodes the location (DSPP or SSPP pipes), the logical display,
 *        the user buffer and its expected bin count (hist->bin_cnt).
 *
 * For multiple DSPPs the per-mixer histograms are summed bin-by-bin;
 * for multiple pipes they are concatenated.
 * Returns 0 on success, a negative errno, or — via the final
 * copy_to_user — a positive "bytes not copied" count (see note below).
 */
int mdss_mdp_hist_collect(struct mdp_histogram_data *hist)
{
	int i, j, off, ret = 0, temp_ret = 0;
	struct pp_hist_col_info *hist_info;
	struct pp_hist_col_info *hists[MDSS_MDP_INTF_MAX_LAYERMIXER];
	u32 dspp_num, disp_num;
	char __iomem *ctl_base;
	u32 hist_cnt, mixer_id[MDSS_MDP_INTF_MAX_LAYERMIXER];
	u32 *hist_concat = NULL;
	u32 *hist_data_addr;
	u32 pipe_cnt = 0;
	u32 pipe_num = MDSS_MDP_SSPP_VIG0;
	u32 exp_sum = 0;
	struct mdss_mdp_pipe *pipe;
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
	unsigned long flag;
	if ((PP_BLOCK(hist->block) < MDP_LOGICAL_BLOCK_DISP_0) ||
		(PP_BLOCK(hist->block) >= MDP_BLOCK_MAX))
		return -EINVAL;
	disp_num = PP_BLOCK(hist->block) - MDP_LOGICAL_BLOCK_DISP_0;
	hist_cnt = mdss_mdp_get_ctl_mixers(disp_num, mixer_id);
	if (!hist_cnt) {
		pr_err("%s, no dspp connects to disp %d",
			__func__, disp_num);
		ret = -EPERM;
		goto hist_collect_exit;
	}
	if (hist_cnt >= MDSS_MDP_MAX_DSPP) {
		pr_err("%s, Too many dspp connects to disp %d",
			__func__, hist_cnt);
		ret = -EPERM;
		goto hist_collect_exit;
	}
	if (PP_LOCAT(hist->block) == MDSS_PP_DSPP_CFG) {
		/* Resolve the per-mixer histogram state pointers. */
		for (i = 0; i < hist_cnt; i++) {
			dspp_num = mixer_id[i];
			hists[i] = &mdss_pp_res->dspp_hist[dspp_num];
		}
		for (i = 0; i < hist_cnt; i++) {
			spin_lock_irqsave(&hists[i]->hist_lock, flag);
			/* mark that collect is ready to handle completions */
			hists[i]->read_request = 1;
			spin_unlock_irqrestore(&hists[i]->hist_lock, flag);
		}
		/* Collect each mixer, remembering the first error seen. */
		for (i = 0; i < hist_cnt; i++) {
			dspp_num = mixer_id[i];
			ctl_base = mdss_mdp_get_dspp_addr_off(dspp_num) +
				MDSS_MDP_REG_DSPP_HIST_CTL_BASE;
			exp_sum = (mdata->mixer_intf[dspp_num].width *
					mdata->mixer_intf[dspp_num].height);
			if (ret)
				temp_ret = ret;
			ret = pp_hist_collect(hist, hists[i], ctl_base,
				exp_sum);
			if (ret)
				pr_debug("hist error: dspp[%d] collect %d\n",
					dspp_num, ret);
		}
		for (i = 0; i < hist_cnt; i++) {
			/* reset read requests and re-intialize completions */
			spin_lock_irqsave(&hists[i]->hist_lock, flag);
			hists[i]->read_request = 0;
			INIT_COMPLETION(hists[i]->comp);
			spin_unlock_irqrestore(&hists[i]->hist_lock, flag);
		}
		if (ret || temp_ret) {
			ret = ret ? ret : temp_ret;
			goto hist_collect_exit;
		}
		if (hist->bin_cnt != HIST_V_SIZE) {
			pr_err("User not expecting size %d output",
							HIST_V_SIZE);
			ret = -EINVAL;
			goto hist_collect_exit;
		}
		if (hist_cnt > 1) {
			/* Multiple mixers: sum the bins element-wise. */
			hist_concat = kmalloc(HIST_V_SIZE * sizeof(u32),
						GFP_KERNEL);
			if (!hist_concat) {
				ret = -ENOMEM;
				goto hist_collect_exit;
			}
			memset(hist_concat, 0, HIST_V_SIZE * sizeof(u32));
			for (i = 0; i < hist_cnt; i++) {
				mutex_lock(&hists[i]->hist_mutex);
				for (j = 0; j < HIST_V_SIZE; j++)
					hist_concat[j] += hists[i]->data[j];
				mutex_unlock(&hists[i]->hist_mutex);
			}
			hist_data_addr = hist_concat;
		} else {
			hist_data_addr = hists[0]->data;
		}
		for (i = 0; i < hist_cnt; i++)
			hists[i]->hist_cnt_sent++;
	} else if (PP_LOCAT(hist->block) == MDSS_PP_SSPP_CFG) {
		hist_cnt = MDSS_PP_ARG_MASK & hist->block;
		if (!hist_cnt) {
			pr_warn("Must pass pipe arguments, %d", hist_cnt);
			goto hist_collect_exit;
		}
		/* Find the first pipe requested */
		for (i = 0; i < MDSS_PP_ARG_NUM; i++) {
			if (PP_ARG(i, hist_cnt)) {
				pipe_num = i;
				break;
			}
		}
		pipe = mdss_mdp_pipe_get(mdata, BIT(pipe_num));
		if (IS_ERR_OR_NULL(pipe)) {
			pr_warn("Invalid starting hist pipe, %d", pipe_num);
			ret = -ENODEV;
			goto hist_collect_exit;
		}
		hist_info = &pipe->pp_res.hist;
		mdss_mdp_pipe_unmap(pipe);
		/*
		 * NOTE(review): pipe_cnt is incremented in this loop AND
		 * in the two following loops, so it ends up a multiple of
		 * the actual pipe count before the bin_cnt check and the
		 * concat allocation below — verify against upstream
		 * whether only one loop should count.
		 */
		for (i = pipe_num; i < MDSS_PP_ARG_NUM; i++) {
			if (!PP_ARG(i, hist->block))
				continue;
			pipe_cnt++;
			pipe = mdss_mdp_pipe_get(mdata, BIT(i));
			if (IS_ERR_OR_NULL(pipe) ||
					pipe->num > MDSS_MDP_SSPP_VIG2) {
				pr_warn("Invalid Hist pipe (%d)", i);
				continue;
			}
			hist_info = &pipe->pp_res.hist;
			spin_lock_irqsave(&hist_info->hist_lock, flag);
			hist_info->read_request = 1;
			spin_unlock_irqrestore(&hist_info->hist_lock, flag);
		}
		/* Collect each requested pipe, remembering the first error. */
		for (i = pipe_num; i < MDSS_PP_ARG_NUM; i++) {
			if (!PP_ARG(i, hist->block))
				continue;
			pipe_cnt++;
			pipe = mdss_mdp_pipe_get(mdata, BIT(i));
			if (IS_ERR_OR_NULL(pipe) ||
					pipe->num > MDSS_MDP_SSPP_VIG2) {
				pr_warn("Invalid Hist pipe (%d)", i);
				continue;
			}
			hist_info = &pipe->pp_res.hist;
			ctl_base = pipe->base +
				MDSS_MDP_REG_VIG_HIST_CTL_BASE;
			if (ret)
				temp_ret = ret;
			ret = pp_hist_collect(hist, hist_info, ctl_base,
				exp_sum);
			if (ret)
				pr_debug("hist error: pipe[%d] collect: %d\n",
					pipe->num, ret);
			mdss_mdp_pipe_unmap(pipe);
		}
		/* Reset read requests and completions for the next round. */
		for (i = pipe_num; i < MDSS_PP_ARG_NUM; i++) {
			if (!PP_ARG(i, hist->block))
				continue;
			pipe_cnt++;
			pipe = mdss_mdp_pipe_get(mdata, BIT(i));
			if (IS_ERR_OR_NULL(pipe) ||
					pipe->num > MDSS_MDP_SSPP_VIG2) {
				pr_warn("Invalid Hist pipe (%d)", i);
				continue;
			}
			hist_info = &pipe->pp_res.hist;
			spin_lock_irqsave(&hist_info->hist_lock, flag);
			hist_info->read_request = 0;
			INIT_COMPLETION(hist_info->comp);
			spin_unlock_irqrestore(&hist_info->hist_lock, flag);
		}
		if (ret || temp_ret) {
			ret = ret ? ret : temp_ret;
			goto hist_collect_exit;
		}
		if (pipe_cnt != 0 &&
			(hist->bin_cnt != (HIST_V_SIZE * pipe_cnt))) {
			pr_err("User not expecting size %d output",
						pipe_cnt * HIST_V_SIZE);
			ret = -EINVAL;
			goto hist_collect_exit;
		}
		if (pipe_cnt > 1) {
			/* Multiple pipes: concatenate per-pipe bins. */
			hist_concat = kmalloc(HIST_V_SIZE * pipe_cnt *
						sizeof(u32), GFP_KERNEL);
			if (!hist_concat) {
				ret = -ENOMEM;
				goto hist_collect_exit;
			}
			memset(hist_concat, 0, pipe_cnt * HIST_V_SIZE *
						sizeof(u32));
			for (i = pipe_num; i < MDSS_PP_ARG_NUM; i++) {
				if (!PP_ARG(i, hist->block))
					continue;
				/*
				 * NOTE(review): unlike every other loop in
				 * this function, this pipe_get result is
				 * dereferenced without an IS_ERR_OR_NULL
				 * check — confirm whether a failure here is
				 * truly impossible at this point.
				 */
				pipe = mdss_mdp_pipe_get(mdata, BIT(i));
				hist_info = &pipe->pp_res.hist;
				off = HIST_V_SIZE * i;
				mutex_lock(&hist_info->hist_mutex);
				for (j = off; j < off + HIST_V_SIZE; j++)
					hist_concat[j] =
						hist_info->data[j - off];
				hist_info->hist_cnt_sent++;
				mutex_unlock(&hist_info->hist_mutex);
				mdss_mdp_pipe_unmap(pipe);
			}
			hist_data_addr = hist_concat;
		} else {
			hist_data_addr = hist_info->data;
		}
	} else {
		pr_info("No Histogram at location %d", PP_LOCAT(hist->block));
		goto hist_collect_exit;
	}
	/*
	 * NOTE(review): copy_to_user returns the number of bytes NOT
	 * copied, so a partial copy yields a positive return value here
	 * rather than -EFAULT — confirm callers tolerate this.
	 */
	ret = copy_to_user(hist->c0, hist_data_addr, sizeof(u32) *
								hist->bin_cnt);
hist_collect_exit:
	kfree(hist_concat);
	return ret;
}
/*
 * Histogram interrupt service routine. Decodes the masked interrupt status
 * word @isr one histogram block at a time, updating that block's collection
 * state and completing any pending read request.
 */
void mdss_mdp_hist_intr_done(u32 isr)
{
	u32 isr_blk, blk_idx;
	struct pp_hist_col_info *hist_info = NULL;
	struct mdss_mdp_pipe *pipe;
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();

	/*
	 * Keep only the histogram bits: two bits per block (bit0 = done,
	 * bit1 = reset done) for three VIG pipes (bits 0/4/8) and three
	 * DSPP blocks (bits 12/16/20).
	 */
	isr &= 0x333333;
	while (isr != 0) {
		/*
		 * Each iteration decodes exactly one block and clears its
		 * bits from isr, so the loop always terminates.
		 */
		if (isr & 0xFFF000) {
			/* DSPP histogram blocks */
			if (isr & 0x3000) {
				blk_idx = 0;
				isr_blk = (isr >> 12) & 0x3;
				isr &= ~0x3000;
			} else if (isr & 0x30000) {
				blk_idx = 1;
				isr_blk = (isr >> 16) & 0x3;
				isr &= ~0x30000;
			} else {
				blk_idx = 2;
				isr_blk = (isr >> 20) & 0x3;
				isr &= ~0x300000;
			}
			hist_info = &mdss_pp_res->dspp_hist[blk_idx];
		} else {
			/* SSPP (VIG pipe) histogram blocks */
			if (isr & 0x3) {
				blk_idx = MDSS_MDP_SSPP_VIG0;
				isr_blk = isr & 0x3;
				isr &= ~0x3;
			} else if (isr & 0x30) {
				blk_idx = MDSS_MDP_SSPP_VIG1;
				isr_blk = (isr >> 4) & 0x3;
				isr &= ~0x30;
			} else {
				blk_idx = MDSS_MDP_SSPP_VIG2;
				isr_blk = (isr >> 8) & 0x3;
				isr &= ~0x300;
			}
			pipe = mdss_mdp_pipe_search(mdata, BIT(blk_idx));
			if (IS_ERR_OR_NULL(pipe)) {
				/* bits were already cleared above, so this
				 * continue cannot spin forever */
				pr_debug("pipe DNE, %d", blk_idx);
				continue;
			}
			hist_info = &pipe->pp_res.hist;
		}
		/* Histogram Done Interrupt */
		if (hist_info && (isr_blk & 0x1) && (hist_info->col_en)) {
			spin_lock(&hist_info->hist_lock);
			hist_info->col_state = HIST_READY;
			spin_unlock(&hist_info->hist_lock);
			if (hist_info->read_request == 1) {
				complete(&hist_info->comp);
				/* bump past 1 so the completion fires only
				 * once per read request */
				hist_info->read_request++;
			}
		}
		/* Histogram Reset Done Interrupt */
		if (hist_info && (isr_blk & 0x2) && (hist_info->col_en)) {
			spin_lock(&hist_info->hist_lock);
			hist_info->col_state = HIST_IDLE;
			spin_unlock(&hist_info->hist_lock);
		}
	};
}
  3736. static struct msm_fb_data_type *mdss_get_mfd_from_index(int index)
  3737. {
  3738. struct msm_fb_data_type *out = NULL;
  3739. struct mdss_mdp_ctl *ctl;
  3740. struct mdss_data_type *mdata = mdss_mdp_get_mdata();
  3741. int i;
  3742. for (i = 0; i < mdata->nctl; i++) {
  3743. ctl = mdata->ctl_off + i;
  3744. if ((ctl->power_on) && (ctl->mfd)
  3745. && (ctl->mfd->index == index))
  3746. out = ctl->mfd;
  3747. }
  3748. return out;
  3749. }
  3750. static int pp_num_to_side(struct mdss_mdp_ctl *ctl, u32 num)
  3751. {
  3752. u32 mixer_id[MDSS_MDP_INTF_MAX_LAYERMIXER];
  3753. u32 mixer_num;
  3754. if (!ctl || !ctl->mfd)
  3755. return -EINVAL;
  3756. mixer_num = mdss_mdp_get_ctl_mixers(ctl->mfd->index, mixer_id);
  3757. if (mixer_num < 2)
  3758. return MDSS_SIDE_NONE;
  3759. else if (mixer_id[1] == num)
  3760. return MDSS_SIDE_RIGHT;
  3761. else if (mixer_id[0] == num)
  3762. return MDSS_SIDE_LEFT;
  3763. else
  3764. pr_err("invalid, not on any side");
  3765. return -EINVAL;
  3766. }
  3767. static inline void pp_sts_set_split_bits(u32 *sts, u32 bits)
  3768. {
  3769. u32 tmp = *sts;
  3770. tmp &= ~MDSS_PP_SPLIT_MASK;
  3771. tmp |= bits & MDSS_PP_SPLIT_MASK;
  3772. *sts = tmp;
  3773. }
  3774. static inline bool pp_sts_is_enabled(u32 sts, int side)
  3775. {
  3776. bool ret = false;
  3777. /*
  3778. * If there are no sides, or if there are no split mode bits set, the
  3779. * side can't be disabled via split mode.
  3780. *
  3781. * Otherwise, if the side being checked opposes the split mode
  3782. * configuration, the side is disabled.
  3783. */
  3784. if ((side == MDSS_SIDE_NONE) || !(sts & MDSS_PP_SPLIT_MASK))
  3785. ret = true;
  3786. else if ((sts & MDSS_PP_SPLIT_RIGHT_ONLY) && (side == MDSS_SIDE_RIGHT))
  3787. ret = true;
  3788. else if ((sts & MDSS_PP_SPLIT_LEFT_ONLY) && (side == MDSS_SIDE_LEFT))
  3789. ret = true;
  3790. return ret && (sts & PP_STS_ENABLE);
  3791. }
  3792. static int mdss_ad_init_checks(struct msm_fb_data_type *mfd)
  3793. {
  3794. u32 mixer_id[MDSS_MDP_INTF_MAX_LAYERMIXER];
  3795. u32 mixer_num;
  3796. u32 ret = -EINVAL;
  3797. int i = 0;
  3798. struct mdss_data_type *mdata = mfd_to_mdata(mfd);
  3799. struct msm_fb_data_type *ad_mfd = mfd;
  3800. if (ad_mfd->ext_ad_ctrl >= 0)
  3801. ad_mfd = mdss_get_mfd_from_index(ad_mfd->ext_ad_ctrl);
  3802. if (!ad_mfd || !mdata)
  3803. return ret;
  3804. if (mdata->nad_cfgs == 0) {
  3805. pr_debug("Assertive Display not supported by device");
  3806. return -ENODEV;
  3807. }
  3808. if (ad_mfd->panel_info->type == DTV_PANEL) {
  3809. pr_debug("AD not supported on external display\n");
  3810. return ret;
  3811. }
  3812. mixer_num = mdss_mdp_get_ctl_mixers(ad_mfd->index, mixer_id);
  3813. if (!mixer_num) {
  3814. pr_debug("no mixers connected, %d", mixer_num);
  3815. return -EHOSTDOWN;
  3816. }
  3817. if (mixer_num > mdata->nmax_concurrent_ad_hw) {
  3818. pr_debug("too many mixers, not supported, %d > %d", mixer_num,
  3819. mdata->nmax_concurrent_ad_hw);
  3820. return ret;
  3821. }
  3822. do {
  3823. if (mixer_id[i] >= mdata->nad_cfgs) {
  3824. pr_err("invalid mixer input, %d", mixer_id[i]);
  3825. return ret;
  3826. }
  3827. i++;
  3828. } while (i < mixer_num);
  3829. return mixer_id[0];
  3830. }
  3831. static int mdss_mdp_get_ad(struct msm_fb_data_type *mfd,
  3832. struct mdss_ad_info **ret_ad)
  3833. {
  3834. int ad_num, ret = 0;
  3835. struct mdss_data_type *mdata;
  3836. struct mdss_ad_info *ad = NULL;
  3837. mdata = mfd_to_mdata(mfd);
  3838. ad_num = mdss_ad_init_checks(mfd);
  3839. if (ad_num >= 0)
  3840. ad = &mdata->ad_cfgs[ad_num];
  3841. else
  3842. ret = ad_num;
  3843. *ret_ad = ad;
  3844. return ret;
  3845. }
  3846. /* must call this function from within ad->lock */
  3847. static int pp_ad_invalidate_input(struct msm_fb_data_type *mfd)
  3848. {
  3849. int ret;
  3850. struct mdss_ad_info *ad;
  3851. struct mdss_mdp_ctl *ctl;
  3852. if (!mfd) {
  3853. pr_err("Invalid mfd\n");
  3854. return -EINVAL;
  3855. }
  3856. ctl = mfd_to_ctl(mfd);
  3857. if (!ctl) {
  3858. pr_err("Invalid ctl\n");
  3859. return -EINVAL;
  3860. }
  3861. ret = mdss_mdp_get_ad(mfd, &ad);
  3862. if (ret || !ad) {
  3863. pr_err("Fail to get ad: ret = %d, ad = 0x%pK\n", ret, ad);
  3864. return -EINVAL;
  3865. }
  3866. pr_debug("AD backlight level changed (%d), trigger update to AD\n",
  3867. mfd->ad_bl_level);
  3868. if (ad->cfg.mode == MDSS_AD_MODE_AUTO_BL) {
  3869. pr_err("AD auto backlight no longer supported.\n");
  3870. return -EINVAL;
  3871. }
  3872. if (ad->state & PP_AD_STATE_RUN) {
  3873. ad->calc_itr = ad->cfg.stab_itr;
  3874. ad->sts |= PP_AD_STS_DIRTY_VSYNC;
  3875. ad->sts |= PP_AD_STS_DIRTY_DATA;
  3876. }
  3877. return 0;
  3878. }
/*
 * Configure Assertive Display from userspace parameters: copy in the init
 * tables (backlight linearization / attenuation LUTs) or the runtime cfg,
 * update the split-mode ops, and handle enable/disable requests.
 * Returns 0 on success or a negative errno.
 */
int mdss_mdp_ad_config(struct msm_fb_data_type *mfd,
		struct mdss_ad_init_cfg *init_cfg)
{
	struct mdss_ad_info *ad;
	struct msm_fb_data_type *bl_mfd;
	int lin_ret = -1, inv_ret = -1, att_ret = -1, ret = 0;
	u32 last_ops;

	ret = mdss_mdp_get_ad(mfd, &ad);
	if (ret)
		return ret;
	/* writeback targets borrow the primary display's backlight device */
	if (mfd->panel_info->type == WRITEBACK_PANEL) {
		bl_mfd = mdss_get_mfd_from_index(0);
		if (!bl_mfd)
			return ret;
	} else {
		bl_mfd = mfd;
	}
	/* LEFT_ONLY and RIGHT_ONLY are mutually exclusive */
	if ((init_cfg->ops & MDSS_PP_SPLIT_MASK) == MDSS_PP_SPLIT_MASK) {
		pr_warn("Can't set both split bits\n");
		return -EINVAL;
	}

	mutex_lock(&ad->lock);
	if (init_cfg->ops & MDP_PP_AD_INIT) {
		memcpy(&ad->init, &init_cfg->params.init,
				sizeof(struct mdss_ad_init));
		/* both linearization LUTs must be exactly AD_BL_LIN_LEN */
		if (init_cfg->params.init.bl_lin_len == AD_BL_LIN_LEN) {
			lin_ret = copy_from_user(&ad->bl_lin,
				init_cfg->params.init.bl_lin,
				init_cfg->params.init.bl_lin_len *
				sizeof(uint32_t));
			inv_ret = copy_from_user(&ad->bl_lin_inv,
				init_cfg->params.init.bl_lin_inv,
				init_cfg->params.init.bl_lin_len *
				sizeof(uint32_t));
			/* NOTE(review): -EFAULT would be the conventional
			 * errno for a copy_from_user failure; -ENOMEM is
			 * what this driver has always returned here */
			if (lin_ret || inv_ret)
				ret = -ENOMEM;
		} else {
			ret = -EINVAL;
		}
		if (ret) {
			ad->state &= ~PP_AD_STATE_BL_LIN;
			goto ad_config_exit;
		} else
			ad->state |= PP_AD_STATE_BL_LIN;
		if ((init_cfg->params.init.bl_att_len == AD_BL_ATT_LUT_LEN) &&
			(init_cfg->params.init.bl_att_lut)) {
			att_ret = copy_from_user(&ad->bl_att_lut,
				init_cfg->params.init.bl_att_lut,
				init_cfg->params.init.bl_att_len *
				sizeof(uint32_t));
			if (att_ret)
				ret = -ENOMEM;
		} else {
			ret = -EINVAL;
		}
		/* NOTE(review): the attenuation-LUT path reuses
		 * PP_AD_STATE_BL_LIN, mirroring the linearization path
		 * above — confirm a separate state flag was not intended */
		if (ret) {
			ad->state &= ~PP_AD_STATE_BL_LIN;
			goto ad_config_exit;
		} else
			ad->state |= PP_AD_STATE_BL_LIN;
		ad->sts |= PP_AD_STS_DIRTY_INIT;
	} else if (init_cfg->ops & MDP_PP_AD_CFG) {
		memcpy(&ad->cfg, &init_cfg->params.cfg,
				sizeof(struct mdss_ad_cfg));
		ad->cfg.backlight_scale = MDSS_MDP_AD_BL_SCALE;
		ad->sts |= PP_AD_STS_DIRTY_CFG;
	}
	last_ops = ad->ops & MDSS_PP_SPLIT_MASK;
	ad->ops = init_cfg->ops & MDSS_PP_SPLIT_MASK;
	/*
	 * if there is a change in the split mode config, the init values
	 * need to be re-written to hardware (if they have already been
	 * written or if there is data pending to be written). Check for
	 * pending data (DIRTY_INIT) is not checked here since it will not
	 * affect the outcome of this conditional (i.e. if init hasn't
	 * already been written (*_STATE_INIT is set), this conditional will
	 * only evaluate to true (and set the DIRTY bit) if the DIRTY bit has
	 * already been set).
	 */
	if ((last_ops ^ ad->ops) && (ad->state & PP_AD_STATE_INIT))
		ad->sts |= PP_AD_STS_DIRTY_INIT;
	if (!ret && (init_cfg->ops & MDP_PP_OPS_DISABLE)) {
		ad->sts &= ~PP_STS_ENABLE;
		/* drop the lock so a running calc worker can finish before
		 * we cancel it (cancel_work_sync may block on the worker,
		 * which takes ad->lock) */
		mutex_unlock(&ad->lock);
		cancel_work_sync(&ad->calc_work);
		mutex_lock(&ad->lock);
		ad->mfd = NULL;
		ad->bl_mfd = NULL;
	} else if (!ret && (init_cfg->ops & MDP_PP_OPS_ENABLE)) {
		ad->sts |= PP_STS_ENABLE;
		ad->mfd = mfd;
		ad->bl_mfd = bl_mfd;
	}
ad_config_exit:
	mutex_unlock(&ad->lock);
	return ret;
}
  3976. int mdss_mdp_ad_input(struct msm_fb_data_type *mfd,
  3977. struct mdss_ad_input *input, int wait) {
  3978. int ret = 0;
  3979. struct mdss_ad_info *ad;
  3980. u32 bl;
  3981. ret = mdss_mdp_get_ad(mfd, &ad);
  3982. if (ret)
  3983. return ret;
  3984. mutex_lock(&ad->lock);
  3985. if ((!PP_AD_STATE_IS_INITCFG(ad->state) &&
  3986. !PP_AD_STS_IS_DIRTY(ad->sts)) &&
  3987. !input->mode == MDSS_AD_MODE_CALIB) {
  3988. pr_warn("AD not initialized or configured.");
  3989. ret = -EPERM;
  3990. goto error;
  3991. }
  3992. switch (input->mode) {
  3993. case MDSS_AD_MODE_AUTO_BL:
  3994. case MDSS_AD_MODE_AUTO_STR:
  3995. if (!MDSS_AD_MODE_DATA_MATCH(ad->cfg.mode,
  3996. MDSS_AD_INPUT_AMBIENT)) {
  3997. ret = -EINVAL;
  3998. goto error;
  3999. }
  4000. if (input->in.amb_light > MDSS_MDP_MAX_AD_AL) {
  4001. pr_warn("invalid input ambient light");
  4002. ret = -EINVAL;
  4003. goto error;
  4004. }
  4005. ad->ad_data_mode = MDSS_AD_INPUT_AMBIENT;
  4006. pr_debug("ambient = %d\n", input->in.amb_light);
  4007. ad->ad_data = input->in.amb_light;
  4008. ad->calc_itr = ad->cfg.stab_itr;
  4009. ad->sts |= PP_AD_STS_DIRTY_VSYNC;
  4010. ad->sts |= PP_AD_STS_DIRTY_DATA;
  4011. break;
  4012. case MDSS_AD_MODE_TARG_STR:
  4013. case MDSS_AD_MODE_MAN_STR:
  4014. if (!MDSS_AD_MODE_DATA_MATCH(ad->cfg.mode,
  4015. MDSS_AD_INPUT_STRENGTH)) {
  4016. ret = -EINVAL;
  4017. goto error;
  4018. }
  4019. if (input->in.strength > MDSS_MDP_MAX_AD_STR) {
  4020. pr_warn("invalid input strength");
  4021. ret = -EINVAL;
  4022. goto error;
  4023. }
  4024. ad->ad_data_mode = MDSS_AD_INPUT_STRENGTH;
  4025. pr_debug("strength = %d\n", input->in.strength);
  4026. ad->ad_data = input->in.strength;
  4027. ad->calc_itr = ad->cfg.stab_itr;
  4028. ad->sts |= PP_AD_STS_DIRTY_VSYNC;
  4029. ad->sts |= PP_AD_STS_DIRTY_DATA;
  4030. break;
  4031. case MDSS_AD_MODE_CALIB:
  4032. wait = 0;
  4033. if (mfd->calib_mode) {
  4034. bl = input->in.calib_bl;
  4035. if (bl >= AD_BL_LIN_LEN) {
  4036. pr_warn("calib_bl 255 max!");
  4037. break;
  4038. }
  4039. mutex_unlock(&ad->lock);
  4040. mutex_lock(&mfd->bl_lock);
  4041. MDSS_BRIGHT_TO_BL(bl, bl, mfd->panel_info->bl_max,
  4042. mfd->panel_info->brightness_max);
  4043. mdss_fb_set_backlight(mfd, bl);
  4044. mutex_unlock(&mfd->bl_lock);
  4045. mutex_lock(&ad->lock);
  4046. } else {
  4047. pr_warn("should be in calib mode");
  4048. }
  4049. break;
  4050. default:
  4051. pr_warn("invalid default %d", input->mode);
  4052. ret = -EINVAL;
  4053. goto error;
  4054. }
  4055. error:
  4056. mutex_unlock(&ad->lock);
  4057. if (!ret) {
  4058. if (wait) {
  4059. mutex_lock(&ad->lock);
  4060. INIT_COMPLETION(ad->comp);
  4061. mutex_unlock(&ad->lock);
  4062. }
  4063. if (wait) {
  4064. ret = wait_for_completion_timeout(
  4065. &ad->comp, HIST_WAIT_TIMEOUT(1));
  4066. if (ret == 0)
  4067. ret = -ETIMEDOUT;
  4068. else if (ret > 0)
  4069. input->output = ad->last_str;
  4070. }
  4071. }
  4072. return ret;
  4073. }
  4074. static void pp_ad_input_write(struct mdss_mdp_ad *ad_hw,
  4075. struct mdss_ad_info *ad)
  4076. {
  4077. char __iomem *base;
  4078. base = ad_hw->base;
  4079. switch (ad->cfg.mode) {
  4080. case MDSS_AD_MODE_AUTO_BL:
  4081. writel_relaxed(ad->ad_data, base + MDSS_MDP_REG_AD_AL);
  4082. break;
  4083. case MDSS_AD_MODE_AUTO_STR:
  4084. writel_relaxed(ad->bl_data, base + MDSS_MDP_REG_AD_BL);
  4085. writel_relaxed(ad->ad_data, base + MDSS_MDP_REG_AD_AL);
  4086. break;
  4087. case MDSS_AD_MODE_TARG_STR:
  4088. writel_relaxed(ad->bl_data, base + MDSS_MDP_REG_AD_BL);
  4089. writel_relaxed(ad->ad_data, base + MDSS_MDP_REG_AD_TARG_STR);
  4090. break;
  4091. case MDSS_AD_MODE_MAN_STR:
  4092. writel_relaxed(ad->bl_data, base + MDSS_MDP_REG_AD_BL);
  4093. writel_relaxed(ad->ad_data, base + MDSS_MDP_REG_AD_STR_MAN);
  4094. break;
  4095. default:
  4096. pr_warn("Invalid mode! %d", ad->cfg.mode);
  4097. break;
  4098. }
  4099. }
  4100. #define MDSS_AD_MERGED_WIDTH 4
  4101. static void pp_ad_init_write(struct mdss_mdp_ad *ad_hw, struct mdss_ad_info *ad,
  4102. struct mdss_mdp_ctl *ctl)
  4103. {
  4104. struct mdss_data_type *mdata = ctl->mdata;
  4105. u32 temp;
  4106. u32 frame_start, frame_end, procs_start, procs_end, tile_ctrl;
  4107. u32 num;
  4108. int side;
  4109. char __iomem *base;
  4110. bool is_calc, is_dual_pipe, split_mode;
  4111. u32 mixer_id[MDSS_MDP_INTF_MAX_LAYERMIXER];
  4112. u32 mixer_num;
  4113. mixer_num = mdss_mdp_get_ctl_mixers(ctl->mfd->index, mixer_id);
  4114. if (mixer_num > 1)
  4115. is_dual_pipe = true;
  4116. else
  4117. is_dual_pipe = false;
  4118. base = ad_hw->base;
  4119. is_calc = ad->calc_hw_num == ad_hw->num;
  4120. split_mode = !!(ad->ops & MDSS_PP_SPLIT_MASK);
  4121. writel_relaxed(ad->init.i_control[0] & 0x1F,
  4122. base + MDSS_MDP_REG_AD_CON_CTRL_0);
  4123. writel_relaxed(ad->init.i_control[1] << 8,
  4124. base + MDSS_MDP_REG_AD_CON_CTRL_1);
  4125. temp = ad->init.white_lvl << 16;
  4126. temp |= ad->init.black_lvl & 0xFFFF;
  4127. writel_relaxed(temp, base + MDSS_MDP_REG_AD_BW_LVL);
  4128. writel_relaxed(ad->init.var, base + MDSS_MDP_REG_AD_VAR);
  4129. writel_relaxed(ad->init.limit_ampl, base + MDSS_MDP_REG_AD_AMP_LIM);
  4130. writel_relaxed(ad->init.i_dither, base + MDSS_MDP_REG_AD_DITH);
  4131. temp = ad->init.slope_max << 8;
  4132. temp |= ad->init.slope_min & 0xFF;
  4133. writel_relaxed(temp, base + MDSS_MDP_REG_AD_SLOPE);
  4134. writel_relaxed(ad->init.dither_ctl, base + MDSS_MDP_REG_AD_DITH_CTRL);
  4135. writel_relaxed(ad->init.format, base + MDSS_MDP_REG_AD_CTRL_0);
  4136. writel_relaxed(ad->init.auto_size, base + MDSS_MDP_REG_AD_CTRL_1);
  4137. if (split_mode)
  4138. temp = mdata->mixer_intf[ad_hw->num].width << 16;
  4139. else
  4140. temp = ad->init.frame_w << 16;
  4141. temp |= ad->init.frame_h & 0xFFFF;
  4142. writel_relaxed(temp, base + MDSS_MDP_REG_AD_FRAME_SIZE);
  4143. temp = ad->init.logo_v << 8;
  4144. temp |= ad->init.logo_h & 0xFF;
  4145. writel_relaxed(temp, base + MDSS_MDP_REG_AD_LOGO_POS);
  4146. pp_ad_cfg_lut(base + MDSS_MDP_REG_AD_LUT_FI, ad->init.asym_lut);
  4147. pp_ad_cfg_lut(base + MDSS_MDP_REG_AD_LUT_CC, ad->init.color_corr_lut);
  4148. if (mdata->mdp_rev >= MDSS_MDP_HW_REV_103) {
  4149. if (is_dual_pipe && !split_mode) {
  4150. num = ad_hw->num;
  4151. side = pp_num_to_side(ctl, num);
  4152. tile_ctrl = 0x5;
  4153. if ((ad->calc_hw_num + 1) == num)
  4154. tile_ctrl |= 0x10;
  4155. if (side <= MDSS_SIDE_NONE) {
  4156. WARN(1, "error finding sides, %d", side);
  4157. frame_start = 0;
  4158. procs_start = frame_start;
  4159. frame_end = 0;
  4160. procs_end = frame_end;
  4161. } else if (side == MDSS_SIDE_LEFT) {
  4162. frame_start = 0;
  4163. procs_start = 0;
  4164. frame_end = mdata->mixer_intf[num].width +
  4165. MDSS_AD_MERGED_WIDTH;
  4166. procs_end = mdata->mixer_intf[num].width;
  4167. } else {
  4168. procs_start = ad->init.frame_w -
  4169. (mdata->mixer_intf[num].width);
  4170. procs_end = ad->init.frame_w;
  4171. frame_start = procs_start -
  4172. MDSS_AD_MERGED_WIDTH;
  4173. frame_end = procs_end;
  4174. }
  4175. procs_end -= 1;
  4176. frame_end -= 1;
  4177. } else {
  4178. frame_start = 0x0;
  4179. frame_end = 0xFFFF;
  4180. procs_start = 0x0;
  4181. procs_end = 0xFFFF;
  4182. tile_ctrl = 0x0;
  4183. }
  4184. writel_relaxed(frame_start, base + MDSS_MDP_REG_AD_FRAME_START);
  4185. writel_relaxed(frame_end, base + MDSS_MDP_REG_AD_FRAME_END);
  4186. writel_relaxed(procs_start, base + MDSS_MDP_REG_AD_PROCS_START);
  4187. writel_relaxed(procs_end, base + MDSS_MDP_REG_AD_PROCS_END);
  4188. writel_relaxed(tile_ctrl, base + MDSS_MDP_REG_AD_TILE_CTRL);
  4189. }
  4190. }
#define MDSS_PP_AD_DEF_CALIB 0x6E
/*
 * Program the mode-dependent config registers of one AD block from
 * ad->cfg. The switch intentionally falls through: each case programs
 * its own registers and then everything required by the modes below it
 * (AUTO_BL is a superset of AUTO_STR, which is a superset of TARG_STR,
 * which is a superset of MAN_STR).
 */
static void pp_ad_cfg_write(struct mdss_mdp_ad *ad_hw, struct mdss_ad_info *ad)
{
	char __iomem *base;
	u32 temp, temp_calib = MDSS_PP_AD_DEF_CALIB;

	base = ad_hw->base;
	switch (ad->cfg.mode) {
	case MDSS_AD_MODE_AUTO_BL:
		temp = ad->cfg.backlight_max << 16;
		temp |= ad->cfg.backlight_min & 0xFFFF;
		writel_relaxed(temp, base + MDSS_MDP_REG_AD_BL_MINMAX);
		writel_relaxed(ad->cfg.amb_light_min,
				base + MDSS_MDP_REG_AD_AL_MIN);
		temp = ad->cfg.filter[1] << 16;
		temp |= ad->cfg.filter[0] & 0xFFFF;
		writel_relaxed(temp, base + MDSS_MDP_REG_AD_AL_FILT);
		/* fallthrough */
	case MDSS_AD_MODE_AUTO_STR:
		pp_ad_cfg_lut(base + MDSS_MDP_REG_AD_LUT_AL,
				ad->cfg.al_calib_lut);
		writel_relaxed(ad->cfg.strength_limit,
				base + MDSS_MDP_REG_AD_STR_LIM);
		temp = ad->cfg.calib[3] << 16;
		temp |= ad->cfg.calib[2] & 0xFFFF;
		writel_relaxed(temp, base + MDSS_MDP_REG_AD_CALIB_CD);
		writel_relaxed(ad->cfg.t_filter_recursion,
				base + MDSS_MDP_REG_AD_TFILT_CTRL);
		/* auto modes use calib[0] instead of the default value */
		temp_calib = ad->cfg.calib[0] & 0xFFFF;
		/* fallthrough */
	case MDSS_AD_MODE_TARG_STR:
		temp = ad->cfg.calib[1] << 16;
		temp |= temp_calib;
		writel_relaxed(temp, base + MDSS_MDP_REG_AD_CALIB_AB);
		/* fallthrough */
	case MDSS_AD_MODE_MAN_STR:
		writel_relaxed(ad->cfg.backlight_scale,
				base + MDSS_MDP_REG_AD_BL_MAX);
		writel_relaxed(ad->cfg.mode, base + MDSS_MDP_REG_AD_MODE_SEL);
		pr_debug("stab_itr = %d", ad->cfg.stab_itr);
		break;
	default:
		break;
	}
}
  4232. static void pp_ad_vsync_handler(struct mdss_mdp_ctl *ctl, ktime_t t)
  4233. {
  4234. struct mdss_data_type *mdata = ctl->mdata;
  4235. struct mdss_ad_info *ad;
  4236. if (ctl->mixer_left && ctl->mixer_left->num < mdata->nad_cfgs) {
  4237. ad = &mdata->ad_cfgs[ctl->mixer_left->num];
  4238. if (!ad || !ad->mfd || !mdata->ad_calc_wq)
  4239. return;
  4240. queue_work(mdata->ad_calc_wq, &ad->calc_work);
  4241. }
  4242. }
  4243. #define MDSS_PP_AD_BYPASS_DEF 0x101
  4244. static void pp_ad_bypass_config(struct mdss_ad_info *ad,
  4245. struct mdss_mdp_ctl *ctl, u32 num, u32 *opmode)
  4246. {
  4247. int side = pp_num_to_side(ctl, num);
  4248. if (pp_sts_is_enabled(ad->reg_sts | (ad->ops & MDSS_PP_SPLIT_MASK),
  4249. side)) {
  4250. *opmode = 0;
  4251. } else {
  4252. *opmode = MDSS_PP_AD_BYPASS_DEF;
  4253. }
  4254. }
  4255. static int pp_ad_setup_hw_nums(struct msm_fb_data_type *mfd,
  4256. struct mdss_ad_info *ad)
  4257. {
  4258. u32 mixer_id[MDSS_MDP_INTF_MAX_LAYERMIXER];
  4259. u32 mixer_num;
  4260. mixer_num = mdss_mdp_get_ctl_mixers(mfd->index, mixer_id);
  4261. if (!mixer_num)
  4262. return -EINVAL;
  4263. /* default to left mixer */
  4264. ad->calc_hw_num = mixer_id[0];
  4265. if ((mixer_num > 1) && (ad->ops & MDSS_PP_SPLIT_RIGHT_ONLY))
  4266. ad->calc_hw_num = mixer_id[1];
  4267. return 0;
  4268. }
  4269. static int mdss_mdp_ad_setup(struct msm_fb_data_type *mfd)
  4270. {
  4271. int ret = 0;
  4272. struct mdss_ad_info *ad;
  4273. struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
  4274. struct msm_fb_data_type *bl_mfd;
  4275. struct mdss_data_type *mdata;
  4276. u32 bypass = MDSS_PP_AD_BYPASS_DEF, bl;
  4277. ret = mdss_mdp_get_ad(mfd, &ad);
  4278. if (ret) {
  4279. ret = -EINVAL;
  4280. pr_debug("failed to get ad_info, err = %d", ret);
  4281. goto exit;
  4282. }
  4283. if (mfd->panel_info->type == WRITEBACK_PANEL) {
  4284. bl_mfd = mdss_get_mfd_from_index(0);
  4285. if (!bl_mfd) {
  4286. ret = -EINVAL;
  4287. pr_warn("failed to get primary FB bl handle, err = %d",
  4288. ret);
  4289. goto exit;
  4290. }
  4291. } else {
  4292. bl_mfd = mfd;
  4293. }
  4294. mdata = mfd_to_mdata(mfd);
  4295. mutex_lock(&ad->lock);
  4296. if (ad->sts != last_sts || ad->state != last_state) {
  4297. last_sts = ad->sts;
  4298. last_state = ad->state;
  4299. pr_debug("begining: ad->sts = 0x%08x, state = 0x%08x", ad->sts,
  4300. ad->state);
  4301. }
  4302. if (!PP_AD_STS_IS_DIRTY(ad->sts) &&
  4303. (ad->sts & PP_AD_STS_DIRTY_DATA)) {
  4304. /*
  4305. * Write inputs to regs when the data has been updated or
  4306. * Assertive Display is up and running as long as there are
  4307. * no updates to AD init or cfg
  4308. */
  4309. ad->sts &= ~PP_AD_STS_DIRTY_DATA;
  4310. ad->state |= PP_AD_STATE_DATA;
  4311. pr_debug("dirty data, last_bl = %d ", ad->last_bl);
  4312. bl = bl_mfd->ad_bl_level;
  4313. if ((ad->cfg.mode == MDSS_AD_MODE_AUTO_STR) &&
  4314. (ad->last_bl != bl)) {
  4315. ad->last_bl = bl;
  4316. ad->calc_itr = ad->cfg.stab_itr;
  4317. ad->sts |= PP_AD_STS_DIRTY_VSYNC;
  4318. linear_map(bl, &ad->bl_data,
  4319. bl_mfd->panel_info->bl_max,
  4320. MDSS_MDP_AD_BL_SCALE);
  4321. }
  4322. ad->reg_sts |= PP_AD_STS_DIRTY_DATA;
  4323. }
  4324. if (ad->sts & PP_AD_STS_DIRTY_CFG) {
  4325. ad->sts &= ~PP_AD_STS_DIRTY_CFG;
  4326. ad->state |= PP_AD_STATE_CFG;
  4327. ad->reg_sts |= PP_AD_STS_DIRTY_CFG;
  4328. if (!MDSS_AD_MODE_DATA_MATCH(ad->cfg.mode, ad->ad_data_mode)) {
  4329. ad->sts &= ~PP_AD_STS_DIRTY_DATA;
  4330. ad->state &= ~PP_AD_STATE_DATA;
  4331. pr_debug("Mode switched, data invalidated!");
  4332. }
  4333. }
  4334. if (ad->sts & PP_AD_STS_DIRTY_INIT) {
  4335. ad->sts &= ~PP_AD_STS_DIRTY_INIT;
  4336. if (pp_ad_setup_hw_nums(mfd, ad)) {
  4337. pr_warn("failed to setup ad master");
  4338. ad->calc_hw_num = PP_AD_BAD_HW_NUM;
  4339. } else {
  4340. ad->state |= PP_AD_STATE_INIT;
  4341. ad->reg_sts |= PP_AD_STS_DIRTY_INIT;
  4342. }
  4343. }
  4344. /* update ad screen size if it has changed since last configuration */
  4345. if (mfd->panel_info->type == WRITEBACK_PANEL &&
  4346. (ad->init.frame_w != ctl->width ||
  4347. ad->init.frame_h != ctl->height)) {
  4348. pr_debug("changing from %dx%d to %dx%d", ad->init.frame_w,
  4349. ad->init.frame_h,
  4350. ctl->width,
  4351. ctl->height);
  4352. ad->init.frame_w = ctl->width;
  4353. ad->init.frame_h = ctl->height;
  4354. ad->reg_sts |= PP_AD_STS_DIRTY_INIT;
  4355. }
  4356. if ((ad->sts & PP_STS_ENABLE) && PP_AD_STATE_IS_READY(ad->state)) {
  4357. bypass = 0;
  4358. ad->reg_sts |= PP_AD_STS_DIRTY_ENABLE;
  4359. ad->state |= PP_AD_STATE_RUN;
  4360. if (bl_mfd != mfd)
  4361. bl_mfd->ext_ad_ctrl = mfd->index;
  4362. bl_mfd->ext_bl_ctrl = ad->cfg.bl_ctrl_mode;
  4363. } else {
  4364. if (ad->state & PP_AD_STATE_RUN) {
  4365. ad->reg_sts = PP_AD_STS_DIRTY_ENABLE;
  4366. /* Clear state and regs when going to off state*/
  4367. ad->sts = 0;
  4368. ad->sts |= PP_AD_STS_DIRTY_VSYNC;
  4369. ad->state &= !PP_AD_STATE_INIT;
  4370. ad->state &= !PP_AD_STATE_CFG;
  4371. ad->state &= !PP_AD_STATE_DATA;
  4372. ad->state &= !PP_AD_STATE_BL_LIN;
  4373. ad->ad_data = 0;
  4374. ad->ad_data_mode = 0;
  4375. ad->last_bl = 0;
  4376. ad->calc_itr = 0;
  4377. ad->calc_hw_num = PP_AD_BAD_HW_NUM;
  4378. memset(&ad->bl_lin, 0, sizeof(uint32_t) *
  4379. AD_BL_LIN_LEN);
  4380. memset(&ad->bl_lin_inv, 0, sizeof(uint32_t) *
  4381. AD_BL_LIN_LEN);
  4382. memset(&ad->bl_att_lut, 0, sizeof(uint32_t) *
  4383. AD_BL_ATT_LUT_LEN);
  4384. memset(&ad->init, 0, sizeof(struct mdss_ad_init));
  4385. memset(&ad->cfg, 0, sizeof(struct mdss_ad_cfg));
  4386. bl_mfd->ext_bl_ctrl = 0;
  4387. bl_mfd->ext_ad_ctrl = -1;
  4388. }
  4389. ad->state &= ~PP_AD_STATE_RUN;
  4390. }
  4391. if (!bypass)
  4392. ad->reg_sts |= PP_STS_ENABLE;
  4393. else
  4394. ad->reg_sts &= ~PP_STS_ENABLE;
  4395. if (PP_AD_STS_DIRTY_VSYNC & ad->sts) {
  4396. pr_debug("dirty vsync, calc_itr = %d", ad->calc_itr);
  4397. ad->sts &= ~PP_AD_STS_DIRTY_VSYNC;
  4398. if (!(PP_AD_STATE_VSYNC & ad->state) && ad->calc_itr &&
  4399. (ad->state & PP_AD_STATE_RUN)) {
  4400. ctl->add_vsync_handler(ctl, &ad->handle);
  4401. ad->state |= PP_AD_STATE_VSYNC;
  4402. } else if ((PP_AD_STATE_VSYNC & ad->state) &&
  4403. (!ad->calc_itr || !(PP_AD_STATE_RUN & ad->state))) {
  4404. ctl->remove_vsync_handler(ctl, &ad->handle);
  4405. ad->state &= ~PP_AD_STATE_VSYNC;
  4406. }
  4407. }
  4408. if (ad->sts != last_sts || ad->state != last_state) {
  4409. last_sts = ad->sts;
  4410. last_state = ad->state;
  4411. pr_debug("end: ad->sts = 0x%08x, state = 0x%08x", ad->sts,
  4412. ad->state);
  4413. }
  4414. mutex_unlock(&ad->lock);
  4415. exit:
  4416. return ret;
  4417. }
#define MDSS_PP_AD_SLEEP 10
/*
 * Workqueue handler (queued from pp_ad_vsync_handler): kicks off one AD
 * strength calculation on the designated hardware block and polls the
 * CALC_DONE register for its completion, then flags the AD block for
 * flush and wakes any waiter on ad->comp.
 */
static void pp_ad_calc_worker(struct work_struct *work)
{
	struct mdss_ad_info *ad;
	struct mdss_mdp_ctl *ctl;
	struct msm_fb_data_type *mfd, *bl_mfd;
	struct mdss_data_type *mdata;
	char __iomem *base;
	u32 calc_done = 0;

	ad = container_of(work, struct mdss_ad_info, calc_work);

	mutex_lock(&ad->lock);
	/* bail out if AD was disabled or torn down since queuing */
	if (!ad->mfd || !ad->bl_mfd || !(ad->sts & PP_STS_ENABLE)) {
		mutex_unlock(&ad->lock);
		return;
	}
	mfd = ad->mfd;
	bl_mfd = ad->bl_mfd;
	ctl = mfd_to_ctl(ad->mfd);
	mdata = mfd_to_mdata(ad->mfd);
	if (!mdata || ad->calc_hw_num >= mdata->nad_cfgs) {
		mutex_unlock(&ad->lock);
		return;
	}

	base = mdata->ad_off[ad->calc_hw_num].base;
	/* in AUTO_STR mode nothing can be calculated until a backlight
	 * value has been seen; still complete so waiters don't hang */
	if ((ad->cfg.mode == MDSS_AD_MODE_AUTO_STR) && (ad->last_bl == 0)) {
		complete(&ad->comp);
		mutex_unlock(&ad->lock);
		return;
	}
	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
	if (PP_AD_STATE_RUN & ad->state) {
		/* Kick off calculation */
		ad->calc_itr--;
		writel_relaxed(1, base + MDSS_MDP_REG_AD_START_CALC);
	}
	if (ad->state & PP_AD_STATE_RUN) {
		/* poll for completion, sleeping between register reads;
		 * also stops if RUN is cleared concurrently */
		do {
			calc_done = readl_relaxed(base +
					MDSS_MDP_REG_AD_CALC_DONE);
			if (!calc_done)
				usleep(MDSS_PP_AD_SLEEP);
		} while (!calc_done && (ad->state & PP_AD_STATE_RUN));
		if (calc_done) {
			ad->last_str = 0xFF & readl_relaxed(base +
					MDSS_MDP_REG_AD_STR_OUT);
			if (MDSS_AD_RUNNING_AUTO_BL(ad))
				pr_err("AD auto backlight no longer supported.\n");
			pr_debug("calc_str = %d, calc_itr %d",
					ad->last_str & 0xFF,
					ad->calc_itr);
		} else {
			/* sentinel marking the strength read as invalid */
			ad->last_str = 0xFFFFFFFF;
		}
	}
	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
	complete(&ad->comp);
	/* stabilization iterations exhausted: stop per-vsync kicks */
	if (!ad->calc_itr) {
		ad->state &= ~PP_AD_STATE_VSYNC;
		ctl->remove_vsync_handler(ctl, &ad->handle);
	}
	mutex_unlock(&ad->lock);
	mutex_lock(&ctl->lock);
	/* set the flush bit for this AD block */
	ctl->flush_bits |= BIT(13 + ad->num);
	mutex_unlock(&ctl->lock);
	/* Trigger update notify to wake up those waiting for display updates */
	mdss_fb_update_notify_update(bl_mfd);
}
  4485. #define PP_AD_LUT_LEN 33
  4486. static void pp_ad_cfg_lut(char __iomem *addr, u32 *data)
  4487. {
  4488. int i;
  4489. u32 temp;
  4490. for (i = 0; i < PP_AD_LUT_LEN - 1; i += 2) {
  4491. temp = data[i+1] << 16;
  4492. temp |= (data[i] & 0xFFFF);
  4493. writel_relaxed(temp, addr + (i*2));
  4494. }
  4495. writel_relaxed(data[PP_AD_LUT_LEN - 1] << 16,
  4496. addr + ((PP_AD_LUT_LEN - 1) * 2));
  4497. }
  4498. /* must call this function from within ad->lock */
  4499. static int pp_ad_attenuate_bl(struct mdss_ad_info *ad, u32 bl, u32 *bl_out)
  4500. {
  4501. u32 shift = 0, ratio_temp = 0;
  4502. u32 n, lut_interval, bl_att;
  4503. if (bl < 0) {
  4504. pr_err("Invalid backlight input\n");
  4505. return -EINVAL;
  4506. }
  4507. pr_debug("bl_in = %d\n", bl);
  4508. /* map panel backlight range to AD backlight range */
  4509. linear_map(bl, &bl, ad->bl_mfd->panel_info->bl_max,
  4510. MDSS_MDP_AD_BL_SCALE);
  4511. pr_debug("Before attenuation = %d\n", bl);
  4512. ratio_temp = MDSS_MDP_AD_BL_SCALE / (AD_BL_ATT_LUT_LEN - 1);
  4513. while (ratio_temp > 0) {
  4514. ratio_temp = ratio_temp >> 1;
  4515. shift++;
  4516. }
  4517. n = bl >> shift;
  4518. if (n >= (AD_BL_ATT_LUT_LEN - 1)) {
  4519. pr_err("Invalid index for BL attenuation: %d.\n", n);
  4520. return -EINVAL;
  4521. }
  4522. lut_interval = (MDSS_MDP_AD_BL_SCALE + 1) / (AD_BL_ATT_LUT_LEN - 1);
  4523. bl_att = ad->bl_att_lut[n] + (bl - lut_interval * n) *
  4524. (ad->bl_att_lut[n + 1] - ad->bl_att_lut[n]) /
  4525. lut_interval;
  4526. pr_debug("n = %d, bl_att = %d\n", n, bl_att);
  4527. if (ad->init.alpha_base)
  4528. *bl_out = (ad->init.alpha * bl_att +
  4529. (ad->init.alpha_base - ad->init.alpha) * bl) /
  4530. ad->init.alpha_base;
  4531. else
  4532. *bl_out = bl;
  4533. pr_debug("After attenuation = %d\n", *bl_out);
  4534. /* map AD backlight range back to panel backlight range */
  4535. linear_map(*bl_out, bl_out, MDSS_MDP_AD_BL_SCALE,
  4536. ad->bl_mfd->panel_info->bl_max);
  4537. pr_debug("bl_out = %d\n", *bl_out);
  4538. return 0;
  4539. }
  4540. /* must call this function from within ad->lock */
  4541. static int pp_ad_linearize_bl(struct mdss_ad_info *ad, u32 bl, u32 *bl_out,
  4542. int inv)
  4543. {
  4544. u32 n;
  4545. int ret = -EINVAL;
  4546. if (bl < 0 || bl > ad->bl_mfd->panel_info->bl_max) {
  4547. pr_err("Invalid backlight input: bl = %d, bl_max = %d\n", bl,
  4548. ad->bl_mfd->panel_info->bl_max);
  4549. return -EINVAL;
  4550. }
  4551. pr_debug("bl_in = %d, inv = %d\n", bl, inv);
  4552. /* map panel backlight range to AD backlight range */
  4553. linear_map(bl, &bl, ad->bl_mfd->panel_info->bl_max,
  4554. MDSS_MDP_AD_BL_SCALE);
  4555. pr_debug("Before linearization = %d\n", bl);
  4556. n = bl * (AD_BL_LIN_LEN - 1) / MDSS_MDP_AD_BL_SCALE;
  4557. pr_debug("n = %d\n", n);
  4558. if (n > (AD_BL_LIN_LEN - 1)) {
  4559. pr_err("Invalid index for BL linearization: %d.\n", n);
  4560. return ret;
  4561. } else if (n == (AD_BL_LIN_LEN - 1)) {
  4562. if (inv == MDP_PP_AD_BL_LINEAR_INV)
  4563. *bl_out = ad->bl_lin_inv[n];
  4564. else if (inv == MDP_PP_AD_BL_LINEAR)
  4565. *bl_out = ad->bl_lin[n];
  4566. } else {
  4567. /* linear piece-wise interpolation */
  4568. if (inv == MDP_PP_AD_BL_LINEAR_INV) {
  4569. *bl_out = bl * (AD_BL_LIN_LEN - 1) *
  4570. (ad->bl_lin_inv[n + 1] - ad->bl_lin_inv[n]) /
  4571. MDSS_MDP_AD_BL_SCALE - n *
  4572. (ad->bl_lin_inv[n + 1] - ad->bl_lin_inv[n]) +
  4573. ad->bl_lin_inv[n];
  4574. } else if (inv == MDP_PP_AD_BL_LINEAR) {
  4575. *bl_out = bl * (AD_BL_LIN_LEN - 1) *
  4576. (ad->bl_lin[n + 1] - ad->bl_lin[n]) /
  4577. MDSS_MDP_AD_BL_SCALE -
  4578. n * (ad->bl_lin[n + 1] - ad->bl_lin[n]) +
  4579. ad->bl_lin[n];
  4580. }
  4581. }
  4582. pr_debug("After linearization = %d\n", *bl_out);
  4583. /* map AD backlight range back to panel backlight range */
  4584. linear_map(*bl_out, bl_out, MDSS_MDP_AD_BL_SCALE,
  4585. ad->bl_mfd->panel_info->bl_max);
  4586. pr_debug("bl_out = %d\n", *bl_out);
  4587. return 0;
  4588. }
  4589. int mdss_mdp_ad_addr_setup(struct mdss_data_type *mdata, u32 *ad_offsets)
  4590. {
  4591. u32 i;
  4592. int rc = 0;
  4593. mdata->ad_off = devm_kzalloc(&mdata->pdev->dev,
  4594. sizeof(struct mdss_mdp_ad) * mdata->nad_cfgs,
  4595. GFP_KERNEL);
  4596. if (!mdata->ad_off) {
  4597. pr_err("unable to setup assertive display hw:devm_kzalloc fail\n");
  4598. return -ENOMEM;
  4599. }
  4600. mdata->ad_cfgs = devm_kzalloc(&mdata->pdev->dev,
  4601. sizeof(struct mdss_ad_info) * mdata->nad_cfgs,
  4602. GFP_KERNEL);
  4603. if (!mdata->ad_cfgs) {
  4604. pr_err("unable to setup assertive display:devm_kzalloc fail\n");
  4605. devm_kfree(&mdata->pdev->dev, mdata->ad_off);
  4606. return -ENOMEM;
  4607. }
  4608. mdata->ad_calc_wq = create_singlethread_workqueue("ad_calc_wq");
  4609. for (i = 0; i < mdata->nad_cfgs; i++) {
  4610. mdata->ad_off[i].base = mdata->mdp_base + ad_offsets[i];
  4611. mdata->ad_off[i].num = i;
  4612. mdata->ad_cfgs[i].num = i;
  4613. mdata->ad_cfgs[i].ops = 0;
  4614. mdata->ad_cfgs[i].reg_sts = 0;
  4615. mdata->ad_cfgs[i].calc_itr = 0;
  4616. mdata->ad_cfgs[i].last_str = 0xFFFFFFFF;
  4617. mdata->ad_cfgs[i].last_bl = 0;
  4618. mutex_init(&mdata->ad_cfgs[i].lock);
  4619. init_completion(&mdata->ad_cfgs[i].comp);
  4620. mdata->ad_cfgs[i].handle.vsync_handler = pp_ad_vsync_handler;
  4621. mdata->ad_cfgs[i].handle.cmd_post_flush = true;
  4622. INIT_WORK(&mdata->ad_cfgs[i].calc_work, pp_ad_calc_worker);
  4623. }
  4624. return rc;
  4625. }
  4626. static int is_valid_calib_ctrl_addr(char __iomem *ptr)
  4627. {
  4628. char __iomem *base;
  4629. int ret = 0, counter = 0;
  4630. int stage = 0;
  4631. struct mdss_mdp_ctl *ctl;
  4632. /* Controller */
  4633. for (counter = 0; counter < mdss_res->nctl; counter++) {
  4634. ctl = mdss_res->ctl_off + counter;
  4635. base = ctl->base;
  4636. if (ptr == base + MDSS_MDP_REG_CTL_TOP) {
  4637. ret = MDP_PP_OPS_READ;
  4638. break;
  4639. } else if (ptr == base + MDSS_MDP_REG_CTL_FLUSH) {
  4640. ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
  4641. break;
  4642. }
  4643. for (stage = 0; stage < (mdss_res->nmixers_intf +
  4644. mdss_res->nmixers_wb); stage++)
  4645. if (ptr == base + MDSS_MDP_REG_CTL_LAYER(stage)) {
  4646. ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
  4647. goto End;
  4648. }
  4649. }
  4650. End:
  4651. return ret;
  4652. }
  4653. static int is_valid_calib_dspp_addr(char __iomem *ptr)
  4654. {
  4655. char __iomem *base;
  4656. int ret = 0, counter = 0;
  4657. struct mdss_mdp_mixer *mixer;
  4658. for (counter = 0; counter < mdss_res->nmixers_intf; counter++) {
  4659. mixer = mdss_res->mixer_intf + counter;
  4660. base = mixer->dspp_base;
  4661. if (ptr == base) {
  4662. ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
  4663. break;
  4664. /* PA range */
  4665. } else if ((ptr >= base + MDSS_MDP_REG_DSPP_PA_BASE) &&
  4666. (ptr <= base + MDSS_MDP_REG_DSPP_PA_BASE +
  4667. MDSS_MDP_PA_SIZE)) {
  4668. ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
  4669. break;
  4670. /* PCC range */
  4671. } else if ((ptr >= base + MDSS_MDP_REG_DSPP_PCC_BASE) &&
  4672. (ptr <= base + MDSS_MDP_REG_DSPP_PCC_BASE +
  4673. MDSS_MDP_PCC_SIZE)) {
  4674. ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
  4675. break;
  4676. /* Gamut range */
  4677. } else if ((ptr >= base + MDSS_MDP_REG_DSPP_GAMUT_BASE) &&
  4678. (ptr <= base + MDSS_MDP_REG_DSPP_GAMUT_BASE +
  4679. MDSS_MDP_GAMUT_SIZE)) {
  4680. ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
  4681. break;
  4682. /* GC range */
  4683. } else if ((ptr >= base + MDSS_MDP_REG_DSPP_GC_BASE) &&
  4684. (ptr <= base + MDSS_MDP_REG_DSPP_GC_BASE +
  4685. MDSS_MDP_GC_SIZE)) {
  4686. ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
  4687. break;
  4688. /* Dither enable/disable */
  4689. } else if ((ptr == base + MDSS_MDP_REG_DSPP_DITHER_DEPTH)) {
  4690. ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
  4691. break;
  4692. /* Six zone and mem color */
  4693. } else if (mdss_res->mdp_rev >= MDSS_MDP_HW_REV_103 &&
  4694. (ptr >= base + MDSS_MDP_REG_DSPP_SIX_ZONE_BASE) &&
  4695. (ptr <= base + MDSS_MDP_REG_DSPP_SIX_ZONE_BASE +
  4696. MDSS_MDP_SIX_ZONE_SIZE +
  4697. MDSS_MDP_MEM_COL_SIZE)) {
  4698. ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
  4699. break;
  4700. }
  4701. }
  4702. return ret;
  4703. }
  4704. static int is_valid_calib_vig_addr(char __iomem *ptr)
  4705. {
  4706. char __iomem *base;
  4707. int ret = 0, counter = 0;
  4708. struct mdss_mdp_pipe *pipe;
  4709. for (counter = 0; counter < mdss_res->nvig_pipes; counter++) {
  4710. pipe = mdss_res->vig_pipes + counter;
  4711. base = pipe->base;
  4712. if (ptr == base + MDSS_MDP_REG_VIG_OP_MODE) {
  4713. ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
  4714. break;
  4715. } else if (ptr == base + MDSS_MDP_REG_SSPP_SRC_FORMAT) {
  4716. ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
  4717. break;
  4718. } else if (ptr == base + MDSS_MDP_REG_SSPP_SRC_CONSTANT_COLOR) {
  4719. ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
  4720. break;
  4721. } else if (ptr == base + MDSS_MDP_REG_SSPP_SRC_UNPACK_PATTERN) {
  4722. ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
  4723. break;
  4724. } else if (ptr == base + MDSS_MDP_REG_SSPP_SRC_OP_MODE) {
  4725. ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
  4726. break;
  4727. /* QSEED2 range */
  4728. } else if ((ptr >= base + MDSS_MDP_REG_VIG_QSEED2_SHARP) &&
  4729. (ptr <= base + MDSS_MDP_REG_VIG_QSEED2_SHARP +
  4730. MDSS_MDP_VIG_QSEED2_SHARP_SIZE)) {
  4731. ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
  4732. break;
  4733. /* PA range */
  4734. } else if ((ptr >= base + MDSS_MDP_REG_VIG_PA_BASE) &&
  4735. (ptr <= base + MDSS_MDP_REG_VIG_PA_BASE +
  4736. MDSS_MDP_PA_SIZE)) {
  4737. ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
  4738. break;
  4739. /* Mem color range */
  4740. } else if (mdss_res->mdp_rev >= MDSS_MDP_HW_REV_103 &&
  4741. (ptr >= base + MDSS_MDP_REG_VIG_MEM_COL_BASE) &&
  4742. (ptr <= base + MDSS_MDP_REG_VIG_MEM_COL_BASE +
  4743. MDSS_MDP_MEM_COL_SIZE)) {
  4744. ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
  4745. break;
  4746. }
  4747. }
  4748. return ret;
  4749. }
  4750. static int is_valid_calib_rgb_addr(char __iomem *ptr)
  4751. {
  4752. char __iomem *base;
  4753. int ret = 0, counter = 0;
  4754. struct mdss_mdp_pipe *pipe;
  4755. for (counter = 0; counter < mdss_res->nrgb_pipes; counter++) {
  4756. pipe = mdss_res->rgb_pipes + counter;
  4757. base = pipe->base;
  4758. if (ptr == base + MDSS_MDP_REG_SSPP_SRC_FORMAT) {
  4759. ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
  4760. break;
  4761. } else if (ptr == base + MDSS_MDP_REG_SSPP_SRC_CONSTANT_COLOR) {
  4762. ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
  4763. break;
  4764. } else if (ptr == base + MDSS_MDP_REG_SSPP_SRC_UNPACK_PATTERN) {
  4765. ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
  4766. break;
  4767. } else if (ptr == base + MDSS_MDP_REG_SSPP_SRC_OP_MODE) {
  4768. ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
  4769. break;
  4770. }
  4771. }
  4772. return ret;
  4773. }
  4774. static int is_valid_calib_dma_addr(char __iomem *ptr)
  4775. {
  4776. char __iomem *base;
  4777. int ret = 0, counter = 0;
  4778. struct mdss_mdp_pipe *pipe;
  4779. for (counter = 0; counter < mdss_res->ndma_pipes; counter++) {
  4780. pipe = mdss_res->dma_pipes + counter;
  4781. base = pipe->base;
  4782. if (ptr == base + MDSS_MDP_REG_SSPP_SRC_FORMAT) {
  4783. ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
  4784. break;
  4785. } else if (ptr == base + MDSS_MDP_REG_SSPP_SRC_CONSTANT_COLOR) {
  4786. ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
  4787. break;
  4788. } else if (ptr == base + MDSS_MDP_REG_SSPP_SRC_UNPACK_PATTERN) {
  4789. ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
  4790. break;
  4791. } else if (ptr == base + MDSS_MDP_REG_SSPP_SRC_OP_MODE) {
  4792. ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
  4793. break;
  4794. }
  4795. }
  4796. return ret;
  4797. }
  4798. static int is_valid_calib_mixer_addr(char __iomem *ptr)
  4799. {
  4800. char __iomem *base;
  4801. int ret = 0, counter = 0;
  4802. int stage = 0;
  4803. struct mdss_mdp_mixer *mixer;
  4804. for (counter = 0; counter < (mdss_res->nmixers_intf +
  4805. mdss_res->nmixers_wb); counter++) {
  4806. mixer = mdss_res->mixer_intf + counter;
  4807. base = mixer->base;
  4808. if (ptr == base + MDSS_MDP_REG_LM_OP_MODE) {
  4809. ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
  4810. break;
  4811. /* GC range */
  4812. } else if ((ptr >= base + MDSS_MDP_REG_LM_GC_LUT_BASE) &&
  4813. (ptr <= base + MDSS_MDP_REG_LM_GC_LUT_BASE +
  4814. MDSS_MDP_GC_SIZE)) {
  4815. ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
  4816. break;
  4817. }
  4818. for (stage = 0; stage < TOTAL_BLEND_STAGES; stage++)
  4819. if (ptr == base + MDSS_MDP_REG_LM_BLEND_OFFSET(stage) +
  4820. MDSS_MDP_REG_LM_BLEND_OP) {
  4821. ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
  4822. goto End;
  4823. } else if (ptr == base +
  4824. MDSS_MDP_REG_LM_BLEND_OFFSET(stage) +
  4825. MDSS_MDP_REG_LM_BLEND_FG_ALPHA) {
  4826. ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
  4827. goto End;
  4828. } else if (ptr == base +
  4829. MDSS_MDP_REG_LM_BLEND_OFFSET(stage) +
  4830. MDSS_MDP_REG_LM_BLEND_BG_ALPHA) {
  4831. ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
  4832. goto End;
  4833. }
  4834. }
  4835. End:
  4836. return ret;
  4837. }
/*
 * is_valid_calib_addr() - gate a calibration register access.
 * @addr:      absolute (already mdp_base-offset) register address.
 * @operation: requested ops mask (MDP_PP_OPS_READ / MDP_PP_OPS_WRITE).
 *
 * Classifies @addr by which hardware region it falls in (version/intf-sel
 * registers, IGC ranges, DSPP, CTL, VIG/RGB/DMA pipes, layer mixers) and
 * dispatches to the matching per-block validator. The allowed ops mask is
 * then ANDed with @operation, so the return value is the subset of the
 * requested ops that is permitted (0 = access rejected).
 *
 * NOTE(review): the alignment test casts the pointer to unsigned int,
 * which assumes a 32-bit address space - revisit if built for 64-bit.
 */
static int is_valid_calib_addr(void *addr, u32 operation)
{
	int ret = 0;
	char __iomem *ptr = addr;
	char __iomem *mixer_base = mdss_res->mixer_intf->base;
	char __iomem *rgb_base = mdss_res->rgb_pipes->base;
	char __iomem *dma_base = mdss_res->dma_pipes->base;
	char __iomem *vig_base = mdss_res->vig_pipes->base;
	char __iomem *ctl_base = mdss_res->ctl_off->base;
	char __iomem *dspp_base = mdss_res->mixer_intf->dspp_base;
	/* registers are word sized; reject unaligned addresses outright */
	if ((unsigned int)addr % 4) {
		ret = 0;
	} else if (ptr == (mdss_res->mdp_base + MDSS_MDP_REG_HW_VERSION) ||
	    ptr == (mdss_res->mdp_base + MDSS_REG_HW_VERSION) ||
	    ptr == (mdss_res->mdp_base + MDSS_MDP_REG_DISP_INTF_SEL)) {
		/* version/interface-select registers are read-only */
		ret = MDP_PP_OPS_READ;
	/* IGC DSPP range */
	} else if (ptr >= (mdss_res->mdp_base + MDSS_MDP_REG_IGC_DSPP_BASE) &&
	    ptr <= (mdss_res->mdp_base + MDSS_MDP_REG_IGC_DSPP_BASE +
						MDSS_MDP_IGC_DSPP_SIZE)) {
		ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
	/* IGC SSPP range */
	} else if (ptr >= (mdss_res->mdp_base + MDSS_MDP_REG_IGC_VIG_BASE) &&
	    ptr <= (mdss_res->mdp_base + MDSS_MDP_REG_IGC_VIG_BASE +
						MDSS_MDP_IGC_SSPP_SIZE)) {
		ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
	} else if (ptr >= dspp_base && ptr < (dspp_base +
		(mdss_res->nmixers_intf * MDSS_MDP_DSPP_ADDRESS_OFFSET))) {
		ret = is_valid_calib_dspp_addr(ptr);
	} else if (ptr >= ctl_base && ptr < (ctl_base + (mdss_res->nctl
					* MDSS_MDP_CTL_ADDRESS_OFFSET))) {
		ret = is_valid_calib_ctrl_addr(ptr);
	} else if (ptr >= vig_base && ptr < (vig_base + (mdss_res->nvig_pipes
					* MDSS_MDP_SSPP_ADDRESS_OFFSET))) {
		ret = is_valid_calib_vig_addr(ptr);
	} else if (ptr >= rgb_base && ptr < (rgb_base + (mdss_res->nrgb_pipes
					* MDSS_MDP_SSPP_ADDRESS_OFFSET))) {
		ret = is_valid_calib_rgb_addr(ptr);
	} else if (ptr >= dma_base && ptr < (dma_base + (mdss_res->ndma_pipes
					* MDSS_MDP_SSPP_ADDRESS_OFFSET))) {
		ret = is_valid_calib_dma_addr(ptr);
	} else if (ptr >= mixer_base && ptr < (mixer_base +
		(mdss_res->nmixers_intf * MDSS_MDP_LM_ADDRESS_OFFSET))) {
		ret = is_valid_calib_mixer_addr(ptr);
	}
	/* only grant the intersection of allowed and requested ops */
	return ret & operation;
}
  4885. int mdss_mdp_calib_config(struct mdp_calib_config_data *cfg, u32 *copyback)
  4886. {
  4887. int ret = -1;
  4888. void *ptr = (void *) cfg->addr;
  4889. ptr = (void *)(((unsigned int) ptr) + (mdss_res->mdp_base));
  4890. if (is_valid_calib_addr(ptr, cfg->ops))
  4891. ret = 0;
  4892. else
  4893. return ret;
  4894. mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
  4895. if (cfg->ops & MDP_PP_OPS_READ) {
  4896. cfg->data = readl_relaxed(ptr);
  4897. *copyback = 1;
  4898. ret = 0;
  4899. } else if (cfg->ops & MDP_PP_OPS_WRITE) {
  4900. writel_relaxed(cfg->data, ptr);
  4901. ret = 0;
  4902. }
  4903. mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
  4904. return ret;
  4905. }
  4906. int mdss_mdp_calib_mode(struct msm_fb_data_type *mfd,
  4907. struct mdss_calib_cfg *cfg)
  4908. {
  4909. if (!mdss_pp_res || !mfd)
  4910. return -EINVAL;
  4911. mutex_lock(&mdss_pp_mutex);
  4912. mfd->calib_mode = cfg->calib_mask;
  4913. mutex_unlock(&mdss_pp_mutex);
  4914. return 0;
  4915. }
  4916. #if defined(CONFIG_MDNIE_TFT_MSM8X26) || defined (CONFIG_FB_MSM_MDSS_S6E8AA0A_HD_PANEL) || defined(CONFIG_MDNIE_VIDEO_ENHANCED)
  4917. void mdss_negative_color(int is_negative_on)
  4918. {
  4919. u32 copyback;
  4920. int i;
  4921. struct mdss_mdp_ctl *ctl;
  4922. struct mdss_mdp_ctl *ctl_d = NULL;
  4923. struct mdss_data_type *mdata;
  4924. mdata = mdss_mdp_get_mdata();
  4925. for (i = 0; i < mdata->nctl; i++) {
  4926. ctl = mdata->ctl_off + i;
  4927. if ((ctl->power_on) && (ctl->mfd) && (ctl->mfd->index == 0)) {
  4928. ctl_d = ctl;
  4929. break;
  4930. }
  4931. }
  4932. if (ctl_d) {
  4933. if(is_negative_on)
  4934. mdss_mdp_pcc_config(&pcc_reverse, &copyback);
  4935. else
  4936. mdss_mdp_pcc_config(&pcc_normal, &copyback);
  4937. } else {
  4938. pr_info("%s:ctl_d is NULL ", __func__);
  4939. }
  4940. }
  4941. #endif
  4942. int mdss_mdp_calib_config_buffer(struct mdp_calib_config_buffer *cfg,
  4943. u32 *copyback)
  4944. {
  4945. int ret = -1, counter;
  4946. uint32_t *buff = NULL, *buff_org = NULL;
  4947. void *ptr;
  4948. int i = 0;
  4949. if (!cfg) {
  4950. pr_err("Invalid buffer pointer\n");
  4951. return ret;
  4952. }
  4953. if (cfg->size == 0) {
  4954. pr_err("Invalid buffer size\n");
  4955. return ret;
  4956. }
  4957. counter = cfg->size / (sizeof(uint32_t) * 2);
  4958. buff_org = buff = kzalloc(cfg->size, GFP_KERNEL);
  4959. if (buff == NULL) {
  4960. pr_err("Config buffer allocation failed\n");
  4961. return ret;
  4962. }
  4963. if (copy_from_user(buff, cfg->buffer, cfg->size)) {
  4964. kfree(buff);
  4965. pr_err("config buffer copy failed\n");
  4966. return ret;
  4967. }
  4968. mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
  4969. for (i = 0; i < counter; i++) {
  4970. ptr = (void *) (((unsigned int) *buff) + mdss_res->mdp_base);
  4971. if (!is_valid_calib_addr(ptr, cfg->ops)) {
  4972. ret = -1;
  4973. pr_err("Address validation failed or access not permitted\n");
  4974. break;
  4975. }
  4976. buff++;
  4977. if (cfg->ops & MDP_PP_OPS_READ)
  4978. *buff = readl_relaxed(ptr);
  4979. else if (cfg->ops & MDP_PP_OPS_WRITE)
  4980. writel_relaxed(*buff, ptr);
  4981. buff++;
  4982. }
  4983. if (ret & MDP_PP_OPS_READ) {
  4984. ret = copy_to_user(cfg->buffer, buff_org, cfg->size);
  4985. *copyback = 1;
  4986. }
  4987. mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
  4988. kfree(buff_org);
  4989. return ret;
  4990. }