
/*
 * Copyright © 2012-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *    Daniel Vetter <daniel.vetter@ffwll.ch>
 *
 */

#include <linux/pm_runtime.h>
#include <linux/vgaarb.h>

#include "i915_drv.h"
#include "intel_drv.h"
/**
 * DOC: runtime pm
 *
 * The i915 driver supports dynamic enabling and disabling of entire hardware
 * blocks at runtime. This is especially important on the display side where
 * software is supposed to control many power gates manually on recent hardware,
 * since on the GT side a lot of the power management is done by the hardware.
 * But even there some manual control at the device level is required.
 *
 * Since i915 supports a diverse set of platforms with a unified codebase and
 * hardware engineers just love to shuffle functionality around between power
 * domains, there's a sizeable amount of indirection required. This file
 * provides generic functions to the driver for grabbing and releasing
 * references for abstract power domains. It then maps those to the actual
 * power wells present for a given platform.
 */
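
/*
 * Typical usage sketch (domain chosen purely for illustration): code that
 * needs a hardware block powered up brackets its register accesses with a
 * power domain reference:
 *
 *	intel_display_power_get(dev_priv, POWER_DOMAIN_AUX_B);
 *	... access the AUX channel registers ...
 *	intel_display_power_put(dev_priv, POWER_DOMAIN_AUX_B);
 *
 * The get call resolves the abstract domain to the power wells backing it on
 * the running platform and powers up any that are still off; the put call
 * drops the reference again.
 */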
#define for_each_power_well(i, power_well, domain_mask, power_domains) \
	for (i = 0; \
	     i < (power_domains)->power_well_count && \
		 ((power_well) = &(power_domains)->power_wells[i]); \
	     i++) \
		for_each_if ((power_well)->domains & (domain_mask))

#define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \
	for (i = (power_domains)->power_well_count - 1; \
	     i >= 0 && ((power_well) = &(power_domains)->power_wells[i]); \
	     i--) \
		for_each_if ((power_well)->domains & (domain_mask))
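
/*
 * Note on the iterators above: the power well array is assumed to be ordered
 * with lower-level (prerequisite) wells first, so walking it forward is
 * suitable for enabling, while disabling and state checks walk it in reverse
 * via the _rev variant (see __intel_display_power_is_enabled() below).
 */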
bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
					 int power_well_id);

static struct i915_power_well *
lookup_power_well(struct drm_i915_private *dev_priv, int power_well_id);

const char *
intel_display_power_domain_str(enum intel_display_power_domain domain)
{
	switch (domain) {
	case POWER_DOMAIN_PIPE_A:
		return "PIPE_A";
	case POWER_DOMAIN_PIPE_B:
		return "PIPE_B";
	case POWER_DOMAIN_PIPE_C:
		return "PIPE_C";
	case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
		return "PIPE_A_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
		return "PIPE_B_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
		return "PIPE_C_PANEL_FITTER";
	case POWER_DOMAIN_TRANSCODER_A:
		return "TRANSCODER_A";
	case POWER_DOMAIN_TRANSCODER_B:
		return "TRANSCODER_B";
	case POWER_DOMAIN_TRANSCODER_C:
		return "TRANSCODER_C";
	case POWER_DOMAIN_TRANSCODER_EDP:
		return "TRANSCODER_EDP";
	case POWER_DOMAIN_TRANSCODER_DSI_A:
		return "TRANSCODER_DSI_A";
	case POWER_DOMAIN_TRANSCODER_DSI_C:
		return "TRANSCODER_DSI_C";
	case POWER_DOMAIN_PORT_DDI_A_LANES:
		return "PORT_DDI_A_LANES";
	case POWER_DOMAIN_PORT_DDI_B_LANES:
		return "PORT_DDI_B_LANES";
	case POWER_DOMAIN_PORT_DDI_C_LANES:
		return "PORT_DDI_C_LANES";
	case POWER_DOMAIN_PORT_DDI_D_LANES:
		return "PORT_DDI_D_LANES";
	case POWER_DOMAIN_PORT_DDI_E_LANES:
		return "PORT_DDI_E_LANES";
	case POWER_DOMAIN_PORT_DSI:
		return "PORT_DSI";
	case POWER_DOMAIN_PORT_CRT:
		return "PORT_CRT";
	case POWER_DOMAIN_PORT_OTHER:
		return "PORT_OTHER";
	case POWER_DOMAIN_VGA:
		return "VGA";
	case POWER_DOMAIN_AUDIO:
		return "AUDIO";
	case POWER_DOMAIN_PLLS:
		return "PLLS";
	case POWER_DOMAIN_AUX_A:
		return "AUX_A";
	case POWER_DOMAIN_AUX_B:
		return "AUX_B";
	case POWER_DOMAIN_AUX_C:
		return "AUX_C";
	case POWER_DOMAIN_AUX_D:
		return "AUX_D";
	case POWER_DOMAIN_GMBUS:
		return "GMBUS";
	case POWER_DOMAIN_INIT:
		return "INIT";
	case POWER_DOMAIN_MODESET:
		return "MODESET";
	default:
		MISSING_CASE(domain);
		return "?";
	}
}
static void intel_power_well_enable(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well)
{
	DRM_DEBUG_KMS("enabling %s\n", power_well->name);
	power_well->ops->enable(dev_priv, power_well);
	power_well->hw_enabled = true;
}

static void intel_power_well_disable(struct drm_i915_private *dev_priv,
				     struct i915_power_well *power_well)
{
	DRM_DEBUG_KMS("disabling %s\n", power_well->name);
	power_well->hw_enabled = false;
	power_well->ops->disable(dev_priv, power_well);
}

static void intel_power_well_get(struct drm_i915_private *dev_priv,
				 struct i915_power_well *power_well)
{
	if (!power_well->count++)
		intel_power_well_enable(dev_priv, power_well);
}

static void intel_power_well_put(struct drm_i915_private *dev_priv,
				 struct i915_power_well *power_well)
{
	WARN(!power_well->count, "Use count on power well %s is already zero",
	     power_well->name);

	if (!--power_well->count)
		intel_power_well_disable(dev_priv, power_well);
}
/*
 * We should only use the power well if we explicitly asked the hardware to
 * enable it, so check if it's enabled and also check if we've requested it to
 * be enabled.
 */
static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	return I915_READ(HSW_PWR_WELL_DRIVER) ==
		(HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
}
/**
 * __intel_display_power_is_enabled - unlocked check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This is the unlocked version of intel_display_power_is_enabled() and should
 * only be used from error capture and recovery code where deadlocks are
 * possible.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				      enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	bool is_enabled;
	int i;

	if (dev_priv->pm.suspended)
		return false;

	power_domains = &dev_priv->power_domains;

	is_enabled = true;

	for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
		if (power_well->always_on)
			continue;

		if (!power_well->hw_enabled) {
			is_enabled = false;
			break;
		}
	}

	return is_enabled;
}
/**
 * intel_display_power_is_enabled - check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This function can be used to check the hw power domain state. It is mostly
 * used in hardware state readout functions. Everywhere else code should rely
 * upon explicit power domain reference counting to ensure that the hardware
 * block is powered up before accessing it.
 *
 * Callers must hold the relevant modesetting locks to ensure that concurrent
 * threads can't disable the power well while the caller tries to read a few
 * registers.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				    enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	bool ret;

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);
	ret = __intel_display_power_is_enabled(dev_priv, domain);
	mutex_unlock(&power_domains->lock);

	return ret;
}
/**
 * intel_display_set_init_power - set the initial power domain state
 * @dev_priv: i915 device instance
 * @enable: whether to enable or disable the initial power domain state
 *
 * For simplicity our driver load/unload and system suspend/resume code assumes
 * that all power domains are always enabled. This function controls the state
 * of this little hack. While the initial power domain state is enabled,
 * runtime pm is effectively disabled.
 */
void intel_display_set_init_power(struct drm_i915_private *dev_priv,
				  bool enable)
{
	if (dev_priv->power_domains.init_power_on == enable)
		return;

	if (enable)
		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
	else
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

	dev_priv->power_domains.init_power_on = enable;
}
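
/*
 * Assumed call pattern, for illustration: load and resume paths call
 * intel_display_set_init_power(dev_priv, true) before touching the hardware,
 * and drop the extra reference with enable=false once initialization is far
 * enough along that runtime pm can take over.
 */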
/*
 * Starting with Haswell, we have a "Power Down Well" that can be turned off
 * when not needed anymore. We have 4 registers that can request the power well
 * to be enabled, and it will only be disabled if none of the registers is
 * requesting it to be enabled.
 */
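/*
 * The four request sources correspond to the HSW_PWR_WELL_BIOS,
 * HSW_PWR_WELL_DRIVER, HSW_PWR_WELL_KVMR and HSW_PWR_WELL_DEBUG registers.
 * The driver owns only the DRIVER request and must clear or sanitize stale
 * requests left by the others when it takes over (see the sync_hw and
 * sanitize helpers below).
 */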
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	struct drm_device *dev = &dev_priv->drm;

	/*
	 * After we re-enable the power well, if we touch VGA register 0x3d5
	 * we'll get unclaimed register interrupts. This stops after we write
	 * anything to the VGA MSR register. The vgacon module uses this
	 * register all the time, so if we unbind our driver and, as a
	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
	 * console_unlock(). So we make sure to touch the VGA MSR register
	 * here, so that vgacon can keep working normally without triggering
	 * interrupts and error messages.
	 */
	vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
	outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
	vga_put(pdev, VGA_RSRC_LEGACY_IO);

	if (IS_BROADWELL(dev))
		gen8_irq_power_well_post_enable(dev_priv,
						1 << PIPE_C | 1 << PIPE_B);
}
static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv)
{
	if (IS_BROADWELL(dev_priv))
		gen8_irq_power_well_pre_disable(dev_priv,
						1 << PIPE_C | 1 << PIPE_B);
}
static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	/*
	 * After we re-enable the power well, if we touch VGA register 0x3d5
	 * we'll get unclaimed register interrupts. This stops after we write
	 * anything to the VGA MSR register. The vgacon module uses this
	 * register all the time, so if we unbind our driver and, as a
	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
	 * console_unlock(). So we make sure to touch the VGA MSR register
	 * here, so that vgacon can keep working normally without triggering
	 * interrupts and error messages.
	 */
	if (power_well->data == SKL_DISP_PW_2) {
		vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
		outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
		vga_put(pdev, VGA_RSRC_LEGACY_IO);

		gen8_irq_power_well_post_enable(dev_priv,
						1 << PIPE_C | 1 << PIPE_B);
	}
}
static void skl_power_well_pre_disable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	if (power_well->data == SKL_DISP_PW_2)
		gen8_irq_power_well_pre_disable(dev_priv,
						1 << PIPE_C | 1 << PIPE_B);
}

static void hsw_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	bool is_enabled, enable_requested;
	uint32_t tmp;

	tmp = I915_READ(HSW_PWR_WELL_DRIVER);
	is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
	enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;

	if (enable) {
		if (!enable_requested)
			I915_WRITE(HSW_PWR_WELL_DRIVER,
				   HSW_PWR_WELL_ENABLE_REQUEST);

		if (!is_enabled) {
			DRM_DEBUG_KMS("Enabling power well\n");
			if (intel_wait_for_register(dev_priv,
						    HSW_PWR_WELL_DRIVER,
						    HSW_PWR_WELL_STATE_ENABLED,
						    HSW_PWR_WELL_STATE_ENABLED,
						    20))
				DRM_ERROR("Timeout enabling power well\n");
			hsw_power_well_post_enable(dev_priv);
		}
	} else {
		if (enable_requested) {
			hsw_power_well_pre_disable(dev_priv);
			I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
			POSTING_READ(HSW_PWR_WELL_DRIVER);
			DRM_DEBUG_KMS("Requesting to disable the power well\n");
		}
	}
}
#define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
	BIT(POWER_DOMAIN_TRANSCODER_A) | \
	BIT(POWER_DOMAIN_PIPE_B) | \
	BIT(POWER_DOMAIN_TRANSCODER_B) | \
	BIT(POWER_DOMAIN_PIPE_C) | \
	BIT(POWER_DOMAIN_TRANSCODER_C) | \
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) | \
	BIT(POWER_DOMAIN_PORT_DDI_E_LANES) | \
	BIT(POWER_DOMAIN_AUX_B) | \
	BIT(POWER_DOMAIN_AUX_C) | \
	BIT(POWER_DOMAIN_AUX_D) | \
	BIT(POWER_DOMAIN_AUDIO) | \
	BIT(POWER_DOMAIN_VGA) | \
	BIT(POWER_DOMAIN_INIT))

#define SKL_DISPLAY_DDI_A_E_POWER_DOMAINS ( \
	BIT(POWER_DOMAIN_PORT_DDI_A_LANES) | \
	BIT(POWER_DOMAIN_PORT_DDI_E_LANES) | \
	BIT(POWER_DOMAIN_INIT))

#define SKL_DISPLAY_DDI_B_POWER_DOMAINS ( \
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
	BIT(POWER_DOMAIN_INIT))

#define SKL_DISPLAY_DDI_C_POWER_DOMAINS ( \
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
	BIT(POWER_DOMAIN_INIT))

#define SKL_DISPLAY_DDI_D_POWER_DOMAINS ( \
	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) | \
	BIT(POWER_DOMAIN_INIT))

#define SKL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
	BIT(POWER_DOMAIN_MODESET) | \
	BIT(POWER_DOMAIN_AUX_A) | \
	BIT(POWER_DOMAIN_INIT))

#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
	BIT(POWER_DOMAIN_TRANSCODER_A) | \
	BIT(POWER_DOMAIN_PIPE_B) | \
	BIT(POWER_DOMAIN_TRANSCODER_B) | \
	BIT(POWER_DOMAIN_PIPE_C) | \
	BIT(POWER_DOMAIN_TRANSCODER_C) | \
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
	BIT(POWER_DOMAIN_AUX_B) | \
	BIT(POWER_DOMAIN_AUX_C) | \
	BIT(POWER_DOMAIN_AUDIO) | \
	BIT(POWER_DOMAIN_VGA) | \
	BIT(POWER_DOMAIN_GMBUS) | \
	BIT(POWER_DOMAIN_INIT))

#define BXT_DISPLAY_DC_OFF_POWER_DOMAINS ( \
	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
	BIT(POWER_DOMAIN_MODESET) | \
	BIT(POWER_DOMAIN_AUX_A) | \
	BIT(POWER_DOMAIN_INIT))

#define BXT_DPIO_CMN_A_POWER_DOMAINS ( \
	BIT(POWER_DOMAIN_PORT_DDI_A_LANES) | \
	BIT(POWER_DOMAIN_AUX_A) | \
	BIT(POWER_DOMAIN_INIT))

#define BXT_DPIO_CMN_BC_POWER_DOMAINS ( \
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
	BIT(POWER_DOMAIN_AUX_B) | \
	BIT(POWER_DOMAIN_AUX_C) | \
	BIT(POWER_DOMAIN_INIT))
static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
{
	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
		  "DC9 already programmed to be enabled.\n");
	WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
		  "DC5 still not disabled to enable DC9.\n");
	WARN_ONCE(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on.\n");
	WARN_ONCE(intel_irqs_enabled(dev_priv),
		  "Interrupts not disabled yet.\n");

	/*
	 * TODO: check for the following to verify the conditions to enter DC9
	 * state are satisfied:
	 * 1] Check relevant display engine registers to verify that the mode
	 *    set disable sequence was followed.
	 * 2] Check that the display uninitialize sequence was executed.
	 */
}

static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(intel_irqs_enabled(dev_priv),
		  "Interrupts not disabled yet.\n");
	WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
		  "DC5 still not disabled.\n");

	/*
	 * TODO: check for the following to verify DC9 state was indeed
	 * entered before programming to disable it:
	 * 1] Check relevant display engine registers to verify that the mode
	 *    set disable sequence was followed.
	 * 2] Check that the display uninitialize sequence was executed.
	 */
}
static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
				u32 state)
{
	int rewrites = 0;
	int rereads = 0;
	u32 v;

	I915_WRITE(DC_STATE_EN, state);

	/*
	 * It has been observed that disabling the dc6 state sometimes
	 * doesn't stick and the DMC keeps returning the old value. Make sure
	 * the write really sticks by re-reading it enough times, and force a
	 * rewrite until we are confident the state is exactly what we want.
	 */
	do {
		v = I915_READ(DC_STATE_EN);

		if (v != state) {
			I915_WRITE(DC_STATE_EN, state);
			rewrites++;
			rereads = 0;
		} else if (rereads++ > 5) {
			break;
		}
	} while (rewrites < 100);

	if (v != state)
		DRM_ERROR("Writing dc state to 0x%x failed, now 0x%x\n",
			  state, v);

	/* Most of the time a single rewrite is enough; avoid log spam */
	if (rewrites > 1)
		DRM_DEBUG_KMS("Rewrote dc state to 0x%x %d times\n",
			      state, rewrites);
}
static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
{
	u32 mask;

	mask = DC_STATE_EN_UPTO_DC5;
	if (IS_BROXTON(dev_priv))
		mask |= DC_STATE_EN_DC9;
	else
		mask |= DC_STATE_EN_UPTO_DC6;

	return mask;
}

void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = I915_READ(DC_STATE_EN) & gen9_dc_mask(dev_priv);

	DRM_DEBUG_KMS("Resetting DC state tracking from %02x to %02x\n",
		      dev_priv->csr.dc_state, val);
	dev_priv->csr.dc_state = val;
}
static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state)
{
	uint32_t val;
	uint32_t mask;

	if (WARN_ON_ONCE(state & ~dev_priv->csr.allowed_dc_mask))
		state &= dev_priv->csr.allowed_dc_mask;

	val = I915_READ(DC_STATE_EN);
	mask = gen9_dc_mask(dev_priv);
	DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n",
		      val & mask, state);

	/* Check if DMC is ignoring our DC state requests */
	if ((val & mask) != dev_priv->csr.dc_state)
		DRM_ERROR("DC state mismatch (0x%x -> 0x%x)\n",
			  dev_priv->csr.dc_state, val & mask);

	val &= ~mask;
	val |= state;

	gen9_write_dc_state(dev_priv, val);

	dev_priv->csr.dc_state = val & mask;
}

void bxt_enable_dc9(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc9(dev_priv);

	DRM_DEBUG_KMS("Enabling DC9\n");

	intel_power_sequencer_reset(dev_priv);
	gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
}

void bxt_disable_dc9(struct drm_i915_private *dev_priv)
{
	assert_can_disable_dc9(dev_priv);

	DRM_DEBUG_KMS("Disabling DC9\n");

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	intel_pps_unlock_regs_wa(dev_priv);
}
static void assert_csr_loaded(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(!I915_READ(CSR_PROGRAM(0)),
		  "CSR program storage start is NULL\n");
	WARN_ONCE(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n");
	WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n");
}
static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
{
	bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
							       SKL_DISP_PW_2);

	WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n");
	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
		  "DC5 already programmed to be enabled.\n");
	assert_rpm_wakelock_held(dev_priv);

	assert_csr_loaded(dev_priv);
}

void gen9_enable_dc5(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc5(dev_priv);

	DRM_DEBUG_KMS("Enabling DC5\n");

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
}

static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
		  "Backlight is not disabled.\n");
	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
		  "DC6 already programmed to be enabled.\n");

	assert_csr_loaded(dev_priv);
}

void skl_enable_dc6(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc6(dev_priv);

	DRM_DEBUG_KMS("Enabling DC6\n");

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
}

void skl_disable_dc6(struct drm_i915_private *dev_priv)
{
	DRM_DEBUG_KMS("Disabling DC6\n");

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
}
static void
gen9_sanitize_power_well_requests(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	enum skl_disp_power_wells power_well_id = power_well->data;
	u32 val;
	u32 mask;

	mask = SKL_POWER_WELL_REQ(power_well_id);

	val = I915_READ(HSW_PWR_WELL_KVMR);
	if (WARN_ONCE(val & mask, "Clearing unexpected KVMR request for %s\n",
		      power_well->name))
		I915_WRITE(HSW_PWR_WELL_KVMR, val & ~mask);

	val = I915_READ(HSW_PWR_WELL_BIOS);
	val |= I915_READ(HSW_PWR_WELL_DEBUG);

	if (!(val & mask))
		return;

	/*
	 * DMC is known to force on the request bits for power well 1 on SKL
	 * and BXT and the misc IO power well on SKL, but we don't expect any
	 * other request bits to be set, so WARN for those.
	 */
	if (power_well_id == SKL_DISP_PW_1 ||
	    ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
	     power_well_id == SKL_DISP_PW_MISC_IO))
		DRM_DEBUG_DRIVER("Clearing auxiliary requests for %s forced on "
				 "by DMC\n", power_well->name);
	else
		WARN_ONCE(1, "Clearing unexpected auxiliary requests for %s\n",
			  power_well->name);

	I915_WRITE(HSW_PWR_WELL_BIOS, val & ~mask);
	I915_WRITE(HSW_PWR_WELL_DEBUG, val & ~mask);
}
static void skl_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	uint32_t tmp, fuse_status;
	uint32_t req_mask, state_mask;
	bool is_enabled, enable_requested, check_fuse_status = false;

	tmp = I915_READ(HSW_PWR_WELL_DRIVER);
	fuse_status = I915_READ(SKL_FUSE_STATUS);

	switch (power_well->data) {
	case SKL_DISP_PW_1:
		if (intel_wait_for_register(dev_priv,
					    SKL_FUSE_STATUS,
					    SKL_FUSE_PG0_DIST_STATUS,
					    SKL_FUSE_PG0_DIST_STATUS,
					    1)) {
			DRM_ERROR("PG0 not enabled\n");
			return;
		}
		break;
	case SKL_DISP_PW_2:
		if (!(fuse_status & SKL_FUSE_PG1_DIST_STATUS)) {
			DRM_ERROR("PG1 in disabled state\n");
			return;
		}
		break;
	case SKL_DISP_PW_DDI_A_E:
	case SKL_DISP_PW_DDI_B:
	case SKL_DISP_PW_DDI_C:
	case SKL_DISP_PW_DDI_D:
	case SKL_DISP_PW_MISC_IO:
		break;
	default:
		WARN(1, "Unknown power well %lu\n", power_well->data);
		return;
	}

	req_mask = SKL_POWER_WELL_REQ(power_well->data);
	enable_requested = tmp & req_mask;
	state_mask = SKL_POWER_WELL_STATE(power_well->data);
	is_enabled = tmp & state_mask;

	if (!enable && enable_requested)
		skl_power_well_pre_disable(dev_priv, power_well);

	if (enable) {
		if (!enable_requested) {
			WARN((tmp & state_mask) &&
			     !I915_READ(HSW_PWR_WELL_BIOS),
			     "Power well status is enabled even though no request was made; this is only valid if done by the BIOS!\n");
			I915_WRITE(HSW_PWR_WELL_DRIVER, tmp | req_mask);
		}

		if (!is_enabled) {
			DRM_DEBUG_KMS("Enabling %s\n", power_well->name);
			check_fuse_status = true;
		}
	} else {
		if (enable_requested) {
			I915_WRITE(HSW_PWR_WELL_DRIVER, tmp & ~req_mask);
			POSTING_READ(HSW_PWR_WELL_DRIVER);
			DRM_DEBUG_KMS("Disabling %s\n", power_well->name);
		}

		if (IS_GEN9(dev_priv))
			gen9_sanitize_power_well_requests(dev_priv, power_well);
	}

	if (wait_for(!!(I915_READ(HSW_PWR_WELL_DRIVER) & state_mask) == enable,
		     1))
		DRM_ERROR("%s %s timeout\n",
			  power_well->name, enable ? "enable" : "disable");

	if (check_fuse_status) {
		if (power_well->data == SKL_DISP_PW_1) {
			if (intel_wait_for_register(dev_priv,
						    SKL_FUSE_STATUS,
						    SKL_FUSE_PG1_DIST_STATUS,
						    SKL_FUSE_PG1_DIST_STATUS,
						    1))
				DRM_ERROR("PG1 distributing status timeout\n");
		} else if (power_well->data == SKL_DISP_PW_2) {
			if (intel_wait_for_register(dev_priv,
						    SKL_FUSE_STATUS,
						    SKL_FUSE_PG2_DIST_STATUS,
						    SKL_FUSE_PG2_DIST_STATUS,
						    1))
				DRM_ERROR("PG2 distributing status timeout\n");
		}
	}

	if (enable && !is_enabled)
		skl_power_well_post_enable(dev_priv, power_well);
}
static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, power_well->count > 0);

	/*
	 * We're taking over the BIOS, so clear any requests made by it since
	 * the driver is in charge now.
	 */
	if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
		I915_WRITE(HSW_PWR_WELL_BIOS, 0);
}

static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, true);
}

static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, false);
}

static bool skl_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	uint32_t mask = SKL_POWER_WELL_REQ(power_well->data) |
			SKL_POWER_WELL_STATE(power_well->data);

	return (I915_READ(HSW_PWR_WELL_DRIVER) & mask) == mask;
}

static void skl_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	skl_set_power_well(dev_priv, power_well, power_well->count > 0);

	/* Clear any request made by BIOS as driver is taking over */
	I915_WRITE(HSW_PWR_WELL_BIOS, 0);
}

static void skl_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	skl_set_power_well(dev_priv, power_well, true);
}

static void skl_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	skl_set_power_well(dev_priv, power_well, false);
}

static enum dpio_phy bxt_power_well_to_phy(struct i915_power_well *power_well)
{
	enum skl_disp_power_wells power_well_id = power_well->data;

	return power_well_id == BXT_DPIO_CMN_A ? DPIO_PHY1 : DPIO_PHY0;
}
static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	enum skl_disp_power_wells power_well_id = power_well->data;
	struct i915_power_well *cmn_a_well = NULL;

	if (power_well_id == BXT_DPIO_CMN_BC) {
		/*
		 * We need to copy the GRC calibration value from the eDP PHY,
		 * so make sure it's powered up.
		 */
		cmn_a_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_A);
		intel_power_well_get(dev_priv, cmn_a_well);
	}

	bxt_ddi_phy_init(dev_priv, bxt_power_well_to_phy(power_well));

	if (cmn_a_well)
		intel_power_well_put(dev_priv, cmn_a_well);
}

static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	bxt_ddi_phy_uninit(dev_priv, bxt_power_well_to_phy(power_well));
}

static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	return bxt_ddi_phy_is_enabled(dev_priv,
				      bxt_power_well_to_phy(power_well));
}

static void bxt_dpio_cmn_power_well_sync_hw(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	if (power_well->count > 0)
		bxt_dpio_cmn_power_well_enable(dev_priv, power_well);
	else
		bxt_dpio_cmn_power_well_disable(dev_priv, power_well);
}

static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *power_well;

	power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_A);
	if (power_well->count > 0)
		bxt_ddi_phy_verify_state(dev_priv,
					 bxt_power_well_to_phy(power_well));

	power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_BC);
	if (power_well->count > 0)
		bxt_ddi_phy_verify_state(dev_priv,
					 bxt_power_well_to_phy(power_well));
}
static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	return (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0;
}

static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
{
	u32 tmp = I915_READ(DBUF_CTL);

	WARN((tmp & (DBUF_POWER_STATE | DBUF_POWER_REQUEST)) !=
	     (DBUF_POWER_STATE | DBUF_POWER_REQUEST),
	     "Unexpected DBuf power state (0x%08x)\n", tmp);
}
static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	WARN_ON(dev_priv->cdclk_freq !=
		dev_priv->display.get_display_clock_speed(&dev_priv->drm));

	gen9_assert_dbuf_enabled(dev_priv);

	if (IS_BROXTON(dev_priv))
		bxt_verify_ddi_phy_power_wells(dev_priv);
}

static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	if (!dev_priv->csr.dmc_payload)
		return;

	if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC6)
		skl_enable_dc6(dev_priv);
	else if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5)
		gen9_enable_dc5(dev_priv);
}

static void gen9_dc_off_power_well_sync_hw(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	if (power_well->count > 0)
		gen9_dc_off_power_well_enable(dev_priv, power_well);
	else
		gen9_dc_off_power_well_disable(dev_priv, power_well);
}

static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
}

static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
					      struct i915_power_well *power_well)
{
	return true;
}
static void vlv_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	enum punit_power_well power_well_id = power_well->data;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(power_well_id);
	state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
			 PUNIT_PWRGT_PWR_GATE(power_well_id);

	mutex_lock(&dev_priv->rps.hw_lock);
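
	/*
	 * Punit handshake, in outline: write the desired power state into
	 * PUNIT_REG_PWRGT_CTRL, then poll PUNIT_REG_PWRGT_STATUS until the
	 * hardware reports that the request has taken effect.
	 */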
#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
	ctrl &= ~mask;
	ctrl |= state;
	vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));

#undef COND

out:
	mutex_unlock(&dev_priv->rps.hw_lock);
}
static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, power_well->count > 0);
}

static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);
}

static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, false);
}

static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	int power_well_id = power_well->data;
	bool enabled = false;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(power_well_id);
	ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);

	mutex_lock(&dev_priv->rps.hw_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
		state != PUNIT_PWRGT_PWR_GATE(power_well_id));
	if (state == ctrl)
		enabled = true;

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
	WARN_ON(ctrl != state);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return enabled;
}
static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 val;

	/*
	 * On driver load, a pipe may be active and driving a DSI display.
	 * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
	 * (and never recovering) in this case. intel_dsi_post_disable() will
	 * clear it when we turn off the display.
	 */
	val = I915_READ(DSPCLK_GATE_D);
	val &= DPOUNIT_CLOCK_GATE_DISABLE;
	val |= VRHUNIT_CLOCK_GATE_DISABLE;
	I915_WRITE(DSPCLK_GATE_D, val);

	/*
	 * Disable trickle feed and enable pnd deadline calculation
	 */
	I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
	I915_WRITE(CBR1_VLV, 0);

	WARN_ON(dev_priv->rawclk_freq == 0);
	I915_WRITE(RAWCLK_FREQ_VLV,
		   DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 1000));
}
static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	enum pipe pipe;

	/*
	 * Enable the CRI clock source so we can get at the
	 * display and the reference clock for VGA
	 * hotplug / manual detection. Supposedly DSI also
	 * needs the ref clock up and running.
	 *
	 * CHV DPLL B/C have some issues if VGA mode is enabled.
	 */
	for_each_pipe(&dev_priv->drm, pipe) {
		u32 val = I915_READ(DPLL(pipe));

		val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
		if (pipe != PIPE_A)
			val |= DPLL_INTEGRATED_CRI_CLK_VLV;

		I915_WRITE(DPLL(pipe), val);
	}

	vlv_init_display_clock_gating(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_enable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * During driver initialization/resume we can skip restoring the
	 * parts of the HW/SW state that will be explicitly (re)initialized
	 * anyway.
	 */
	if (dev_priv->power_domains.initializing)
		return;

	intel_hpd_init(dev_priv);

	/* Re-enable the ADPA, if we have one */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		if (encoder->type == INTEL_OUTPUT_ANALOG)
			intel_crt_reset(&encoder->base);
	}

	i915_redisable_vga_power_on(&dev_priv->drm);

	intel_pps_unlock_regs_wa(dev_priv);
}
static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_disable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* make sure we're done processing display irqs */
	synchronize_irq(dev_priv->drm.irq);

	intel_power_sequencer_reset(dev_priv);

	/* Prevent us from re-enabling polling by accident in late suspend */
	if (!dev_priv->drm.dev->power.is_suspended)
		intel_hpd_poll_init(dev_priv);
}
static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);

	vlv_set_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}

static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);

	vlv_display_power_well_deinit(dev_priv);

	vlv_set_power_well(dev_priv, power_well, false);
}
static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);

	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */

	vlv_set_power_well(dev_priv, power_well, true);

	/*
	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
	 * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
	 *    a. GUnit 0x2110 bit[0] set to 1 (def 0)
	 *    b. The other bits such as sfr settings / modesel may all
	 *       be set to 0.
	 *
	 * This should only be done on init and resume from S3 with
	 * both PLLs disabled, or we risk losing DPIO and PLL
	 * synchronization.
	 */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
}

static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum pipe pipe;

	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);

	for_each_pipe(dev_priv, pipe)
		assert_pll_disabled(dev_priv, pipe);

	/* Assert common reset */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);

	vlv_set_power_well(dev_priv, power_well, false);
}
#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)

static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
						 int power_well_id)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	int i;

	for (i = 0; i < power_domains->power_well_count; i++) {
		struct i915_power_well *power_well;

		power_well = &power_domains->power_wells[i];
		if (power_well->data == power_well_id)
			return power_well;
	}

	return NULL;
}
#define BITS_SET(val, bits) (((val) & (bits)) == (bits))

static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);
	u32 phy_control = dev_priv->chv_phy_control;
	u32 phy_status = 0;
	u32 phy_status_mask = 0xffffffff;

	/*
	 * The BIOS can leave the PHY in some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[DPIO_PHY0])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
				     PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));

	if (!dev_priv->chv_phy_assert[DPIO_PHY1])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));

	if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY0);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);

		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);

		/* CL1 is on whenever anything is on in either channel */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);

		/*
		 * The DPLLB check accounts for the pipe B + port A usage
		 * with CL2 powered up but all the lanes in the second channel
		 * powered down.
		 */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
		    (I915_READ(DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
	}

	if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY1);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
	}

	phy_status &= phy_status_mask;

	/*
	 * The PHY may be busy with some initial calibration and whatnot,
	 * so the power state can take a while to actually change.
	 */
	if (intel_wait_for_register(dev_priv,
				    DISPLAY_PHY_STATUS,
				    phy_status_mask,
				    phy_status,
				    10))
		DRM_ERROR("Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
			  I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask,
			  phy_status, dev_priv->chv_phy_control);
}

#undef BITS_SET
static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	enum dpio_phy phy;
	enum pipe pipe;
	uint32_t tmp;

	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
		     power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);

	if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		pipe = PIPE_A;
		phy = DPIO_PHY0;
	} else {
		pipe = PIPE_C;
		phy = DPIO_PHY1;
	}

	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
	vlv_set_power_well(dev_priv, power_well, true);

	/* Poll for phypwrgood signal */
	if (intel_wait_for_register(dev_priv,
				    DISPLAY_PHY_STATUS,
				    PHY_POWERGOOD(phy),
				    PHY_POWERGOOD(phy),
				    1))
		DRM_ERROR("Display PHY %d is not powered up\n", phy);
	mutex_lock(&dev_priv->sb_lock);

	/* Enable dynamic power down */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
	tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
		DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);

	if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
		tmp |= DPIO_DYNPWRDOWNEN_CH1;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
	} else {
		/*
		 * Force the non-existing CL2 off. BXT does this
		 * too, so maybe it saves some power even though
		 * CL2 doesn't exist?
		 */
		tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
		tmp |= DPIO_CL2_LDOFUSE_PWRENB;
		vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
	}

	mutex_unlock(&dev_priv->sb_lock);

	dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		      phy, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);
}

static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum dpio_phy phy;

	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
		     power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);

	if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		phy = DPIO_PHY0;
		assert_pll_disabled(dev_priv, PIPE_A);
		assert_pll_disabled(dev_priv, PIPE_B);
	} else {
		phy = DPIO_PHY1;
		assert_pll_disabled(dev_priv, PIPE_C);
	}

	dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	vlv_set_power_well(dev_priv, power_well, false);

	DRM_DEBUG_KMS("Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		      phy, dev_priv->chv_phy_control);

	/* PHY is fully reset now, so we can enable the PHY state asserts */
	dev_priv->chv_phy_assert[phy] = true;

	assert_chv_phy_status(dev_priv);
}

static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
				     enum dpio_channel ch, bool override, unsigned int mask)
{
	enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
	u32 reg, val, expected, actual;
	/*
	 * The BIOS can leave the PHY in some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[phy])
		return;

	if (ch == DPIO_CH0)
		reg = _CHV_CMN_DW0_CH0;
	else
		reg = _CHV_CMN_DW6_CH1;

	mutex_lock(&dev_priv->sb_lock);
	val = vlv_dpio_read(dev_priv, pipe, reg);
	mutex_unlock(&dev_priv->sb_lock);

	/*
	 * This assumes !override is only used when the port is disabled.
	 * All lanes should power down even without the override when
	 * the port is disabled.
	 */
	if (!override || mask == 0xf) {
		expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;

		/*
		 * If CH1 common lane is not active anymore
		 * (eg. for pipe B DPLL) the entire channel will
		 * shut down, which causes the common lane registers
		 * to read as 0. That means we can't actually check
		 * the lane power down status bits, but as the entire
		 * register reads as 0 it's a good indication that the
		 * channel is indeed entirely powered down.
		 */
		if (ch == DPIO_CH1 && val == 0)
			expected = 0;
	} else if (mask != 0x0) {
		expected = DPIO_ANYDL_POWERDOWN;
	} else {
		expected = 0;
	}

	if (ch == DPIO_CH0)
		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
	else
		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
	actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;

	WARN(actual != expected,
	     "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
	     !!(actual & DPIO_ALLDL_POWERDOWN), !!(actual & DPIO_ANYDL_POWERDOWN),
	     !!(expected & DPIO_ALLDL_POWERDOWN), !!(expected & DPIO_ANYDL_POWERDOWN),
	     reg, val);
}

bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
			  enum dpio_channel ch, bool override)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	bool was_override;

	mutex_lock(&power_domains->lock);

	was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	if (override == was_override)
		goto out;

	if (override)
		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
		      phy, ch, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);

out:
	mutex_unlock(&power_domains->lock);

	return was_override;
}
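
/*
 * Usage sketch (illustrative, not part of the original file): since the
 * function returns the previous override state, a caller that temporarily
 * forces the override on can save and restore that state:
 *
 *	bool was_override;
 *
 *	was_override = chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
 *	... poke at the PHY while the channel is forced alive ...
 *	chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, was_override);
 */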

void chv_phy_powergate_lanes(struct intel_encoder *encoder,
			     bool override, unsigned int mask)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(&encoder->base));
	enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));

	mutex_lock(&power_domains->lock);

	dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
	dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);

	if (override)
		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
		      phy, ch, mask, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);

	assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);

	mutex_unlock(&power_domains->lock);
}
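
/*
 * Usage sketch (illustrative, not part of the original file): judging by the
 * assert above, @mask marks the lanes to be powered down (0xf = all down,
 * 0x0 = none). An encoder using only data lanes 0-1 could therefore gate the
 * unused upper lane pair with:
 *
 *	chv_phy_powergate_lanes(encoder, true, 0xc);
 *
 * and drop the override again on teardown with:
 *
 *	chv_phy_powergate_lanes(encoder, false, 0x0);
 */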

static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	enum pipe pipe = power_well->data;
	bool enabled;
	u32 state, ctrl;

	mutex_lock(&dev_priv->rps.hw_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
	enabled = state == DP_SSS_PWR_ON(pipe);

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
	WARN_ON(ctrl << 16 != state);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return enabled;
}
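
/*
 * Illustrative note (not from the original source): the "ctrl << 16 != state"
 * check encodes the punit register layout, where a pipe's power state status
 * field (DP_SSS) sits exactly 16 bits above its request field (DP_SSC). Once
 * a requested transition has completed, shifting the control bits left by 16
 * must reproduce the status bits; anything else means a transition is still
 * in flight or someone else changed the request.
 */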

static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well,
				    bool enable)
{
	enum pipe pipe = power_well->data;
	u32 state;
	u32 ctrl;

	state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);

	mutex_lock(&dev_priv->rps.hw_lock);

#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	ctrl &= ~DP_SSC_MASK(pipe);
	ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ));

#undef COND

out:
	mutex_unlock(&dev_priv->rps.hw_lock);
}

static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PIPE_A);

	chv_set_pipe_power_well(dev_priv, power_well, power_well->count > 0);
}

static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PIPE_A);

	chv_set_pipe_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}

static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PIPE_A);

	vlv_display_power_well_deinit(dev_priv);

	chv_set_pipe_power_well(dev_priv, power_well, false);
}

static void
__intel_display_power_get_domain(struct drm_i915_private *dev_priv,
				 enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;
	int i;

	for_each_power_well(i, power_well, BIT(domain), power_domains)
		intel_power_well_get(dev_priv, power_well);

	power_domains->domain_use_count[domain]++;
}

/**
 * intel_display_power_get - grab a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function grabs a power domain reference for @domain and ensures that the
 * power domain and all its parents are powered up. Therefore users should only
 * grab a reference to the innermost power domain they need.
 *
 * Any power domain reference obtained by this function must have a symmetric
 * call to intel_display_power_put() to release the reference again.
 */
void intel_display_power_get(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	intel_runtime_pm_get(dev_priv);

	mutex_lock(&power_domains->lock);

	__intel_display_power_get_domain(dev_priv, domain);

	mutex_unlock(&power_domains->lock);
}
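
/*
 * Usage sketch (illustrative, not part of the original file): bracket any
 * hardware access that depends on a power domain with a get/put pair:
 *
 *	intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
 *	... access registers that need pipe A powered ...
 *	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
 */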

/**
 * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function grabs a power domain reference for @domain only if the power
 * domain is already enabled, and returns whether the reference was obtained.
 * Unlike intel_display_power_get() it never powers anything up.
 *
 * Any power domain reference obtained by this function (i.e. when true is
 * returned) must have a symmetric call to intel_display_power_put() to
 * release the reference again.
 */
bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
					enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	bool is_enabled;

	if (!intel_runtime_pm_get_if_in_use(dev_priv))
		return false;

	mutex_lock(&power_domains->lock);

	if (__intel_display_power_is_enabled(dev_priv, domain)) {
		__intel_display_power_get_domain(dev_priv, domain);
		is_enabled = true;
	} else {
		is_enabled = false;
	}

	mutex_unlock(&power_domains->lock);

	if (!is_enabled)
		intel_runtime_pm_put(dev_priv);

	return is_enabled;
}
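
/*
 * Usage sketch (illustrative, not part of the original file): callers must
 * check the return value and only put the reference on success:
 *
 *	if (intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PIPE_A)) {
 *		... read registers that are only valid while powered ...
 *		intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
 *	}
 */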

/**
 * intel_display_power_put - release a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get() and might power down the corresponding hardware
 * block right away if this is the last reference.
 */
void intel_display_power_put(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	int i;

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);

	WARN(!power_domains->domain_use_count[domain],
	     "Use count on domain %s is already zero\n",
	     intel_display_power_domain_str(domain));
	power_domains->domain_use_count[domain]--;

	for_each_power_well_rev(i, power_well, BIT(domain), power_domains)
		intel_power_well_put(dev_priv, power_well);

	mutex_unlock(&power_domains->lock);

	intel_runtime_pm_put(dev_priv);
}

#define HSW_DISPLAY_POWER_DOMAINS ( \
	BIT(POWER_DOMAIN_PIPE_B) | \
	BIT(POWER_DOMAIN_PIPE_C) | \
	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
	BIT(POWER_DOMAIN_TRANSCODER_A) | \
	BIT(POWER_DOMAIN_TRANSCODER_B) | \
	BIT(POWER_DOMAIN_TRANSCODER_C) | \
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) | \
	BIT(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \
	BIT(POWER_DOMAIN_VGA) | \
	BIT(POWER_DOMAIN_AUDIO) | \
	BIT(POWER_DOMAIN_INIT))

#define BDW_DISPLAY_POWER_DOMAINS ( \
	BIT(POWER_DOMAIN_PIPE_B) | \
	BIT(POWER_DOMAIN_PIPE_C) | \
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
	BIT(POWER_DOMAIN_TRANSCODER_A) | \
	BIT(POWER_DOMAIN_TRANSCODER_B) | \
	BIT(POWER_DOMAIN_TRANSCODER_C) | \
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) | \
	BIT(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \
	BIT(POWER_DOMAIN_VGA) | \
	BIT(POWER_DOMAIN_AUDIO) | \
	BIT(POWER_DOMAIN_INIT))

#define VLV_DISPLAY_POWER_DOMAINS ( \
	BIT(POWER_DOMAIN_PIPE_A) | \
	BIT(POWER_DOMAIN_PIPE_B) | \
	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
	BIT(POWER_DOMAIN_TRANSCODER_A) | \
	BIT(POWER_DOMAIN_TRANSCODER_B) | \
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
	BIT(POWER_DOMAIN_PORT_DSI) | \
	BIT(POWER_DOMAIN_PORT_CRT) | \
	BIT(POWER_DOMAIN_VGA) | \
	BIT(POWER_DOMAIN_AUDIO) | \
	BIT(POWER_DOMAIN_AUX_B) | \
	BIT(POWER_DOMAIN_AUX_C) | \
	BIT(POWER_DOMAIN_GMBUS) | \
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_CMN_BC_POWER_DOMAINS ( \
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
	BIT(POWER_DOMAIN_PORT_CRT) | \
	BIT(POWER_DOMAIN_AUX_B) | \
	BIT(POWER_DOMAIN_AUX_C) | \
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
	BIT(POWER_DOMAIN_AUX_B) | \
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
	BIT(POWER_DOMAIN_AUX_B) | \
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
	BIT(POWER_DOMAIN_AUX_C) | \
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
	BIT(POWER_DOMAIN_AUX_C) | \
	BIT(POWER_DOMAIN_INIT))

#define CHV_DISPLAY_POWER_DOMAINS ( \
	BIT(POWER_DOMAIN_PIPE_A) | \
	BIT(POWER_DOMAIN_PIPE_B) | \
	BIT(POWER_DOMAIN_PIPE_C) | \
	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
	BIT(POWER_DOMAIN_TRANSCODER_A) | \
	BIT(POWER_DOMAIN_TRANSCODER_B) | \
	BIT(POWER_DOMAIN_TRANSCODER_C) | \
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) | \
	BIT(POWER_DOMAIN_PORT_DSI) | \
	BIT(POWER_DOMAIN_VGA) | \
	BIT(POWER_DOMAIN_AUDIO) | \
	BIT(POWER_DOMAIN_AUX_B) | \
	BIT(POWER_DOMAIN_AUX_C) | \
	BIT(POWER_DOMAIN_AUX_D) | \
	BIT(POWER_DOMAIN_GMBUS) | \
	BIT(POWER_DOMAIN_INIT))

#define CHV_DPIO_CMN_BC_POWER_DOMAINS ( \
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
	BIT(POWER_DOMAIN_AUX_B) | \
	BIT(POWER_DOMAIN_AUX_C) | \
	BIT(POWER_DOMAIN_INIT))

#define CHV_DPIO_CMN_D_POWER_DOMAINS ( \
	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) | \
	BIT(POWER_DOMAIN_AUX_D) | \
	BIT(POWER_DOMAIN_INIT))

static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
	.sync_hw = i9xx_always_on_power_well_noop,
	.enable = i9xx_always_on_power_well_noop,
	.disable = i9xx_always_on_power_well_noop,
	.is_enabled = i9xx_always_on_power_well_enabled,
};

static const struct i915_power_well_ops chv_pipe_power_well_ops = {
	.sync_hw = chv_pipe_power_well_sync_hw,
	.enable = chv_pipe_power_well_enable,
	.disable = chv_pipe_power_well_disable,
	.is_enabled = chv_pipe_power_well_enabled,
};

static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = chv_dpio_cmn_power_well_enable,
	.disable = chv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static struct i915_power_well i9xx_always_on_power_well[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
	},
};

static const struct i915_power_well_ops hsw_power_well_ops = {
	.sync_hw = hsw_power_well_sync_hw,
	.enable = hsw_power_well_enable,
	.disable = hsw_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};

static const struct i915_power_well_ops skl_power_well_ops = {
	.sync_hw = skl_power_well_sync_hw,
	.enable = skl_power_well_enable,
	.disable = skl_power_well_disable,
	.is_enabled = skl_power_well_enabled,
};

static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
	.sync_hw = gen9_dc_off_power_well_sync_hw,
	.enable = gen9_dc_off_power_well_enable,
	.disable = gen9_dc_off_power_well_disable,
	.is_enabled = gen9_dc_off_power_well_enabled,
};

static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
	.sync_hw = bxt_dpio_cmn_power_well_sync_hw,
	.enable = bxt_dpio_cmn_power_well_enable,
	.disable = bxt_dpio_cmn_power_well_disable,
	.is_enabled = bxt_dpio_cmn_power_well_enabled,
};

static struct i915_power_well hsw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = HSW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
	},
};

static struct i915_power_well bdw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = BDW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
	},
};

static const struct i915_power_well_ops vlv_display_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_display_power_well_enable,
	.disable = vlv_display_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_dpio_cmn_power_well_enable,
	.disable = vlv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_power_well_enable,
	.disable = vlv_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static struct i915_power_well vlv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.data = PUNIT_POWER_WELL_ALWAYS_ON,
	},
	{
		.name = "display",
		.domains = VLV_DISPLAY_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DISP2D,
		.ops = &vlv_display_power_well_ops,
	},
	{
		.name = "dpio-tx-b-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
	},
	{
		.name = "dpio-tx-b-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
	},
	{
		.name = "dpio-tx-c-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
	},
	{
		.name = "dpio-tx-c-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
	},
	{
		.name = "dpio-common",
		.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
		.ops = &vlv_dpio_cmn_power_well_ops,
	},
};

static struct i915_power_well chv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		/*
		 * Pipe A power well is the new disp2d well. Pipe B and C
		 * power wells don't actually exist. Pipe A power well is
		 * required for any pipe to work.
		 */
		.domains = CHV_DISPLAY_POWER_DOMAINS,
		.data = PIPE_A,
		.ops = &chv_pipe_power_well_ops,
	},
	{
		.name = "dpio-common-bc",
		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
		.ops = &chv_dpio_cmn_power_well_ops,
	},
	{
		.name = "dpio-common-d",
		.domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DPIO_CMN_D,
		.ops = &chv_dpio_cmn_power_well_ops,
	},
};

bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
					 int power_well_id)
{
	struct i915_power_well *power_well;
	bool ret;

	power_well = lookup_power_well(dev_priv, power_well_id);
	ret = power_well->ops->is_enabled(dev_priv, power_well);

	return ret;
}

static struct i915_power_well skl_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.data = SKL_DISP_PW_ALWAYS_ON,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.domains = 0,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_1,
	},
	{
		.name = "MISC IO power well",
		/* Handled by the DMC firmware */
		.domains = 0,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_MISC_IO,
	},
	{
		.name = "DC off",
		.domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.data = SKL_DISP_PW_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_2,
	},
	{
		.name = "DDI A/E power well",
		.domains = SKL_DISPLAY_DDI_A_E_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_DDI_A_E,
	},
	{
		.name = "DDI B power well",
		.domains = SKL_DISPLAY_DDI_B_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_DDI_B,
	},
	{
		.name = "DDI C power well",
		.domains = SKL_DISPLAY_DDI_C_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_DDI_C,
	},
	{
		.name = "DDI D power well",
		.domains = SKL_DISPLAY_DDI_D_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_DDI_D,
	},
};

static struct i915_power_well bxt_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "power well 1",
		.domains = 0,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_1,
	},
	{
		.name = "DC off",
		.domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.data = SKL_DISP_PW_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_2,
	},
	{
		.name = "dpio-common-a",
		.domains = BXT_DPIO_CMN_A_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.data = BXT_DPIO_CMN_A,
	},
	{
		.name = "dpio-common-bc",
		.domains = BXT_DPIO_CMN_BC_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.data = BXT_DPIO_CMN_BC,
	},
};

static int
sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
				   int disable_power_well)
{
	if (disable_power_well >= 0)
		return !!disable_power_well;

	return 1;
}

static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
				    int enable_dc)
{
	uint32_t mask;
	int requested_dc;
	int max_dc;

	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
		max_dc = 2;
		mask = 0;
	} else if (IS_BROXTON(dev_priv)) {
		max_dc = 1;
		/*
		 * DC9 has a separate HW flow from the rest of the DC states,
		 * not depending on the DMC firmware. It's needed by system
		 * suspend/resume, so allow it unconditionally.
		 */
		mask = DC_STATE_EN_DC9;
	} else {
		max_dc = 0;
		mask = 0;
	}

	if (!i915.disable_power_well)
		max_dc = 0;

	if (enable_dc >= 0 && enable_dc <= max_dc) {
		requested_dc = enable_dc;
	} else if (enable_dc == -1) {
		requested_dc = max_dc;
	} else if (enable_dc > max_dc && enable_dc <= 2) {
		DRM_DEBUG_KMS("Adjusting requested max DC state (%d->%d)\n",
			      enable_dc, max_dc);
		requested_dc = max_dc;
	} else {
		DRM_ERROR("Unexpected value for enable_dc (%d)\n", enable_dc);
		requested_dc = max_dc;
	}

	if (requested_dc > 1)
		mask |= DC_STATE_EN_UPTO_DC6;
	if (requested_dc > 0)
		mask |= DC_STATE_EN_UPTO_DC5;

	DRM_DEBUG_KMS("Allowed DC state mask %02x\n", mask);

	return mask;
}
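
/*
 * Worked example (illustrative, not part of the original file): on SKL with
 * power well support enabled and the default enable_dc=-1, max_dc is 2, so
 * requested_dc becomes 2 and the returned mask is
 * DC_STATE_EN_UPTO_DC5 | DC_STATE_EN_UPTO_DC6. Passing enable_dc=1 instead
 * yields only DC_STATE_EN_UPTO_DC5.
 */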

#define set_power_wells(power_domains, __power_wells) ({		\
	(power_domains)->power_wells = (__power_wells);			\
	(power_domains)->power_well_count = ARRAY_SIZE(__power_wells);	\
})
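
/*
 * Illustrative note (not from the original source): the macro takes the well
 * array itself rather than a pointer so that ARRAY_SIZE() can compute the
 * element count at compile time; e.g.
 * set_power_wells(power_domains, chv_power_wells) records both the
 * chv_power_wells array and ARRAY_SIZE(chv_power_wells).
 */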

/**
 * intel_power_domains_init - initializes the power domain structures
 * @dev_priv: i915 device instance
 *
 * Initializes the power domain structures for @dev_priv depending upon the
 * supported platform.
 */
int intel_power_domains_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	i915.disable_power_well = sanitize_disable_power_well_option(dev_priv,
						     i915.disable_power_well);
	dev_priv->csr.allowed_dc_mask = get_allowed_dc_mask(dev_priv,
							    i915.enable_dc);

	BUILD_BUG_ON(POWER_DOMAIN_NUM > 31);

	mutex_init(&power_domains->lock);

	/*
	 * The enabling order will be from lower to higher indexed wells,
	 * the disabling order is reversed.
	 */
	if (IS_HASWELL(dev_priv)) {
		set_power_wells(power_domains, hsw_power_wells);
	} else if (IS_BROADWELL(dev_priv)) {
		set_power_wells(power_domains, bdw_power_wells);
	} else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
		set_power_wells(power_domains, skl_power_wells);
	} else if (IS_BROXTON(dev_priv)) {
		set_power_wells(power_domains, bxt_power_wells);
	} else if (IS_CHERRYVIEW(dev_priv)) {
		set_power_wells(power_domains, chv_power_wells);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		set_power_wells(power_domains, vlv_power_wells);
	} else {
		set_power_wells(power_domains, i9xx_always_on_power_well);
	}

	return 0;
}

/**
 * intel_power_domains_fini - finalizes the power domain structures
 * @dev_priv: i915 device instance
 *
 * Finalizes the power domain structures for @dev_priv depending upon the
 * supported platform. This function also disables runtime pm and ensures that
 * the device stays powered up so that the driver can be reloaded.
 */
void intel_power_domains_fini(struct drm_i915_private *dev_priv)
{
	struct device *kdev = &dev_priv->drm.pdev->dev;

	/*
	 * The i915.ko module is still not prepared to be loaded when
	 * the power well is not enabled, so just enable it in case
	 * we're going to unload/reload.
	 * The following also reacquires the RPM reference the core passed
	 * to the driver during loading, which is dropped in
	 * intel_runtime_pm_enable(). We have to hand back the control of the
	 * device to the core with this reference held.
	 */
	intel_display_set_init_power(dev_priv, true);

	/* Remove the refcount we took to keep power well support disabled. */
	if (!i915.disable_power_well)
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

	/*
	 * Remove the refcount we took in intel_runtime_pm_enable() in case
	 * the platform doesn't support runtime PM.
	 */
	if (!HAS_RUNTIME_PM(dev_priv))
		pm_runtime_put(kdev);
}

static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;
	int i;

	mutex_lock(&power_domains->lock);
	for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
		power_well->ops->sync_hw(dev_priv, power_well);
		power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
								     power_well);
	}
	mutex_unlock(&power_domains->lock);
}

static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
{
	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
	POSTING_READ(DBUF_CTL);

	udelay(10);

	if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
		DRM_ERROR("DBuf power enable timeout\n");
}

static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
{
	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
	POSTING_READ(DBUF_CTL);

	udelay(10);

	if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
		DRM_ERROR("DBuf power disable timeout!\n");
}

static void skl_display_core_init(struct drm_i915_private *dev_priv,
				  bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;
	uint32_t val;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* enable PCH reset handshake */
	val = I915_READ(HSW_NDE_RSTWRN_OPT);
	I915_WRITE(HSW_NDE_RSTWRN_OPT, val | RESET_PCH_HANDSHAKE_ENABLE);

	/* enable PG1 and Misc I/O */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
	intel_power_well_enable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	skl_init_cdclk(dev_priv);

	gen9_dbuf_enable(dev_priv);

	if (resume && dev_priv->csr.dmc_payload)
		intel_csr_load_program(dev_priv);
}

static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	gen9_dbuf_disable(dev_priv);

	skl_uninit_cdclk(dev_priv);

	/* The spec doesn't call for removing the reset handshake flag */

	/* disable PG1 and Misc I/O */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
	intel_power_well_disable(dev_priv, well);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);

	mutex_unlock(&power_domains->lock);
}

void bxt_display_core_init(struct drm_i915_private *dev_priv,
			   bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;
	uint32_t val;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/*
	 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
	 * or else the reset will hang because there is no PCH to respond.
	 * Move the handshake programming to initialization sequence.
	 * Previously was left up to BIOS.
	 */
	val = I915_READ(HSW_NDE_RSTWRN_OPT);
	val &= ~RESET_PCH_HANDSHAKE_ENABLE;
	I915_WRITE(HSW_NDE_RSTWRN_OPT, val);

	/* Enable PG1 */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	bxt_init_cdclk(dev_priv);

	gen9_dbuf_enable(dev_priv);

	if (resume && dev_priv->csr.dmc_payload)
		intel_csr_load_program(dev_priv);
}

void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	gen9_dbuf_disable(dev_priv);

	bxt_uninit_cdclk(dev_priv);

	/* The spec doesn't call for removing the reset handshake flag */

	/* Disable PG1 */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);

	mutex_unlock(&power_domains->lock);
}

static void chv_phy_control_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);

	/*
	 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
	 * workaround never ever read DISPLAY_PHY_CONTROL, and
	 * instead maintain a shadow copy ourselves. Use the actual
	 * power well state and lane status to reconstruct the
	 * expected initial value.
	 */
	dev_priv->chv_phy_control =
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);
	/*
	 * If all lanes are disabled we leave the override disabled
	 * with all power down bits cleared to match the state we
	 * would use after disabling the port. Otherwise enable the
	 * override and set the lane powerdown bits according to the
	 * current lane status.
	 */
	if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
		uint32_t status = I915_READ(DPLL(PIPE_A));
		unsigned int mask;

		mask = status & DPLL_PORTB_READY_MASK;

		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);

		mask = (status & DPLL_PORTC_READY_MASK) >> 4;

		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);

		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);

		dev_priv->chv_phy_assert[DPIO_PHY0] = false;
	} else {
		dev_priv->chv_phy_assert[DPIO_PHY0] = true;
	}

	if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
		uint32_t status = I915_READ(DPIO_PHY_STATUS);
		unsigned int mask;

		mask = status & DPLL_PORTD_READY_MASK;

		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);

		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);

		dev_priv->chv_phy_assert[DPIO_PHY1] = false;
	} else {
		dev_priv->chv_phy_assert[DPIO_PHY1] = true;
	}

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Initial PHY_CONTROL=0x%08x\n",
		      dev_priv->chv_phy_control);
}

static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *disp2d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);

	/* If the display might be already active, skip this */
	if (cmn->ops->is_enabled(dev_priv, cmn) &&
	    disp2d->ops->is_enabled(dev_priv, disp2d) &&
	    I915_READ(DPIO_CTL) & DPIO_CMNRST)
		return;

	DRM_DEBUG_KMS("toggling display PHY side reset\n");

	/* cmnlane needs DPLL registers */
	disp2d->ops->enable(dev_priv, disp2d);

	/*
	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
	 * Need to assert and de-assert PHY SB reset by gating the
	 * common lane power, then un-gating it.
	 * Simply ungating isn't enough to reset the PHY enough to get
	 * ports and lanes running.
	 */
	cmn->ops->disable(dev_priv, cmn);
}

/**
 * intel_power_domains_init_hw - initialize hardware power domain state
 * @dev_priv: i915 device instance
 * @resume: Called from resume code paths or not
 *
 * This function initializes the hardware power domain state and enables all
 * power domains using intel_display_set_init_power().
 */
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
{
	struct drm_device *dev = &dev_priv->drm;
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	power_domains->initializing = true;

	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
		skl_display_core_init(dev_priv, resume);
	} else if (IS_BROXTON(dev)) {
		bxt_display_core_init(dev_priv, resume);
	} else if (IS_CHERRYVIEW(dev)) {
		mutex_lock(&power_domains->lock);
		chv_phy_control_init(dev_priv);
		mutex_unlock(&power_domains->lock);
	} else if (IS_VALLEYVIEW(dev)) {
		mutex_lock(&power_domains->lock);
		vlv_cmnlane_wa(dev_priv);
		mutex_unlock(&power_domains->lock);
	}

	/* For now, we need the power well to be always enabled. */
	intel_display_set_init_power(dev_priv, true);
	/* Disable power well support (keep the wells on) if the user asked for it. */
	if (!i915.disable_power_well)
		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
	intel_power_domains_sync_hw(dev_priv);
	power_domains->initializing = false;
}

/**
 * intel_power_domains_suspend - suspend power domain state
 * @dev_priv: i915 device instance
 *
 * This function prepares the hardware power domain state before entering
 * system suspend. It must be paired with intel_power_domains_init_hw().
 */
void intel_power_domains_suspend(struct drm_i915_private *dev_priv)
{
	/*
	 * Even if power well support was disabled we still want to disable
	 * power wells while we are system suspended.
	 */
	if (!i915.disable_power_well)
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
		skl_display_core_uninit(dev_priv);
	else if (IS_BROXTON(dev_priv))
		bxt_display_core_uninit(dev_priv);
}

/**
 * intel_runtime_pm_get - grab a runtime pm reference
 * @dev_priv: i915 device instance
 *
 * This function grabs a device-level runtime pm reference (mostly used for GEM
 * code to ensure the GTT or GT is on) and ensures that it is powered up.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 */
void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	struct device *kdev = &pdev->dev;

	pm_runtime_get_sync(kdev);

	atomic_inc(&dev_priv->pm.wakeref_count);
	assert_rpm_wakelock_held(dev_priv);
}
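
/*
 * Usage sketch (illustrative, not part of the original file): the reference
 * is device-level, so code touching the hardware outside of a display power
 * domain can use the plain get/put pair:
 *
 *	intel_runtime_pm_get(dev_priv);
 *	... access the hardware ...
 *	intel_runtime_pm_put(dev_priv);
 */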

/**
 * intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use
 * @dev_priv: i915 device instance
 *
 * This function grabs a device-level runtime pm reference if the device is
 * already in use and ensures that it is powered up.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 */
bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	struct device *kdev = &pdev->dev;

	if (IS_ENABLED(CONFIG_PM)) {
		int ret = pm_runtime_get_if_in_use(kdev);

		/*
		 * In cases where runtime PM is disabled by the RPM core and
		 * we get an -EINVAL return value we are not supposed to call
		 * this function, since the power state is undefined. This
		 * applies atm to the late/early system suspend/resume
		 * handlers.
		 */
		WARN_ON_ONCE(ret < 0);
		if (ret <= 0)
			return false;
	}

	atomic_inc(&dev_priv->pm.wakeref_count);
	assert_rpm_wakelock_held(dev_priv);

	return true;
}
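
/*
 * Usage sketch (illustrative, not part of the original file): like
 * intel_display_power_get_if_enabled() above, the boolean return must gate
 * the matching put:
 *
 *	if (intel_runtime_pm_get_if_in_use(dev_priv)) {
 *		... opportunistic work that needs the device awake ...
 *		intel_runtime_pm_put(dev_priv);
 *	}
 */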

/**
 * intel_runtime_pm_get_noresume - grab a runtime pm reference
 * @dev_priv: i915 device instance
 *
 * This function grabs a device-level runtime pm reference (mostly used for GEM
 * code to ensure the GTT or GT is on).
 *
 * It will _not_ power up the device but instead only check that it's powered
 * on. Therefore it is only valid to call this function from contexts where
 * the device is known to be powered up and where trying to power it up would
 * result in hilarity and deadlocks. That pretty much means only the system
 * suspend/resume code where this is used to grab runtime pm references for
 * delayed setup down in work items.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 */
void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	struct device *kdev = &pdev->dev;

	assert_rpm_wakelock_held(dev_priv);
	pm_runtime_get_noresume(kdev);

	atomic_inc(&dev_priv->pm.wakeref_count);
}

/**
 * intel_runtime_pm_put - release a runtime pm reference
 * @dev_priv: i915 device instance
 *
 * This function drops the device-level runtime pm reference obtained by
 * intel_runtime_pm_get() and might power down the corresponding
 * hardware block right away if this is the last reference.
 */
void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	struct device *kdev = &pdev->dev;

	assert_rpm_wakelock_held(dev_priv);
	if (atomic_dec_and_test(&dev_priv->pm.wakeref_count))
		atomic_inc(&dev_priv->pm.atomic_seq);

	pm_runtime_mark_last_busy(kdev);
	pm_runtime_put_autosuspend(kdev);
}

/**
 * intel_runtime_pm_enable - enable runtime pm
 * @dev_priv: i915 device instance
 *
 * This function enables runtime pm at the end of the driver load sequence.
 *
 * Note that this function does not currently enable runtime pm for the
 * subordinate display power domains. That is only done on the first modeset
 * using intel_display_set_init_power().
 */
void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	struct drm_device *dev = &dev_priv->drm;
	struct device *kdev = &pdev->dev;

	pm_runtime_set_autosuspend_delay(kdev, 10000); /* 10s */
	pm_runtime_mark_last_busy(kdev);

	/*
	 * Take a permanent reference to disable the RPM functionality and drop
	 * it only when unloading the driver. Use the low level get/put helpers,
	 * so the driver's own RPM reference tracking asserts also work on
	 * platforms without RPM support.
	 */
	if (!HAS_RUNTIME_PM(dev)) {
		pm_runtime_dont_use_autosuspend(kdev);
		pm_runtime_get_sync(kdev);
	} else {
		pm_runtime_use_autosuspend(kdev);
	}

	/*
	 * The core calls the driver load handler with an RPM reference held.
	 * We drop that here and will reacquire it during unloading in
	 * intel_power_domains_fini().
	 */
	pm_runtime_put_autosuspend(kdev);
}