nouveau_hw.c 33 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088
  1. /*
  2. * Copyright 2006 Dave Airlie
  3. * Copyright 2007 Maarten Maathuis
  4. * Copyright 2007-2009 Stuart Bennett
  5. *
  6. * Permission is hereby granted, free of charge, to any person obtaining a
  7. * copy of this software and associated documentation files (the "Software"),
  8. * to deal in the Software without restriction, including without limitation
  9. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  10. * and/or sell copies of the Software, and to permit persons to whom the
  11. * Software is furnished to do so, subject to the following conditions:
  12. *
  13. * The above copyright notice and this permission notice shall be included in
  14. * all copies or substantial portions of the Software.
  15. *
  16. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  19. * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
  20. * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
  21. * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  22. * SOFTWARE.
  23. */
  24. #include "drmP.h"
  25. #include "nouveau_drv.h"
  26. #include "nouveau_hw.h"
  27. #define CHIPSET_NFORCE 0x01a0
  28. #define CHIPSET_NFORCE2 0x01f0
  29. /*
  30. * misc hw access wrappers/control functions
  31. */
/* Write a VGA sequencer register on the given head: latch the register
 * index into the SRX index port, then write the value to the SR data port. */
void
NVWriteVgaSeq(struct drm_device *dev, int head, uint8_t index, uint8_t value)
{
	NVWritePRMVIO(dev, head, NV_PRMVIO_SRX, index);
	NVWritePRMVIO(dev, head, NV_PRMVIO_SR, value);
}
/* Read a VGA sequencer register on the given head: latch the register
 * index into the SRX index port, then read back the SR data port. */
uint8_t
NVReadVgaSeq(struct drm_device *dev, int head, uint8_t index)
{
	NVWritePRMVIO(dev, head, NV_PRMVIO_SRX, index);
	return NVReadPRMVIO(dev, head, NV_PRMVIO_SR);
}
/* Write a VGA graphics controller register on the given head: latch the
 * register index into the GRX index port, then write the GX data port. */
void
NVWriteVgaGr(struct drm_device *dev, int head, uint8_t index, uint8_t value)
{
	NVWritePRMVIO(dev, head, NV_PRMVIO_GRX, index);
	NVWritePRMVIO(dev, head, NV_PRMVIO_GX, value);
}
/* Read a VGA graphics controller register on the given head: latch the
 * register index into the GRX index port, then read back the GX data port. */
uint8_t
NVReadVgaGr(struct drm_device *dev, int head, uint8_t index)
{
	NVWritePRMVIO(dev, head, NV_PRMVIO_GRX, index);
	return NVReadPRMVIO(dev, head, NV_PRMVIO_GX);
}
  56. /* CR44 takes values 0 (head A), 3 (head B) and 4 (heads tied)
  57. * it affects only the 8 bit vga io regs, which we access using mmio at
  58. * 0xc{0,2}3c*, 0x60{1,3}3*, and 0x68{1,3}3d*
  59. * in general, the set value of cr44 does not matter: reg access works as
  60. * expected and values can be set for the appropriate head by using a 0x2000
  61. * offset as required
  62. * however:
  63. * a) pre nv40, the head B range of PRMVIO regs at 0xc23c* was not exposed and
  64. * cr44 must be set to 0 or 3 for accessing values on the correct head
  65. * through the common 0xc03c* addresses
  66. * b) in tied mode (4) head B is programmed to the values set on head A, and
  67. * access using the head B addresses can have strange results, ergo we leave
  68. * tied mode in init once we know to what cr44 should be restored on exit
  69. *
  70. * the owner parameter is slightly abused:
  71. * 0 and 1 are treated as head values and so the set value is (owner * 3)
  72. * other values are treated as literal values to set
  73. */
/* Set CR44 (vga reg ownership); see the comment block above for the CR44
 * semantics.  @owner 0/1 are head numbers (mapped to CR44 values 0/3),
 * anything else is written literally. */
void
NVSetOwner(struct drm_device *dev, int owner)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	if (owner == 1)
		owner *= 3;	/* head 1 -> CR44 value 3 */

	if (dev_priv->chipset == 0x11) {
		/* This might seem stupid, but the blob does it and
		 * omitting it often locks the system up.
		 */
		NVReadVgaCrtc(dev, 0, NV_CIO_SR_LOCK_INDEX);
		NVReadVgaCrtc(dev, 1, NV_CIO_SR_LOCK_INDEX);
	}

	/* CR44 is always changed on CRTC0 */
	NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_44, owner);

	if (dev_priv->chipset == 0x11) {	/* set me harder */
		NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_2E, owner);
		NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_2E, owner);
	}
}
/* Blank or unblank a head by toggling the screen-off bit (0x20) in the
 * sequencer clocking mode register, bracketed by a sequencer reset. */
void
NVBlankScreen(struct drm_device *dev, int head, bool blank)
{
	unsigned char seq1;

	/* ensure vga regs are addressing the head we're touching */
	if (nv_two_heads(dev))
		NVSetOwner(dev, head);

	seq1 = NVReadVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX);

	NVVgaSeqReset(dev, head, true);
	if (blank)
		NVWriteVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX, seq1 | 0x20);
	else
		NVWriteVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX, seq1 & ~0x20);
	NVVgaSeqReset(dev, head, false);
}
  108. /*
  109. * PLL setting
  110. */
/* Return the bit position of the given PLL's control field within
 * NV_PBUS_POWERCTRL_1, or a negative value if this chip/register pair
 * has no such field (in which case no powerctrl fiddling is needed). */
static int
powerctrl_1_shift(int chip_version, int reg)
{
	int shift = -4;

	if (chip_version < 0x17 || chip_version == 0x1a || chip_version == 0x20)
		return shift;

	/* deliberate fallthrough: each case below the matched one adds 4,
	 * so the shift accumulates to 0/4/8/12 depending on the PLL */
	switch (reg) {
	case NV_RAMDAC_VPLL2:
		shift += 4;
		/* fall through */
	case NV_PRAMDAC_VPLL_COEFF:
		shift += 4;
		/* fall through */
	case NV_PRAMDAC_MPLL_COEFF:
		shift += 4;
		/* fall through */
	case NV_PRAMDAC_NVPLL_COEFF:
		shift += 4;
	}

	/*
	 * the shift for vpll regs is only used for nv3x chips with a single
	 * stage pll
	 */
	if (shift > 4 && (chip_version < 0x32 || chip_version == 0x35 ||
			  chip_version == 0x36 || chip_version >= 0x40))
		shift = -4;

	return shift;
}
/* Program a single-stage PLL register (one register carrying NM1 and
 * log2P).  The half written first depends on clock direction, so the VCO
 * passes through a safe intermediate frequency. */
static void
setPLL_single(struct drm_device *dev, uint32_t reg, struct nouveau_pll_vals *pv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int chip_version = dev_priv->vbios.chip_version;
	uint32_t oldpll = NVReadRAMDAC(dev, 0, reg);
	int oldN = (oldpll >> 8) & 0xff, oldM = oldpll & 0xff;
	uint32_t pll = (oldpll & 0xfff80000) | pv->log2P << 16 | pv->NM1;
	uint32_t saved_powerctrl_1 = 0;
	int shift_powerctrl_1 = powerctrl_1_shift(chip_version, reg);

	if (oldpll == pll)
		return;	/* already set */

	/* gate the PLL's power control field while reprogramming */
	if (shift_powerctrl_1 >= 0) {
		saved_powerctrl_1 = nvReadMC(dev, NV_PBUS_POWERCTRL_1);
		nvWriteMC(dev, NV_PBUS_POWERCTRL_1,
			(saved_powerctrl_1 & ~(0xf << shift_powerctrl_1)) |
			1 << shift_powerctrl_1);
	}

	if (oldM && pv->M1 && (oldN / oldM < pv->N1 / pv->M1))
		/* upclock -- write new post divider first */
		NVWriteRAMDAC(dev, 0, reg, pv->log2P << 16 | (oldpll & 0xffff));
	else
		/* downclock -- write new NM first */
		NVWriteRAMDAC(dev, 0, reg, (oldpll & 0xffff0000) | pv->NM1);

	if (chip_version < 0x17 && chip_version != 0x11)
		/* wait a bit on older chips */
		msleep(64);
	NVReadRAMDAC(dev, 0, reg);	/* posting read before the final write */

	/* then write the other half as well */
	NVWriteRAMDAC(dev, 0, reg, pll);

	if (shift_powerctrl_1 >= 0)
		nvWriteMC(dev, NV_PBUS_POWERCTRL_1, saved_powerctrl_1);
}
  169. static uint32_t
  170. new_ramdac580(uint32_t reg1, bool ss, uint32_t ramdac580)
  171. {
  172. bool head_a = (reg1 == NV_PRAMDAC_VPLL_COEFF);
  173. if (ss) /* single stage pll mode */
  174. ramdac580 |= head_a ? NV_RAMDAC_580_VPLL1_ACTIVE :
  175. NV_RAMDAC_580_VPLL2_ACTIVE;
  176. else
  177. ramdac580 &= head_a ? ~NV_RAMDAC_580_VPLL1_ACTIVE :
  178. ~NV_RAMDAC_580_VPLL2_ACTIVE;
  179. return ramdac580;
  180. }
/* Program a two-stage PLL whose second-stage register lives at a high
 * (ramdac-relative) offset from @reg1.  Handles nv30/35's packed second
 * stage, the nv41+ ramdac580 single-stage override, and the g71-73
 * magic bits, with powerctrl/0xc040 gating around the writes. */
static void
setPLL_double_highregs(struct drm_device *dev, uint32_t reg1,
		       struct nouveau_pll_vals *pv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int chip_version = dev_priv->vbios.chip_version;
	bool nv3035 = chip_version == 0x30 || chip_version == 0x35;
	uint32_t reg2 = reg1 + ((reg1 == NV_RAMDAC_VPLL2) ? 0x5c : 0x70);
	uint32_t oldpll1 = NVReadRAMDAC(dev, 0, reg1);
	uint32_t oldpll2 = !nv3035 ? NVReadRAMDAC(dev, 0, reg2) : 0;
	uint32_t pll1 = (oldpll1 & 0xfff80000) | pv->log2P << 16 | pv->NM1;
	uint32_t pll2 = (oldpll2 & 0x7fff0000) | 1 << 31 | pv->NM2;
	uint32_t oldramdac580 = 0, ramdac580 = 0;
	bool single_stage = !pv->NM2 || pv->N2 == pv->M2;	/* nv41+ only */
	uint32_t saved_powerctrl_1 = 0, savedc040 = 0;
	int shift_powerctrl_1 = powerctrl_1_shift(chip_version, reg1);

	/* model specific additions to generic pll1 and pll2 set up above */
	if (nv3035) {
		/* nv30/35 pack the second stage's N2/M2 into pll1 itself */
		pll1 = (pll1 & 0xfcc7ffff) | (pv->N2 & 0x18) << 21 |
		       (pv->N2 & 0x7) << 19 | 8 << 4 | (pv->M2 & 7) << 4;
		pll2 = 0;
	}
	if (chip_version > 0x40 && reg1 >= NV_PRAMDAC_VPLL_COEFF) { /* !nv40 */
		oldramdac580 = NVReadRAMDAC(dev, 0, NV_PRAMDAC_580);
		ramdac580 = new_ramdac580(reg1, single_stage, oldramdac580);
		if (oldramdac580 != ramdac580)
			oldpll1 = ~0;	/* force mismatch */
		if (single_stage)
			/* magic value used by nvidia in single stage mode */
			pll2 |= 0x011f;
	}
	if (chip_version > 0x70)
		/* magic bits set by the blob (but not the bios) on g71-73 */
		pll1 = (pll1 & 0x7fffffff) | (single_stage ? 0x4 : 0xc) << 28;

	if (oldpll1 == pll1 && oldpll2 == pll2)
		return;	/* already set */

	/* gate the PLL's power control field while reprogramming */
	if (shift_powerctrl_1 >= 0) {
		saved_powerctrl_1 = nvReadMC(dev, NV_PBUS_POWERCTRL_1);
		nvWriteMC(dev, NV_PBUS_POWERCTRL_1,
			(saved_powerctrl_1 & ~(0xf << shift_powerctrl_1)) |
			1 << shift_powerctrl_1);
	}

	if (chip_version >= 0x40) {
		int shift_c040 = 14;

		/* deliberate fallthrough: each case below the matched one
		 * adds 2, giving a per-PLL field offset within 0xc040 */
		switch (reg1) {
		case NV_PRAMDAC_MPLL_COEFF:
			shift_c040 += 2;
			/* fall through */
		case NV_PRAMDAC_NVPLL_COEFF:
			shift_c040 += 2;
			/* fall through */
		case NV_RAMDAC_VPLL2:
			shift_c040 += 2;
			/* fall through */
		case NV_PRAMDAC_VPLL_COEFF:
			shift_c040 += 2;
		}

		savedc040 = nvReadMC(dev, 0xc040);
		if (shift_c040 != 14)
			nvWriteMC(dev, 0xc040, savedc040 & ~(3 << shift_c040));
	}

	if (oldramdac580 != ramdac580)
		NVWriteRAMDAC(dev, 0, NV_PRAMDAC_580, ramdac580);

	/* second stage first, then the first stage register */
	if (!nv3035)
		NVWriteRAMDAC(dev, 0, reg2, pll2);
	NVWriteRAMDAC(dev, 0, reg1, pll1);

	/* restore the gated control registers */
	if (shift_powerctrl_1 >= 0)
		nvWriteMC(dev, NV_PBUS_POWERCTRL_1, saved_powerctrl_1);
	if (chip_version >= 0x40)
		nvWriteMC(dev, 0xc040, savedc040);
}
/* Program a two-stage PLL whose NM pair sits in one low (PMC-relative)
 * register, with the P divider in the register 4 bytes below. */
static void
setPLL_double_lowregs(struct drm_device *dev, uint32_t NMNMreg,
		      struct nouveau_pll_vals *pv)
{
	/* When setting PLLs, there is a merry game of disabling and enabling
	 * various bits of hardware during the process. This function is a
	 * synthesis of six nv4x traces, nearly each card doing a subtly
	 * different thing. With luck all the necessary bits for each card are
	 * combined herein. Without luck it deviates from each card's formula
	 * so as to not work on any :)
	 */
	uint32_t Preg = NMNMreg - 4;
	bool mpll = Preg == 0x4020;
	uint32_t oldPval = nvReadMC(dev, Preg);
	uint32_t NMNM = pv->NM2 << 16 | pv->NM1;
	uint32_t Pval = (oldPval & (mpll ? ~(0x77 << 16) : ~(7 << 16))) |
			0xc << 28 | pv->log2P << 16;
	uint32_t saved4600 = 0;
	/* some cards have different maskc040s */
	uint32_t maskc040 = ~(3 << 14), savedc040;
	bool single_stage = !pv->NM2 || pv->N2 == pv->M2;

	if (nvReadMC(dev, NMNMreg) == NMNM && (oldPval & 0xc0070000) == Pval)
		return;	/* already set */

	if (Preg == 0x4000)
		maskc040 = ~0x333;
	if (Preg == 0x4058)
		maskc040 = ~(0xc << 24);

	if (mpll) {
		struct pll_lims pll_lim;
		uint8_t Pval2;

		if (get_pll_limits(dev, Preg, &pll_lim))
			return;

		/* second (bias-adjusted, clamped) copy of the post divider */
		Pval2 = pv->log2P + pll_lim.log2p_bias;
		if (Pval2 > pll_lim.max_log2p)
			Pval2 = pll_lim.max_log2p;
		Pval |= 1 << 28 | Pval2 << 20;

		saved4600 = nvReadMC(dev, 0x4600);
		nvWriteMC(dev, 0x4600, saved4600 | 8 << 28);
	}
	if (single_stage)
		Pval |= mpll ? 1 << 12 : 1 << 8;

	nvWriteMC(dev, Preg, oldPval | 1 << 28);
	nvWriteMC(dev, Preg, Pval & ~(4 << 28));
	if (mpll) {
		Pval |= 8 << 20;
		nvWriteMC(dev, 0x4020, Pval & ~(0xc << 28));
		nvWriteMC(dev, 0x4038, Pval & ~(0xc << 28));
	}

	/* write the new NM pair with the c040 field masked off */
	savedc040 = nvReadMC(dev, 0xc040);
	nvWriteMC(dev, 0xc040, savedc040 & maskc040);

	nvWriteMC(dev, NMNMreg, NMNM);
	if (NMNMreg == 0x4024)
		nvWriteMC(dev, 0x403c, NMNM);

	nvWriteMC(dev, Preg, Pval);
	if (mpll) {
		Pval &= ~(8 << 20);
		nvWriteMC(dev, 0x4020, Pval);
		nvWriteMC(dev, 0x4038, Pval);
		nvWriteMC(dev, 0x4600, saved4600);
	}

	nvWriteMC(dev, 0xc040, savedc040);

	if (mpll) {
		nvWriteMC(dev, 0x4020, Pval & ~(1 << 28));
		nvWriteMC(dev, 0x4038, Pval & ~(1 << 28));
	}
}
  315. void
  316. nouveau_hw_setpll(struct drm_device *dev, uint32_t reg1,
  317. struct nouveau_pll_vals *pv)
  318. {
  319. struct drm_nouveau_private *dev_priv = dev->dev_private;
  320. int cv = dev_priv->vbios.chip_version;
  321. if (cv == 0x30 || cv == 0x31 || cv == 0x35 || cv == 0x36 ||
  322. cv >= 0x40) {
  323. if (reg1 > 0x405c)
  324. setPLL_double_highregs(dev, reg1, pv);
  325. else
  326. setPLL_double_lowregs(dev, reg1, pv);
  327. } else
  328. setPLL_single(dev, reg1, pv);
  329. }
  330. /*
  331. * PLL getting
  332. */
/* Decode raw PLL register contents (@pll1, and @pll2 for two-register
 * PLLs) into @pllvals.  N2/M2 default to 1 so single-stage PLLs divide
 * out harmlessly in nouveau_hw_pllvals_to_clk(). */
static void
nouveau_hw_decode_pll(struct drm_device *dev, uint32_t reg1, uint32_t pll1,
		      uint32_t pll2, struct nouveau_pll_vals *pllvals)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	/* to force parsing as single stage (i.e. nv40 vplls) pass pll2 as 0 */

	/* log2P is & 0x7 as never more than 7, and nv30/35 only uses 3 bits */
	pllvals->log2P = (pll1 >> 16) & 0x7;
	pllvals->N2 = pllvals->M2 = 1;

	if (reg1 <= 0x405c) {
		/* low-register layout: NM1 in the second register */
		pllvals->NM1 = pll2 & 0xffff;
		/* single stage NVPLL and VPLLs use 1 << 8, MPLL uses 1 << 12 */
		if (!(pll1 & 0x1100))
			pllvals->NM2 = pll2 >> 16;
	} else {
		pllvals->NM1 = pll1 & 0xffff;
		if (nv_two_reg_pll(dev) && pll2 & NV31_RAMDAC_ENABLE_VCO2)
			pllvals->NM2 = pll2 & 0xffff;
		else if (dev_priv->chipset == 0x30 || dev_priv->chipset == 0x35) {
			pllvals->M1 &= 0xf; /* only 4 bits */
			if (pll1 & NV30_RAMDAC_ENABLE_VCO2) {
				/* nv30/35 pack the second stage into pll1 */
				pllvals->M2 = (pll1 >> 4) & 0x7;
				pllvals->N2 = ((pll1 >> 21) & 0x18) |
					      ((pll1 >> 19) & 0x7);
			}
		}
	}
}
/* Read and decode the current values of the PLL of type @plltype into
 * @pllvals (including its reference clock from the vbios limits).
 * Returns 0 on success, -ENOENT if the chip has no such PLL, or the
 * error from get_pll_limits(). */
int
nouveau_hw_get_pllvals(struct drm_device *dev, enum pll_types plltype,
		       struct nouveau_pll_vals *pllvals)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t reg1 = get_pll_register(dev, plltype), pll1, pll2 = 0;
	struct pll_lims pll_lim;
	int ret;

	if (reg1 == 0)
		return -ENOENT;

	pll1 = nvReadMC(dev, reg1);

	/* locate the second-stage register, where one exists */
	if (reg1 <= 0x405c)
		pll2 = nvReadMC(dev, reg1 + 4);
	else if (nv_two_reg_pll(dev)) {
		uint32_t reg2 = reg1 + (reg1 == NV_RAMDAC_VPLL2 ? 0x5c : 0x70);

		pll2 = nvReadMC(dev, reg2);
	}

	/* NOTE(review): 0x40 is presumably NV_40's card_type value — confirm
	 * against the card_type enum and consider using the symbolic name */
	if (dev_priv->card_type == 0x40 && reg1 >= NV_PRAMDAC_VPLL_COEFF) {
		uint32_t ramdac580 = NVReadRAMDAC(dev, 0, NV_PRAMDAC_580);

		/* check whether vpll has been forced into single stage mode */
		if (reg1 == NV_PRAMDAC_VPLL_COEFF) {
			if (ramdac580 & NV_RAMDAC_580_VPLL1_ACTIVE)
				pll2 = 0;
		} else
			if (ramdac580 & NV_RAMDAC_580_VPLL2_ACTIVE)
				pll2 = 0;
	}

	nouveau_hw_decode_pll(dev, reg1, pll1, pll2, pllvals);

	ret = get_pll_limits(dev, plltype, &pll_lim);
	if (ret)
		return ret;

	pllvals->refclk = pll_lim.refclk;

	return 0;
}
  395. int
  396. nouveau_hw_pllvals_to_clk(struct nouveau_pll_vals *pv)
  397. {
  398. /* Avoid divide by zero if called at an inappropriate time */
  399. if (!pv->M1 || !pv->M2)
  400. return 0;
  401. return pv->N1 * pv->N2 * pv->refclk / (pv->M1 * pv->M2) >> pv->log2P;
  402. }
  403. int
  404. nouveau_hw_get_clock(struct drm_device *dev, enum pll_types plltype)
  405. {
  406. struct nouveau_pll_vals pllvals;
  407. int ret;
  408. if (plltype == PLL_MEMORY &&
  409. (dev->pci_device & 0x0ff0) == CHIPSET_NFORCE) {
  410. uint32_t mpllP;
  411. pci_read_config_dword(pci_get_bus_and_slot(0, 3), 0x6c, &mpllP);
  412. if (!mpllP)
  413. mpllP = 4;
  414. return 400000 / mpllP;
  415. } else
  416. if (plltype == PLL_MEMORY &&
  417. (dev->pci_device & 0xff0) == CHIPSET_NFORCE2) {
  418. uint32_t clock;
  419. pci_read_config_dword(pci_get_bus_and_slot(0, 5), 0x4c, &clock);
  420. return clock;
  421. }
  422. ret = nouveau_hw_get_pllvals(dev, plltype, &pllvals);
  423. if (ret)
  424. return ret;
  425. return nouveau_hw_pllvals_to_clk(&pllvals);
  426. }
  427. static void
  428. nouveau_hw_fix_bad_vpll(struct drm_device *dev, int head)
  429. {
  430. /* the vpll on an unused head can come up with a random value, way
  431. * beyond the pll limits. for some reason this causes the chip to
  432. * lock up when reading the dac palette regs, so set a valid pll here
  433. * when such a condition detected. only seen on nv11 to date
  434. */
  435. struct pll_lims pll_lim;
  436. struct nouveau_pll_vals pv;
  437. enum pll_types pll = head ? PLL_VPLL1 : PLL_VPLL0;
  438. if (get_pll_limits(dev, pll, &pll_lim))
  439. return;
  440. nouveau_hw_get_pllvals(dev, pll, &pv);
  441. if (pv.M1 >= pll_lim.vco1.min_m && pv.M1 <= pll_lim.vco1.max_m &&
  442. pv.N1 >= pll_lim.vco1.min_n && pv.N1 <= pll_lim.vco1.max_n &&
  443. pv.log2P <= pll_lim.max_log2p)
  444. return;
  445. NV_WARN(dev, "VPLL %d outwith limits, attempting to fix\n", head + 1);
  446. /* set lowest clock within static limits */
  447. pv.M1 = pll_lim.vco1.max_m;
  448. pv.N1 = pll_lim.vco1.min_n;
  449. pv.log2P = pll_lim.max_usable_log2p;
  450. nouveau_hw_setpll(dev, pll_lim.reg, &pv);
  451. }
  452. /*
  453. * vga font save/restore
  454. */
/* Save (@save true) or restore (@save false) one 64KiB VGA font plane
 * between VRAM (via @iovram) and dev_priv->saved_vga_font[plane]. */
static void nouveau_vga_font_io(struct drm_device *dev,
				void __iomem *iovram,
				bool save, unsigned plane)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	unsigned i;

	/* select the plane for both writes (plane mask) and reads (read map) */
	NVWriteVgaSeq(dev, 0, NV_VIO_SR_PLANE_MASK_INDEX, 1 << plane);
	NVWriteVgaGr(dev, 0, NV_VIO_GX_READ_MAP_INDEX, plane);
	for (i = 0; i < 16384; i++) {
		if (save) {
			dev_priv->saved_vga_font[plane][i] =
					ioread32_native(iovram + i * 4);
		} else {
			iowrite32_native(dev_priv->saved_vga_font[plane][i],
							iovram + i * 4);
		}
	}
}
/* Save (@save true) or restore (@save false) the VGA fonts held in the
 * first 64KiB of VRAM.  Only needed in text mode; in graphics mode that
 * memory is framebuffer and there is nothing to preserve. */
void
nouveau_hw_save_vga_fonts(struct drm_device *dev, bool save)
{
	uint8_t misc, gr4, gr5, gr6, seq2, seq4;
	bool graphicsmode;
	unsigned plane;
	void __iomem *iovram;

	if (nv_two_heads(dev))
		NVSetOwner(dev, 0);

	NVSetEnablePalette(dev, 0, true);
	graphicsmode = NVReadVgaAttr(dev, 0, NV_CIO_AR_MODE_INDEX) & 1;
	NVSetEnablePalette(dev, 0, false);

	if (graphicsmode) /* graphics mode => framebuffer => no need to save */
		return;

	NV_INFO(dev, "%sing VGA fonts\n", save ? "Sav" : "Restor");

	/* map first 64KiB of VRAM, holds VGA fonts etc */
	iovram = ioremap(pci_resource_start(dev->pdev, 1), 65536);
	if (!iovram) {
		NV_ERROR(dev, "Failed to map VRAM, "
					"cannot save/restore VGA fonts.\n");
		return;
	}

	/* blank while the memory layout is temporarily reprogrammed */
	if (nv_two_heads(dev))
		NVBlankScreen(dev, 1, true);
	NVBlankScreen(dev, 0, true);

	/* save control regs */
	misc = NVReadPRMVIO(dev, 0, NV_PRMVIO_MISC__READ);
	seq2 = NVReadVgaSeq(dev, 0, NV_VIO_SR_PLANE_MASK_INDEX);
	seq4 = NVReadVgaSeq(dev, 0, NV_VIO_SR_MEM_MODE_INDEX);
	gr4 = NVReadVgaGr(dev, 0, NV_VIO_GX_READ_MAP_INDEX);
	gr5 = NVReadVgaGr(dev, 0, NV_VIO_GX_MODE_INDEX);
	gr6 = NVReadVgaGr(dev, 0, NV_VIO_GX_MISC_INDEX);

	/* switch to planar access so each font plane is addressable */
	NVWritePRMVIO(dev, 0, NV_PRMVIO_MISC__WRITE, 0x67);
	NVWriteVgaSeq(dev, 0, NV_VIO_SR_MEM_MODE_INDEX, 0x6);
	NVWriteVgaGr(dev, 0, NV_VIO_GX_MODE_INDEX, 0x0);
	NVWriteVgaGr(dev, 0, NV_VIO_GX_MISC_INDEX, 0x5);

	/* store font in planes 0..3 */
	for (plane = 0; plane < 4; plane++)
		nouveau_vga_font_io(dev, iovram, save, plane);

	/* restore control regs */
	NVWritePRMVIO(dev, 0, NV_PRMVIO_MISC__WRITE, misc);
	NVWriteVgaGr(dev, 0, NV_VIO_GX_READ_MAP_INDEX, gr4);
	NVWriteVgaGr(dev, 0, NV_VIO_GX_MODE_INDEX, gr5);
	NVWriteVgaGr(dev, 0, NV_VIO_GX_MISC_INDEX, gr6);
	NVWriteVgaSeq(dev, 0, NV_VIO_SR_PLANE_MASK_INDEX, seq2);
	NVWriteVgaSeq(dev, 0, NV_VIO_SR_MEM_MODE_INDEX, seq4);

	if (nv_two_heads(dev))
		NVBlankScreen(dev, 1, false);
	NVBlankScreen(dev, 0, false);

	iounmap(iovram);
}
  524. /*
  525. * mode state save/load
  526. */
/* Read one VGA CRTC register into the saved-state slot of the same index. */
static void
rd_cio_state(struct drm_device *dev, int head,
	     struct nv04_crtc_reg *crtcstate, int index)
{
	crtcstate->CRTC[index] = NVReadVgaCrtc(dev, head, index);
}
/* Write one VGA CRTC register from the saved-state slot of the same index. */
static void
wr_cio_state(struct drm_device *dev, int head,
	     struct nv04_crtc_reg *crtcstate, int index)
{
	NVWriteVgaCrtc(dev, head, index, crtcstate->CRTC[index]);
}
/* Save the per-head ramdac state (PLLs, TV timing, flat-panel and dither
 * registers) into state->crtc_reg[head], gating reads by the chip
 * features that expose each register. */
static void
nv_save_state_ramdac(struct drm_device *dev, int head,
		     struct nv04_mode_state *state)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv04_crtc_reg *regp = &state->crtc_reg[head];
	int i;

	if (dev_priv->card_type >= NV_10)
		regp->nv10_cursync = NVReadRAMDAC(dev, head, NV_RAMDAC_NV10_CURSYNC);

	nouveau_hw_get_pllvals(dev, head ? PLL_VPLL1 : PLL_VPLL0, &regp->pllvals);
	state->pllsel = NVReadRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT);
	if (nv_two_heads(dev))
		state->sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK);
	if (dev_priv->chipset == 0x11)
		regp->dither = NVReadRAMDAC(dev, head, NV_RAMDAC_DITHER_NV11);

	regp->ramdac_gen_ctrl = NVReadRAMDAC(dev, head, NV_PRAMDAC_GENERAL_CONTROL);

	if (nv_gf4_disp_arch(dev))
		regp->ramdac_630 = NVReadRAMDAC(dev, head, NV_PRAMDAC_630);
	if (dev_priv->chipset >= 0x30)
		regp->ramdac_634 = NVReadRAMDAC(dev, head, NV_PRAMDAC_634);

	/* TV-out timing/skew registers */
	regp->tv_setup = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP);
	regp->tv_vtotal = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_VTOTAL);
	regp->tv_vskew = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_VSKEW);
	regp->tv_vsync_delay = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_VSYNC_DELAY);
	regp->tv_htotal = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_HTOTAL);
	regp->tv_hskew = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_HSKEW);
	regp->tv_hsync_delay = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_HSYNC_DELAY);
	regp->tv_hsync_delay2 = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_HSYNC_DELAY2);

	/* flat-panel vertical regs, with the horizontal set 0x20 above */
	for (i = 0; i < 7; i++) {
		uint32_t ramdac_reg = NV_PRAMDAC_FP_VDISPLAY_END + (i * 4);

		regp->fp_vert_regs[i] = NVReadRAMDAC(dev, head, ramdac_reg);
		regp->fp_horiz_regs[i] = NVReadRAMDAC(dev, head, ramdac_reg + 0x20);
	}

	if (nv_gf4_disp_arch(dev)) {
		regp->dither = NVReadRAMDAC(dev, head, NV_RAMDAC_FP_DITHER);
		for (i = 0; i < 3; i++) {
			regp->dither_regs[i] = NVReadRAMDAC(dev, head, NV_PRAMDAC_850 + i * 4);
			regp->dither_regs[i + 3] = NVReadRAMDAC(dev, head, NV_PRAMDAC_85C + i * 4);
		}
	}

	regp->fp_control = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL);
	regp->fp_debug_0 = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_0);
	if (!nv_gf4_disp_arch(dev) && head == 0) {
		/* early chips don't allow access to PRAMDAC_TMDS_* without
		 * the head A FPCLK on (nv11 even locks up) */
		NVWriteRAMDAC(dev, 0, NV_PRAMDAC_FP_DEBUG_0, regp->fp_debug_0 &
			      ~NV_PRAMDAC_FP_DEBUG_0_PWRDOWN_FPCLK);
	}
	regp->fp_debug_1 = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_1);
	regp->fp_debug_2 = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_2);

	regp->fp_margin_color = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_MARGIN_COLOR);

	if (nv_gf4_disp_arch(dev))
		regp->ramdac_8c0 = NVReadRAMDAC(dev, head, NV_PRAMDAC_8C0);

	if (dev_priv->card_type == NV_40) {
		regp->ramdac_a20 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A20);
		regp->ramdac_a24 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A24);
		regp->ramdac_a34 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A34);

		for (i = 0; i < 38; i++)
			regp->ctv_regs[i] = NVReadRAMDAC(dev, head,
							 NV_PRAMDAC_CTV + 4*i);
	}
}
/* Load the per-head ramdac state previously captured by
 * nv_save_state_ramdac() back into the hardware, including
 * reprogramming the head's VPLL from the saved values. */
static void
nv_load_state_ramdac(struct drm_device *dev, int head,
		     struct nv04_mode_state *state)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv04_crtc_reg *regp = &state->crtc_reg[head];
	uint32_t pllreg = head ? NV_RAMDAC_VPLL2 : NV_PRAMDAC_VPLL_COEFF;
	int i;

	if (dev_priv->card_type >= NV_10)
		NVWriteRAMDAC(dev, head, NV_RAMDAC_NV10_CURSYNC, regp->nv10_cursync);

	nouveau_hw_setpll(dev, pllreg, &regp->pllvals);
	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT, state->pllsel);
	if (nv_two_heads(dev))
		NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, state->sel_clk);
	if (dev_priv->chipset == 0x11)
		NVWriteRAMDAC(dev, head, NV_RAMDAC_DITHER_NV11, regp->dither);

	NVWriteRAMDAC(dev, head, NV_PRAMDAC_GENERAL_CONTROL, regp->ramdac_gen_ctrl);

	if (nv_gf4_disp_arch(dev))
		NVWriteRAMDAC(dev, head, NV_PRAMDAC_630, regp->ramdac_630);
	if (dev_priv->chipset >= 0x30)
		NVWriteRAMDAC(dev, head, NV_PRAMDAC_634, regp->ramdac_634);

	/* TV-out timing/skew registers */
	NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP, regp->tv_setup);
	NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_VTOTAL, regp->tv_vtotal);
	NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_VSKEW, regp->tv_vskew);
	NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_VSYNC_DELAY, regp->tv_vsync_delay);
	NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_HTOTAL, regp->tv_htotal);
	NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_HSKEW, regp->tv_hskew);
	NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_HSYNC_DELAY, regp->tv_hsync_delay);
	NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_HSYNC_DELAY2, regp->tv_hsync_delay2);

	/* flat-panel vertical regs, with the horizontal set 0x20 above */
	for (i = 0; i < 7; i++) {
		uint32_t ramdac_reg = NV_PRAMDAC_FP_VDISPLAY_END + (i * 4);

		NVWriteRAMDAC(dev, head, ramdac_reg, regp->fp_vert_regs[i]);
		NVWriteRAMDAC(dev, head, ramdac_reg + 0x20, regp->fp_horiz_regs[i]);
	}

	if (nv_gf4_disp_arch(dev)) {
		NVWriteRAMDAC(dev, head, NV_RAMDAC_FP_DITHER, regp->dither);
		for (i = 0; i < 3; i++) {
			NVWriteRAMDAC(dev, head, NV_PRAMDAC_850 + i * 4, regp->dither_regs[i]);
			NVWriteRAMDAC(dev, head, NV_PRAMDAC_85C + i * 4, regp->dither_regs[i + 3]);
		}
	}

	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL, regp->fp_control);
	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_0, regp->fp_debug_0);
	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_1, regp->fp_debug_1);
	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_2, regp->fp_debug_2);

	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_MARGIN_COLOR, regp->fp_margin_color);

	if (nv_gf4_disp_arch(dev))
		NVWriteRAMDAC(dev, head, NV_PRAMDAC_8C0, regp->ramdac_8c0);

	if (dev_priv->card_type == NV_40) {
		NVWriteRAMDAC(dev, head, NV_PRAMDAC_A20, regp->ramdac_a20);
		NVWriteRAMDAC(dev, head, NV_PRAMDAC_A24, regp->ramdac_a24);
		NVWriteRAMDAC(dev, head, NV_PRAMDAC_A34, regp->ramdac_a34);

		for (i = 0; i < 38; i++)
			NVWriteRAMDAC(dev, head,
				      NV_PRAMDAC_CTV + 4*i, regp->ctv_regs[i]);
	}
}
/* Save the standard VGA register set (misc output, CRTC, attribute,
 * graphics and sequencer registers) for one head. */
static void
nv_save_state_vga(struct drm_device *dev, int head,
		  struct nv04_mode_state *state)
{
	struct nv04_crtc_reg *regp = &state->crtc_reg[head];
	int i;

	regp->MiscOutReg = NVReadPRMVIO(dev, head, NV_PRMVIO_MISC__READ);

	for (i = 0; i < 25; i++)
		rd_cio_state(dev, head, regp, i);

	/* attribute regs are only reachable with the palette enabled */
	NVSetEnablePalette(dev, head, true);
	for (i = 0; i < 21; i++)
		regp->Attribute[i] = NVReadVgaAttr(dev, head, i);
	NVSetEnablePalette(dev, head, false);

	for (i = 0; i < 9; i++)
		regp->Graphics[i] = NVReadVgaGr(dev, head, i);

	for (i = 0; i < 5; i++)
		regp->Sequencer[i] = NVReadVgaSeq(dev, head, i);
}
/* Restore the standard VGA register set saved by nv_save_state_vga(),
 * temporarily unlocking the CRTC base registers for the CRTC writes. */
static void
nv_load_state_vga(struct drm_device *dev, int head,
		  struct nv04_mode_state *state)
{
	struct nv04_crtc_reg *regp = &state->crtc_reg[head];
	int i;

	NVWritePRMVIO(dev, head, NV_PRMVIO_MISC__WRITE, regp->MiscOutReg);

	for (i = 0; i < 5; i++)
		NVWriteVgaSeq(dev, head, i, regp->Sequencer[i]);

	/* CRTC regs 0-7 are write-protected unless unlocked */
	nv_lock_vga_crtc_base(dev, head, false);
	for (i = 0; i < 25; i++)
		wr_cio_state(dev, head, regp, i);
	nv_lock_vga_crtc_base(dev, head, true);

	for (i = 0; i < 9; i++)
		NVWriteVgaGr(dev, head, i, regp->Graphics[i]);

	/* attribute regs are only reachable with the palette enabled */
	NVSetEnablePalette(dev, head, true);
	for (i = 0; i < 21; i++)
		NVWriteVgaAttr(dev, head, i, regp->Attribute[i]);
	NVSetEnablePalette(dev, head, false);
}
/* Save the NVIDIA-extended CRTC state of one head: the vendor CRTC
 * extension (CRE) indices beyond the standard VGA set, plus several
 * PCRTC MMIO registers.  Which registers exist varies by chip
 * generation, hence the card_type / nv_gf4_disp_arch gating.
 */
static void
nv_save_state_ext(struct drm_device *dev, int head,
		  struct nv04_mode_state *state)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv04_crtc_reg *regp = &state->crtc_reg[head];
	int i;

	rd_cio_state(dev, head, regp, NV_CIO_CRE_LCD__INDEX);
	rd_cio_state(dev, head, regp, NV_CIO_CRE_RPC0_INDEX);
	rd_cio_state(dev, head, regp, NV_CIO_CRE_RPC1_INDEX);
	rd_cio_state(dev, head, regp, NV_CIO_CRE_LSR_INDEX);
	rd_cio_state(dev, head, regp, NV_CIO_CRE_PIXEL_INDEX);
	rd_cio_state(dev, head, regp, NV_CIO_CRE_HEB__INDEX);
	rd_cio_state(dev, head, regp, NV_CIO_CRE_ENH_INDEX);
	rd_cio_state(dev, head, regp, NV_CIO_CRE_FF_INDEX);
	rd_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX);
	rd_cio_state(dev, head, regp, NV_CIO_CRE_21);

	/* Generation-specific CRE indices. */
	if (dev_priv->card_type >= NV_20)
		rd_cio_state(dev, head, regp, NV_CIO_CRE_47);

	if (dev_priv->card_type >= NV_30)
		rd_cio_state(dev, head, regp, 0x9f);	/* no symbolic name known for this index */

	rd_cio_state(dev, head, regp, NV_CIO_CRE_49);
	rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX);
	rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX);
	rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX);
	rd_cio_state(dev, head, regp, NV_CIO_CRE_ILACE__INDEX);

	/* PCRTC MMIO registers that only exist from NV10 onwards. */
	if (dev_priv->card_type >= NV_10) {
		regp->crtc_830 = NVReadCRTC(dev, head, NV_PCRTC_830);
		regp->crtc_834 = NVReadCRTC(dev, head, NV_PCRTC_834);

		if (dev_priv->card_type >= NV_30)
			regp->gpio_ext = NVReadCRTC(dev, head, NV_PCRTC_GPIO_EXT);

		if (dev_priv->card_type == NV_40)
			regp->crtc_850 = NVReadCRTC(dev, head, NV_PCRTC_850);

		/* ENGINE_CTRL is only meaningful on dual-head chips. */
		if (nv_two_heads(dev))
			regp->crtc_eng_ctrl = NVReadCRTC(dev, head, NV_PCRTC_ENGINE_CTRL);
		regp->cursor_cfg = NVReadCRTC(dev, head, NV_PCRTC_CURSOR_CONFIG);
	}

	regp->crtc_cfg = NVReadCRTC(dev, head, NV_PCRTC_CONFIG);

	rd_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH3__INDEX);
	rd_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH4__INDEX);
	if (dev_priv->card_type >= NV_10) {
		rd_cio_state(dev, head, regp, NV_CIO_CRE_EBR_INDEX);
		rd_cio_state(dev, head, regp, NV_CIO_CRE_CSB);
		rd_cio_state(dev, head, regp, NV_CIO_CRE_4B);
		rd_cio_state(dev, head, regp, NV_CIO_CRE_TVOUT_LATENCY);
	}
	/* NV11 and NV20 don't have this, they stop at 0x52. */
	if (nv_gf4_disp_arch(dev)) {
		rd_cio_state(dev, head, regp, NV_CIO_CRE_42);
		rd_cio_state(dev, head, regp, NV_CIO_CRE_53);
		rd_cio_state(dev, head, regp, NV_CIO_CRE_54);

		/* CR58 is a 16-entry indexed bank behind CR57. */
		for (i = 0; i < 0x10; i++)
			regp->CR58[i] = NVReadVgaCrtc5758(dev, head, i);
		rd_cio_state(dev, head, regp, NV_CIO_CRE_59);
		rd_cio_state(dev, head, regp, NV_CIO_CRE_5B);
		rd_cio_state(dev, head, regp, NV_CIO_CRE_85);
		rd_cio_state(dev, head, regp, NV_CIO_CRE_86);
	}

	regp->fb_start = NVReadCRTC(dev, head, NV_PCRTC_START);
}
/* Restore the NVIDIA-extended CRTC state saved by nv_save_state_ext().
 * The write order here matters: ENGINE_CTRL must precede CRE_LCD, the
 * video overlay is quiesced before reprogramming, and on NV10 the
 * CRE_53/CRE_54 writes must happen during vertical retrace.
 */
static void
nv_load_state_ext(struct drm_device *dev, int head,
		  struct nv04_mode_state *state)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv04_crtc_reg *regp = &state->crtc_reg[head];
	uint32_t reg900;
	int i;

	if (dev_priv->card_type >= NV_10) {
		if (nv_two_heads(dev))
			/* setting ENGINE_CTRL (EC) *must* come before
			 * CIO_CRE_LCD, as writing CRE_LCD sets bits 16 & 17 in
			 * EC that should not be overwritten by writing stale EC
			 */
			NVWriteCRTC(dev, head, NV_PCRTC_ENGINE_CTRL, regp->crtc_eng_ctrl);

		/* Stop the video overlay and clamp its buffer offsets/limits
		 * to the available framebuffer before touching CRTC state. */
		nvWriteVIDEO(dev, NV_PVIDEO_STOP, 1);
		nvWriteVIDEO(dev, NV_PVIDEO_INTR_EN, 0);
		nvWriteVIDEO(dev, NV_PVIDEO_OFFSET_BUFF(0), 0);
		nvWriteVIDEO(dev, NV_PVIDEO_OFFSET_BUFF(1), 0);
		nvWriteVIDEO(dev, NV_PVIDEO_LIMIT(0), dev_priv->fb_available_size - 1);
		nvWriteVIDEO(dev, NV_PVIDEO_LIMIT(1), dev_priv->fb_available_size - 1);
		nvWriteVIDEO(dev, NV_PVIDEO_UVPLANE_LIMIT(0), dev_priv->fb_available_size - 1);
		nvWriteVIDEO(dev, NV_PVIDEO_UVPLANE_LIMIT(1), dev_priv->fb_available_size - 1);
		nvWriteMC(dev, NV_PBUS_POWERCTRL_2, 0);

		NVWriteCRTC(dev, head, NV_PCRTC_CURSOR_CONFIG, regp->cursor_cfg);
		NVWriteCRTC(dev, head, NV_PCRTC_830, regp->crtc_830);
		NVWriteCRTC(dev, head, NV_PCRTC_834, regp->crtc_834);

		if (dev_priv->card_type >= NV_30)
			NVWriteCRTC(dev, head, NV_PCRTC_GPIO_EXT, regp->gpio_ext);

		if (dev_priv->card_type == NV_40) {
			NVWriteCRTC(dev, head, NV_PCRTC_850, regp->crtc_850);

			/* Mirror the start-address-at-hsync config into
			 * RAMDAC_900 bit 16 on NV40. */
			reg900 = NVReadRAMDAC(dev, head, NV_PRAMDAC_900);
			if (regp->crtc_cfg == NV10_PCRTC_CONFIG_START_ADDRESS_HSYNC)
				NVWriteRAMDAC(dev, head, NV_PRAMDAC_900, reg900 | 0x10000);
			else
				NVWriteRAMDAC(dev, head, NV_PRAMDAC_900, reg900 & ~0x10000);
		}
	}

	NVWriteCRTC(dev, head, NV_PCRTC_CONFIG, regp->crtc_cfg);

	wr_cio_state(dev, head, regp, NV_CIO_CRE_RPC0_INDEX);
	wr_cio_state(dev, head, regp, NV_CIO_CRE_RPC1_INDEX);
	wr_cio_state(dev, head, regp, NV_CIO_CRE_LSR_INDEX);
	wr_cio_state(dev, head, regp, NV_CIO_CRE_PIXEL_INDEX);
	wr_cio_state(dev, head, regp, NV_CIO_CRE_LCD__INDEX);
	wr_cio_state(dev, head, regp, NV_CIO_CRE_HEB__INDEX);
	wr_cio_state(dev, head, regp, NV_CIO_CRE_ENH_INDEX);
	wr_cio_state(dev, head, regp, NV_CIO_CRE_FF_INDEX);
	wr_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX);

	if (dev_priv->card_type >= NV_20)
		wr_cio_state(dev, head, regp, NV_CIO_CRE_47);

	if (dev_priv->card_type >= NV_30)
		wr_cio_state(dev, head, regp, 0x9f);	/* no symbolic name known for this index */

	wr_cio_state(dev, head, regp, NV_CIO_CRE_49);
	wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX);
	wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX);
	wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX);
	if (dev_priv->card_type == NV_40)
		nv_fix_nv40_hw_cursor(dev, head);
	wr_cio_state(dev, head, regp, NV_CIO_CRE_ILACE__INDEX);

	wr_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH3__INDEX);
	wr_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH4__INDEX);
	if (dev_priv->card_type >= NV_10) {
		wr_cio_state(dev, head, regp, NV_CIO_CRE_EBR_INDEX);
		wr_cio_state(dev, head, regp, NV_CIO_CRE_CSB);
		wr_cio_state(dev, head, regp, NV_CIO_CRE_4B);
		wr_cio_state(dev, head, regp, NV_CIO_CRE_TVOUT_LATENCY);
	}
	/* NV11 and NV20 stop at 0x52. */
	if (nv_gf4_disp_arch(dev)) {
		if (dev_priv->card_type == NV_10) {
			/* Not waiting for vertical retrace before modifying
			   CRE_53/CRE_54 causes lockups. */
			nouveau_wait_eq(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x8);
			nouveau_wait_eq(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x0);
		}

		wr_cio_state(dev, head, regp, NV_CIO_CRE_42);
		wr_cio_state(dev, head, regp, NV_CIO_CRE_53);
		wr_cio_state(dev, head, regp, NV_CIO_CRE_54);

		/* CR58 is a 16-entry indexed bank behind CR57. */
		for (i = 0; i < 0x10; i++)
			NVWriteVgaCrtc5758(dev, head, i, regp->CR58[i]);
		wr_cio_state(dev, head, regp, NV_CIO_CRE_59);
		wr_cio_state(dev, head, regp, NV_CIO_CRE_5B);
		wr_cio_state(dev, head, regp, NV_CIO_CRE_85);
		wr_cio_state(dev, head, regp, NV_CIO_CRE_86);
	}

	NVWriteCRTC(dev, head, NV_PCRTC_START, regp->fb_start);

	/* Enable vblank interrupts. */
	NVWriteCRTC(dev, head, NV_PCRTC_INTR_EN_0,
		    (dev->vblank_enabled[head] ? 1 : 0));
	NVWriteCRTC(dev, head, NV_PCRTC_INTR_0, NV_PCRTC_INTR_0_VBLANK);
}
  847. static void
  848. nv_save_state_palette(struct drm_device *dev, int head,
  849. struct nv04_mode_state *state)
  850. {
  851. int head_offset = head * NV_PRMDIO_SIZE, i;
  852. nv_wr08(dev, NV_PRMDIO_PIXEL_MASK + head_offset,
  853. NV_PRMDIO_PIXEL_MASK_MASK);
  854. nv_wr08(dev, NV_PRMDIO_READ_MODE_ADDRESS + head_offset, 0x0);
  855. for (i = 0; i < 768; i++) {
  856. state->crtc_reg[head].DAC[i] = nv_rd08(dev,
  857. NV_PRMDIO_PALETTE_DATA + head_offset);
  858. }
  859. NVSetEnablePalette(dev, head, false);
  860. }
  861. void
  862. nouveau_hw_load_state_palette(struct drm_device *dev, int head,
  863. struct nv04_mode_state *state)
  864. {
  865. int head_offset = head * NV_PRMDIO_SIZE, i;
  866. nv_wr08(dev, NV_PRMDIO_PIXEL_MASK + head_offset,
  867. NV_PRMDIO_PIXEL_MASK_MASK);
  868. nv_wr08(dev, NV_PRMDIO_WRITE_MODE_ADDRESS + head_offset, 0x0);
  869. for (i = 0; i < 768; i++) {
  870. nv_wr08(dev, NV_PRMDIO_PALETTE_DATA + head_offset,
  871. state->crtc_reg[head].DAC[i]);
  872. }
  873. NVSetEnablePalette(dev, head, false);
  874. }
/* Save the complete nv04-style display state of one head: RAMDAC,
 * standard VGA registers, DAC palette, and the extended CRTC state.
 */
void nouveau_hw_save_state(struct drm_device *dev, int head,
			   struct nv04_mode_state *state)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	if (dev_priv->chipset == 0x11)
		/* NB: no attempt is made to restore the bad pll later on */
		nouveau_hw_fix_bad_vpll(dev, head);
	nv_save_state_ramdac(dev, head, state);
	nv_save_state_vga(dev, head, state);
	nv_save_state_palette(dev, head, state);
	nv_save_state_ext(dev, head, state);
}
/* Restore a complete nv04-style display state previously captured by
 * nouveau_hw_save_state().  The whole sequence runs with the VGA
 * protect (screen blanked/sequencer reset) engaged to avoid visible
 * glitches while registers are in flux.
 */
void nouveau_hw_load_state(struct drm_device *dev, int head,
			   struct nv04_mode_state *state)
{
	NVVgaProtect(dev, head, true);
	nv_load_state_ramdac(dev, head, state);
	nv_load_state_ext(dev, head, state);
	nouveau_hw_load_state_palette(dev, head, state);
	nv_load_state_vga(dev, head, state);
	NVVgaProtect(dev, head, false);
}