atom.c

/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Author: Stanislaw Skowronek
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <asm/unaligned.h>

#define ATOM_DEBUG

#include "atom.h"
#include "atom-names.h"
#include "atom-bits.h"
#include "radeon.h"

#define ATOM_COND_ABOVE 0
#define ATOM_COND_ABOVEOREQUAL 1
#define ATOM_COND_ALWAYS 2
#define ATOM_COND_BELOW 3
#define ATOM_COND_BELOWOREQUAL 4
#define ATOM_COND_EQUAL 5
#define ATOM_COND_NOTEQUAL 6

#define ATOM_PORT_ATI 0
#define ATOM_PORT_PCI 1
#define ATOM_PORT_SYSIO 2

#define ATOM_UNIT_MICROSEC 0
#define ATOM_UNIT_MILLISEC 1

#define PLL_INDEX 2
#define PLL_DATA 3

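/*
 * Per-invocation execution state for one command table.  @ps points at the
 * caller-supplied parameter space (@ps_shift offsets it for nested table
 * calls), @ws at a workspace allocated for the table, @start is the table's
 * base offset within the BIOS image, and @last_jump/@last_jump_jiffies feed
 * the stuck-loop watchdog in atom_op_jump().
 */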
typedef struct {
	struct atom_context *ctx;
	uint32_t *ps, *ws;
	int ps_shift;
	uint16_t start;
	unsigned last_jump;
	unsigned long last_jump_jiffies;
	bool abort;
} atom_exec_context;

int atom_debug = 0;
static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t *params);
int atom_execute_table(struct atom_context *ctx, int index, uint32_t *params);

static uint32_t atom_arg_mask[8] = {
	0xFFFFFFFF, 0x0000FFFF, 0x00FFFF00, 0xFFFF0000,
	0x000000FF, 0x0000FF00, 0x00FF0000, 0xFF000000
};
static int atom_arg_shift[8] = { 0, 0, 8, 16, 0, 8, 16, 24 };
static int atom_dst_to_src[8][4] = {
	/* translate destination alignment field to the source alignment encoding */
	{0, 0, 0, 0},
	{1, 2, 3, 0},
	{1, 2, 3, 0},
	{1, 2, 3, 0},
	{4, 5, 6, 7},
	{4, 5, 6, 7},
	{4, 5, 6, 7},
	{4, 5, 6, 7},
};
static int atom_def_dst[8] = { 0, 0, 1, 2, 0, 1, 2, 3 };

static int debug_depth = 0;

#ifdef ATOM_DEBUG
static void debug_print_spaces(int n)
{
	while (n--)
		printk(" ");
}

#define DEBUG(...) do { if (atom_debug) printk(KERN_DEBUG __VA_ARGS__); } while (0)
#define SDEBUG(...) do { if (atom_debug) { printk(KERN_DEBUG); debug_print_spaces(debug_depth); printk(__VA_ARGS__); } } while (0)
#else
#define DEBUG(...) do { } while (0)
#define SDEBUG(...) do { } while (0)
#endif

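/*
 * Execute one indirect-IO (IIO) program from the BIOS.  IIO programs are
 * small byte-coded sequences that describe how to access an indexed register
 * block: the opcodes move bit fields between @index, @data, ctx->io_attr and
 * a temporary value, and read or write the underlying IO register through
 * the card callbacks.  The temporary starts as the 0xCDCDCDCD poison pattern
 * and is returned when ATOM_IIO_END is reached.
 */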
static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
				 uint32_t index, uint32_t data)
{
	struct radeon_device *rdev = ctx->card->dev->dev_private;
	uint32_t temp = 0xCDCDCDCD;

	while (1)
		switch (CU8(base)) {
		case ATOM_IIO_NOP:
			base++;
			break;
		case ATOM_IIO_READ:
			temp = ctx->card->ioreg_read(ctx->card, CU16(base + 1));
			base += 3;
			break;
		case ATOM_IIO_WRITE:
			if (rdev->family == CHIP_RV515)
				(void)ctx->card->ioreg_read(ctx->card, CU16(base + 1));
			ctx->card->ioreg_write(ctx->card, CU16(base + 1), temp);
			base += 3;
			break;
		case ATOM_IIO_CLEAR:
			temp &= ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base + 2));
			base += 3;
			break;
		case ATOM_IIO_SET:
			temp |= (0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base + 2);
			base += 3;
			break;
		case ATOM_IIO_MOVE_INDEX:
			temp &= ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base + 3));
			temp |= ((index >> CU8(base + 2)) &
				 (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base + 3);
			base += 4;
			break;
		case ATOM_IIO_MOVE_DATA:
			temp &= ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base + 3));
			temp |= ((data >> CU8(base + 2)) &
				 (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base + 3);
			base += 4;
			break;
		case ATOM_IIO_MOVE_ATTR:
			temp &= ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base + 3));
			temp |= ((ctx->io_attr >> CU8(base + 2)) &
				 (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base + 3);
			base += 4;
			break;
		case ATOM_IIO_END:
			return temp;
		default:
			printk(KERN_INFO "Unknown IIO opcode.\n");
			return 0;
		}
}

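/*
 * Decode and fetch one source operand.  The low three bits of @attr select
 * the argument type (register, parameter space, workspace, data table,
 * frame-buffer scratch, immediate, PLL or MC register); bits 3..5 select the
 * byte/word/dword alignment within the 32-bit value.  The raw value is
 * optionally stored through @saved for read-modify-write destinations and is
 * then masked and shifted according to atom_arg_mask[]/atom_arg_shift[].
 */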
static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
				 int *ptr, uint32_t *saved, int print)
{
	uint32_t idx, val = 0xCDCDCDCD, align, arg;
	struct atom_context *gctx = ctx->ctx;

	arg = attr & 7;
	align = (attr >> 3) & 7;
	switch (arg) {
	case ATOM_ARG_REG:
		idx = U16(*ptr);
		(*ptr) += 2;
		if (print)
			DEBUG("REG[0x%04X]", idx);
		idx += gctx->reg_block;
		switch (gctx->io_mode) {
		case ATOM_IO_MM:
			val = gctx->card->reg_read(gctx->card, idx);
			break;
		case ATOM_IO_PCI:
			printk(KERN_INFO "PCI registers are not implemented.\n");
			return 0;
		case ATOM_IO_SYSIO:
			printk(KERN_INFO "SYSIO registers are not implemented.\n");
			return 0;
		default:
			if (!(gctx->io_mode & 0x80)) {
				printk(KERN_INFO "Bad IO mode.\n");
				return 0;
			}
			if (!gctx->iio[gctx->io_mode & 0x7F]) {
				printk(KERN_INFO "Undefined indirect IO read method %d.\n",
				       gctx->io_mode & 0x7F);
				return 0;
			}
			val = atom_iio_execute(gctx, gctx->iio[gctx->io_mode & 0x7F], idx, 0);
		}
		break;
	case ATOM_ARG_PS:
		idx = U8(*ptr);
		(*ptr)++;
		/* get_unaligned_le32 avoids unaligned accesses from atombios
		 * tables, noticed on a DEC Alpha. */
		val = get_unaligned_le32((u32 *)&ctx->ps[idx]);
		if (print)
			DEBUG("PS[0x%02X,0x%04X]", idx, val);
		break;
	case ATOM_ARG_WS:
		idx = U8(*ptr);
		(*ptr)++;
		if (print)
			DEBUG("WS[0x%02X]", idx);
		switch (idx) {
		case ATOM_WS_QUOTIENT:
			val = gctx->divmul[0];
			break;
		case ATOM_WS_REMAINDER:
			val = gctx->divmul[1];
			break;
		case ATOM_WS_DATAPTR:
			val = gctx->data_block;
			break;
		case ATOM_WS_SHIFT:
			val = gctx->shift;
			break;
		case ATOM_WS_OR_MASK:
			val = 1 << gctx->shift;
			break;
		case ATOM_WS_AND_MASK:
			val = ~(1 << gctx->shift);
			break;
		case ATOM_WS_FB_WINDOW:
			val = gctx->fb_base;
			break;
		case ATOM_WS_ATTRIBUTES:
			val = gctx->io_attr;
			break;
		case ATOM_WS_REGPTR:
			val = gctx->reg_block;
			break;
		default:
			val = ctx->ws[idx];
		}
		break;
	case ATOM_ARG_ID:
		idx = U16(*ptr);
		(*ptr) += 2;
		if (print) {
			if (gctx->data_block)
				DEBUG("ID[0x%04X+%04X]", idx, gctx->data_block);
			else
				DEBUG("ID[0x%04X]", idx);
		}
		val = U32(idx + gctx->data_block);
		break;
	case ATOM_ARG_FB:
		idx = U8(*ptr);
		(*ptr)++;
		if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) {
			DRM_ERROR("ATOM: fb read beyond scratch region: %d vs. %d\n",
				  gctx->fb_base + (idx * 4), gctx->scratch_size_bytes);
			val = 0;
		} else
			val = gctx->scratch[(gctx->fb_base / 4) + idx];
		if (print)
			DEBUG("FB[0x%02X]", idx);
		break;
	case ATOM_ARG_IMM:
		switch (align) {
		case ATOM_SRC_DWORD:
			val = U32(*ptr);
			(*ptr) += 4;
			if (print)
				DEBUG("IMM 0x%08X\n", val);
			return val;
		case ATOM_SRC_WORD0:
		case ATOM_SRC_WORD8:
		case ATOM_SRC_WORD16:
			val = U16(*ptr);
			(*ptr) += 2;
			if (print)
				DEBUG("IMM 0x%04X\n", val);
			return val;
		case ATOM_SRC_BYTE0:
		case ATOM_SRC_BYTE8:
		case ATOM_SRC_BYTE16:
		case ATOM_SRC_BYTE24:
			val = U8(*ptr);
			(*ptr)++;
			if (print)
				DEBUG("IMM 0x%02X\n", val);
			return val;
		}
		return 0;
	case ATOM_ARG_PLL:
		idx = U8(*ptr);
		(*ptr)++;
		if (print)
			DEBUG("PLL[0x%02X]", idx);
		val = gctx->card->pll_read(gctx->card, idx);
		break;
	case ATOM_ARG_MC:
		idx = U8(*ptr);
		(*ptr)++;
		if (print)
			DEBUG("MC[0x%02X]", idx);
		val = gctx->card->mc_read(gctx->card, idx);
		break;
	}
	if (saved)
		*saved = val;
	val &= atom_arg_mask[align];
	val >>= atom_arg_shift[align];
	if (print)
		switch (align) {
		case ATOM_SRC_DWORD:
			DEBUG(".[31:0] -> 0x%08X\n", val);
			break;
		case ATOM_SRC_WORD0:
			DEBUG(".[15:0] -> 0x%04X\n", val);
			break;
		case ATOM_SRC_WORD8:
			DEBUG(".[23:8] -> 0x%04X\n", val);
			break;
		case ATOM_SRC_WORD16:
			DEBUG(".[31:16] -> 0x%04X\n", val);
			break;
		case ATOM_SRC_BYTE0:
			DEBUG(".[7:0] -> 0x%02X\n", val);
			break;
		case ATOM_SRC_BYTE8:
			DEBUG(".[15:8] -> 0x%02X\n", val);
			break;
		case ATOM_SRC_BYTE16:
			DEBUG(".[23:16] -> 0x%02X\n", val);
			break;
		case ATOM_SRC_BYTE24:
			DEBUG(".[31:24] -> 0x%02X\n", val);
			break;
		}
	return val;
}

static void atom_skip_src_int(atom_exec_context *ctx, uint8_t attr, int *ptr)
{
	uint32_t align = (attr >> 3) & 7, arg = attr & 7;

	switch (arg) {
	case ATOM_ARG_REG:
	case ATOM_ARG_ID:
		(*ptr) += 2;
		break;
	case ATOM_ARG_PLL:
	case ATOM_ARG_MC:
	case ATOM_ARG_PS:
	case ATOM_ARG_WS:
	case ATOM_ARG_FB:
		(*ptr)++;
		break;
	case ATOM_ARG_IMM:
		switch (align) {
		case ATOM_SRC_DWORD:
			(*ptr) += 4;
			return;
		case ATOM_SRC_WORD0:
		case ATOM_SRC_WORD8:
		case ATOM_SRC_WORD16:
			(*ptr) += 2;
			return;
		case ATOM_SRC_BYTE0:
		case ATOM_SRC_BYTE8:
		case ATOM_SRC_BYTE16:
		case ATOM_SRC_BYTE24:
			(*ptr)++;
			return;
		}
		return;
	}
}

static uint32_t atom_get_src(atom_exec_context *ctx, uint8_t attr, int *ptr)
{
	return atom_get_src_int(ctx, attr, ptr, NULL, 1);
}

static uint32_t atom_get_src_direct(atom_exec_context *ctx, uint8_t align, int *ptr)
{
	uint32_t val = 0xCDCDCDCD;

	switch (align) {
	case ATOM_SRC_DWORD:
		val = U32(*ptr);
		(*ptr) += 4;
		break;
	case ATOM_SRC_WORD0:
	case ATOM_SRC_WORD8:
	case ATOM_SRC_WORD16:
		val = U16(*ptr);
		(*ptr) += 2;
		break;
	case ATOM_SRC_BYTE0:
	case ATOM_SRC_BYTE8:
	case ATOM_SRC_BYTE16:
	case ATOM_SRC_BYTE24:
		val = U8(*ptr);
		(*ptr)++;
		break;
	}
	return val;
}

static uint32_t atom_get_dst(atom_exec_context *ctx, int arg, uint8_t attr,
			     int *ptr, uint32_t *saved, int print)
{
	return atom_get_src_int(ctx,
				arg | atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3] << 3,
				ptr, saved, print);
}

static void atom_skip_dst(atom_exec_context *ctx, int arg, uint8_t attr, int *ptr)
{
	atom_skip_src_int(ctx,
			  arg | atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3] << 3, ptr);
}

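/*
 * Write a result back to a destination operand.  The new value is shifted
 * and masked into the alignment selected by @attr and merged with @saved,
 * the full 32-bit value the destination held before the operation, so the
 * bits outside the addressed field are preserved on write-back.
 */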
static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr,
			 int *ptr, uint32_t val, uint32_t saved)
{
	uint32_t align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
	uint32_t old_val = val, idx;
	struct atom_context *gctx = ctx->ctx;

	old_val &= atom_arg_mask[align] >> atom_arg_shift[align];
	val <<= atom_arg_shift[align];
	val &= atom_arg_mask[align];
	saved &= ~atom_arg_mask[align];
	val |= saved;
	switch (arg) {
	case ATOM_ARG_REG:
		idx = U16(*ptr);
		(*ptr) += 2;
		DEBUG("REG[0x%04X]", idx);
		idx += gctx->reg_block;
		switch (gctx->io_mode) {
		case ATOM_IO_MM:
			if (idx == 0)
				gctx->card->reg_write(gctx->card, idx, val << 2);
			else
				gctx->card->reg_write(gctx->card, idx, val);
			break;
		case ATOM_IO_PCI:
			printk(KERN_INFO "PCI registers are not implemented.\n");
			return;
		case ATOM_IO_SYSIO:
			printk(KERN_INFO "SYSIO registers are not implemented.\n");
			return;
		default:
			if (!(gctx->io_mode & 0x80)) {
				printk(KERN_INFO "Bad IO mode.\n");
				return;
			}
			if (!gctx->iio[gctx->io_mode & 0xFF]) {
				printk(KERN_INFO "Undefined indirect IO write method %d.\n",
				       gctx->io_mode & 0x7F);
				return;
			}
			atom_iio_execute(gctx, gctx->iio[gctx->io_mode & 0xFF], idx, val);
		}
		break;
	case ATOM_ARG_PS:
		idx = U8(*ptr);
		(*ptr)++;
		DEBUG("PS[0x%02X]", idx);
		ctx->ps[idx] = cpu_to_le32(val);
		break;
	case ATOM_ARG_WS:
		idx = U8(*ptr);
		(*ptr)++;
		DEBUG("WS[0x%02X]", idx);
		switch (idx) {
		case ATOM_WS_QUOTIENT:
			gctx->divmul[0] = val;
			break;
		case ATOM_WS_REMAINDER:
			gctx->divmul[1] = val;
			break;
		case ATOM_WS_DATAPTR:
			gctx->data_block = val;
			break;
		case ATOM_WS_SHIFT:
			gctx->shift = val;
			break;
		case ATOM_WS_OR_MASK:
		case ATOM_WS_AND_MASK:
			break;
		case ATOM_WS_FB_WINDOW:
			gctx->fb_base = val;
			break;
		case ATOM_WS_ATTRIBUTES:
			gctx->io_attr = val;
			break;
		case ATOM_WS_REGPTR:
			gctx->reg_block = val;
			break;
		default:
			ctx->ws[idx] = val;
		}
		break;
	case ATOM_ARG_FB:
		idx = U8(*ptr);
		(*ptr)++;
		if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) {
			DRM_ERROR("ATOM: fb write beyond scratch region: %d vs. %d\n",
				  gctx->fb_base + (idx * 4), gctx->scratch_size_bytes);
		} else
			gctx->scratch[(gctx->fb_base / 4) + idx] = val;
		DEBUG("FB[0x%02X]", idx);
		break;
	case ATOM_ARG_PLL:
		idx = U8(*ptr);
		(*ptr)++;
		DEBUG("PLL[0x%02X]", idx);
		gctx->card->pll_write(gctx->card, idx, val);
		break;
	case ATOM_ARG_MC:
		idx = U8(*ptr);
		(*ptr)++;
		DEBUG("MC[0x%02X]", idx);
		gctx->card->mc_write(gctx->card, idx, val);
		return;
	}
	switch (align) {
	case ATOM_SRC_DWORD:
		DEBUG(".[31:0] <- 0x%08X\n", old_val);
		break;
	case ATOM_SRC_WORD0:
		DEBUG(".[15:0] <- 0x%04X\n", old_val);
		break;
	case ATOM_SRC_WORD8:
		DEBUG(".[23:8] <- 0x%04X\n", old_val);
		break;
	case ATOM_SRC_WORD16:
		DEBUG(".[31:16] <- 0x%04X\n", old_val);
		break;
	case ATOM_SRC_BYTE0:
		DEBUG(".[7:0] <- 0x%02X\n", old_val);
		break;
	case ATOM_SRC_BYTE8:
		DEBUG(".[15:8] <- 0x%02X\n", old_val);
		break;
	case ATOM_SRC_BYTE16:
		DEBUG(".[23:16] <- 0x%02X\n", old_val);
		break;
	case ATOM_SRC_BYTE24:
		DEBUG(".[31:24] <- 0x%02X\n", old_val);
		break;
	}
}

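/*
 * One handler per AtomBIOS opcode follows.  Each atom_op_*() consumes its
 * operands through the *ptr cursor, computes the result and writes it back
 * with atom_put_dst(); @arg carries the per-opcode variant (destination
 * argument type, jump condition, delay unit or port) taken from
 * opcode_table[] further down.
 */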
static void atom_op_add(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t dst, src, saved;
	int dptr = *ptr;

	SDEBUG(" dst: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	SDEBUG(" src: ");
	src = atom_get_src(ctx, attr, ptr);
	dst += src;
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}

static void atom_op_and(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t dst, src, saved;
	int dptr = *ptr;

	SDEBUG(" dst: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	SDEBUG(" src: ");
	src = atom_get_src(ctx, attr, ptr);
	dst &= src;
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}

static void atom_op_beep(atom_exec_context *ctx, int *ptr, int arg)
{
	printk("ATOM BIOS beeped!\n");
}

static void atom_op_calltable(atom_exec_context *ctx, int *ptr, int arg)
{
	int idx = U8((*ptr)++);
	int r = 0;

	if (idx < ATOM_TABLE_NAMES_CNT)
		SDEBUG(" table: %d (%s)\n", idx, atom_table_names[idx]);
	else
		SDEBUG(" table: %d\n", idx);
	if (U16(ctx->ctx->cmd_table + 4 + 2 * idx))
		r = atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift);
	if (r) {
		ctx->abort = true;
	}
}

static void atom_op_clear(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t saved;
	int dptr = *ptr;

	attr &= 0x38;
	attr |= atom_def_dst[attr >> 3] << 6;
	atom_get_dst(ctx, arg, attr, ptr, &saved, 0);
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, 0, saved);
}

static void atom_op_compare(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t dst, src;

	SDEBUG(" src1: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
	SDEBUG(" src2: ");
	src = atom_get_src(ctx, attr, ptr);
	ctx->ctx->cs_equal = (dst == src);
	ctx->ctx->cs_above = (dst > src);
	SDEBUG(" result: %s %s\n", ctx->ctx->cs_equal ? "EQ" : "NE",
	       ctx->ctx->cs_above ? "GT" : "LE");
}

static void atom_op_delay(atom_exec_context *ctx, int *ptr, int arg)
{
	unsigned count = U8((*ptr)++);

	SDEBUG(" count: %d\n", count);
	if (arg == ATOM_UNIT_MICROSEC)
		udelay(count);
	else if (!drm_can_sleep())
		mdelay(count);
	else
		msleep(count);
}

static void atom_op_div(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t dst, src;

	SDEBUG(" src1: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
	SDEBUG(" src2: ");
	src = atom_get_src(ctx, attr, ptr);
	if (src != 0) {
		ctx->ctx->divmul[0] = dst / src;
		ctx->ctx->divmul[1] = dst % src;
	} else {
		ctx->ctx->divmul[0] = 0;
		ctx->ctx->divmul[1] = 0;
	}
}

static void atom_op_eot(atom_exec_context *ctx, int *ptr, int arg)
{
	/* functionally, a nop */
}

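/*
 * Conditional jump.  The condition flags (cs_equal/cs_above) are set by the
 * COMPARE and TEST opcodes.  A taken jump back to the same target is timed
 * with jiffies; if the script keeps spinning on one branch for more than
 * five seconds the table is aborted so a broken BIOS cannot hang the kernel.
 */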
static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg)
{
	int execute = 0, target = U16(*ptr);
	unsigned long cjiffies;

	(*ptr) += 2;
	switch (arg) {
	case ATOM_COND_ABOVE:
		execute = ctx->ctx->cs_above;
		break;
	case ATOM_COND_ABOVEOREQUAL:
		execute = ctx->ctx->cs_above || ctx->ctx->cs_equal;
		break;
	case ATOM_COND_ALWAYS:
		execute = 1;
		break;
	case ATOM_COND_BELOW:
		execute = !(ctx->ctx->cs_above || ctx->ctx->cs_equal);
		break;
	case ATOM_COND_BELOWOREQUAL:
		execute = !ctx->ctx->cs_above;
		break;
	case ATOM_COND_EQUAL:
		execute = ctx->ctx->cs_equal;
		break;
	case ATOM_COND_NOTEQUAL:
		execute = !ctx->ctx->cs_equal;
		break;
	}
	if (arg != ATOM_COND_ALWAYS)
		SDEBUG(" taken: %s\n", execute ? "yes" : "no");
	SDEBUG(" target: 0x%04X\n", target);
	if (execute) {
		if (ctx->last_jump == (ctx->start + target)) {
			cjiffies = jiffies;
			if (time_after(cjiffies, ctx->last_jump_jiffies)) {
				cjiffies -= ctx->last_jump_jiffies;
				if ((jiffies_to_msecs(cjiffies) > 5000)) {
					DRM_ERROR("atombios stuck in loop for more than 5secs aborting\n");
					ctx->abort = true;
				}
			} else {
				/* jiffies wrapped around, so just wait a little longer */
				ctx->last_jump_jiffies = jiffies;
			}
		} else {
			ctx->last_jump = ctx->start + target;
			ctx->last_jump_jiffies = jiffies;
		}
		*ptr = ctx->start + target;
	}
}

static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t dst, mask, src, saved;
	int dptr = *ptr;

	SDEBUG(" dst: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	mask = atom_get_src_direct(ctx, ((attr >> 3) & 7), ptr);
	SDEBUG(" mask: 0x%08x", mask);
	SDEBUG(" src: ");
	src = atom_get_src(ctx, attr, ptr);
	dst &= mask;
	dst |= src;
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}

static void atom_op_move(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t src, saved;
	int dptr = *ptr;

	if (((attr >> 3) & 7) != ATOM_SRC_DWORD)
		atom_get_dst(ctx, arg, attr, ptr, &saved, 0);
	else {
		atom_skip_dst(ctx, arg, attr, ptr);
		saved = 0xCDCDCDCD;
	}
	SDEBUG(" src: ");
	src = atom_get_src(ctx, attr, ptr);
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, src, saved);
}

static void atom_op_mul(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t dst, src;

	SDEBUG(" src1: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
	SDEBUG(" src2: ");
	src = atom_get_src(ctx, attr, ptr);
	ctx->ctx->divmul[0] = dst * src;
}

static void atom_op_nop(atom_exec_context *ctx, int *ptr, int arg)
{
	/* nothing */
}

static void atom_op_or(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t dst, src, saved;
	int dptr = *ptr;

	SDEBUG(" dst: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	SDEBUG(" src: ");
	src = atom_get_src(ctx, attr, ptr);
	dst |= src;
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}

static void atom_op_postcard(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t val = U8((*ptr)++);

	SDEBUG("POST card output: 0x%02X\n", val);
}

static void atom_op_repeat(atom_exec_context *ctx, int *ptr, int arg)
{
	printk(KERN_INFO "unimplemented!\n");
}

static void atom_op_restorereg(atom_exec_context *ctx, int *ptr, int arg)
{
	printk(KERN_INFO "unimplemented!\n");
}

static void atom_op_savereg(atom_exec_context *ctx, int *ptr, int arg)
{
	printk(KERN_INFO "unimplemented!\n");
}

static void atom_op_setdatablock(atom_exec_context *ctx, int *ptr, int arg)
{
	int idx = U8(*ptr);

	(*ptr)++;
	SDEBUG(" block: %d\n", idx);
	if (!idx)
		ctx->ctx->data_block = 0;
	else if (idx == 255)
		ctx->ctx->data_block = ctx->start;
	else
		ctx->ctx->data_block = U16(ctx->ctx->data_table + 4 + 2 * idx);
	SDEBUG(" base: 0x%04X\n", ctx->ctx->data_block);
}

static void atom_op_setfbbase(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);

	SDEBUG(" fb_base: ");
	ctx->ctx->fb_base = atom_get_src(ctx, attr, ptr);
}

static void atom_op_setport(atom_exec_context *ctx, int *ptr, int arg)
{
	int port;

	switch (arg) {
	case ATOM_PORT_ATI:
		port = U16(*ptr);
		if (port < ATOM_IO_NAMES_CNT)
			SDEBUG(" port: %d (%s)\n", port, atom_io_names[port]);
		else
			SDEBUG(" port: %d\n", port);
		if (!port)
			ctx->ctx->io_mode = ATOM_IO_MM;
		else
			ctx->ctx->io_mode = ATOM_IO_IIO | port;
		(*ptr) += 2;
		break;
	case ATOM_PORT_PCI:
		ctx->ctx->io_mode = ATOM_IO_PCI;
		(*ptr)++;
		break;
	case ATOM_PORT_SYSIO:
		ctx->ctx->io_mode = ATOM_IO_SYSIO;
		(*ptr)++;
		break;
	}
}

static void atom_op_setregblock(atom_exec_context *ctx, int *ptr, int arg)
{
	ctx->ctx->reg_block = U16(*ptr);
	(*ptr) += 2;
	SDEBUG(" base: 0x%04X\n", ctx->ctx->reg_block);
}

static void atom_op_shift_left(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++), shift;
	uint32_t saved, dst;
	int dptr = *ptr;

	attr &= 0x38;
	attr |= atom_def_dst[attr >> 3] << 6;
	SDEBUG(" dst: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr);
	SDEBUG(" shift: %d\n", shift);
	dst <<= shift;
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}

static void atom_op_shift_right(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++), shift;
	uint32_t saved, dst;
	int dptr = *ptr;

	attr &= 0x38;
	attr |= atom_def_dst[attr >> 3] << 6;
	SDEBUG(" dst: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr);
	SDEBUG(" shift: %d\n", shift);
	dst >>= shift;
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}

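/*
 * SHL/SHR differ from SHIFT_LEFT/SHIFT_RIGHT above: they shift the full
 * 32-bit value the destination held (the "saved" copy) rather than the
 * extracted field, and then re-extract the aligned field from the result.
 */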
static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++), shift;
	uint32_t saved, dst;
	int dptr = *ptr;
	uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];

	SDEBUG(" dst: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	/* op needs the full dst value */
	dst = saved;
	shift = atom_get_src(ctx, attr, ptr);
	SDEBUG(" shift: %d\n", shift);
	dst <<= shift;
	dst &= atom_arg_mask[dst_align];
	dst >>= atom_arg_shift[dst_align];
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}

static void atom_op_shr(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++), shift;
	uint32_t saved, dst;
	int dptr = *ptr;
	uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];

	SDEBUG(" dst: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	/* op needs the full dst value */
	dst = saved;
	shift = atom_get_src(ctx, attr, ptr);
	SDEBUG(" shift: %d\n", shift);
	dst >>= shift;
	dst &= atom_arg_mask[dst_align];
	dst >>= atom_arg_shift[dst_align];
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}

static void atom_op_sub(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t dst, src, saved;
	int dptr = *ptr;

	SDEBUG(" dst: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	SDEBUG(" src: ");
	src = atom_get_src(ctx, attr, ptr);
	dst -= src;
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}

static void atom_op_switch(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t src, val, target;

	SDEBUG(" switch: ");
	src = atom_get_src(ctx, attr, ptr);
	while (U16(*ptr) != ATOM_CASE_END)
		if (U8(*ptr) == ATOM_CASE_MAGIC) {
			(*ptr)++;
			SDEBUG(" case: ");
			val = atom_get_src(ctx, (attr & 0x38) | ATOM_ARG_IMM, ptr);
			target = U16(*ptr);
			if (val == src) {
				SDEBUG(" target: %04X\n", target);
				*ptr = ctx->start + target;
				return;
			}
			(*ptr) += 2;
		} else {
			printk(KERN_INFO "Bad case.\n");
			return;
		}
	(*ptr) += 2;
}

static void atom_op_test(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t dst, src;

	SDEBUG(" src1: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
	SDEBUG(" src2: ");
	src = atom_get_src(ctx, attr, ptr);
	ctx->ctx->cs_equal = ((dst & src) == 0);
	SDEBUG(" result: %s\n", ctx->ctx->cs_equal ? "EQ" : "NE");
}

static void atom_op_xor(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t dst, src, saved;
	int dptr = *ptr;

	SDEBUG(" dst: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	SDEBUG(" src: ");
	src = atom_get_src(ctx, attr, ptr);
	dst ^= src;
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}

static void atom_op_debug(atom_exec_context *ctx, int *ptr, int arg)
{
	printk(KERN_INFO "unimplemented!\n");
}

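/*
 * Opcode dispatch table, indexed by the bytecode opcode value.  Entry 0 is
 * invalid (NULL); the interpreter only dispatches opcodes in the range
 * 1..ATOM_OP_CNT-1.  The arg member selects the opcode variant: destination
 * argument type for the ALU/move ops, condition code for the jumps, delay
 * unit for DELAY, and port selection for the SETPORT variants.
 */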
static struct {
	void (*func)(atom_exec_context *, int *, int);
	int arg;
} opcode_table[ATOM_OP_CNT] = {
	{ NULL, 0 },
	{ atom_op_move, ATOM_ARG_REG },
	{ atom_op_move, ATOM_ARG_PS },
	{ atom_op_move, ATOM_ARG_WS },
	{ atom_op_move, ATOM_ARG_FB },
	{ atom_op_move, ATOM_ARG_PLL },
	{ atom_op_move, ATOM_ARG_MC },
	{ atom_op_and, ATOM_ARG_REG },
	{ atom_op_and, ATOM_ARG_PS },
	{ atom_op_and, ATOM_ARG_WS },
	{ atom_op_and, ATOM_ARG_FB },
	{ atom_op_and, ATOM_ARG_PLL },
	{ atom_op_and, ATOM_ARG_MC },
	{ atom_op_or, ATOM_ARG_REG },
	{ atom_op_or, ATOM_ARG_PS },
	{ atom_op_or, ATOM_ARG_WS },
	{ atom_op_or, ATOM_ARG_FB },
	{ atom_op_or, ATOM_ARG_PLL },
	{ atom_op_or, ATOM_ARG_MC },
	{ atom_op_shift_left, ATOM_ARG_REG },
	{ atom_op_shift_left, ATOM_ARG_PS },
	{ atom_op_shift_left, ATOM_ARG_WS },
	{ atom_op_shift_left, ATOM_ARG_FB },
	{ atom_op_shift_left, ATOM_ARG_PLL },
	{ atom_op_shift_left, ATOM_ARG_MC },
	{ atom_op_shift_right, ATOM_ARG_REG },
	{ atom_op_shift_right, ATOM_ARG_PS },
	{ atom_op_shift_right, ATOM_ARG_WS },
	{ atom_op_shift_right, ATOM_ARG_FB },
	{ atom_op_shift_right, ATOM_ARG_PLL },
	{ atom_op_shift_right, ATOM_ARG_MC },
	{ atom_op_mul, ATOM_ARG_REG },
	{ atom_op_mul, ATOM_ARG_PS },
	{ atom_op_mul, ATOM_ARG_WS },
	{ atom_op_mul, ATOM_ARG_FB },
	{ atom_op_mul, ATOM_ARG_PLL },
	{ atom_op_mul, ATOM_ARG_MC },
	{ atom_op_div, ATOM_ARG_REG },
	{ atom_op_div, ATOM_ARG_PS },
	{ atom_op_div, ATOM_ARG_WS },
	{ atom_op_div, ATOM_ARG_FB },
	{ atom_op_div, ATOM_ARG_PLL },
	{ atom_op_div, ATOM_ARG_MC },
	{ atom_op_add, ATOM_ARG_REG },
	{ atom_op_add, ATOM_ARG_PS },
	{ atom_op_add, ATOM_ARG_WS },
	{ atom_op_add, ATOM_ARG_FB },
	{ atom_op_add, ATOM_ARG_PLL },
	{ atom_op_add, ATOM_ARG_MC },
	{ atom_op_sub, ATOM_ARG_REG },
	{ atom_op_sub, ATOM_ARG_PS },
	{ atom_op_sub, ATOM_ARG_WS },
	{ atom_op_sub, ATOM_ARG_FB },
	{ atom_op_sub, ATOM_ARG_PLL },
	{ atom_op_sub, ATOM_ARG_MC },
	{ atom_op_setport, ATOM_PORT_ATI },
	{ atom_op_setport, ATOM_PORT_PCI },
	{ atom_op_setport, ATOM_PORT_SYSIO },
	{ atom_op_setregblock, 0 },
	{ atom_op_setfbbase, 0 },
	{ atom_op_compare, ATOM_ARG_REG },
	{ atom_op_compare, ATOM_ARG_PS },
	{ atom_op_compare, ATOM_ARG_WS },
	{ atom_op_compare, ATOM_ARG_FB },
	{ atom_op_compare, ATOM_ARG_PLL },
	{ atom_op_compare, ATOM_ARG_MC },
	{ atom_op_switch, 0 },
	{ atom_op_jump, ATOM_COND_ALWAYS },
	{ atom_op_jump, ATOM_COND_EQUAL },
	{ atom_op_jump, ATOM_COND_BELOW },
	{ atom_op_jump, ATOM_COND_ABOVE },
	{ atom_op_jump, ATOM_COND_BELOWOREQUAL },
	{ atom_op_jump, ATOM_COND_ABOVEOREQUAL },
	{ atom_op_jump, ATOM_COND_NOTEQUAL },
	{ atom_op_test, ATOM_ARG_REG },
	{ atom_op_test, ATOM_ARG_PS },
	{ atom_op_test, ATOM_ARG_WS },
	{ atom_op_test, ATOM_ARG_FB },
	{ atom_op_test, ATOM_ARG_PLL },
	{ atom_op_test, ATOM_ARG_MC },
	{ atom_op_delay, ATOM_UNIT_MILLISEC },
	{ atom_op_delay, ATOM_UNIT_MICROSEC },
	{ atom_op_calltable, 0 },
	{ atom_op_repeat, 0 },
	{ atom_op_clear, ATOM_ARG_REG },
	{ atom_op_clear, ATOM_ARG_PS },
	{ atom_op_clear, ATOM_ARG_WS },
	{ atom_op_clear, ATOM_ARG_FB },
	{ atom_op_clear, ATOM_ARG_PLL },
	{ atom_op_clear, ATOM_ARG_MC },
	{ atom_op_nop, 0 },
	{ atom_op_eot, 0 },
	{ atom_op_mask, ATOM_ARG_REG },
	{ atom_op_mask, ATOM_ARG_PS },
	{ atom_op_mask, ATOM_ARG_WS },
	{ atom_op_mask, ATOM_ARG_FB },
	{ atom_op_mask, ATOM_ARG_PLL },
	{ atom_op_mask, ATOM_ARG_MC },
	{ atom_op_postcard, 0 },
	{ atom_op_beep, 0 },
	{ atom_op_savereg, 0 },
	{ atom_op_restorereg, 0 },
	{ atom_op_setdatablock, 0 },
	{ atom_op_xor, ATOM_ARG_REG },
	{ atom_op_xor, ATOM_ARG_PS },
	{ atom_op_xor, ATOM_ARG_WS },
	{ atom_op_xor, ATOM_ARG_FB },
	{ atom_op_xor, ATOM_ARG_PLL },
	{ atom_op_xor, ATOM_ARG_MC },
	{ atom_op_shl, ATOM_ARG_REG },
	{ atom_op_shl, ATOM_ARG_PS },
	{ atom_op_shl, ATOM_ARG_WS },
	{ atom_op_shl, ATOM_ARG_FB },
	{ atom_op_shl, ATOM_ARG_PLL },
	{ atom_op_shl, ATOM_ARG_MC },
	{ atom_op_shr, ATOM_ARG_REG },
	{ atom_op_shr, ATOM_ARG_PS },
	{ atom_op_shr, ATOM_ARG_WS },
	{ atom_op_shr, ATOM_ARG_FB },
	{ atom_op_shr, ATOM_ARG_PLL },
	{ atom_op_shr, ATOM_ARG_MC },
	{ atom_op_debug, 0 },
};

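/*
 * Core interpreter.  A command table starts with a small header giving its
 * total size, workspace (WS) size and parameter space (PS) size, followed by
 * the bytecode.  The loop fetches one opcode at a time and dispatches it
 * through opcode_table[] until EOT, an invalid opcode, or an abort requested
 * by a handler (stuck jump or failed nested CALL_TABLE).
 */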
static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t *params)
{
	int base = CU16(ctx->cmd_table + 4 + 2 * index);
	int len, ws, ps, ptr;
	unsigned char op;
	atom_exec_context ectx;
	int ret = 0;

	if (!base)
		return -EINVAL;

	len = CU16(base + ATOM_CT_SIZE_PTR);
	ws = CU8(base + ATOM_CT_WS_PTR);
	ps = CU8(base + ATOM_CT_PS_PTR) & ATOM_CT_PS_MASK;
	ptr = base + ATOM_CT_CODE_PTR;

	SDEBUG(">> execute %04X (len %d, WS %d, PS %d)\n", base, len, ws, ps);

	ectx.ctx = ctx;
	ectx.ps_shift = ps / 4;
	ectx.start = base;
	ectx.ps = params;
	ectx.abort = false;
	ectx.last_jump = 0;
	if (ws)
		ectx.ws = kzalloc(4 * ws, GFP_KERNEL);
	else
		ectx.ws = NULL;

	debug_depth++;
	while (1) {
		op = CU8(ptr++);
		if (op < ATOM_OP_NAMES_CNT)
			SDEBUG("%s @ 0x%04X\n", atom_op_names[op], ptr - 1);
		else
			SDEBUG("[%d] @ 0x%04X\n", op, ptr - 1);
		if (ectx.abort) {
			DRM_ERROR("atombios stuck executing %04X (len %d, WS %d, PS %d) @ 0x%04X\n",
				  base, len, ws, ps, ptr - 1);
			ret = -EINVAL;
			goto free;
		}

		if (op < ATOM_OP_CNT && op > 0)
			opcode_table[op].func(&ectx, &ptr, opcode_table[op].arg);
		else
			break;

		if (op == ATOM_OP_EOT)
			break;
	}
	debug_depth--;
	SDEBUG("<<\n");

free:
	if (ws)
		kfree(ectx.ws);
	return ret;
}

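/*
 * Public entry points.  atom_execute_table_scratch_unlocked() serialises
 * table execution on ctx->mutex and resets the per-run interpreter state
 * (data block, register block, fb window, IO mode, divmul); the plain
 * atom_execute_table() additionally takes ctx->scratch_mutex around it to
 * protect the shared scratch area.
 *
 * Illustrative call from driver code (a hedged sketch only; the layout of
 * params[] depends entirely on the parameter structure of the specific
 * command table, and the COMMAND index lookup is assumed from atombios.h):
 *
 *	uint32_t params[16] = { 0 };
 *	int index = GetIndexIntoMasterTable(COMMAND, ASIC_Init);
 *
 *	atom_execute_table(ctx, index, params);
 */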
int atom_execute_table_scratch_unlocked(struct atom_context *ctx, int index, uint32_t *params)
{
	int r;

	mutex_lock(&ctx->mutex);
	/* reset data block */
	ctx->data_block = 0;
	/* reset reg block */
	ctx->reg_block = 0;
	/* reset fb window */
	ctx->fb_base = 0;
	/* reset io mode */
	ctx->io_mode = ATOM_IO_MM;
	/* reset divmul */
	ctx->divmul[0] = 0;
	ctx->divmul[1] = 0;
	r = atom_execute_table_locked(ctx, index, params);
	mutex_unlock(&ctx->mutex);
	return r;
}

int atom_execute_table(struct atom_context *ctx, int index, uint32_t *params)
{
	int r;

	mutex_lock(&ctx->scratch_mutex);
	r = atom_execute_table_scratch_unlocked(ctx, index, params);
	mutex_unlock(&ctx->scratch_mutex);
	return r;
}

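/*
 * Index the indirect-IO programs embedded in the BIOS: ctx->iio[id] is
 * filled with the byte offset of the IIO program with that id (the id is
 * selected at run time through the SETPORT/io_mode mechanism), so that
 * atom_iio_execute() can jump straight to it.  atom_iio_len[] gives the
 * length of each IIO instruction and is used to skip over a program.
 */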
static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 };

static void atom_index_iio(struct atom_context *ctx, int base)
{
	ctx->iio = kzalloc(2 * 256, GFP_KERNEL);
	if (!ctx->iio)
		return;
	while (CU8(base) == ATOM_IIO_START) {
		ctx->iio[CU8(base + 1)] = base + 2;
		base += 2;
		while (CU8(base) != ATOM_IIO_END)
			base += atom_iio_len[CU8(base)];
		base += 3;
	}
}

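/*
 * Parse an AtomBIOS image: validate the BIOS, ATI and ATOM ROM magic
 * values, record the offsets of the master command and data tables, index
 * the IIO programs, and print the (not always NUL-terminated) BIOS name
 * string.  Returns NULL on any validation or allocation failure.
 */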
struct atom_context *atom_parse(struct card_info *card, void *bios)
{
	int base;
	struct atom_context *ctx =
	    kzalloc(sizeof(struct atom_context), GFP_KERNEL);
	char *str;
	char name[512];
	int i;

	if (!ctx)
		return NULL;

	ctx->card = card;
	ctx->bios = bios;

	if (CU16(0) != ATOM_BIOS_MAGIC) {
		printk(KERN_INFO "Invalid BIOS magic.\n");
		kfree(ctx);
		return NULL;
	}
	if (strncmp(CSTR(ATOM_ATI_MAGIC_PTR), ATOM_ATI_MAGIC,
		    strlen(ATOM_ATI_MAGIC))) {
		printk(KERN_INFO "Invalid ATI magic.\n");
		kfree(ctx);
		return NULL;
	}

	base = CU16(ATOM_ROM_TABLE_PTR);
	if (strncmp(CSTR(base + ATOM_ROM_MAGIC_PTR), ATOM_ROM_MAGIC,
		    strlen(ATOM_ROM_MAGIC))) {
		printk(KERN_INFO "Invalid ATOM magic.\n");
		kfree(ctx);
		return NULL;
	}

	ctx->cmd_table = CU16(base + ATOM_ROM_CMD_PTR);
	ctx->data_table = CU16(base + ATOM_ROM_DATA_PTR);
	atom_index_iio(ctx, CU16(ctx->data_table + ATOM_DATA_IIO_PTR) + 4);
	if (!ctx->iio) {
		atom_destroy(ctx);
		return NULL;
	}

	str = CSTR(CU16(base + ATOM_ROM_MSG_PTR));
	while (*str && ((*str == '\n') || (*str == '\r')))
		str++;
	/* name string isn't always 0 terminated */
	for (i = 0; i < 511; i++) {
		name[i] = str[i];
		if (name[i] < '.' || name[i] > 'z') {
			name[i] = 0;
			break;
		}
	}
	printk(KERN_INFO "ATOM BIOS: %s\n", name);

	return ctx;
}

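/*
 * Run the ASIC init command table (ATOM_CMD_INIT) with the default engine
 * and memory clocks taken from the firmware info data table, then, on
 * pre-R600 parts, the SPDFANCNTL table if present.  Returns non-zero when
 * the required clocks or the init table are missing, or when execution
 * fails.
 */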
int atom_asic_init(struct atom_context *ctx)
{
	struct radeon_device *rdev = ctx->card->dev->dev_private;
	int hwi = CU16(ctx->data_table + ATOM_DATA_FWI_PTR);
	uint32_t ps[16];
	int ret;

	memset(ps, 0, 64);

	ps[0] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFSCLK_PTR));
	ps[1] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFMCLK_PTR));
	if (!ps[0] || !ps[1])
		return 1;

	if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT))
		return 1;
	ret = atom_execute_table(ctx, ATOM_CMD_INIT, ps);
	if (ret)
		return ret;

	memset(ps, 0, 64);

	if (rdev->family < CHIP_R600) {
		if (CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_SPDFANCNTL))
			atom_execute_table(ctx, ATOM_CMD_SPDFANCNTL, ps);
	}

	return ret;
}

void atom_destroy(struct atom_context *ctx)
{
	kfree(ctx->iio);
	kfree(ctx);
}

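/*
 * Look up the header of a data or command table in the master table and
 * return its size (data tables only), format/content revision and start
 * offset.  Both helpers return false when the master table has no entry for
 * the requested index.
 */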
bool atom_parse_data_header(struct atom_context *ctx, int index,
			    uint16_t *size, uint8_t *frev, uint8_t *crev,
			    uint16_t *data_start)
{
	int offset = index * 2 + 4;
	int idx = CU16(ctx->data_table + offset);
	u16 *mdt = (u16 *)(ctx->bios + ctx->data_table + 4);

	if (!mdt[index])
		return false;

	if (size)
		*size = CU16(idx);
	if (frev)
		*frev = CU8(idx + 2);
	if (crev)
		*crev = CU8(idx + 3);
	*data_start = idx;
	return true;
}

bool atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t *frev,
			   uint8_t *crev)
{
	int offset = index * 2 + 4;
	int idx = CU16(ctx->cmd_table + offset);
	u16 *mct = (u16 *)(ctx->bios + ctx->cmd_table + 4);

	if (!mct[index])
		return false;

	if (frev)
		*frev = CU8(idx + 2);
	if (crev)
		*crev = CU8(idx + 3);
	return true;
}

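/*
 * Size and allocate the scratch area used by the interpreter's FB argument
 * type.  The VRAM_UsageByFirmware data table, when present, specifies how
 * much memory the firmware wants reserved; otherwise a 20 KB default is
 * used.  The buffer itself is a kzalloc()ed system-memory array reached
 * through ctx->scratch in atom_get_src_int()/atom_put_dst().
 */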
int atom_allocate_fb_scratch(struct atom_context *ctx)
{
	int index = GetIndexIntoMasterTable(DATA, VRAM_UsageByFirmware);
	uint16_t data_offset;
	int usage_bytes = 0;
	struct _ATOM_VRAM_USAGE_BY_FIRMWARE *firmware_usage;

	if (atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
		firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset);

		DRM_DEBUG("atom firmware requested %08x %dkb\n",
			  le32_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware),
			  le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb));

		usage_bytes = le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb) * 1024;
	}
	ctx->scratch_size_bytes = 0;
	if (usage_bytes == 0)
		usage_bytes = 20 * 1024;
	/* allocate some scratch memory */
	ctx->scratch = kzalloc(usage_bytes, GFP_KERNEL);
	if (!ctx->scratch)
		return -ENOMEM;
	ctx->scratch_size_bytes = usage_bytes;
	return 0;
}