  1. /*
  2. * Copyright 2008 Advanced Micro Devices, Inc.
  3. *
  4. * Permission is hereby granted, free of charge, to any person obtaining a
  5. * copy of this software and associated documentation files (the "Software"),
  6. * to deal in the Software without restriction, including without limitation
  7. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8. * and/or sell copies of the Software, and to permit persons to whom the
  9. * Software is furnished to do so, subject to the following conditions:
  10. *
  11. * The above copyright notice and this permission notice shall be included in
  12. * all copies or substantial portions of the Software.
  13. *
  14. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  17. * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18. * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19. * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20. * OTHER DEALINGS IN THE SOFTWARE.
  21. *
  22. * Author: Stanislaw Skowronek
  23. */
  24. #include <linux/module.h>
  25. #include <linux/sched.h>
  26. #include <linux/slab.h>
  27. #include <asm/unaligned.h>
  28. #define ATOM_DEBUG
  29. #include "atom.h"
  30. #include "atom-names.h"
  31. #include "atom-bits.h"
  32. #include "amdgpu.h"
/* Condition codes for the JUMP opcode family (see atom_op_jump). */
#define ATOM_COND_ABOVE 0
#define ATOM_COND_ABOVEOREQUAL 1
#define ATOM_COND_ALWAYS 2
#define ATOM_COND_BELOW 3
#define ATOM_COND_BELOWOREQUAL 4
#define ATOM_COND_EQUAL 5
#define ATOM_COND_NOTEQUAL 6
/* IO port selectors for the SET_ATI_PORT opcode (see atom_op_setport). */
#define ATOM_PORT_ATI 0
#define ATOM_PORT_PCI 1
#define ATOM_PORT_SYSIO 2
/* Delay units for the DELAY opcode (see atom_op_delay). */
#define ATOM_UNIT_MICROSEC 0
#define ATOM_UNIT_MILLISEC 1
/* NOTE(review): unused in this chunk — presumably indirect PLL access
 * register indices; confirm against the rest of the file. */
#define PLL_INDEX 2
#define PLL_DATA 3
/* Per-invocation execution state for one AtomBIOS command table. */
typedef struct {
	struct atom_context *ctx;	/* shared interpreter/card context */
	uint32_t *ps, *ws;		/* parameter space and workspace */
	int ps_shift;			/* ps offset handed to called sub-tables */
	uint16_t start;			/* table base; jump targets are relative to it */
	unsigned last_jump;		/* target of the last jump taken (loop watchdog) */
	unsigned long last_jump_jiffies;	/* time that jump was first taken */
	bool abort;			/* set to stop execution (error or timeout) */
} atom_exec_context;
/* Non-zero enables the DEBUG/SDEBUG interpreter tracing below. */
int amdgpu_atom_debug = 0;
static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params);
int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t * params);

/* Per-alignment bit masks selecting which bytes of a 32-bit value an
 * operand addresses: DWORD, WORD0/8/16, BYTE0/8/16/24. */
static uint32_t atom_arg_mask[8] =
	{ 0xFFFFFFFF, 0xFFFF, 0xFFFF00, 0xFFFF0000, 0xFF, 0xFF00, 0xFF0000,
	  0xFF000000 };
/* Right shift that brings the field selected by atom_arg_mask down to bit 0. */
static int atom_arg_shift[8] = { 0, 0, 8, 16, 0, 8, 16, 24 };
static int atom_dst_to_src[8][4] = {
	/* translate destination alignment field to the source alignment encoding */
	{0, 0, 0, 0},
	{1, 2, 3, 0},
	{1, 2, 3, 0},
	{1, 2, 3, 0},
	{4, 5, 6, 7},
	{4, 5, 6, 7},
	{4, 5, 6, 7},
	{4, 5, 6, 7},
};
/* Default destination sub-field for opcodes that imply their alignment. */
static int atom_def_dst[8] = { 0, 0, 1, 2, 0, 1, 2, 3 };
/* Current table-call nesting depth; used only for SDEBUG indentation. */
static int debug_depth = 0;
  76. #ifdef ATOM_DEBUG
  77. static void debug_print_spaces(int n)
  78. {
  79. while (n--)
  80. printk(" ");
  81. }
/* DEBUG: debug printk gated on the amdgpu_atom_debug flag. */
#define DEBUG(...) do if (amdgpu_atom_debug) { printk(KERN_DEBUG __VA_ARGS__); } while (0)
/* SDEBUG: like DEBUG, but indented to the current table nesting depth. */
#define SDEBUG(...) do if (amdgpu_atom_debug) { printk(KERN_DEBUG); debug_print_spaces(debug_depth); printk(__VA_ARGS__); } while (0)
#else
/* ATOM_DEBUG disabled: tracing compiles away entirely. */
#define DEBUG(...) do { } while (0)
#define SDEBUG(...) do { } while (0)
#endif
/*
 * Run one indirect-IO (IIO) micro-program from the BIOS image.
 * @base:  byte offset of the IIO program in the BIOS data.
 * @index: value folded in by MOVE_INDEX opcodes.
 * @data:  value folded in by MOVE_DATA opcodes.
 *
 * The program operates on a single 32-bit accumulator (temp) and returns
 * it on ATOM_IIO_END, or 0 on an unknown opcode.
 */
static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
				 uint32_t index, uint32_t data)
{
	uint32_t temp = 0xCDCDCDCD;	/* poison: visible if never written */

	while (1)
		switch (CU8(base)) {
		case ATOM_IIO_NOP:
			base++;
			break;
		case ATOM_IIO_READ:
			temp = ctx->card->ioreg_read(ctx->card, CU16(base + 1));
			base += 3;
			break;
		case ATOM_IIO_WRITE:
			ctx->card->ioreg_write(ctx->card, CU16(base + 1), temp);
			base += 3;
			break;
		case ATOM_IIO_CLEAR:
			/* clear CU8(base+1) bits starting at bit CU8(base+2) */
			temp &=
			    ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
			      CU8(base + 2));
			base += 3;
			break;
		case ATOM_IIO_SET:
			/* set CU8(base+1) bits starting at bit CU8(base+2) */
			temp |=
			    (0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base + 2);
			base += 3;
			break;
		case ATOM_IIO_MOVE_INDEX:
			/* copy a bit-field of @index into the accumulator */
			temp &=
			    ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
			      CU8(base + 3));
			temp |=
			    ((index >> CU8(base + 2)) &
			     (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base + 3);
			base += 4;
			break;
		case ATOM_IIO_MOVE_DATA:
			/* copy a bit-field of @data into the accumulator */
			temp &=
			    ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
			      CU8(base + 3));
			temp |=
			    ((data >> CU8(base + 2)) &
			     (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base + 3);
			base += 4;
			break;
		case ATOM_IIO_MOVE_ATTR:
			/* copy a bit-field of the saved IO attributes */
			temp &=
			    ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
			      CU8(base + 3));
			temp |=
			    ((ctx->io_attr >> CU8(base + 2)) &
			     (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base + 3);
			base += 4;
			break;
		case ATOM_IIO_END:
			return temp;
		default:
			pr_info("Unknown IIO opcode\n");
			return 0;
		}
}
/*
 * Decode and fetch one source operand at *ptr, advancing *ptr past it.
 * @attr:  operand descriptor — bits 0-2 select the argument type
 *         (REG/PS/WS/ID/FB/IMM/PLL/MC), bits 3-5 the alignment (which
 *         bytes of the 32-bit value are addressed).
 * @saved: if non-NULL, receives the raw 32-bit value before alignment
 *         masking (needed by callers that later write the field back).
 * @print: non-zero to emit DEBUG tracing.
 *
 * Returns the operand value shifted down to bit 0 per the alignment,
 * or 0 on an unsupported IO mode.
 */
static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
				 int *ptr, uint32_t *saved, int print)
{
	uint32_t idx, val = 0xCDCDCDCD, align, arg;
	struct atom_context *gctx = ctx->ctx;

	arg = attr & 7;
	align = (attr >> 3) & 7;
	switch (arg) {
	case ATOM_ARG_REG:
		idx = U16(*ptr);
		(*ptr) += 2;
		if (print)
			DEBUG("REG[0x%04X]", idx);
		idx += gctx->reg_block;	/* registers are relative to the current block */
		switch (gctx->io_mode) {
		case ATOM_IO_MM:
			val = gctx->card->reg_read(gctx->card, idx);
			break;
		case ATOM_IO_PCI:
			pr_info("PCI registers are not implemented\n");
			return 0;
		case ATOM_IO_SYSIO:
			pr_info("SYSIO registers are not implemented\n");
			return 0;
		default:
			/* high bit flags indirect IO via an IIO program */
			if (!(gctx->io_mode & 0x80)) {
				pr_info("Bad IO mode\n");
				return 0;
			}
			if (!gctx->iio[gctx->io_mode & 0x7F]) {
				pr_info("Undefined indirect IO read method %d\n",
					gctx->io_mode & 0x7F);
				return 0;
			}
			val =
			    atom_iio_execute(gctx,
					     gctx->iio[gctx->io_mode & 0x7F],
					     idx, 0);
		}
		break;
	case ATOM_ARG_PS:
		idx = U8(*ptr);
		(*ptr)++;
		/* get_unaligned_le32 avoids unaligned accesses from atombios
		 * tables, noticed on a DEC Alpha. */
		val = get_unaligned_le32((u32 *)&ctx->ps[idx]);
		if (print)
			DEBUG("PS[0x%02X,0x%04X]", idx, val);
		break;
	case ATOM_ARG_WS:
		idx = U8(*ptr);
		(*ptr)++;
		if (print)
			DEBUG("WS[0x%02X]", idx);
		/* some workspace slots alias interpreter state */
		switch (idx) {
		case ATOM_WS_QUOTIENT:
			val = gctx->divmul[0];
			break;
		case ATOM_WS_REMAINDER:
			val = gctx->divmul[1];
			break;
		case ATOM_WS_DATAPTR:
			val = gctx->data_block;
			break;
		case ATOM_WS_SHIFT:
			val = gctx->shift;
			break;
		case ATOM_WS_OR_MASK:
			val = 1 << gctx->shift;
			break;
		case ATOM_WS_AND_MASK:
			val = ~(1 << gctx->shift);
			break;
		case ATOM_WS_FB_WINDOW:
			val = gctx->fb_base;
			break;
		case ATOM_WS_ATTRIBUTES:
			val = gctx->io_attr;
			break;
		case ATOM_WS_REGPTR:
			val = gctx->reg_block;
			break;
		default:
			val = ctx->ws[idx];
		}
		break;
	case ATOM_ARG_ID:
		idx = U16(*ptr);
		(*ptr) += 2;
		if (print) {
			if (gctx->data_block)
				DEBUG("ID[0x%04X+%04X]", idx, gctx->data_block);
			else
				DEBUG("ID[0x%04X]", idx);
		}
		/* data-table access, relative to the current data block */
		val = U32(idx + gctx->data_block);
		break;
	case ATOM_ARG_FB:
		idx = U8(*ptr);
		(*ptr)++;
		/* bounds-check against the scratch (framebuffer) window */
		if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) {
			DRM_ERROR("ATOM: fb read beyond scratch region: %d vs. %d\n",
				  gctx->fb_base + (idx * 4), gctx->scratch_size_bytes);
			val = 0;
		} else
			val = gctx->scratch[(gctx->fb_base / 4) + idx];
		if (print)
			DEBUG("FB[0x%02X]", idx);
		break;
	case ATOM_ARG_IMM:
		/* immediates are already field-sized: return without masking */
		switch (align) {
		case ATOM_SRC_DWORD:
			val = U32(*ptr);
			(*ptr) += 4;
			if (print)
				DEBUG("IMM 0x%08X\n", val);
			return val;
		case ATOM_SRC_WORD0:
		case ATOM_SRC_WORD8:
		case ATOM_SRC_WORD16:
			val = U16(*ptr);
			(*ptr) += 2;
			if (print)
				DEBUG("IMM 0x%04X\n", val);
			return val;
		case ATOM_SRC_BYTE0:
		case ATOM_SRC_BYTE8:
		case ATOM_SRC_BYTE16:
		case ATOM_SRC_BYTE24:
			val = U8(*ptr);
			(*ptr)++;
			if (print)
				DEBUG("IMM 0x%02X\n", val);
			return val;
		}
		return 0;
	case ATOM_ARG_PLL:
		idx = U8(*ptr);
		(*ptr)++;
		if (print)
			DEBUG("PLL[0x%02X]", idx);
		val = gctx->card->pll_read(gctx->card, idx);
		break;
	case ATOM_ARG_MC:
		idx = U8(*ptr);
		(*ptr)++;
		if (print)
			DEBUG("MC[0x%02X]", idx);
		val = gctx->card->mc_read(gctx->card, idx);
		break;
	}
	if (saved)
		*saved = val;
	/* extract the addressed sub-field and normalise it to bit 0 */
	val &= atom_arg_mask[align];
	val >>= atom_arg_shift[align];
	if (print)
		switch (align) {
		case ATOM_SRC_DWORD:
			DEBUG(".[31:0] -> 0x%08X\n", val);
			break;
		case ATOM_SRC_WORD0:
			DEBUG(".[15:0] -> 0x%04X\n", val);
			break;
		case ATOM_SRC_WORD8:
			DEBUG(".[23:8] -> 0x%04X\n", val);
			break;
		case ATOM_SRC_WORD16:
			DEBUG(".[31:16] -> 0x%04X\n", val);
			break;
		case ATOM_SRC_BYTE0:
			DEBUG(".[7:0] -> 0x%02X\n", val);
			break;
		case ATOM_SRC_BYTE8:
			DEBUG(".[15:8] -> 0x%02X\n", val);
			break;
		case ATOM_SRC_BYTE16:
			DEBUG(".[23:16] -> 0x%02X\n", val);
			break;
		case ATOM_SRC_BYTE24:
			DEBUG(".[31:24] -> 0x%02X\n", val);
			break;
		}
	return val;
}
  342. static void atom_skip_src_int(atom_exec_context *ctx, uint8_t attr, int *ptr)
  343. {
  344. uint32_t align = (attr >> 3) & 7, arg = attr & 7;
  345. switch (arg) {
  346. case ATOM_ARG_REG:
  347. case ATOM_ARG_ID:
  348. (*ptr) += 2;
  349. break;
  350. case ATOM_ARG_PLL:
  351. case ATOM_ARG_MC:
  352. case ATOM_ARG_PS:
  353. case ATOM_ARG_WS:
  354. case ATOM_ARG_FB:
  355. (*ptr)++;
  356. break;
  357. case ATOM_ARG_IMM:
  358. switch (align) {
  359. case ATOM_SRC_DWORD:
  360. (*ptr) += 4;
  361. return;
  362. case ATOM_SRC_WORD0:
  363. case ATOM_SRC_WORD8:
  364. case ATOM_SRC_WORD16:
  365. (*ptr) += 2;
  366. return;
  367. case ATOM_SRC_BYTE0:
  368. case ATOM_SRC_BYTE8:
  369. case ATOM_SRC_BYTE16:
  370. case ATOM_SRC_BYTE24:
  371. (*ptr)++;
  372. return;
  373. }
  374. return;
  375. }
  376. }
/* Fetch a source operand with tracing enabled and no saved raw value. */
static uint32_t atom_get_src(atom_exec_context *ctx, uint8_t attr, int *ptr)
{
	return atom_get_src_int(ctx, attr, ptr, NULL, 1);
}
  381. static uint32_t atom_get_src_direct(atom_exec_context *ctx, uint8_t align, int *ptr)
  382. {
  383. uint32_t val = 0xCDCDCDCD;
  384. switch (align) {
  385. case ATOM_SRC_DWORD:
  386. val = U32(*ptr);
  387. (*ptr) += 4;
  388. break;
  389. case ATOM_SRC_WORD0:
  390. case ATOM_SRC_WORD8:
  391. case ATOM_SRC_WORD16:
  392. val = U16(*ptr);
  393. (*ptr) += 2;
  394. break;
  395. case ATOM_SRC_BYTE0:
  396. case ATOM_SRC_BYTE8:
  397. case ATOM_SRC_BYTE16:
  398. case ATOM_SRC_BYTE24:
  399. val = U8(*ptr);
  400. (*ptr)++;
  401. break;
  402. }
  403. return val;
  404. }
/* Fetch a destination operand by translating its alignment encoding to
 * the equivalent source alignment (atom_dst_to_src) and reading it. */
static uint32_t atom_get_dst(atom_exec_context *ctx, int arg, uint8_t attr,
			     int *ptr, uint32_t *saved, int print)
{
	return atom_get_src_int(ctx,
				arg | atom_dst_to_src[(attr >> 3) &
				7][(attr >> 6) & 3] << 3,
				ptr, saved, print);
}
/* Advance *ptr past a destination operand without evaluating it. */
static void atom_skip_dst(atom_exec_context *ctx, int arg, uint8_t attr, int *ptr)
{
	atom_skip_src_int(ctx,
			  arg | atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) &
			  3] << 3, ptr);
}
  419. static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr,
  420. int *ptr, uint32_t val, uint32_t saved)
  421. {
  422. uint32_t align =
  423. atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3], old_val =
  424. val, idx;
  425. struct atom_context *gctx = ctx->ctx;
  426. old_val &= atom_arg_mask[align] >> atom_arg_shift[align];
  427. val <<= atom_arg_shift[align];
  428. val &= atom_arg_mask[align];
  429. saved &= ~atom_arg_mask[align];
  430. val |= saved;
  431. switch (arg) {
  432. case ATOM_ARG_REG:
  433. idx = U16(*ptr);
  434. (*ptr) += 2;
  435. DEBUG("REG[0x%04X]", idx);
  436. idx += gctx->reg_block;
  437. switch (gctx->io_mode) {
  438. case ATOM_IO_MM:
  439. if (idx == 0)
  440. gctx->card->reg_write(gctx->card, idx,
  441. val << 2);
  442. else
  443. gctx->card->reg_write(gctx->card, idx, val);
  444. break;
  445. case ATOM_IO_PCI:
  446. pr_info("PCI registers are not implemented\n");
  447. return;
  448. case ATOM_IO_SYSIO:
  449. pr_info("SYSIO registers are not implemented\n");
  450. return;
  451. default:
  452. if (!(gctx->io_mode & 0x80)) {
  453. pr_info("Bad IO mode\n");
  454. return;
  455. }
  456. if (!gctx->iio[gctx->io_mode & 0xFF]) {
  457. pr_info("Undefined indirect IO write method %d\n",
  458. gctx->io_mode & 0x7F);
  459. return;
  460. }
  461. atom_iio_execute(gctx, gctx->iio[gctx->io_mode & 0xFF],
  462. idx, val);
  463. }
  464. break;
  465. case ATOM_ARG_PS:
  466. idx = U8(*ptr);
  467. (*ptr)++;
  468. DEBUG("PS[0x%02X]", idx);
  469. ctx->ps[idx] = cpu_to_le32(val);
  470. break;
  471. case ATOM_ARG_WS:
  472. idx = U8(*ptr);
  473. (*ptr)++;
  474. DEBUG("WS[0x%02X]", idx);
  475. switch (idx) {
  476. case ATOM_WS_QUOTIENT:
  477. gctx->divmul[0] = val;
  478. break;
  479. case ATOM_WS_REMAINDER:
  480. gctx->divmul[1] = val;
  481. break;
  482. case ATOM_WS_DATAPTR:
  483. gctx->data_block = val;
  484. break;
  485. case ATOM_WS_SHIFT:
  486. gctx->shift = val;
  487. break;
  488. case ATOM_WS_OR_MASK:
  489. case ATOM_WS_AND_MASK:
  490. break;
  491. case ATOM_WS_FB_WINDOW:
  492. gctx->fb_base = val;
  493. break;
  494. case ATOM_WS_ATTRIBUTES:
  495. gctx->io_attr = val;
  496. break;
  497. case ATOM_WS_REGPTR:
  498. gctx->reg_block = val;
  499. break;
  500. default:
  501. ctx->ws[idx] = val;
  502. }
  503. break;
  504. case ATOM_ARG_FB:
  505. idx = U8(*ptr);
  506. (*ptr)++;
  507. if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) {
  508. DRM_ERROR("ATOM: fb write beyond scratch region: %d vs. %d\n",
  509. gctx->fb_base + (idx * 4), gctx->scratch_size_bytes);
  510. } else
  511. gctx->scratch[(gctx->fb_base / 4) + idx] = val;
  512. DEBUG("FB[0x%02X]", idx);
  513. break;
  514. case ATOM_ARG_PLL:
  515. idx = U8(*ptr);
  516. (*ptr)++;
  517. DEBUG("PLL[0x%02X]", idx);
  518. gctx->card->pll_write(gctx->card, idx, val);
  519. break;
  520. case ATOM_ARG_MC:
  521. idx = U8(*ptr);
  522. (*ptr)++;
  523. DEBUG("MC[0x%02X]", idx);
  524. gctx->card->mc_write(gctx->card, idx, val);
  525. return;
  526. }
  527. switch (align) {
  528. case ATOM_SRC_DWORD:
  529. DEBUG(".[31:0] <- 0x%08X\n", old_val);
  530. break;
  531. case ATOM_SRC_WORD0:
  532. DEBUG(".[15:0] <- 0x%04X\n", old_val);
  533. break;
  534. case ATOM_SRC_WORD8:
  535. DEBUG(".[23:8] <- 0x%04X\n", old_val);
  536. break;
  537. case ATOM_SRC_WORD16:
  538. DEBUG(".[31:16] <- 0x%04X\n", old_val);
  539. break;
  540. case ATOM_SRC_BYTE0:
  541. DEBUG(".[7:0] <- 0x%02X\n", old_val);
  542. break;
  543. case ATOM_SRC_BYTE8:
  544. DEBUG(".[15:8] <- 0x%02X\n", old_val);
  545. break;
  546. case ATOM_SRC_BYTE16:
  547. DEBUG(".[23:16] <- 0x%02X\n", old_val);
  548. break;
  549. case ATOM_SRC_BYTE24:
  550. DEBUG(".[31:24] <- 0x%02X\n", old_val);
  551. break;
  552. }
  553. }
  554. static void atom_op_add(atom_exec_context *ctx, int *ptr, int arg)
  555. {
  556. uint8_t attr = U8((*ptr)++);
  557. uint32_t dst, src, saved;
  558. int dptr = *ptr;
  559. SDEBUG(" dst: ");
  560. dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
  561. SDEBUG(" src: ");
  562. src = atom_get_src(ctx, attr, ptr);
  563. dst += src;
  564. SDEBUG(" dst: ");
  565. atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
  566. }
  567. static void atom_op_and(atom_exec_context *ctx, int *ptr, int arg)
  568. {
  569. uint8_t attr = U8((*ptr)++);
  570. uint32_t dst, src, saved;
  571. int dptr = *ptr;
  572. SDEBUG(" dst: ");
  573. dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
  574. SDEBUG(" src: ");
  575. src = atom_get_src(ctx, attr, ptr);
  576. dst &= src;
  577. SDEBUG(" dst: ");
  578. atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
  579. }
  580. static void atom_op_beep(atom_exec_context *ctx, int *ptr, int arg)
  581. {
  582. printk("ATOM BIOS beeped!\n");
  583. }
/* CALL_TABLE opcode: recursively execute another command table.
 * The callee sees the caller's parameter space offset by ps_shift.
 * A zero entry in the command-table directory means the table is absent
 * and the call is skipped; a failing callee aborts the whole run. */
static void atom_op_calltable(atom_exec_context *ctx, int *ptr, int arg)
{
	int idx = U8((*ptr)++);
	int r = 0;

	if (idx < ATOM_TABLE_NAMES_CNT)
		SDEBUG(" table: %d (%s)\n", idx, atom_table_names[idx]);
	else
		SDEBUG(" table: %d\n", idx);
	/* only call if the directory entry is non-zero (table present) */
	if (U16(ctx->ctx->cmd_table + 4 + 2 * idx))
		r = amdgpu_atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift);
	if (r) {
		ctx->abort = true;
	}
}
  598. static void atom_op_clear(atom_exec_context *ctx, int *ptr, int arg)
  599. {
  600. uint8_t attr = U8((*ptr)++);
  601. uint32_t saved;
  602. int dptr = *ptr;
  603. attr &= 0x38;
  604. attr |= atom_def_dst[attr >> 3] << 6;
  605. atom_get_dst(ctx, arg, attr, ptr, &saved, 0);
  606. SDEBUG(" dst: ");
  607. atom_put_dst(ctx, arg, attr, &dptr, 0, saved);
  608. }
  609. static void atom_op_compare(atom_exec_context *ctx, int *ptr, int arg)
  610. {
  611. uint8_t attr = U8((*ptr)++);
  612. uint32_t dst, src;
  613. SDEBUG(" src1: ");
  614. dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
  615. SDEBUG(" src2: ");
  616. src = atom_get_src(ctx, attr, ptr);
  617. ctx->ctx->cs_equal = (dst == src);
  618. ctx->ctx->cs_above = (dst > src);
  619. SDEBUG(" result: %s %s\n", ctx->ctx->cs_equal ? "EQ" : "NE",
  620. ctx->ctx->cs_above ? "GT" : "LE");
  621. }
  622. static void atom_op_delay(atom_exec_context *ctx, int *ptr, int arg)
  623. {
  624. unsigned count = U8((*ptr)++);
  625. SDEBUG(" count: %d\n", count);
  626. if (arg == ATOM_UNIT_MICROSEC)
  627. udelay(count);
  628. else if (!drm_can_sleep())
  629. mdelay(count);
  630. else
  631. msleep(count);
  632. }
  633. static void atom_op_div(atom_exec_context *ctx, int *ptr, int arg)
  634. {
  635. uint8_t attr = U8((*ptr)++);
  636. uint32_t dst, src;
  637. SDEBUG(" src1: ");
  638. dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
  639. SDEBUG(" src2: ");
  640. src = atom_get_src(ctx, attr, ptr);
  641. if (src != 0) {
  642. ctx->ctx->divmul[0] = dst / src;
  643. ctx->ctx->divmul[1] = dst % src;
  644. } else {
  645. ctx->ctx->divmul[0] = 0;
  646. ctx->ctx->divmul[1] = 0;
  647. }
  648. }
/* DIV32 opcode: 64-bit division.  The dividend is built from src1 (low
 * 32 bits) and divmul[1] (high 32 bits, left there by a prior MUL32 or
 * explicit workspace write); quotient goes to divmul[0], with the high
 * half of the do_div result in divmul[1].  Division by zero yields 0. */
static void atom_op_div32(atom_exec_context *ctx, int *ptr, int arg)
{
	uint64_t val64;
	uint8_t attr = U8((*ptr)++);
	uint32_t dst, src;

	SDEBUG(" src1: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
	SDEBUG(" src2: ");
	src = atom_get_src(ctx, attr, ptr);
	if (src != 0) {
		/* compose the 64-bit dividend */
		val64 = dst;
		val64 |= ((uint64_t)ctx->ctx->divmul[1]) << 32;
		do_div(val64, src);	/* val64 becomes the quotient */
		ctx->ctx->divmul[0] = lower_32_bits(val64);
		ctx->ctx->divmul[1] = upper_32_bits(val64);
	} else {
		ctx->ctx->divmul[0] = 0;
		ctx->ctx->divmul[1] = 0;
	}
}
/* EOT (end of table) opcode: the execution loop handles termination, so
 * the handler itself has nothing to do. */
static void atom_op_eot(atom_exec_context *ctx, int *ptr, int arg)
{
	/* functionally, a nop */
}
/* JUMP opcode family: conditionally transfer control within the current
 * table.  @arg selects the condition (ATOM_COND_*) evaluated against the
 * flags set by COMPARE.  A watchdog aborts execution if the same jump
 * target is retaken continuously for more than 10 seconds, breaking
 * infinite loops in buggy BIOS tables. */
static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg)
{
	int execute = 0, target = U16(*ptr);
	unsigned long cjiffies;

	(*ptr) += 2;
	switch (arg) {
	case ATOM_COND_ABOVE:
		execute = ctx->ctx->cs_above;
		break;
	case ATOM_COND_ABOVEOREQUAL:
		execute = ctx->ctx->cs_above || ctx->ctx->cs_equal;
		break;
	case ATOM_COND_ALWAYS:
		execute = 1;
		break;
	case ATOM_COND_BELOW:
		execute = !(ctx->ctx->cs_above || ctx->ctx->cs_equal);
		break;
	case ATOM_COND_BELOWOREQUAL:
		execute = !ctx->ctx->cs_above;
		break;
	case ATOM_COND_EQUAL:
		execute = ctx->ctx->cs_equal;
		break;
	case ATOM_COND_NOTEQUAL:
		execute = !ctx->ctx->cs_equal;
		break;
	}
	if (arg != ATOM_COND_ALWAYS)
		SDEBUG(" taken: %s\n", execute ? "yes" : "no");
	SDEBUG(" target: 0x%04X\n", target);
	if (execute) {
		if (ctx->last_jump == (ctx->start + target)) {
			/* same target as last time: check the watchdog */
			cjiffies = jiffies;
			if (time_after(cjiffies, ctx->last_jump_jiffies)) {
				cjiffies -= ctx->last_jump_jiffies;
				if ((jiffies_to_msecs(cjiffies) > 10000)) {
					DRM_ERROR("atombios stuck in loop for more than 10secs aborting\n");
					ctx->abort = true;
				}
			} else {
				/* jiffies wrap around we will just wait a little longer */
				ctx->last_jump_jiffies = jiffies;
			}
		} else {
			/* new target: restart the watchdog */
			ctx->last_jump = ctx->start + target;
			ctx->last_jump_jiffies = jiffies;
		}
		*ptr = ctx->start + target;	/* targets are table-relative */
	}
}
  724. static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg)
  725. {
  726. uint8_t attr = U8((*ptr)++);
  727. uint32_t dst, mask, src, saved;
  728. int dptr = *ptr;
  729. SDEBUG(" dst: ");
  730. dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
  731. mask = atom_get_src_direct(ctx, ((attr >> 3) & 7), ptr);
  732. SDEBUG(" mask: 0x%08x", mask);
  733. SDEBUG(" src: ");
  734. src = atom_get_src(ctx, attr, ptr);
  735. dst &= mask;
  736. dst |= src;
  737. SDEBUG(" dst: ");
  738. atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
  739. }
/* MOVE opcode: dst = src.  For a full-DWORD destination no bits of the
 * old value survive, so the destination read is skipped entirely and
 * saved is just a poison placeholder. */
static void atom_op_move(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t src, saved;
	int dptr = *ptr;

	if (((attr >> 3) & 7) != ATOM_SRC_DWORD)
		/* partial-field move: capture the surrounding bits */
		atom_get_dst(ctx, arg, attr, ptr, &saved, 0);
	else {
		atom_skip_dst(ctx, arg, attr, ptr);
		saved = 0xCDCDCDCD;
	}
	SDEBUG(" src: ");
	src = atom_get_src(ctx, attr, ptr);
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, src, saved);
}
  756. static void atom_op_mul(atom_exec_context *ctx, int *ptr, int arg)
  757. {
  758. uint8_t attr = U8((*ptr)++);
  759. uint32_t dst, src;
  760. SDEBUG(" src1: ");
  761. dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
  762. SDEBUG(" src2: ");
  763. src = atom_get_src(ctx, attr, ptr);
  764. ctx->ctx->divmul[0] = dst * src;
  765. }
  766. static void atom_op_mul32(atom_exec_context *ctx, int *ptr, int arg)
  767. {
  768. uint64_t val64;
  769. uint8_t attr = U8((*ptr)++);
  770. uint32_t dst, src;
  771. SDEBUG(" src1: ");
  772. dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
  773. SDEBUG(" src2: ");
  774. src = atom_get_src(ctx, attr, ptr);
  775. val64 = (uint64_t)dst * (uint64_t)src;
  776. ctx->ctx->divmul[0] = lower_32_bits(val64);
  777. ctx->ctx->divmul[1] = upper_32_bits(val64);
  778. }
/* NOP opcode. */
static void atom_op_nop(atom_exec_context *ctx, int *ptr, int arg)
{
	/* nothing */
}
  783. static void atom_op_or(atom_exec_context *ctx, int *ptr, int arg)
  784. {
  785. uint8_t attr = U8((*ptr)++);
  786. uint32_t dst, src, saved;
  787. int dptr = *ptr;
  788. SDEBUG(" dst: ");
  789. dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
  790. SDEBUG(" src: ");
  791. src = atom_get_src(ctx, attr, ptr);
  792. dst |= src;
  793. SDEBUG(" dst: ");
  794. atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
  795. }
/* POST_CARD opcode: no POST card hardware here, so only trace the value. */
static void atom_op_postcard(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t val = U8((*ptr)++);

	SDEBUG("POST card output: 0x%02X\n", val);
}
/* REPEAT opcode: not supported by this interpreter. */
static void atom_op_repeat(atom_exec_context *ctx, int *ptr, int arg)
{
	pr_info("unimplemented!\n");
}
/* RESTORE_REG opcode: not supported by this interpreter. */
static void atom_op_restorereg(atom_exec_context *ctx, int *ptr, int arg)
{
	pr_info("unimplemented!\n");
}
/* SAVE_REG opcode: not supported by this interpreter. */
static void atom_op_savereg(atom_exec_context *ctx, int *ptr, int arg)
{
	pr_info("unimplemented!\n");
}
/* SET_DATA_BLOCK opcode: select the data block that ATOM_ARG_ID accesses
 * are relative to.  Index 0 resets it, 255 points it at the current
 * table, anything else is looked up in the data-table directory. */
static void atom_op_setdatablock(atom_exec_context *ctx, int *ptr, int arg)
{
	int idx = U8(*ptr);

	(*ptr)++;
	SDEBUG(" block: %d\n", idx);
	if (!idx)
		ctx->ctx->data_block = 0;
	else if (idx == 255)
		ctx->ctx->data_block = ctx->start;
	else
		ctx->ctx->data_block = U16(ctx->ctx->data_table + 4 + 2 * idx);
	SDEBUG(" base: 0x%04X\n", ctx->ctx->data_block);
}
/* SET_FB_BASE opcode: set the base of the scratch (framebuffer) window
 * used by ATOM_ARG_FB reads and writes. */
static void atom_op_setfbbase(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);

	SDEBUG(" fb_base: ");
	ctx->ctx->fb_base = atom_get_src(ctx, attr, ptr);
}
/* SET_*_PORT opcodes: select the register IO mode used by ATOM_ARG_REG.
 * For the ATI port, port 0 means direct MMIO and any other port selects
 * an indirect IIO program (ATOM_IO_IIO | port). */
static void atom_op_setport(atom_exec_context *ctx, int *ptr, int arg)
{
	int port;

	switch (arg) {
	case ATOM_PORT_ATI:
		port = U16(*ptr);
		if (port < ATOM_IO_NAMES_CNT)
			SDEBUG(" port: %d (%s)\n", port, atom_io_names[port]);
		else
			SDEBUG(" port: %d\n", port);
		if (!port)
			ctx->ctx->io_mode = ATOM_IO_MM;
		else
			ctx->ctx->io_mode = ATOM_IO_IIO | port;
		(*ptr) += 2;
		break;
	case ATOM_PORT_PCI:
		/* unimplemented mode; accesses will log and bail */
		ctx->ctx->io_mode = ATOM_IO_PCI;
		(*ptr)++;
		break;
	case ATOM_PORT_SYSIO:
		/* unimplemented mode; accesses will log and bail */
		ctx->ctx->io_mode = ATOM_IO_SYSIO;
		(*ptr)++;
		break;
	}
}
/* SET_REG_BLOCK opcode: set the base offset added to every ATOM_ARG_REG
 * register index. */
static void atom_op_setregblock(atom_exec_context *ctx, int *ptr, int arg)
{
	ctx->ctx->reg_block = U16(*ptr);
	(*ptr) += 2;
	SDEBUG(" base: 0x%04X\n", ctx->ctx->reg_block);
}
  864. static void atom_op_shift_left(atom_exec_context *ctx, int *ptr, int arg)
  865. {
  866. uint8_t attr = U8((*ptr)++), shift;
  867. uint32_t saved, dst;
  868. int dptr = *ptr;
  869. attr &= 0x38;
  870. attr |= atom_def_dst[attr >> 3] << 6;
  871. SDEBUG(" dst: ");
  872. dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
  873. shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr);
  874. SDEBUG(" shift: %d\n", shift);
  875. dst <<= shift;
  876. SDEBUG(" dst: ");
  877. atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
  878. }
  879. static void atom_op_shift_right(atom_exec_context *ctx, int *ptr, int arg)
  880. {
  881. uint8_t attr = U8((*ptr)++), shift;
  882. uint32_t saved, dst;
  883. int dptr = *ptr;
  884. attr &= 0x38;
  885. attr |= atom_def_dst[attr >> 3] << 6;
  886. SDEBUG(" dst: ");
  887. dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
  888. shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr);
  889. SDEBUG(" shift: %d\n", shift);
  890. dst >>= shift;
  891. SDEBUG(" dst: ");
  892. atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
  893. }
/* SHL: shift-left that operates on the full 32-bit word containing the
 * destination field, then re-extracts the field before writing it back.
 */
static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++), shift;
	uint32_t saved, dst;
	int dptr = *ptr;
	uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
	SDEBUG(" dst: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	/* op needs the full dst value, not the masked/aligned field */
	dst = saved;
	shift = atom_get_src(ctx, attr, ptr);
	SDEBUG(" shift: %d\n", shift);
	dst <<= shift;
	/* re-extract the destination field from the shifted full word */
	dst &= atom_arg_mask[dst_align];
	dst >>= atom_arg_shift[dst_align];
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}
/* SHR: shift-right that operates on the full 32-bit word containing the
 * destination field, then re-extracts the field before writing it back.
 */
static void atom_op_shr(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++), shift;
	uint32_t saved, dst;
	int dptr = *ptr;
	uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
	SDEBUG(" dst: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	/* op needs the full dst value, not the masked/aligned field */
	dst = saved;
	shift = atom_get_src(ctx, attr, ptr);
	SDEBUG(" shift: %d\n", shift);
	dst >>= shift;
	/* re-extract the destination field from the shifted full word */
	dst &= atom_arg_mask[dst_align];
	dst >>= atom_arg_shift[dst_align];
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}
  930. static void atom_op_sub(atom_exec_context *ctx, int *ptr, int arg)
  931. {
  932. uint8_t attr = U8((*ptr)++);
  933. uint32_t dst, src, saved;
  934. int dptr = *ptr;
  935. SDEBUG(" dst: ");
  936. dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
  937. SDEBUG(" src: ");
  938. src = atom_get_src(ctx, attr, ptr);
  939. dst -= src;
  940. SDEBUG(" dst: ");
  941. atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
  942. }
/* SWITCH: compare src against a sequence of immediate cases, each
 * followed by a 16-bit jump target (offset relative to the table start);
 * on match, redirect *ptr to that target. The list ends at ATOM_CASE_END.
 */
static void atom_op_switch(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t src, val, target;
	SDEBUG(" switch: ");
	src = atom_get_src(ctx, attr, ptr);
	while (U16(*ptr) != ATOM_CASE_END)
		if (U8(*ptr) == ATOM_CASE_MAGIC) {
			(*ptr)++;	/* consume the case-marker byte */
			SDEBUG(" case: ");
			/* case value is an immediate with the same size bits as src */
			val =
			    atom_get_src(ctx, (attr & 0x38) | ATOM_ARG_IMM,
					 ptr);
			target = U16(*ptr);
			if (val == src) {
				SDEBUG(" target: %04X\n", target);
				*ptr = ctx->start + target;
				return;
			}
			(*ptr) += 2;	/* no match: skip this case's target */
		} else {
			/* malformed case list: bail out rather than loop forever */
			pr_info("Bad case\n");
			return;
		}
	(*ptr) += 2;	/* step over the ATOM_CASE_END terminator */
}
  969. static void atom_op_test(atom_exec_context *ctx, int *ptr, int arg)
  970. {
  971. uint8_t attr = U8((*ptr)++);
  972. uint32_t dst, src;
  973. SDEBUG(" src1: ");
  974. dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
  975. SDEBUG(" src2: ");
  976. src = atom_get_src(ctx, attr, ptr);
  977. ctx->ctx->cs_equal = ((dst & src) == 0);
  978. SDEBUG(" result: %s\n", ctx->ctx->cs_equal ? "EQ" : "NE");
  979. }
  980. static void atom_op_xor(atom_exec_context *ctx, int *ptr, int arg)
  981. {
  982. uint8_t attr = U8((*ptr)++);
  983. uint32_t dst, src, saved;
  984. int dptr = *ptr;
  985. SDEBUG(" dst: ");
  986. dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
  987. SDEBUG(" src: ");
  988. src = atom_get_src(ctx, attr, ptr);
  989. dst ^= src;
  990. SDEBUG(" dst: ");
  991. atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
  992. }
  993. static void atom_op_debug(atom_exec_context *ctx, int *ptr, int arg)
  994. {
  995. uint8_t val = U8((*ptr)++);
  996. SDEBUG("DEBUG output: 0x%02X\n", val);
  997. }
  998. static void atom_op_processds(atom_exec_context *ctx, int *ptr, int arg)
  999. {
  1000. uint16_t val = U16(*ptr);
  1001. (*ptr) += val + 2;
  1002. SDEBUG("PROCESSDS output: 0x%02X\n", val);
  1003. }
  1004. static struct {
  1005. void (*func) (atom_exec_context *, int *, int);
  1006. int arg;
  1007. } opcode_table[ATOM_OP_CNT] = {
  1008. {
  1009. NULL, 0}, {
  1010. atom_op_move, ATOM_ARG_REG}, {
  1011. atom_op_move, ATOM_ARG_PS}, {
  1012. atom_op_move, ATOM_ARG_WS}, {
  1013. atom_op_move, ATOM_ARG_FB}, {
  1014. atom_op_move, ATOM_ARG_PLL}, {
  1015. atom_op_move, ATOM_ARG_MC}, {
  1016. atom_op_and, ATOM_ARG_REG}, {
  1017. atom_op_and, ATOM_ARG_PS}, {
  1018. atom_op_and, ATOM_ARG_WS}, {
  1019. atom_op_and, ATOM_ARG_FB}, {
  1020. atom_op_and, ATOM_ARG_PLL}, {
  1021. atom_op_and, ATOM_ARG_MC}, {
  1022. atom_op_or, ATOM_ARG_REG}, {
  1023. atom_op_or, ATOM_ARG_PS}, {
  1024. atom_op_or, ATOM_ARG_WS}, {
  1025. atom_op_or, ATOM_ARG_FB}, {
  1026. atom_op_or, ATOM_ARG_PLL}, {
  1027. atom_op_or, ATOM_ARG_MC}, {
  1028. atom_op_shift_left, ATOM_ARG_REG}, {
  1029. atom_op_shift_left, ATOM_ARG_PS}, {
  1030. atom_op_shift_left, ATOM_ARG_WS}, {
  1031. atom_op_shift_left, ATOM_ARG_FB}, {
  1032. atom_op_shift_left, ATOM_ARG_PLL}, {
  1033. atom_op_shift_left, ATOM_ARG_MC}, {
  1034. atom_op_shift_right, ATOM_ARG_REG}, {
  1035. atom_op_shift_right, ATOM_ARG_PS}, {
  1036. atom_op_shift_right, ATOM_ARG_WS}, {
  1037. atom_op_shift_right, ATOM_ARG_FB}, {
  1038. atom_op_shift_right, ATOM_ARG_PLL}, {
  1039. atom_op_shift_right, ATOM_ARG_MC}, {
  1040. atom_op_mul, ATOM_ARG_REG}, {
  1041. atom_op_mul, ATOM_ARG_PS}, {
  1042. atom_op_mul, ATOM_ARG_WS}, {
  1043. atom_op_mul, ATOM_ARG_FB}, {
  1044. atom_op_mul, ATOM_ARG_PLL}, {
  1045. atom_op_mul, ATOM_ARG_MC}, {
  1046. atom_op_div, ATOM_ARG_REG}, {
  1047. atom_op_div, ATOM_ARG_PS}, {
  1048. atom_op_div, ATOM_ARG_WS}, {
  1049. atom_op_div, ATOM_ARG_FB}, {
  1050. atom_op_div, ATOM_ARG_PLL}, {
  1051. atom_op_div, ATOM_ARG_MC}, {
  1052. atom_op_add, ATOM_ARG_REG}, {
  1053. atom_op_add, ATOM_ARG_PS}, {
  1054. atom_op_add, ATOM_ARG_WS}, {
  1055. atom_op_add, ATOM_ARG_FB}, {
  1056. atom_op_add, ATOM_ARG_PLL}, {
  1057. atom_op_add, ATOM_ARG_MC}, {
  1058. atom_op_sub, ATOM_ARG_REG}, {
  1059. atom_op_sub, ATOM_ARG_PS}, {
  1060. atom_op_sub, ATOM_ARG_WS}, {
  1061. atom_op_sub, ATOM_ARG_FB}, {
  1062. atom_op_sub, ATOM_ARG_PLL}, {
  1063. atom_op_sub, ATOM_ARG_MC}, {
  1064. atom_op_setport, ATOM_PORT_ATI}, {
  1065. atom_op_setport, ATOM_PORT_PCI}, {
  1066. atom_op_setport, ATOM_PORT_SYSIO}, {
  1067. atom_op_setregblock, 0}, {
  1068. atom_op_setfbbase, 0}, {
  1069. atom_op_compare, ATOM_ARG_REG}, {
  1070. atom_op_compare, ATOM_ARG_PS}, {
  1071. atom_op_compare, ATOM_ARG_WS}, {
  1072. atom_op_compare, ATOM_ARG_FB}, {
  1073. atom_op_compare, ATOM_ARG_PLL}, {
  1074. atom_op_compare, ATOM_ARG_MC}, {
  1075. atom_op_switch, 0}, {
  1076. atom_op_jump, ATOM_COND_ALWAYS}, {
  1077. atom_op_jump, ATOM_COND_EQUAL}, {
  1078. atom_op_jump, ATOM_COND_BELOW}, {
  1079. atom_op_jump, ATOM_COND_ABOVE}, {
  1080. atom_op_jump, ATOM_COND_BELOWOREQUAL}, {
  1081. atom_op_jump, ATOM_COND_ABOVEOREQUAL}, {
  1082. atom_op_jump, ATOM_COND_NOTEQUAL}, {
  1083. atom_op_test, ATOM_ARG_REG}, {
  1084. atom_op_test, ATOM_ARG_PS}, {
  1085. atom_op_test, ATOM_ARG_WS}, {
  1086. atom_op_test, ATOM_ARG_FB}, {
  1087. atom_op_test, ATOM_ARG_PLL}, {
  1088. atom_op_test, ATOM_ARG_MC}, {
  1089. atom_op_delay, ATOM_UNIT_MILLISEC}, {
  1090. atom_op_delay, ATOM_UNIT_MICROSEC}, {
  1091. atom_op_calltable, 0}, {
  1092. atom_op_repeat, 0}, {
  1093. atom_op_clear, ATOM_ARG_REG}, {
  1094. atom_op_clear, ATOM_ARG_PS}, {
  1095. atom_op_clear, ATOM_ARG_WS}, {
  1096. atom_op_clear, ATOM_ARG_FB}, {
  1097. atom_op_clear, ATOM_ARG_PLL}, {
  1098. atom_op_clear, ATOM_ARG_MC}, {
  1099. atom_op_nop, 0}, {
  1100. atom_op_eot, 0}, {
  1101. atom_op_mask, ATOM_ARG_REG}, {
  1102. atom_op_mask, ATOM_ARG_PS}, {
  1103. atom_op_mask, ATOM_ARG_WS}, {
  1104. atom_op_mask, ATOM_ARG_FB}, {
  1105. atom_op_mask, ATOM_ARG_PLL}, {
  1106. atom_op_mask, ATOM_ARG_MC}, {
  1107. atom_op_postcard, 0}, {
  1108. atom_op_beep, 0}, {
  1109. atom_op_savereg, 0}, {
  1110. atom_op_restorereg, 0}, {
  1111. atom_op_setdatablock, 0}, {
  1112. atom_op_xor, ATOM_ARG_REG}, {
  1113. atom_op_xor, ATOM_ARG_PS}, {
  1114. atom_op_xor, ATOM_ARG_WS}, {
  1115. atom_op_xor, ATOM_ARG_FB}, {
  1116. atom_op_xor, ATOM_ARG_PLL}, {
  1117. atom_op_xor, ATOM_ARG_MC}, {
  1118. atom_op_shl, ATOM_ARG_REG}, {
  1119. atom_op_shl, ATOM_ARG_PS}, {
  1120. atom_op_shl, ATOM_ARG_WS}, {
  1121. atom_op_shl, ATOM_ARG_FB}, {
  1122. atom_op_shl, ATOM_ARG_PLL}, {
  1123. atom_op_shl, ATOM_ARG_MC}, {
  1124. atom_op_shr, ATOM_ARG_REG}, {
  1125. atom_op_shr, ATOM_ARG_PS}, {
  1126. atom_op_shr, ATOM_ARG_WS}, {
  1127. atom_op_shr, ATOM_ARG_FB}, {
  1128. atom_op_shr, ATOM_ARG_PLL}, {
  1129. atom_op_shr, ATOM_ARG_MC}, {
  1130. atom_op_debug, 0}, {
  1131. atom_op_processds, 0}, {
  1132. atom_op_mul32, ATOM_ARG_PS}, {
  1133. atom_op_mul32, ATOM_ARG_WS}, {
  1134. atom_op_div32, ATOM_ARG_PS}, {
  1135. atom_op_div32, ATOM_ARG_WS},
  1136. };
  1137. static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params)
  1138. {
  1139. int base = CU16(ctx->cmd_table + 4 + 2 * index);
  1140. int len, ws, ps, ptr;
  1141. unsigned char op;
  1142. atom_exec_context ectx;
  1143. int ret = 0;
  1144. if (!base)
  1145. return -EINVAL;
  1146. len = CU16(base + ATOM_CT_SIZE_PTR);
  1147. ws = CU8(base + ATOM_CT_WS_PTR);
  1148. ps = CU8(base + ATOM_CT_PS_PTR) & ATOM_CT_PS_MASK;
  1149. ptr = base + ATOM_CT_CODE_PTR;
  1150. SDEBUG(">> execute %04X (len %d, WS %d, PS %d)\n", base, len, ws, ps);
  1151. ectx.ctx = ctx;
  1152. ectx.ps_shift = ps / 4;
  1153. ectx.start = base;
  1154. ectx.ps = params;
  1155. ectx.abort = false;
  1156. ectx.last_jump = 0;
  1157. if (ws)
  1158. ectx.ws = kzalloc(4 * ws, GFP_KERNEL);
  1159. else
  1160. ectx.ws = NULL;
  1161. debug_depth++;
  1162. while (1) {
  1163. op = CU8(ptr++);
  1164. if (op < ATOM_OP_NAMES_CNT)
  1165. SDEBUG("%s @ 0x%04X\n", atom_op_names[op], ptr - 1);
  1166. else
  1167. SDEBUG("[%d] @ 0x%04X\n", op, ptr - 1);
  1168. if (ectx.abort) {
  1169. DRM_ERROR("atombios stuck executing %04X (len %d, WS %d, PS %d) @ 0x%04X\n",
  1170. base, len, ws, ps, ptr - 1);
  1171. ret = -EINVAL;
  1172. goto free;
  1173. }
  1174. if (op < ATOM_OP_CNT && op > 0)
  1175. opcode_table[op].func(&ectx, &ptr,
  1176. opcode_table[op].arg);
  1177. else
  1178. break;
  1179. if (op == ATOM_OP_EOT)
  1180. break;
  1181. }
  1182. debug_depth--;
  1183. SDEBUG("<<\n");
  1184. free:
  1185. if (ws)
  1186. kfree(ectx.ws);
  1187. return ret;
  1188. }
  1189. int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
  1190. {
  1191. int r;
  1192. mutex_lock(&ctx->mutex);
  1193. /* reset data block */
  1194. ctx->data_block = 0;
  1195. /* reset reg block */
  1196. ctx->reg_block = 0;
  1197. /* reset fb window */
  1198. ctx->fb_base = 0;
  1199. /* reset io mode */
  1200. ctx->io_mode = ATOM_IO_MM;
  1201. /* reset divmul */
  1202. ctx->divmul[0] = 0;
  1203. ctx->divmul[1] = 0;
  1204. r = amdgpu_atom_execute_table_locked(ctx, index, params);
  1205. mutex_unlock(&ctx->mutex);
  1206. return r;
  1207. }
/* byte length of each indirect-I/O opcode, indexed by opcode number */
static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 };
/*
 * Scan the indirect-I/O block starting at @base and record, for each IIO
 * port id, the BIOS offset of its program body in ctx->iio.  On allocation
 * failure ctx->iio stays NULL; the caller (amdgpu_atom_parse) checks it.
 */
static void atom_index_iio(struct atom_context *ctx, int base)
{
	/* 2 bytes per entry, 256 entries — assumes ctx->iio holds 256
	 * 16-bit offsets; NOTE(review): confirm against the struct decl */
	ctx->iio = kzalloc(2 * 256, GFP_KERNEL);
	if (!ctx->iio)
		return;
	while (CU8(base) == ATOM_IIO_START) {
		/* program body begins right after the 2-byte header */
		ctx->iio[CU8(base + 1)] = base + 2;
		base += 2;
		/* walk this program's opcodes until its END marker */
		while (CU8(base) != ATOM_IIO_END)
			base += atom_iio_len[CU8(base)];
		base += 3;	/* step over END to the next program */
	}
}
/*
 * Parse a VBIOS image and build an atom_context for table execution.
 *
 * Validates the BIOS, ATI and ATOM magics, locates the command and data
 * tables, and indexes the indirect-I/O programs.  Returns NULL on any
 * validation or allocation failure; on success the caller owns the
 * returned context and must release it with amdgpu_atom_destroy().
 */
struct atom_context *amdgpu_atom_parse(struct card_info *card, void *bios)
{
	int base;
	struct atom_context *ctx =
	    kzalloc(sizeof(struct atom_context), GFP_KERNEL);
	char *str;
	u16 idx;

	if (!ctx)
		return NULL;

	ctx->card = card;
	ctx->bios = bios;

	/* generic BIOS signature at offset 0 */
	if (CU16(0) != ATOM_BIOS_MAGIC) {
		pr_info("Invalid BIOS magic\n");
		kfree(ctx);
		return NULL;
	}

	/* ATI vendor signature */
	if (strncmp
	    (CSTR(ATOM_ATI_MAGIC_PTR), ATOM_ATI_MAGIC,
	     strlen(ATOM_ATI_MAGIC))) {
		pr_info("Invalid ATI magic\n");
		kfree(ctx);
		return NULL;
	}

	/* ATOM ROM header signature, found via the ROM table pointer */
	base = CU16(ATOM_ROM_TABLE_PTR);
	if (strncmp
	    (CSTR(base + ATOM_ROM_MAGIC_PTR), ATOM_ROM_MAGIC,
	     strlen(ATOM_ROM_MAGIC))) {
		pr_info("Invalid ATOM magic\n");
		kfree(ctx);
		return NULL;
	}

	ctx->cmd_table = CU16(base + ATOM_ROM_CMD_PTR);
	ctx->data_table = CU16(base + ATOM_ROM_DATA_PTR);
	atom_index_iio(ctx, CU16(ctx->data_table + ATOM_DATA_IIO_PTR) + 4);
	if (!ctx->iio) {
		/* iio index allocation failed inside atom_index_iio */
		amdgpu_atom_destroy(ctx);
		return NULL;
	}

	/* log the part-number string, falling back to offset 0x80 when the
	 * pointer field is zero */
	idx = CU16(ATOM_ROM_PART_NUMBER_PTR);
	if (idx == 0)
		idx = 0x80;

	str = CSTR(idx);
	if (*str != '\0')
		pr_info("ATOM BIOS: %s\n", str);

	return ctx;
}
  1268. int amdgpu_atom_asic_init(struct atom_context *ctx)
  1269. {
  1270. int hwi = CU16(ctx->data_table + ATOM_DATA_FWI_PTR);
  1271. uint32_t ps[16];
  1272. int ret;
  1273. memset(ps, 0, 64);
  1274. ps[0] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFSCLK_PTR));
  1275. ps[1] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFMCLK_PTR));
  1276. if (!ps[0] || !ps[1])
  1277. return 1;
  1278. if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT))
  1279. return 1;
  1280. ret = amdgpu_atom_execute_table(ctx, ATOM_CMD_INIT, ps);
  1281. if (ret)
  1282. return ret;
  1283. memset(ps, 0, 64);
  1284. return ret;
  1285. }
/* Free a context allocated by amdgpu_atom_parse() and its iio index. */
void amdgpu_atom_destroy(struct atom_context *ctx)
{
	kfree(ctx->iio);
	kfree(ctx);
}
  1291. bool amdgpu_atom_parse_data_header(struct atom_context *ctx, int index,
  1292. uint16_t * size, uint8_t * frev, uint8_t * crev,
  1293. uint16_t * data_start)
  1294. {
  1295. int offset = index * 2 + 4;
  1296. int idx = CU16(ctx->data_table + offset);
  1297. u16 *mdt = (u16 *)(ctx->bios + ctx->data_table + 4);
  1298. if (!mdt[index])
  1299. return false;
  1300. if (size)
  1301. *size = CU16(idx);
  1302. if (frev)
  1303. *frev = CU8(idx + 2);
  1304. if (crev)
  1305. *crev = CU8(idx + 3);
  1306. *data_start = idx;
  1307. return true;
  1308. }
  1309. bool amdgpu_atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t * frev,
  1310. uint8_t * crev)
  1311. {
  1312. int offset = index * 2 + 4;
  1313. int idx = CU16(ctx->cmd_table + offset);
  1314. u16 *mct = (u16 *)(ctx->bios + ctx->cmd_table + 4);
  1315. if (!mct[index])
  1316. return false;
  1317. if (frev)
  1318. *frev = CU8(idx + 2);
  1319. if (crev)
  1320. *crev = CU8(idx + 3);
  1321. return true;
  1322. }