atom.c 34 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416
  1. /*
  2. * Copyright 2008 Advanced Micro Devices, Inc.
  3. *
  4. * Permission is hereby granted, free of charge, to any person obtaining a
  5. * copy of this software and associated documentation files (the "Software"),
  6. * to deal in the Software without restriction, including without limitation
  7. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8. * and/or sell copies of the Software, and to permit persons to whom the
  9. * Software is furnished to do so, subject to the following conditions:
  10. *
  11. * The above copyright notice and this permission notice shall be included in
  12. * all copies or substantial portions of the Software.
  13. *
  14. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  17. * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18. * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19. * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20. * OTHER DEALINGS IN THE SOFTWARE.
  21. *
  22. * Author: Stanislaw Skowronek
  23. */
  24. #include <linux/module.h>
  25. #include <linux/sched.h>
  26. #include <linux/slab.h>
  27. #include <asm/unaligned.h>
  28. #define ATOM_DEBUG
  29. #include "atom.h"
  30. #include "atom-names.h"
  31. #include "atom-bits.h"
  32. #include "radeon.h"
  33. #define ATOM_COND_ABOVE 0
  34. #define ATOM_COND_ABOVEOREQUAL 1
  35. #define ATOM_COND_ALWAYS 2
  36. #define ATOM_COND_BELOW 3
  37. #define ATOM_COND_BELOWOREQUAL 4
  38. #define ATOM_COND_EQUAL 5
  39. #define ATOM_COND_NOTEQUAL 6
  40. #define ATOM_PORT_ATI 0
  41. #define ATOM_PORT_PCI 1
  42. #define ATOM_PORT_SYSIO 2
  43. #define ATOM_UNIT_MICROSEC 0
  44. #define ATOM_UNIT_MILLISEC 1
  45. #define PLL_INDEX 2
  46. #define PLL_DATA 3
/* Per-invocation state of the AtomBIOS command-table interpreter. */
typedef struct {
	struct atom_context *ctx;	/* shared BIOS/interpreter context */
	uint32_t *ps, *ws;		/* parameter space / workspace arrays */
	int ps_shift;			/* ps offset handed to nested CALL_TABLEs */
	uint16_t start;			/* byte offset of this table's bytecode */
	unsigned last_jump;		/* destination of the last taken jump */
	unsigned long last_jump_jiffies;	/* when that jump was first taken (loop watchdog) */
	bool abort;			/* set to abort execution (stuck loop / failed call) */
} atom_exec_context;
/* Module-wide debug switch; enables the DEBUG()/SDEBUG() trace macros. */
int atom_debug = 0;
static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params);
int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params);

/* Bit mask selected by each source alignment code (dword, words, bytes). */
static uint32_t atom_arg_mask[8] =
    { 0xFFFFFFFF, 0xFFFF, 0xFFFF00, 0xFFFF0000, 0xFF, 0xFF00, 0xFF0000,
	0xFF000000 };
/* Right shift that brings the masked field down to bit 0. */
static int atom_arg_shift[8] = { 0, 0, 8, 16, 0, 8, 16, 24 };
static int atom_dst_to_src[8][4] = {
	/* translate destination alignment field to the source alignment encoding */
	{0, 0, 0, 0},
	{1, 2, 3, 0},
	{1, 2, 3, 0},
	{1, 2, 3, 0},
	{4, 5, 6, 7},
	{4, 5, 6, 7},
	{4, 5, 6, 7},
	{4, 5, 6, 7},
};
/* Default destination variant for each alignment code (used by CLEAR/SHIFT). */
static int atom_def_dst[8] = { 0, 0, 1, 2, 0, 1, 2, 3 };
/* Current CALL_TABLE nesting depth, used only to indent SDEBUG output. */
static int debug_depth = 0;
#ifdef ATOM_DEBUG
/* Indent trace output by @n spaces to mirror the call-table nesting depth. */
static void debug_print_spaces(int n)
{
	while (n--)
		printk(" ");
}

/* Trace helpers: compiled in, but no-ops unless atom_debug is set. */
#define DEBUG(...) do if (atom_debug) { printk(KERN_DEBUG __VA_ARGS__); } while (0)
/* Like DEBUG() but first indents according to debug_depth. */
#define SDEBUG(...) do if (atom_debug) { printk(KERN_DEBUG); debug_print_spaces(debug_depth); printk(__VA_ARGS__); } while (0)
#else
#define DEBUG(...) do { } while (0)
#define SDEBUG(...) do { } while (0)
#endif
/*
 * Run one indirect-IO (IIO) micro-program from the BIOS IIO table.
 * @base:  byte offset of the IIO program within the BIOS image
 * @index: value substituted by ATOM_IIO_MOVE_INDEX ops
 * @data:  value substituted by ATOM_IIO_MOVE_DATA ops
 *
 * Returns the accumulator at ATOM_IIO_END (the result of an IIO read),
 * or 0 on an unknown opcode.
 */
static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
				 uint32_t index, uint32_t data)
{
	struct radeon_device *rdev = ctx->card->dev->dev_private;
	uint32_t temp = 0xCDCDCDCD;	/* poison value, visible if never written */

	while (1)
		switch (CU8(base)) {
		case ATOM_IIO_NOP:
			base++;
			break;
		case ATOM_IIO_READ:
			temp = ctx->card->ioreg_read(ctx->card, CU16(base + 1));
			base += 3;
			break;
		case ATOM_IIO_WRITE:
			/* RV515 only: dummy read before the write -- presumably a
			 * hardware workaround, TODO confirm against errata */
			if (rdev->family == CHIP_RV515)
				(void)ctx->card->ioreg_read(ctx->card, CU16(base + 1));
			ctx->card->ioreg_write(ctx->card, CU16(base + 1), temp);
			base += 3;
			break;
		case ATOM_IIO_CLEAR:
			/* clear CU8(base+1) bits starting at bit CU8(base+2) */
			temp &=
			    ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
			      CU8(base + 2));
			base += 3;
			break;
		case ATOM_IIO_SET:
			/* set CU8(base+1) bits starting at bit CU8(base+2) */
			temp |=
			    (0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base +
									2);
			base += 3;
			break;
		case ATOM_IIO_MOVE_INDEX:
			/* copy a bit-field of @index into temp:
			 * width CU8(+1), from bit CU8(+2), to bit CU8(+3) */
			temp &=
			    ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
			      CU8(base + 3));
			temp |=
			    ((index >> CU8(base + 2)) &
			     (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base +
									  3);
			base += 4;
			break;
		case ATOM_IIO_MOVE_DATA:
			/* same bit-field copy, source is @data */
			temp &=
			    ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
			      CU8(base + 3));
			temp |=
			    ((data >> CU8(base + 2)) &
			     (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base +
									  3);
			base += 4;
			break;
		case ATOM_IIO_MOVE_ATTR:
			/* same bit-field copy, source is ctx->io_attr */
			temp &=
			    ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
			      CU8(base + 3));
			temp |=
			    ((ctx->io_attr >> CU8(base + 2)) &
			     (0xFFFFFFFF >> (32 - CU8(base + 1))))
			    << CU8(base + 3);
			base += 4;
			break;
		case ATOM_IIO_END:
			return temp;
		default:
			printk(KERN_INFO "Unknown IIO opcode.\n");
			return 0;
		}
}
/*
 * Decode and fetch one source operand of an atom opcode.
 * @attr:  operand attribute byte; bits 0-2 = argument type (ATOM_ARG_*),
 *         bits 3-5 = alignment (ATOM_SRC_*)
 * @ptr:   in/out instruction pointer, advanced past the operand bytes
 * @saved: if non-NULL, receives the raw 32-bit value before alignment
 *         masking (atom_put_dst() uses it for read-modify-write)
 * @print: non-zero to emit DEBUG trace output
 *
 * Returns the operand value masked and shifted down per the alignment.
 * Immediates return early, before the mask/shift step.
 */
static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
				 int *ptr, uint32_t *saved, int print)
{
	uint32_t idx, val = 0xCDCDCDCD, align, arg;
	struct atom_context *gctx = ctx->ctx;

	arg = attr & 7;
	align = (attr >> 3) & 7;
	switch (arg) {
	case ATOM_ARG_REG:
		/* MMIO/indirect register, 16-bit offset + current reg_block */
		idx = U16(*ptr);
		(*ptr) += 2;
		if (print)
			DEBUG("REG[0x%04X]", idx);
		idx += gctx->reg_block;
		switch (gctx->io_mode) {
		case ATOM_IO_MM:
			val = gctx->card->reg_read(gctx->card, idx);
			break;
		case ATOM_IO_PCI:
			printk(KERN_INFO
			       "PCI registers are not implemented.\n");
			return 0;
		case ATOM_IO_SYSIO:
			printk(KERN_INFO
			       "SYSIO registers are not implemented.\n");
			return 0;
		default:
			/* remaining modes must be indirect IO (flag 0x80) */
			if (!(gctx->io_mode & 0x80)) {
				printk(KERN_INFO "Bad IO mode.\n");
				return 0;
			}
			if (!gctx->iio[gctx->io_mode & 0x7F]) {
				printk(KERN_INFO
				       "Undefined indirect IO read method %d.\n",
				       gctx->io_mode & 0x7F);
				return 0;
			}
			val =
			    atom_iio_execute(gctx,
					     gctx->iio[gctx->io_mode & 0x7F],
					     idx, 0);
		}
		break;
	case ATOM_ARG_PS:
		idx = U8(*ptr);
		(*ptr)++;
		/* get_unaligned_le32 avoids unaligned accesses from atombios
		 * tables, noticed on a DEC Alpha. */
		val = get_unaligned_le32((u32 *)&ctx->ps[idx]);
		if (print)
			DEBUG("PS[0x%02X,0x%04X]", idx, val);
		break;
	case ATOM_ARG_WS:
		/* workspace slot; high indices are interpreter "registers" */
		idx = U8(*ptr);
		(*ptr)++;
		if (print)
			DEBUG("WS[0x%02X]", idx);
		switch (idx) {
		case ATOM_WS_QUOTIENT:
			val = gctx->divmul[0];
			break;
		case ATOM_WS_REMAINDER:
			val = gctx->divmul[1];
			break;
		case ATOM_WS_DATAPTR:
			val = gctx->data_block;
			break;
		case ATOM_WS_SHIFT:
			val = gctx->shift;
			break;
		case ATOM_WS_OR_MASK:
			val = 1 << gctx->shift;
			break;
		case ATOM_WS_AND_MASK:
			val = ~(1 << gctx->shift);
			break;
		case ATOM_WS_FB_WINDOW:
			val = gctx->fb_base;
			break;
		case ATOM_WS_ATTRIBUTES:
			val = gctx->io_attr;
			break;
		case ATOM_WS_REGPTR:
			val = gctx->reg_block;
			break;
		default:
			val = ctx->ws[idx];
		}
		break;
	case ATOM_ARG_ID:
		/* dword in the data table, relative to the current data block */
		idx = U16(*ptr);
		(*ptr) += 2;
		if (print) {
			if (gctx->data_block)
				DEBUG("ID[0x%04X+%04X]", idx, gctx->data_block);
			else
				DEBUG("ID[0x%04X]", idx);
		}
		val = U32(idx + gctx->data_block);
		break;
	case ATOM_ARG_FB:
		/* scratch memory, windowed by fb_base; bounds-checked */
		idx = U8(*ptr);
		(*ptr)++;
		if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) {
			DRM_ERROR("ATOM: fb read beyond scratch region: %d vs. %d\n",
				  gctx->fb_base + (idx * 4), gctx->scratch_size_bytes);
			val = 0;
		} else
			val = gctx->scratch[(gctx->fb_base / 4) + idx];
		if (print)
			DEBUG("FB[0x%02X]", idx);
		break;
	case ATOM_ARG_IMM:
		/* inline immediate; size follows the alignment, and the
		 * value is returned as-is (no mask/shift step) */
		switch (align) {
		case ATOM_SRC_DWORD:
			val = U32(*ptr);
			(*ptr) += 4;
			if (print)
				DEBUG("IMM 0x%08X\n", val);
			return val;
		case ATOM_SRC_WORD0:
		case ATOM_SRC_WORD8:
		case ATOM_SRC_WORD16:
			val = U16(*ptr);
			(*ptr) += 2;
			if (print)
				DEBUG("IMM 0x%04X\n", val);
			return val;
		case ATOM_SRC_BYTE0:
		case ATOM_SRC_BYTE8:
		case ATOM_SRC_BYTE16:
		case ATOM_SRC_BYTE24:
			val = U8(*ptr);
			(*ptr)++;
			if (print)
				DEBUG("IMM 0x%02X\n", val);
			return val;
		}
		return 0;
	case ATOM_ARG_PLL:
		idx = U8(*ptr);
		(*ptr)++;
		if (print)
			DEBUG("PLL[0x%02X]", idx);
		val = gctx->card->pll_read(gctx->card, idx);
		break;
	case ATOM_ARG_MC:
		idx = U8(*ptr);
		(*ptr)++;
		if (print)
			DEBUG("MC[0x%02X]", idx);
		val = gctx->card->mc_read(gctx->card, idx);
		break;
	}
	if (saved)
		*saved = val;
	/* extract the requested byte/word field from the raw value */
	val &= atom_arg_mask[align];
	val >>= atom_arg_shift[align];
	if (print)
		switch (align) {
		case ATOM_SRC_DWORD:
			DEBUG(".[31:0] -> 0x%08X\n", val);
			break;
		case ATOM_SRC_WORD0:
			DEBUG(".[15:0] -> 0x%04X\n", val);
			break;
		case ATOM_SRC_WORD8:
			DEBUG(".[23:8] -> 0x%04X\n", val);
			break;
		case ATOM_SRC_WORD16:
			DEBUG(".[31:16] -> 0x%04X\n", val);
			break;
		case ATOM_SRC_BYTE0:
			DEBUG(".[7:0] -> 0x%02X\n", val);
			break;
		case ATOM_SRC_BYTE8:
			DEBUG(".[15:8] -> 0x%02X\n", val);
			break;
		case ATOM_SRC_BYTE16:
			DEBUG(".[23:16] -> 0x%02X\n", val);
			break;
		case ATOM_SRC_BYTE24:
			DEBUG(".[31:24] -> 0x%02X\n", val);
			break;
		}
	return val;
}
  348. static void atom_skip_src_int(atom_exec_context *ctx, uint8_t attr, int *ptr)
  349. {
  350. uint32_t align = (attr >> 3) & 7, arg = attr & 7;
  351. switch (arg) {
  352. case ATOM_ARG_REG:
  353. case ATOM_ARG_ID:
  354. (*ptr) += 2;
  355. break;
  356. case ATOM_ARG_PLL:
  357. case ATOM_ARG_MC:
  358. case ATOM_ARG_PS:
  359. case ATOM_ARG_WS:
  360. case ATOM_ARG_FB:
  361. (*ptr)++;
  362. break;
  363. case ATOM_ARG_IMM:
  364. switch (align) {
  365. case ATOM_SRC_DWORD:
  366. (*ptr) += 4;
  367. return;
  368. case ATOM_SRC_WORD0:
  369. case ATOM_SRC_WORD8:
  370. case ATOM_SRC_WORD16:
  371. (*ptr) += 2;
  372. return;
  373. case ATOM_SRC_BYTE0:
  374. case ATOM_SRC_BYTE8:
  375. case ATOM_SRC_BYTE16:
  376. case ATOM_SRC_BYTE24:
  377. (*ptr)++;
  378. return;
  379. }
  380. return;
  381. }
  382. }
  383. static uint32_t atom_get_src(atom_exec_context *ctx, uint8_t attr, int *ptr)
  384. {
  385. return atom_get_src_int(ctx, attr, ptr, NULL, 1);
  386. }
  387. static uint32_t atom_get_src_direct(atom_exec_context *ctx, uint8_t align, int *ptr)
  388. {
  389. uint32_t val = 0xCDCDCDCD;
  390. switch (align) {
  391. case ATOM_SRC_DWORD:
  392. val = U32(*ptr);
  393. (*ptr) += 4;
  394. break;
  395. case ATOM_SRC_WORD0:
  396. case ATOM_SRC_WORD8:
  397. case ATOM_SRC_WORD16:
  398. val = U16(*ptr);
  399. (*ptr) += 2;
  400. break;
  401. case ATOM_SRC_BYTE0:
  402. case ATOM_SRC_BYTE8:
  403. case ATOM_SRC_BYTE16:
  404. case ATOM_SRC_BYTE24:
  405. val = U8(*ptr);
  406. (*ptr)++;
  407. break;
  408. }
  409. return val;
  410. }
  411. static uint32_t atom_get_dst(atom_exec_context *ctx, int arg, uint8_t attr,
  412. int *ptr, uint32_t *saved, int print)
  413. {
  414. return atom_get_src_int(ctx,
  415. arg | atom_dst_to_src[(attr >> 3) &
  416. 7][(attr >> 6) & 3] << 3,
  417. ptr, saved, print);
  418. }
  419. static void atom_skip_dst(atom_exec_context *ctx, int arg, uint8_t attr, int *ptr)
  420. {
  421. atom_skip_src_int(ctx,
  422. arg | atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) &
  423. 3] << 3, ptr);
  424. }
  425. static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr,
  426. int *ptr, uint32_t val, uint32_t saved)
  427. {
  428. uint32_t align =
  429. atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3], old_val =
  430. val, idx;
  431. struct atom_context *gctx = ctx->ctx;
  432. old_val &= atom_arg_mask[align] >> atom_arg_shift[align];
  433. val <<= atom_arg_shift[align];
  434. val &= atom_arg_mask[align];
  435. saved &= ~atom_arg_mask[align];
  436. val |= saved;
  437. switch (arg) {
  438. case ATOM_ARG_REG:
  439. idx = U16(*ptr);
  440. (*ptr) += 2;
  441. DEBUG("REG[0x%04X]", idx);
  442. idx += gctx->reg_block;
  443. switch (gctx->io_mode) {
  444. case ATOM_IO_MM:
  445. if (idx == 0)
  446. gctx->card->reg_write(gctx->card, idx,
  447. val << 2);
  448. else
  449. gctx->card->reg_write(gctx->card, idx, val);
  450. break;
  451. case ATOM_IO_PCI:
  452. printk(KERN_INFO
  453. "PCI registers are not implemented.\n");
  454. return;
  455. case ATOM_IO_SYSIO:
  456. printk(KERN_INFO
  457. "SYSIO registers are not implemented.\n");
  458. return;
  459. default:
  460. if (!(gctx->io_mode & 0x80)) {
  461. printk(KERN_INFO "Bad IO mode.\n");
  462. return;
  463. }
  464. if (!gctx->iio[gctx->io_mode & 0xFF]) {
  465. printk(KERN_INFO
  466. "Undefined indirect IO write method %d.\n",
  467. gctx->io_mode & 0x7F);
  468. return;
  469. }
  470. atom_iio_execute(gctx, gctx->iio[gctx->io_mode & 0xFF],
  471. idx, val);
  472. }
  473. break;
  474. case ATOM_ARG_PS:
  475. idx = U8(*ptr);
  476. (*ptr)++;
  477. DEBUG("PS[0x%02X]", idx);
  478. ctx->ps[idx] = cpu_to_le32(val);
  479. break;
  480. case ATOM_ARG_WS:
  481. idx = U8(*ptr);
  482. (*ptr)++;
  483. DEBUG("WS[0x%02X]", idx);
  484. switch (idx) {
  485. case ATOM_WS_QUOTIENT:
  486. gctx->divmul[0] = val;
  487. break;
  488. case ATOM_WS_REMAINDER:
  489. gctx->divmul[1] = val;
  490. break;
  491. case ATOM_WS_DATAPTR:
  492. gctx->data_block = val;
  493. break;
  494. case ATOM_WS_SHIFT:
  495. gctx->shift = val;
  496. break;
  497. case ATOM_WS_OR_MASK:
  498. case ATOM_WS_AND_MASK:
  499. break;
  500. case ATOM_WS_FB_WINDOW:
  501. gctx->fb_base = val;
  502. break;
  503. case ATOM_WS_ATTRIBUTES:
  504. gctx->io_attr = val;
  505. break;
  506. case ATOM_WS_REGPTR:
  507. gctx->reg_block = val;
  508. break;
  509. default:
  510. ctx->ws[idx] = val;
  511. }
  512. break;
  513. case ATOM_ARG_FB:
  514. idx = U8(*ptr);
  515. (*ptr)++;
  516. if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) {
  517. DRM_ERROR("ATOM: fb write beyond scratch region: %d vs. %d\n",
  518. gctx->fb_base + (idx * 4), gctx->scratch_size_bytes);
  519. } else
  520. gctx->scratch[(gctx->fb_base / 4) + idx] = val;
  521. DEBUG("FB[0x%02X]", idx);
  522. break;
  523. case ATOM_ARG_PLL:
  524. idx = U8(*ptr);
  525. (*ptr)++;
  526. DEBUG("PLL[0x%02X]", idx);
  527. gctx->card->pll_write(gctx->card, idx, val);
  528. break;
  529. case ATOM_ARG_MC:
  530. idx = U8(*ptr);
  531. (*ptr)++;
  532. DEBUG("MC[0x%02X]", idx);
  533. gctx->card->mc_write(gctx->card, idx, val);
  534. return;
  535. }
  536. switch (align) {
  537. case ATOM_SRC_DWORD:
  538. DEBUG(".[31:0] <- 0x%08X\n", old_val);
  539. break;
  540. case ATOM_SRC_WORD0:
  541. DEBUG(".[15:0] <- 0x%04X\n", old_val);
  542. break;
  543. case ATOM_SRC_WORD8:
  544. DEBUG(".[23:8] <- 0x%04X\n", old_val);
  545. break;
  546. case ATOM_SRC_WORD16:
  547. DEBUG(".[31:16] <- 0x%04X\n", old_val);
  548. break;
  549. case ATOM_SRC_BYTE0:
  550. DEBUG(".[7:0] <- 0x%02X\n", old_val);
  551. break;
  552. case ATOM_SRC_BYTE8:
  553. DEBUG(".[15:8] <- 0x%02X\n", old_val);
  554. break;
  555. case ATOM_SRC_BYTE16:
  556. DEBUG(".[23:16] <- 0x%02X\n", old_val);
  557. break;
  558. case ATOM_SRC_BYTE24:
  559. DEBUG(".[31:24] <- 0x%02X\n", old_val);
  560. break;
  561. }
  562. }
  563. static void atom_op_add(atom_exec_context *ctx, int *ptr, int arg)
  564. {
  565. uint8_t attr = U8((*ptr)++);
  566. uint32_t dst, src, saved;
  567. int dptr = *ptr;
  568. SDEBUG(" dst: ");
  569. dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
  570. SDEBUG(" src: ");
  571. src = atom_get_src(ctx, attr, ptr);
  572. dst += src;
  573. SDEBUG(" dst: ");
  574. atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
  575. }
  576. static void atom_op_and(atom_exec_context *ctx, int *ptr, int arg)
  577. {
  578. uint8_t attr = U8((*ptr)++);
  579. uint32_t dst, src, saved;
  580. int dptr = *ptr;
  581. SDEBUG(" dst: ");
  582. dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
  583. SDEBUG(" src: ");
  584. src = atom_get_src(ctx, attr, ptr);
  585. dst &= src;
  586. SDEBUG(" dst: ");
  587. atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
  588. }
  589. static void atom_op_beep(atom_exec_context *ctx, int *ptr, int arg)
  590. {
  591. printk("ATOM BIOS beeped!\n");
  592. }
/*
 * CALL_TABLE opcode: execute command table @idx, passing our parameter
 * space offset by ps_shift.  A child failure is propagated by setting
 * ctx->abort.
 */
static void atom_op_calltable(atom_exec_context *ctx, int *ptr, int arg)
{
	int idx = U8((*ptr)++);
	int r = 0;

	if (idx < ATOM_TABLE_NAMES_CNT)
		SDEBUG(" table: %d (%s)\n", idx, atom_table_names[idx]);
	else
		SDEBUG(" table: %d\n", idx);
	/* a zero offset in the command table means "table not present" */
	if (U16(ctx->ctx->cmd_table + 4 + 2 * idx))
		r = atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift);
	if (r) {
		ctx->abort = true;
	}
}
  607. static void atom_op_clear(atom_exec_context *ctx, int *ptr, int arg)
  608. {
  609. uint8_t attr = U8((*ptr)++);
  610. uint32_t saved;
  611. int dptr = *ptr;
  612. attr &= 0x38;
  613. attr |= atom_def_dst[attr >> 3] << 6;
  614. atom_get_dst(ctx, arg, attr, ptr, &saved, 0);
  615. SDEBUG(" dst: ");
  616. atom_put_dst(ctx, arg, attr, &dptr, 0, saved);
  617. }
  618. static void atom_op_compare(atom_exec_context *ctx, int *ptr, int arg)
  619. {
  620. uint8_t attr = U8((*ptr)++);
  621. uint32_t dst, src;
  622. SDEBUG(" src1: ");
  623. dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
  624. SDEBUG(" src2: ");
  625. src = atom_get_src(ctx, attr, ptr);
  626. ctx->ctx->cs_equal = (dst == src);
  627. ctx->ctx->cs_above = (dst > src);
  628. SDEBUG(" result: %s %s\n", ctx->ctx->cs_equal ? "EQ" : "NE",
  629. ctx->ctx->cs_above ? "GT" : "LE");
  630. }
  631. static void atom_op_delay(atom_exec_context *ctx, int *ptr, int arg)
  632. {
  633. unsigned count = U8((*ptr)++);
  634. SDEBUG(" count: %d\n", count);
  635. if (arg == ATOM_UNIT_MICROSEC)
  636. udelay(count);
  637. else if (!drm_can_sleep())
  638. mdelay(count);
  639. else
  640. msleep(count);
  641. }
  642. static void atom_op_div(atom_exec_context *ctx, int *ptr, int arg)
  643. {
  644. uint8_t attr = U8((*ptr)++);
  645. uint32_t dst, src;
  646. SDEBUG(" src1: ");
  647. dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
  648. SDEBUG(" src2: ");
  649. src = atom_get_src(ctx, attr, ptr);
  650. if (src != 0) {
  651. ctx->ctx->divmul[0] = dst / src;
  652. ctx->ctx->divmul[1] = dst % src;
  653. } else {
  654. ctx->ctx->divmul[0] = 0;
  655. ctx->ctx->divmul[1] = 0;
  656. }
  657. }
/* EOT (end of table) opcode. */
static void atom_op_eot(atom_exec_context *ctx, int *ptr, int arg)
{
	/* functionally, a nop */
}
/*
 * JUMP opcode family: conditionally branch to @target (offset relative to
 * the table start) based on the flags left by COMPARE/TEST.  A watchdog
 * aborts the table if the same jump target is retaken for over 5 seconds,
 * which catches BIOS busy-wait loops that never terminate.
 */
static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg)
{
	int execute = 0, target = U16(*ptr);
	unsigned long cjiffies;

	(*ptr) += 2;
	switch (arg) {
	case ATOM_COND_ABOVE:
		execute = ctx->ctx->cs_above;
		break;
	case ATOM_COND_ABOVEOREQUAL:
		execute = ctx->ctx->cs_above || ctx->ctx->cs_equal;
		break;
	case ATOM_COND_ALWAYS:
		execute = 1;
		break;
	case ATOM_COND_BELOW:
		execute = !(ctx->ctx->cs_above || ctx->ctx->cs_equal);
		break;
	case ATOM_COND_BELOWOREQUAL:
		execute = !ctx->ctx->cs_above;
		break;
	case ATOM_COND_EQUAL:
		execute = ctx->ctx->cs_equal;
		break;
	case ATOM_COND_NOTEQUAL:
		execute = !ctx->ctx->cs_equal;
		break;
	}
	if (arg != ATOM_COND_ALWAYS)
		SDEBUG(" taken: %s\n", execute ? "yes" : "no");
	SDEBUG(" target: 0x%04X\n", target);
	if (execute) {
		if (ctx->last_jump == (ctx->start + target)) {
			/* same target as last time: check the loop watchdog */
			cjiffies = jiffies;
			if (time_after(cjiffies, ctx->last_jump_jiffies)) {
				cjiffies -= ctx->last_jump_jiffies;
				if ((jiffies_to_msecs(cjiffies) > 5000)) {
					DRM_ERROR("atombios stuck in loop for more than 5secs aborting\n");
					ctx->abort = true;
				}
			} else {
				/* jiffies wrap around we will just wait a little longer */
				ctx->last_jump_jiffies = jiffies;
			}
		} else {
			/* new target: (re)start the watchdog clock */
			ctx->last_jump = ctx->start + target;
			ctx->last_jump_jiffies = jiffies;
		}
		*ptr = ctx->start + target;
	}
}
  713. static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg)
  714. {
  715. uint8_t attr = U8((*ptr)++);
  716. uint32_t dst, mask, src, saved;
  717. int dptr = *ptr;
  718. SDEBUG(" dst: ");
  719. dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
  720. mask = atom_get_src_direct(ctx, ((attr >> 3) & 7), ptr);
  721. SDEBUG(" mask: 0x%08x", mask);
  722. SDEBUG(" src: ");
  723. src = atom_get_src(ctx, attr, ptr);
  724. dst &= mask;
  725. dst |= src;
  726. SDEBUG(" dst: ");
  727. atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
  728. }
  729. static void atom_op_move(atom_exec_context *ctx, int *ptr, int arg)
  730. {
  731. uint8_t attr = U8((*ptr)++);
  732. uint32_t src, saved;
  733. int dptr = *ptr;
  734. if (((attr >> 3) & 7) != ATOM_SRC_DWORD)
  735. atom_get_dst(ctx, arg, attr, ptr, &saved, 0);
  736. else {
  737. atom_skip_dst(ctx, arg, attr, ptr);
  738. saved = 0xCDCDCDCD;
  739. }
  740. SDEBUG(" src: ");
  741. src = atom_get_src(ctx, attr, ptr);
  742. SDEBUG(" dst: ");
  743. atom_put_dst(ctx, arg, attr, &dptr, src, saved);
  744. }
  745. static void atom_op_mul(atom_exec_context *ctx, int *ptr, int arg)
  746. {
  747. uint8_t attr = U8((*ptr)++);
  748. uint32_t dst, src;
  749. SDEBUG(" src1: ");
  750. dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
  751. SDEBUG(" src2: ");
  752. src = atom_get_src(ctx, attr, ptr);
  753. ctx->ctx->divmul[0] = dst * src;
  754. }
/* NOP opcode. */
static void atom_op_nop(atom_exec_context *ctx, int *ptr, int arg)
{
	/* nothing */
}
  759. static void atom_op_or(atom_exec_context *ctx, int *ptr, int arg)
  760. {
  761. uint8_t attr = U8((*ptr)++);
  762. uint32_t dst, src, saved;
  763. int dptr = *ptr;
  764. SDEBUG(" dst: ");
  765. dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
  766. SDEBUG(" src: ");
  767. src = atom_get_src(ctx, attr, ptr);
  768. dst |= src;
  769. SDEBUG(" dst: ");
  770. atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
  771. }
  772. static void atom_op_postcard(atom_exec_context *ctx, int *ptr, int arg)
  773. {
  774. uint8_t val = U8((*ptr)++);
  775. SDEBUG("POST card output: 0x%02X\n", val);
  776. }
/* REPEAT opcode: not implemented; logs and continues. */
static void atom_op_repeat(atom_exec_context *ctx, int *ptr, int arg)
{
	printk(KERN_INFO "unimplemented!\n");
}
/* RESTORE_REG opcode: not implemented; logs and continues. */
static void atom_op_restorereg(atom_exec_context *ctx, int *ptr, int arg)
{
	printk(KERN_INFO "unimplemented!\n");
}
/* SAVE_REG opcode: not implemented; logs and continues. */
static void atom_op_savereg(atom_exec_context *ctx, int *ptr, int arg)
{
	printk(KERN_INFO "unimplemented!\n");
}
  789. static void atom_op_setdatablock(atom_exec_context *ctx, int *ptr, int arg)
  790. {
  791. int idx = U8(*ptr);
  792. (*ptr)++;
  793. SDEBUG(" block: %d\n", idx);
  794. if (!idx)
  795. ctx->ctx->data_block = 0;
  796. else if (idx == 255)
  797. ctx->ctx->data_block = ctx->start;
  798. else
  799. ctx->ctx->data_block = U16(ctx->ctx->data_table + 4 + 2 * idx);
  800. SDEBUG(" base: 0x%04X\n", ctx->ctx->data_block);
  801. }
  802. static void atom_op_setfbbase(atom_exec_context *ctx, int *ptr, int arg)
  803. {
  804. uint8_t attr = U8((*ptr)++);
  805. SDEBUG(" fb_base: ");
  806. ctx->ctx->fb_base = atom_get_src(ctx, attr, ptr);
  807. }
  808. static void atom_op_setport(atom_exec_context *ctx, int *ptr, int arg)
  809. {
  810. int port;
  811. switch (arg) {
  812. case ATOM_PORT_ATI:
  813. port = U16(*ptr);
  814. if (port < ATOM_IO_NAMES_CNT)
  815. SDEBUG(" port: %d (%s)\n", port, atom_io_names[port]);
  816. else
  817. SDEBUG(" port: %d\n", port);
  818. if (!port)
  819. ctx->ctx->io_mode = ATOM_IO_MM;
  820. else
  821. ctx->ctx->io_mode = ATOM_IO_IIO | port;
  822. (*ptr) += 2;
  823. break;
  824. case ATOM_PORT_PCI:
  825. ctx->ctx->io_mode = ATOM_IO_PCI;
  826. (*ptr)++;
  827. break;
  828. case ATOM_PORT_SYSIO:
  829. ctx->ctx->io_mode = ATOM_IO_SYSIO;
  830. (*ptr)++;
  831. break;
  832. }
  833. }
  834. static void atom_op_setregblock(atom_exec_context *ctx, int *ptr, int arg)
  835. {
  836. ctx->ctx->reg_block = U16(*ptr);
  837. (*ptr) += 2;
  838. SDEBUG(" base: 0x%04X\n", ctx->ctx->reg_block);
  839. }
  840. static void atom_op_shift_left(atom_exec_context *ctx, int *ptr, int arg)
  841. {
  842. uint8_t attr = U8((*ptr)++), shift;
  843. uint32_t saved, dst;
  844. int dptr = *ptr;
  845. attr &= 0x38;
  846. attr |= atom_def_dst[attr >> 3] << 6;
  847. SDEBUG(" dst: ");
  848. dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
  849. shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr);
  850. SDEBUG(" shift: %d\n", shift);
  851. dst <<= shift;
  852. SDEBUG(" dst: ");
  853. atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
  854. }
  855. static void atom_op_shift_right(atom_exec_context *ctx, int *ptr, int arg)
  856. {
  857. uint8_t attr = U8((*ptr)++), shift;
  858. uint32_t saved, dst;
  859. int dptr = *ptr;
  860. attr &= 0x38;
  861. attr |= atom_def_dst[attr >> 3] << 6;
  862. SDEBUG(" dst: ");
  863. dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
  864. shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr);
  865. SDEBUG(" shift: %d\n", shift);
  866. dst >>= shift;
  867. SDEBUG(" dst: ");
  868. atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
  869. }
/*
 * SHL opcode: shift left by a source-supplied count.  Unlike SHIFT_LEFT,
 * this shifts the full saved dword and then re-extracts the destination's
 * alignment field.
 */
static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++), shift;
	uint32_t saved, dst;
	int dptr = *ptr;
	uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];

	SDEBUG(" dst: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	/* the op needs the full, unmasked dst value */
	dst = saved;
	shift = atom_get_src(ctx, attr, ptr);
	SDEBUG(" shift: %d\n", shift);
	dst <<= shift;
	/* reduce back to the destination's field */
	dst &= atom_arg_mask[dst_align];
	dst >>= atom_arg_shift[dst_align];
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}
/*
 * SHR opcode: shift right by a source-supplied count.  Like SHL, this
 * operates on the full saved dword and then re-extracts the destination's
 * alignment field.
 */
static void atom_op_shr(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++), shift;
	uint32_t saved, dst;
	int dptr = *ptr;
	uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];

	SDEBUG(" dst: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	/* the op needs the full, unmasked dst value */
	dst = saved;
	shift = atom_get_src(ctx, attr, ptr);
	SDEBUG(" shift: %d\n", shift);
	dst >>= shift;
	/* reduce back to the destination's field */
	dst &= atom_arg_mask[dst_align];
	dst >>= atom_arg_shift[dst_align];
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}
  906. static void atom_op_sub(atom_exec_context *ctx, int *ptr, int arg)
  907. {
  908. uint8_t attr = U8((*ptr)++);
  909. uint32_t dst, src, saved;
  910. int dptr = *ptr;
  911. SDEBUG(" dst: ");
  912. dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
  913. SDEBUG(" src: ");
  914. src = atom_get_src(ctx, attr, ptr);
  915. dst -= src;
  916. SDEBUG(" dst: ");
  917. atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
  918. }
/*
 * SWITCH: compare the source operand against a sequence of immediate cases
 * and, on a match, jump to the case's target offset (relative to the start
 * of the command table).  Each case record is a magic byte, an immediate
 * value with the switch's size bits, and a 16-bit jump target.
 */
static void atom_op_switch(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t src, val, target;
	SDEBUG(" switch: ");
	src = atom_get_src(ctx, attr, ptr);
	/* walk the case list until the end marker */
	while (U16(*ptr) != ATOM_CASE_END)
		if (U8(*ptr) == ATOM_CASE_MAGIC) {
			(*ptr)++;
			SDEBUG(" case: ");
			/* case value is an immediate of the switch's size */
			val =
			    atom_get_src(ctx, (attr & 0x38) | ATOM_ARG_IMM,
					 ptr);
			target = U16(*ptr);
			if (val == src) {
				SDEBUG(" target: %04X\n", target);
				/* jump targets are relative to table start */
				*ptr = ctx->start + target;
				return;
			}
			/* no match: skip this case's 16-bit target */
			(*ptr) += 2;
		} else {
			/* malformed case table: bail out instead of
			 * mis-decoding the stream */
			printk(KERN_INFO "Bad case.\n");
			return;
		}
	/* no case matched: step past the end marker and fall through */
	(*ptr) += 2;
}
  945. static void atom_op_test(atom_exec_context *ctx, int *ptr, int arg)
  946. {
  947. uint8_t attr = U8((*ptr)++);
  948. uint32_t dst, src;
  949. SDEBUG(" src1: ");
  950. dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
  951. SDEBUG(" src2: ");
  952. src = atom_get_src(ctx, attr, ptr);
  953. ctx->ctx->cs_equal = ((dst & src) == 0);
  954. SDEBUG(" result: %s\n", ctx->ctx->cs_equal ? "EQ" : "NE");
  955. }
  956. static void atom_op_xor(atom_exec_context *ctx, int *ptr, int arg)
  957. {
  958. uint8_t attr = U8((*ptr)++);
  959. uint32_t dst, src, saved;
  960. int dptr = *ptr;
  961. SDEBUG(" dst: ");
  962. dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
  963. SDEBUG(" src: ");
  964. src = atom_get_src(ctx, attr, ptr);
  965. dst ^= src;
  966. SDEBUG(" dst: ");
  967. atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
  968. }
  969. static void atom_op_debug(atom_exec_context *ctx, int *ptr, int arg)
  970. {
  971. printk(KERN_INFO "unimplemented!\n");
  972. }
/*
 * Opcode dispatch table, indexed by ATOM opcode number (0 is invalid and
 * left NULL).  Each entry pairs a handler with a constant argument that
 * selects the operand class (ATOM_ARG_*), port, jump condition, delay unit
 * or similar variant for that opcode.
 */
static struct {
	void (*func) (atom_exec_context *, int *, int);
	int arg;
} opcode_table[ATOM_OP_CNT] = {
	{NULL, 0},
	{atom_op_move, ATOM_ARG_REG},
	{atom_op_move, ATOM_ARG_PS},
	{atom_op_move, ATOM_ARG_WS},
	{atom_op_move, ATOM_ARG_FB},
	{atom_op_move, ATOM_ARG_PLL},
	{atom_op_move, ATOM_ARG_MC},
	{atom_op_and, ATOM_ARG_REG},
	{atom_op_and, ATOM_ARG_PS},
	{atom_op_and, ATOM_ARG_WS},
	{atom_op_and, ATOM_ARG_FB},
	{atom_op_and, ATOM_ARG_PLL},
	{atom_op_and, ATOM_ARG_MC},
	{atom_op_or, ATOM_ARG_REG},
	{atom_op_or, ATOM_ARG_PS},
	{atom_op_or, ATOM_ARG_WS},
	{atom_op_or, ATOM_ARG_FB},
	{atom_op_or, ATOM_ARG_PLL},
	{atom_op_or, ATOM_ARG_MC},
	{atom_op_shift_left, ATOM_ARG_REG},
	{atom_op_shift_left, ATOM_ARG_PS},
	{atom_op_shift_left, ATOM_ARG_WS},
	{atom_op_shift_left, ATOM_ARG_FB},
	{atom_op_shift_left, ATOM_ARG_PLL},
	{atom_op_shift_left, ATOM_ARG_MC},
	{atom_op_shift_right, ATOM_ARG_REG},
	{atom_op_shift_right, ATOM_ARG_PS},
	{atom_op_shift_right, ATOM_ARG_WS},
	{atom_op_shift_right, ATOM_ARG_FB},
	{atom_op_shift_right, ATOM_ARG_PLL},
	{atom_op_shift_right, ATOM_ARG_MC},
	{atom_op_mul, ATOM_ARG_REG},
	{atom_op_mul, ATOM_ARG_PS},
	{atom_op_mul, ATOM_ARG_WS},
	{atom_op_mul, ATOM_ARG_FB},
	{atom_op_mul, ATOM_ARG_PLL},
	{atom_op_mul, ATOM_ARG_MC},
	{atom_op_div, ATOM_ARG_REG},
	{atom_op_div, ATOM_ARG_PS},
	{atom_op_div, ATOM_ARG_WS},
	{atom_op_div, ATOM_ARG_FB},
	{atom_op_div, ATOM_ARG_PLL},
	{atom_op_div, ATOM_ARG_MC},
	{atom_op_add, ATOM_ARG_REG},
	{atom_op_add, ATOM_ARG_PS},
	{atom_op_add, ATOM_ARG_WS},
	{atom_op_add, ATOM_ARG_FB},
	{atom_op_add, ATOM_ARG_PLL},
	{atom_op_add, ATOM_ARG_MC},
	{atom_op_sub, ATOM_ARG_REG},
	{atom_op_sub, ATOM_ARG_PS},
	{atom_op_sub, ATOM_ARG_WS},
	{atom_op_sub, ATOM_ARG_FB},
	{atom_op_sub, ATOM_ARG_PLL},
	{atom_op_sub, ATOM_ARG_MC},
	{atom_op_setport, ATOM_PORT_ATI},
	{atom_op_setport, ATOM_PORT_PCI},
	{atom_op_setport, ATOM_PORT_SYSIO},
	{atom_op_setregblock, 0},
	{atom_op_setfbbase, 0},
	{atom_op_compare, ATOM_ARG_REG},
	{atom_op_compare, ATOM_ARG_PS},
	{atom_op_compare, ATOM_ARG_WS},
	{atom_op_compare, ATOM_ARG_FB},
	{atom_op_compare, ATOM_ARG_PLL},
	{atom_op_compare, ATOM_ARG_MC},
	{atom_op_switch, 0},
	{atom_op_jump, ATOM_COND_ALWAYS},
	{atom_op_jump, ATOM_COND_EQUAL},
	{atom_op_jump, ATOM_COND_BELOW},
	{atom_op_jump, ATOM_COND_ABOVE},
	{atom_op_jump, ATOM_COND_BELOWOREQUAL},
	{atom_op_jump, ATOM_COND_ABOVEOREQUAL},
	{atom_op_jump, ATOM_COND_NOTEQUAL},
	{atom_op_test, ATOM_ARG_REG},
	{atom_op_test, ATOM_ARG_PS},
	{atom_op_test, ATOM_ARG_WS},
	{atom_op_test, ATOM_ARG_FB},
	{atom_op_test, ATOM_ARG_PLL},
	{atom_op_test, ATOM_ARG_MC},
	{atom_op_delay, ATOM_UNIT_MILLISEC},
	{atom_op_delay, ATOM_UNIT_MICROSEC},
	{atom_op_calltable, 0},
	{atom_op_repeat, 0},
	{atom_op_clear, ATOM_ARG_REG},
	{atom_op_clear, ATOM_ARG_PS},
	{atom_op_clear, ATOM_ARG_WS},
	{atom_op_clear, ATOM_ARG_FB},
	{atom_op_clear, ATOM_ARG_PLL},
	{atom_op_clear, ATOM_ARG_MC},
	{atom_op_nop, 0},
	{atom_op_eot, 0},
	{atom_op_mask, ATOM_ARG_REG},
	{atom_op_mask, ATOM_ARG_PS},
	{atom_op_mask, ATOM_ARG_WS},
	{atom_op_mask, ATOM_ARG_FB},
	{atom_op_mask, ATOM_ARG_PLL},
	{atom_op_mask, ATOM_ARG_MC},
	{atom_op_postcard, 0},
	{atom_op_beep, 0},
	{atom_op_savereg, 0},
	{atom_op_restorereg, 0},
	{atom_op_setdatablock, 0},
	{atom_op_xor, ATOM_ARG_REG},
	{atom_op_xor, ATOM_ARG_PS},
	{atom_op_xor, ATOM_ARG_WS},
	{atom_op_xor, ATOM_ARG_FB},
	{atom_op_xor, ATOM_ARG_PLL},
	{atom_op_xor, ATOM_ARG_MC},
	{atom_op_shl, ATOM_ARG_REG},
	{atom_op_shl, ATOM_ARG_PS},
	{atom_op_shl, ATOM_ARG_WS},
	{atom_op_shl, ATOM_ARG_FB},
	{atom_op_shl, ATOM_ARG_PLL},
	{atom_op_shl, ATOM_ARG_MC},
	{atom_op_shr, ATOM_ARG_REG},
	{atom_op_shr, ATOM_ARG_PS},
	{atom_op_shr, ATOM_ARG_WS},
	{atom_op_shr, ATOM_ARG_FB},
	{atom_op_shr, ATOM_ARG_PLL},
	{atom_op_shr, ATOM_ARG_MC},
	{atom_op_debug, 0},
};
  1100. static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params)
  1101. {
  1102. int base = CU16(ctx->cmd_table + 4 + 2 * index);
  1103. int len, ws, ps, ptr;
  1104. unsigned char op;
  1105. atom_exec_context ectx;
  1106. int ret = 0;
  1107. if (!base)
  1108. return -EINVAL;
  1109. len = CU16(base + ATOM_CT_SIZE_PTR);
  1110. ws = CU8(base + ATOM_CT_WS_PTR);
  1111. ps = CU8(base + ATOM_CT_PS_PTR) & ATOM_CT_PS_MASK;
  1112. ptr = base + ATOM_CT_CODE_PTR;
  1113. SDEBUG(">> execute %04X (len %d, WS %d, PS %d)\n", base, len, ws, ps);
  1114. ectx.ctx = ctx;
  1115. ectx.ps_shift = ps / 4;
  1116. ectx.start = base;
  1117. ectx.ps = params;
  1118. ectx.abort = false;
  1119. ectx.last_jump = 0;
  1120. if (ws)
  1121. ectx.ws = kzalloc(4 * ws, GFP_KERNEL);
  1122. else
  1123. ectx.ws = NULL;
  1124. debug_depth++;
  1125. while (1) {
  1126. op = CU8(ptr++);
  1127. if (op < ATOM_OP_NAMES_CNT)
  1128. SDEBUG("%s @ 0x%04X\n", atom_op_names[op], ptr - 1);
  1129. else
  1130. SDEBUG("[%d] @ 0x%04X\n", op, ptr - 1);
  1131. if (ectx.abort) {
  1132. DRM_ERROR("atombios stuck executing %04X (len %d, WS %d, PS %d) @ 0x%04X\n",
  1133. base, len, ws, ps, ptr - 1);
  1134. ret = -EINVAL;
  1135. goto free;
  1136. }
  1137. if (op < ATOM_OP_CNT && op > 0)
  1138. opcode_table[op].func(&ectx, &ptr,
  1139. opcode_table[op].arg);
  1140. else
  1141. break;
  1142. if (op == ATOM_OP_EOT)
  1143. break;
  1144. }
  1145. debug_depth--;
  1146. SDEBUG("<<\n");
  1147. free:
  1148. if (ws)
  1149. kfree(ectx.ws);
  1150. return ret;
  1151. }
  1152. int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
  1153. {
  1154. int r;
  1155. mutex_lock(&ctx->mutex);
  1156. /* reset data block */
  1157. ctx->data_block = 0;
  1158. /* reset reg block */
  1159. ctx->reg_block = 0;
  1160. /* reset fb window */
  1161. ctx->fb_base = 0;
  1162. /* reset io mode */
  1163. ctx->io_mode = ATOM_IO_MM;
  1164. /* reset divmul */
  1165. ctx->divmul[0] = 0;
  1166. ctx->divmul[1] = 0;
  1167. r = atom_execute_table_locked(ctx, index, params);
  1168. mutex_unlock(&ctx->mutex);
  1169. return r;
  1170. }
/* Byte length of each indirect-IO (IIO) instruction, indexed by opcode. */
static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 };
/*
 * Index the BIOS's indirect-IO programs: for each IIO_START record found at
 * @base, record the offset of the program body in ctx->iio keyed by its
 * one-byte program id.  Leaves ctx->iio NULL on allocation failure (the
 * caller checks for that).
 */
static void atom_index_iio(struct atom_context *ctx, int base)
{
	/* one 16-bit offset slot per possible program id (256 ids) */
	ctx->iio = kzalloc(2 * 256, GFP_KERNEL);
	if (!ctx->iio)
		return;
	while (CU8(base) == ATOM_IIO_START) {
		/* byte 1 is the program id; the body starts right after */
		ctx->iio[CU8(base + 1)] = base + 2;
		base += 2;
		/* skip the program body instruction by instruction */
		while (CU8(base) != ATOM_IIO_END)
			base += atom_iio_len[CU8(base)];
		/* NOTE(review): END record assumed 3 bytes long — confirm
		 * against the IIO format spec */
		base += 3;
	}
}
  1185. struct atom_context *atom_parse(struct card_info *card, void *bios)
  1186. {
  1187. int base;
  1188. struct atom_context *ctx =
  1189. kzalloc(sizeof(struct atom_context), GFP_KERNEL);
  1190. char *str;
  1191. char name[512];
  1192. int i;
  1193. if (!ctx)
  1194. return NULL;
  1195. ctx->card = card;
  1196. ctx->bios = bios;
  1197. if (CU16(0) != ATOM_BIOS_MAGIC) {
  1198. printk(KERN_INFO "Invalid BIOS magic.\n");
  1199. kfree(ctx);
  1200. return NULL;
  1201. }
  1202. if (strncmp
  1203. (CSTR(ATOM_ATI_MAGIC_PTR), ATOM_ATI_MAGIC,
  1204. strlen(ATOM_ATI_MAGIC))) {
  1205. printk(KERN_INFO "Invalid ATI magic.\n");
  1206. kfree(ctx);
  1207. return NULL;
  1208. }
  1209. base = CU16(ATOM_ROM_TABLE_PTR);
  1210. if (strncmp
  1211. (CSTR(base + ATOM_ROM_MAGIC_PTR), ATOM_ROM_MAGIC,
  1212. strlen(ATOM_ROM_MAGIC))) {
  1213. printk(KERN_INFO "Invalid ATOM magic.\n");
  1214. kfree(ctx);
  1215. return NULL;
  1216. }
  1217. ctx->cmd_table = CU16(base + ATOM_ROM_CMD_PTR);
  1218. ctx->data_table = CU16(base + ATOM_ROM_DATA_PTR);
  1219. atom_index_iio(ctx, CU16(ctx->data_table + ATOM_DATA_IIO_PTR) + 4);
  1220. if (!ctx->iio) {
  1221. atom_destroy(ctx);
  1222. return NULL;
  1223. }
  1224. str = CSTR(CU16(base + ATOM_ROM_MSG_PTR));
  1225. while (*str && ((*str == '\n') || (*str == '\r')))
  1226. str++;
  1227. /* name string isn't always 0 terminated */
  1228. for (i = 0; i < 511; i++) {
  1229. name[i] = str[i];
  1230. if (name[i] < '.' || name[i] > 'z') {
  1231. name[i] = 0;
  1232. break;
  1233. }
  1234. }
  1235. printk(KERN_INFO "ATOM BIOS: %s\n", name);
  1236. return ctx;
  1237. }
  1238. int atom_asic_init(struct atom_context *ctx)
  1239. {
  1240. struct radeon_device *rdev = ctx->card->dev->dev_private;
  1241. int hwi = CU16(ctx->data_table + ATOM_DATA_FWI_PTR);
  1242. uint32_t ps[16];
  1243. int ret;
  1244. memset(ps, 0, 64);
  1245. ps[0] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFSCLK_PTR));
  1246. ps[1] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFMCLK_PTR));
  1247. if (!ps[0] || !ps[1])
  1248. return 1;
  1249. if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT))
  1250. return 1;
  1251. ret = atom_execute_table(ctx, ATOM_CMD_INIT, ps);
  1252. if (ret)
  1253. return ret;
  1254. memset(ps, 0, 64);
  1255. if (rdev->family < CHIP_R600) {
  1256. if (CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_SPDFANCNTL))
  1257. atom_execute_table(ctx, ATOM_CMD_SPDFANCNTL, ps);
  1258. }
  1259. return ret;
  1260. }
  1261. void atom_destroy(struct atom_context *ctx)
  1262. {
  1263. kfree(ctx->iio);
  1264. kfree(ctx);
  1265. }
  1266. bool atom_parse_data_header(struct atom_context *ctx, int index,
  1267. uint16_t * size, uint8_t * frev, uint8_t * crev,
  1268. uint16_t * data_start)
  1269. {
  1270. int offset = index * 2 + 4;
  1271. int idx = CU16(ctx->data_table + offset);
  1272. u16 *mdt = (u16 *)(ctx->bios + ctx->data_table + 4);
  1273. if (!mdt[index])
  1274. return false;
  1275. if (size)
  1276. *size = CU16(idx);
  1277. if (frev)
  1278. *frev = CU8(idx + 2);
  1279. if (crev)
  1280. *crev = CU8(idx + 3);
  1281. *data_start = idx;
  1282. return true;
  1283. }
  1284. bool atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t * frev,
  1285. uint8_t * crev)
  1286. {
  1287. int offset = index * 2 + 4;
  1288. int idx = CU16(ctx->cmd_table + offset);
  1289. u16 *mct = (u16 *)(ctx->bios + ctx->cmd_table + 4);
  1290. if (!mct[index])
  1291. return false;
  1292. if (frev)
  1293. *frev = CU8(idx + 2);
  1294. if (crev)
  1295. *crev = CU8(idx + 3);
  1296. return true;
  1297. }
  1298. int atom_allocate_fb_scratch(struct atom_context *ctx)
  1299. {
  1300. int index = GetIndexIntoMasterTable(DATA, VRAM_UsageByFirmware);
  1301. uint16_t data_offset;
  1302. int usage_bytes = 0;
  1303. struct _ATOM_VRAM_USAGE_BY_FIRMWARE *firmware_usage;
  1304. if (atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
  1305. firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset);
  1306. DRM_DEBUG("atom firmware requested %08x %dkb\n",
  1307. le32_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware),
  1308. le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb));
  1309. usage_bytes = le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb) * 1024;
  1310. }
  1311. ctx->scratch_size_bytes = 0;
  1312. if (usage_bytes == 0)
  1313. usage_bytes = 20 * 1024;
  1314. /* allocate some scratch memory */
  1315. ctx->scratch = kzalloc(usage_bytes, GFP_KERNEL);
  1316. if (!ctx->scratch)
  1317. return -ENOMEM;
  1318. ctx->scratch_size_bytes = usage_bytes;
  1319. return 0;
  1320. }