/*
 * Copyright 2010 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <linux/firmware.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include "drmP.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_drm.h"
#include "nid.h"
#include "atom.h"
#include "ni_reg.h"
#include "cayman_blit_shaders.h"

extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern int evergreen_mc_wait_for_idle(struct radeon_device *rdev);
extern void evergreen_mc_program(struct radeon_device *rdev);
extern void evergreen_irq_suspend(struct radeon_device *rdev);
extern int evergreen_mc_init(struct radeon_device *rdev);
extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev);
extern void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
extern void si_rlc_fini(struct radeon_device *rdev);
extern int si_rlc_init(struct radeon_device *rdev);
#define EVERGREEN_PFP_UCODE_SIZE 1120
#define EVERGREEN_PM4_UCODE_SIZE 1376
#define EVERGREEN_RLC_UCODE_SIZE 768
#define BTC_MC_UCODE_SIZE 6024

#define CAYMAN_PFP_UCODE_SIZE 2176
#define CAYMAN_PM4_UCODE_SIZE 2176
#define CAYMAN_RLC_UCODE_SIZE 1024
#define CAYMAN_MC_UCODE_SIZE 6037

#define ARUBA_RLC_UCODE_SIZE 1536
/* Firmware Names */
MODULE_FIRMWARE("radeon/BARTS_pfp.bin");
MODULE_FIRMWARE("radeon/BARTS_me.bin");
MODULE_FIRMWARE("radeon/BARTS_mc.bin");
MODULE_FIRMWARE("radeon/BTC_rlc.bin");
MODULE_FIRMWARE("radeon/TURKS_pfp.bin");
MODULE_FIRMWARE("radeon/TURKS_me.bin");
MODULE_FIRMWARE("radeon/TURKS_mc.bin");
MODULE_FIRMWARE("radeon/CAICOS_pfp.bin");
MODULE_FIRMWARE("radeon/CAICOS_me.bin");
MODULE_FIRMWARE("radeon/CAICOS_mc.bin");
MODULE_FIRMWARE("radeon/CAYMAN_pfp.bin");
MODULE_FIRMWARE("radeon/CAYMAN_me.bin");
MODULE_FIRMWARE("radeon/CAYMAN_mc.bin");
MODULE_FIRMWARE("radeon/CAYMAN_rlc.bin");
MODULE_FIRMWARE("radeon/ARUBA_pfp.bin");
MODULE_FIRMWARE("radeon/ARUBA_me.bin");
MODULE_FIRMWARE("radeon/ARUBA_rlc.bin");
#define BTC_IO_MC_REGS_SIZE 29

static const u32 barts_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
	{0x00000077, 0xff010100},
	{0x00000078, 0x00000000},
	{0x00000079, 0x00001434},
	{0x0000007a, 0xcc08ec08},
	{0x0000007b, 0x00040000},
	{0x0000007c, 0x000080c0},
	{0x0000007d, 0x09000000},
	{0x0000007e, 0x00210404},
	{0x00000081, 0x08a8e800},
	{0x00000082, 0x00030444},
	{0x00000083, 0x00000000},
	{0x00000085, 0x00000001},
	{0x00000086, 0x00000002},
	{0x00000087, 0x48490000},
	{0x00000088, 0x20244647},
	{0x00000089, 0x00000005},
	{0x0000008b, 0x66030000},
	{0x0000008c, 0x00006603},
	{0x0000008d, 0x00000100},
	{0x0000008f, 0x00001c0a},
	{0x00000090, 0xff000001},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00946a00}
};

static const u32 turks_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
	{0x00000077, 0xff010100},
	{0x00000078, 0x00000000},
	{0x00000079, 0x00001434},
	{0x0000007a, 0xcc08ec08},
	{0x0000007b, 0x00040000},
	{0x0000007c, 0x000080c0},
	{0x0000007d, 0x09000000},
	{0x0000007e, 0x00210404},
	{0x00000081, 0x08a8e800},
	{0x00000082, 0x00030444},
	{0x00000083, 0x00000000},
	{0x00000085, 0x00000001},
	{0x00000086, 0x00000002},
	{0x00000087, 0x48490000},
	{0x00000088, 0x20244647},
	{0x00000089, 0x00000005},
	{0x0000008b, 0x66030000},
	{0x0000008c, 0x00006603},
	{0x0000008d, 0x00000100},
	{0x0000008f, 0x00001c0a},
	{0x00000090, 0xff000001},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00936a00}
};

static const u32 caicos_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
	{0x00000077, 0xff010100},
	{0x00000078, 0x00000000},
	{0x00000079, 0x00001434},
	{0x0000007a, 0xcc08ec08},
	{0x0000007b, 0x00040000},
	{0x0000007c, 0x000080c0},
	{0x0000007d, 0x09000000},
	{0x0000007e, 0x00210404},
	{0x00000081, 0x08a8e800},
	{0x00000082, 0x00030444},
	{0x00000083, 0x00000000},
	{0x00000085, 0x00000001},
	{0x00000086, 0x00000002},
	{0x00000087, 0x48490000},
	{0x00000088, 0x20244647},
	{0x00000089, 0x00000005},
	{0x0000008b, 0x66030000},
	{0x0000008c, 0x00006603},
	{0x0000008d, 0x00000100},
	{0x0000008f, 0x00001c0a},
	{0x00000090, 0xff000001},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00916a00}
};

static const u32 cayman_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
	{0x00000077, 0xff010100},
	{0x00000078, 0x00000000},
	{0x00000079, 0x00001434},
	{0x0000007a, 0xcc08ec08},
	{0x0000007b, 0x00040000},
	{0x0000007c, 0x000080c0},
	{0x0000007d, 0x09000000},
	{0x0000007e, 0x00210404},
	{0x00000081, 0x08a8e800},
	{0x00000082, 0x00030444},
	{0x00000083, 0x00000000},
	{0x00000085, 0x00000001},
	{0x00000086, 0x00000002},
	{0x00000087, 0x48490000},
	{0x00000088, 0x20244647},
	{0x00000089, 0x00000005},
	{0x0000008b, 0x66030000},
	{0x0000008c, 0x00006603},
	{0x0000008d, 0x00000100},
	{0x0000008f, 0x00001c0a},
	{0x00000090, 0xff000001},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00976b00}
};
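
/*
 * ni_mc_load_microcode - load MC ucode into the hw
 *
 * Resets the MC sequencer engine and puts it in a writable state,
 * programs the per-chip MC IO register table above, streams the
 * big-endian MC firmware words into MC_SEQ_SUP_PGM, then restarts the
 * engine and polls MC_IO_PAD_CNTL_D0 (up to rdev->usec_timeout) for
 * memory training to complete.  Skipped unless the board uses GDDR5
 * and the sequencer is not already running; returns -EINVAL if no MC
 * firmware has been loaded.
 */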
int ni_mc_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	u32 mem_type, running, blackout = 0;
	u32 *io_mc_regs;
	int i, ucode_size, regs_size;

	if (!rdev->mc_fw)
		return -EINVAL;

	switch (rdev->family) {
	case CHIP_BARTS:
		io_mc_regs = (u32 *)&barts_io_mc_regs;
		ucode_size = BTC_MC_UCODE_SIZE;
		regs_size = BTC_IO_MC_REGS_SIZE;
		break;
	case CHIP_TURKS:
		io_mc_regs = (u32 *)&turks_io_mc_regs;
		ucode_size = BTC_MC_UCODE_SIZE;
		regs_size = BTC_IO_MC_REGS_SIZE;
		break;
	case CHIP_CAICOS:
	default:
		io_mc_regs = (u32 *)&caicos_io_mc_regs;
		ucode_size = BTC_MC_UCODE_SIZE;
		regs_size = BTC_IO_MC_REGS_SIZE;
		break;
	case CHIP_CAYMAN:
		io_mc_regs = (u32 *)&cayman_io_mc_regs;
		ucode_size = CAYMAN_MC_UCODE_SIZE;
		regs_size = BTC_IO_MC_REGS_SIZE;
		break;
	}

	mem_type = (RREG32(MC_SEQ_MISC0) & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT;
	running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;

	if ((mem_type == MC_SEQ_MISC0_GDDR5_VALUE) && (running == 0)) {
		if (running) {
			blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
			WREG32(MC_SHARED_BLACKOUT_CNTL, 1);
		}

		/* reset the engine and set to writable */
		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(MC_SEQ_SUP_CNTL, 0x00000010);

		/* load mc io regs */
		for (i = 0; i < regs_size; i++) {
			WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
			WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
		}

		/* load the MC ucode */
		fw_data = (const __be32 *)rdev->mc_fw->data;
		for (i = 0; i < ucode_size; i++)
			WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));

		/* put the engine back into the active state */
		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(MC_SEQ_SUP_CNTL, 0x00000004);
		WREG32(MC_SEQ_SUP_CNTL, 0x00000001);

		/* wait for training to complete */
		for (i = 0; i < rdev->usec_timeout; i++) {
			if (RREG32(MC_IO_PAD_CNTL_D0) & MEM_FALL_OUT_CMD)
				break;
			udelay(1);
		}

		if (running)
			WREG32(MC_SHARED_BLACKOUT_CNTL, blackout);
	}

	return 0;
}
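
/*
 * ni_init_microcode - fetch PFP, ME, RLC and MC firmware
 *
 * Requests the per-chip firmware images via request_firmware() and
 * validates each against the expected size for the ASIC family.  IGP
 * parts (ARUBA/TN) carry no MC ucode, so the MC image is skipped there.
 * On failure, every firmware reference is released and an error is
 * returned.
 */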
int ni_init_microcode(struct radeon_device *rdev)
{
	struct platform_device *pdev;
	const char *chip_name;
	const char *rlc_chip_name;
	size_t pfp_req_size, me_req_size, rlc_req_size, mc_req_size;
	char fw_name[30];
	int err;

	DRM_DEBUG("\n");

	pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
	err = IS_ERR(pdev);
	if (err) {
		printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
		return -EINVAL;
	}

	switch (rdev->family) {
	case CHIP_BARTS:
		chip_name = "BARTS";
		rlc_chip_name = "BTC";
		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
		mc_req_size = BTC_MC_UCODE_SIZE * 4;
		break;
	case CHIP_TURKS:
		chip_name = "TURKS";
		rlc_chip_name = "BTC";
		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
		mc_req_size = BTC_MC_UCODE_SIZE * 4;
		break;
	case CHIP_CAICOS:
		chip_name = "CAICOS";
		rlc_chip_name = "BTC";
		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
		mc_req_size = BTC_MC_UCODE_SIZE * 4;
		break;
	case CHIP_CAYMAN:
		chip_name = "CAYMAN";
		rlc_chip_name = "CAYMAN";
		pfp_req_size = CAYMAN_PFP_UCODE_SIZE * 4;
		me_req_size = CAYMAN_PM4_UCODE_SIZE * 4;
		rlc_req_size = CAYMAN_RLC_UCODE_SIZE * 4;
		mc_req_size = CAYMAN_MC_UCODE_SIZE * 4;
		break;
	case CHIP_ARUBA:
		chip_name = "ARUBA";
		rlc_chip_name = "ARUBA";
		/* pfp/me same size as CAYMAN */
		pfp_req_size = CAYMAN_PFP_UCODE_SIZE * 4;
		me_req_size = CAYMAN_PM4_UCODE_SIZE * 4;
		rlc_req_size = ARUBA_RLC_UCODE_SIZE * 4;
		mc_req_size = 0;
		break;
	default: BUG();
	}

	DRM_INFO("Loading %s Microcode\n", chip_name);

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
	err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->pfp_fw->size != pfp_req_size) {
		printk(KERN_ERR
		       "ni_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->pfp_fw->size, fw_name);
		err = -EINVAL;
		goto out;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
	err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->me_fw->size != me_req_size) {
		printk(KERN_ERR
		       "ni_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->me_fw->size, fw_name);
		err = -EINVAL;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
	err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->rlc_fw->size != rlc_req_size) {
		printk(KERN_ERR
		       "ni_rlc: Bogus length %zu in firmware \"%s\"\n",
		       rdev->rlc_fw->size, fw_name);
		err = -EINVAL;
	}

	/* no MC ucode on TN */
	if (!(rdev->flags & RADEON_IS_IGP)) {
		snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
		err = request_firmware(&rdev->mc_fw, fw_name, &pdev->dev);
		if (err)
			goto out;
		if (rdev->mc_fw->size != mc_req_size) {
			printk(KERN_ERR
			       "ni_mc: Bogus length %zu in firmware \"%s\"\n",
			       rdev->mc_fw->size, fw_name);
			err = -EINVAL;
		}
	}

out:
	platform_device_unregister(pdev);

	if (err) {
		if (err != -EINVAL)
			printk(KERN_ERR
			       "ni_cp: Failed to load firmware \"%s\"\n",
			       fw_name);
		release_firmware(rdev->pfp_fw);
		rdev->pfp_fw = NULL;
		release_firmware(rdev->me_fw);
		rdev->me_fw = NULL;
		release_firmware(rdev->rlc_fw);
		rdev->rlc_fw = NULL;
		release_firmware(rdev->mc_fw);
		rdev->mc_fw = NULL;
	}
	return err;
}
/*
 * Core functions
 */
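/*
 * cayman_gpu_init - set up the 3D engine
 *
 * Fills rdev->config.cayman with the per-ASIC limits, derives the
 * tiling configuration from GB_ADDR_CONFIG and MC_ARB_RAMCFG, remaps
 * the render backends around any disabled RBs, and programs the HW
 * defaults for the shader complex and 3D engine.
 */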
static void cayman_gpu_init(struct radeon_device *rdev)
{
	u32 gb_addr_config = 0;
	u32 mc_shared_chmap, mc_arb_ramcfg;
	u32 cgts_tcc_disable;
	u32 sx_debug_1;
	u32 smx_dc_ctl0;
	u32 cgts_sm_ctrl_reg;
	u32 hdp_host_path_cntl;
	u32 tmp;
	u32 disabled_rb_mask;
	int i, j;

	switch (rdev->family) {
	case CHIP_CAYMAN:
		rdev->config.cayman.max_shader_engines = 2;
		rdev->config.cayman.max_pipes_per_simd = 4;
		rdev->config.cayman.max_tile_pipes = 8;
		rdev->config.cayman.max_simds_per_se = 12;
		rdev->config.cayman.max_backends_per_se = 4;
		rdev->config.cayman.max_texture_channel_caches = 8;
		rdev->config.cayman.max_gprs = 256;
		rdev->config.cayman.max_threads = 256;
		rdev->config.cayman.max_gs_threads = 32;
		rdev->config.cayman.max_stack_entries = 512;
		rdev->config.cayman.sx_num_of_sets = 8;
		rdev->config.cayman.sx_max_export_size = 256;
		rdev->config.cayman.sx_max_export_pos_size = 64;
		rdev->config.cayman.sx_max_export_smx_size = 192;
		rdev->config.cayman.max_hw_contexts = 8;
		rdev->config.cayman.sq_num_cf_insts = 2;
		rdev->config.cayman.sc_prim_fifo_size = 0x100;
		rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = CAYMAN_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_ARUBA:
	default:
		rdev->config.cayman.max_shader_engines = 1;
		rdev->config.cayman.max_pipes_per_simd = 4;
		rdev->config.cayman.max_tile_pipes = 2;
		if ((rdev->pdev->device == 0x9900) ||
		    (rdev->pdev->device == 0x9901)) {
			rdev->config.cayman.max_simds_per_se = 6;
			rdev->config.cayman.max_backends_per_se = 2;
		} else if ((rdev->pdev->device == 0x9903) ||
			   (rdev->pdev->device == 0x9904)) {
			rdev->config.cayman.max_simds_per_se = 4;
			rdev->config.cayman.max_backends_per_se = 2;
		} else if ((rdev->pdev->device == 0x9990) ||
			   (rdev->pdev->device == 0x9991)) {
			rdev->config.cayman.max_simds_per_se = 3;
			rdev->config.cayman.max_backends_per_se = 1;
		} else {
			rdev->config.cayman.max_simds_per_se = 2;
			rdev->config.cayman.max_backends_per_se = 1;
		}
		rdev->config.cayman.max_texture_channel_caches = 2;
		rdev->config.cayman.max_gprs = 256;
		rdev->config.cayman.max_threads = 256;
		rdev->config.cayman.max_gs_threads = 32;
		rdev->config.cayman.max_stack_entries = 512;
		rdev->config.cayman.sx_num_of_sets = 8;
		rdev->config.cayman.sx_max_export_size = 256;
		rdev->config.cayman.sx_max_export_pos_size = 64;
		rdev->config.cayman.sx_max_export_smx_size = 192;
		rdev->config.cayman.max_hw_contexts = 8;
		rdev->config.cayman.sq_num_cf_insts = 2;
		rdev->config.cayman.sc_prim_fifo_size = 0x40;
		rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = ARUBA_GB_ADDR_CONFIG_GOLDEN;
		break;
	}

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}

	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));

	evergreen_fix_pci_max_read_req_size(rdev);

	mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
	mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);

	tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
	rdev->config.cayman.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
	if (rdev->config.cayman.mem_row_size_in_kb > 4)
		rdev->config.cayman.mem_row_size_in_kb = 4;
	/* XXX use MC settings? */
	rdev->config.cayman.shader_engine_tile_size = 32;
	rdev->config.cayman.num_gpus = 1;
	rdev->config.cayman.multi_gpu_tile_size = 64;

	tmp = (gb_addr_config & NUM_PIPES_MASK) >> NUM_PIPES_SHIFT;
	rdev->config.cayman.num_tile_pipes = (1 << tmp);
	tmp = (gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT;
	rdev->config.cayman.mem_max_burst_length_bytes = (tmp + 1) * 256;
	tmp = (gb_addr_config & NUM_SHADER_ENGINES_MASK) >> NUM_SHADER_ENGINES_SHIFT;
	rdev->config.cayman.num_shader_engines = tmp + 1;
	tmp = (gb_addr_config & NUM_GPUS_MASK) >> NUM_GPUS_SHIFT;
	rdev->config.cayman.num_gpus = tmp + 1;
	tmp = (gb_addr_config & MULTI_GPU_TILE_SIZE_MASK) >> MULTI_GPU_TILE_SIZE_SHIFT;
	rdev->config.cayman.multi_gpu_tile_size = 1 << tmp;
	tmp = (gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT;
	rdev->config.cayman.mem_row_size_in_kb = 1 << tmp;

	/* setup tiling info dword.  gb_addr_config is not adequate since it does
	 * not have bank info, so create a custom tiling dword.
	 * bits 3:0   num_pipes
	 * bits 7:4   num_banks
	 * bits 11:8  group_size
	 * bits 15:12 row_size
	 */
	rdev->config.cayman.tile_config = 0;
	switch (rdev->config.cayman.num_tile_pipes) {
	case 1:
	default:
		rdev->config.cayman.tile_config |= (0 << 0);
		break;
	case 2:
		rdev->config.cayman.tile_config |= (1 << 0);
		break;
	case 4:
		rdev->config.cayman.tile_config |= (2 << 0);
		break;
	case 8:
		rdev->config.cayman.tile_config |= (3 << 0);
		break;
	}

	/* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */
	if (rdev->flags & RADEON_IS_IGP)
		rdev->config.cayman.tile_config |= 1 << 4;
	else {
		if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
			rdev->config.cayman.tile_config |= 1 << 4;
		else
			rdev->config.cayman.tile_config |= 0 << 4;
	}
	rdev->config.cayman.tile_config |=
		((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
	rdev->config.cayman.tile_config |=
		((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;

	tmp = 0;
	for (i = (rdev->config.cayman.max_shader_engines - 1); i >= 0; i--) {
		u32 rb_disable_bitmap;

		WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
		WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
		rb_disable_bitmap = (RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000) >> 16;
		tmp <<= 4;
		tmp |= rb_disable_bitmap;
	}
	/* enabled RBs are just the ones not disabled :) */
	disabled_rb_mask = tmp;

	WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
	WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);

	WREG32(GB_ADDR_CONFIG, gb_addr_config);
	WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
	WREG32(HDP_ADDR_CONFIG, gb_addr_config);

	tmp = gb_addr_config & NUM_PIPES_MASK;
	tmp = r6xx_remap_render_backend(rdev, tmp,
					rdev->config.cayman.max_backends_per_se *
					rdev->config.cayman.max_shader_engines,
					CAYMAN_MAX_BACKENDS, disabled_rb_mask);
	WREG32(GB_BACKEND_MAP, tmp);

	cgts_tcc_disable = 0xffff0000;
	for (i = 0; i < rdev->config.cayman.max_texture_channel_caches; i++)
		cgts_tcc_disable &= ~(1 << (16 + i));
	WREG32(CGTS_TCC_DISABLE, cgts_tcc_disable);
	WREG32(CGTS_SYS_TCC_DISABLE, cgts_tcc_disable);
	WREG32(CGTS_USER_SYS_TCC_DISABLE, cgts_tcc_disable);
	WREG32(CGTS_USER_TCC_DISABLE, cgts_tcc_disable);

	/* reprogram the shader complex */
	cgts_sm_ctrl_reg = RREG32(CGTS_SM_CTRL_REG);
	for (i = 0; i < 16; i++)
		WREG32(CGTS_SM_CTRL_REG, OVERRIDE);
	WREG32(CGTS_SM_CTRL_REG, cgts_sm_ctrl_reg);

	/* set HW defaults for 3D engine */
	WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60));

	sx_debug_1 = RREG32(SX_DEBUG_1);
	sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
	WREG32(SX_DEBUG_1, sx_debug_1);

	smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
	smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff);
	smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.cayman.sx_num_of_sets);
	WREG32(SMX_DC_CTL0, smx_dc_ctl0);

	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4) | CRC_SIMD_ID_WADDR_DISABLE);

	/* need to be explicitly zero-ed */
	WREG32(VGT_OFFCHIP_LDS_BASE, 0);
	WREG32(SQ_LSTMP_RING_BASE, 0);
	WREG32(SQ_HSTMP_RING_BASE, 0);
	WREG32(SQ_ESTMP_RING_BASE, 0);
	WREG32(SQ_GSTMP_RING_BASE, 0);
	WREG32(SQ_VSTMP_RING_BASE, 0);
	WREG32(SQ_PSTMP_RING_BASE, 0);

	WREG32(TA_CNTL_AUX, DISABLE_CUBE_ANISO);

	WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.cayman.sx_max_export_size / 4) - 1) |
					POSITION_BUFFER_SIZE((rdev->config.cayman.sx_max_export_pos_size / 4) - 1) |
					SMX_BUFFER_SIZE((rdev->config.cayman.sx_max_export_smx_size / 4) - 1)));

	WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.cayman.sc_prim_fifo_size) |
				 SC_HIZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_hiz_tile_fifo_size) |
				 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_earlyz_tile_fifo_size)));

	WREG32(VGT_NUM_INSTANCES, 1);

	WREG32(CP_PERFMON_CNTL, 0);

	WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.cayman.sq_num_cf_insts) |
				  FETCH_FIFO_HIWATER(0x4) |
				  DONE_FIFO_HIWATER(0xe0) |
				  ALU_UPDATE_FIFO_HIWATER(0x8)));

	WREG32(SQ_GPR_RESOURCE_MGMT_1, NUM_CLAUSE_TEMP_GPRS(4));
	WREG32(SQ_CONFIG, (VC_ENABLE |
			   EXPORT_SRC_C |
			   GFX_PRIO(0) |
			   CS1_PRIO(0) |
			   CS2_PRIO(1)));
	WREG32(SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, DYN_GPR_ENABLE);

	WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
					  FORCE_EOV_MAX_REZ_CNT(255)));

	WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC) |
	       AUTO_INVLD_EN(ES_AND_GS_AUTO));

	WREG32(VGT_GS_VERTEX_REUSE, 16);
	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);

	WREG32(CB_PERF_CTR0_SEL_0, 0);
	WREG32(CB_PERF_CTR0_SEL_1, 0);
	WREG32(CB_PERF_CTR1_SEL_0, 0);
	WREG32(CB_PERF_CTR1_SEL_1, 0);
	WREG32(CB_PERF_CTR2_SEL_0, 0);
	WREG32(CB_PERF_CTR2_SEL_1, 0);
	WREG32(CB_PERF_CTR3_SEL_0, 0);
	WREG32(CB_PERF_CTR3_SEL_1, 0);

	tmp = RREG32(HDP_MISC_CNTL);
	tmp |= HDP_FLUSH_INVALIDATE_CACHE;
	WREG32(HDP_MISC_CNTL, tmp);

	hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
	WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);

	WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));

	udelay(50);
}
/*
 * GART
 */
void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	/* flush hdp cache */
	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

	/* bits 0-7 are the VM contexts0-7 */
	WREG32(VM_INVALIDATE_REQUEST, 1);
}
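
/*
 * cayman_pcie_gart_enable - program the GART and VM hardware
 *
 * Pins the GART page table in VRAM, sets up the L1 TLB and L2 cache,
 * points VM context 0 at the GTT range, gives contexts 1-7 an empty
 * range backed by the same page table, enables all contexts, and
 * flushes the TLBs.
 */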
int cayman_pcie_gart_enable(struct radeon_device *rdev)
{
	int i, r;

	if (rdev->gart.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	radeon_gart_restore(rdev);

	/* Setup TLB control */
	WREG32(MC_VM_MX_L1_TLB_CNTL,
	       (0xA << 7) |
	       ENABLE_L1_TLB |
	       ENABLE_L1_FRAGMENT_PROCESSING |
	       SYSTEM_ACCESS_MODE_NOT_IN_SYS |
	       ENABLE_ADVANCED_DRIVER_MODEL |
	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
	       ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
	       EFFECTIVE_L2_QUEUE_SIZE(7) |
	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
	WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
	       L2_CACHE_BIGK_FRAGMENT_SIZE(6));

	/* setup context0 */
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(rdev->dummy_page.addr >> 12));
	WREG32(VM_CONTEXT0_CNTL2, 0);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
	       RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);

	WREG32(0x15D4, 0);
	WREG32(0x15D8, 0);
	WREG32(0x15DC, 0);

	/* empty context1-7 */
	for (i = 1; i < 8; i++) {
		WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (i << 2), 0);
		WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2), 0);
		WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
		       rdev->gart.table_addr >> 12);
	}

	/* enable context1-7 */
	WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(rdev->dummy_page.addr >> 12));
	WREG32(VM_CONTEXT1_CNTL2, 0);
	WREG32(VM_CONTEXT1_CNTL, 0);
	WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
	       RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);

	cayman_pcie_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)rdev->gart.table_addr);
	rdev->gart.ready = true;
	return 0;
}

void cayman_pcie_gart_disable(struct radeon_device *rdev)
{
	/* Disable all tables */
	WREG32(VM_CONTEXT0_CNTL, 0);
	WREG32(VM_CONTEXT1_CNTL, 0);
	/* Setup TLB control */
	WREG32(MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING |
	       SYSTEM_ACCESS_MODE_NOT_IN_SYS |
	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
	       EFFECTIVE_L2_QUEUE_SIZE(7) |
	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
	       L2_CACHE_BIGK_FRAGMENT_SIZE(6));
	radeon_gart_table_vram_unpin(rdev);
}

void cayman_pcie_gart_fini(struct radeon_device *rdev)
{
	cayman_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
	radeon_gart_fini(rdev);
}
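
/*
 * cayman_cp_int_cntl_setup - route a CP_INT_CNTL write to one ring
 *
 * The low two bits of SRBM_GFX_CNTL select which of the three CP ring
 * register sets is visible, so select the ring first, then write the
 * interrupt control value.
 */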
void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
			      int ring, u32 cp_int_cntl)
{
	u32 srbm_gfx_cntl = RREG32(SRBM_GFX_CNTL) & ~3;

	WREG32(SRBM_GFX_CNTL, srbm_gfx_cntl | (ring & 3));
	WREG32(CP_INT_CNTL, cp_int_cntl);
}

/*
 * CP.
 */
void cayman_fence_ring_emit(struct radeon_device *rdev,
			    struct radeon_fence *fence)
{
	struct radeon_ring *ring = &rdev->ring[fence->ring];
	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;

	/* flush read cache over gart for this vmid */
	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
	radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | PACKET3_SH_ACTION_ENA);
	radeon_ring_write(ring, 0xFFFFFFFF);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 10); /* poll interval */
	/* EVENT_WRITE_EOP - flush caches, send int */
	radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
	radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
	radeon_ring_write(ring, addr & 0xffffffff);
	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
	radeon_ring_write(ring, fence->seq);
	radeon_ring_write(ring, 0);
}

void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->fence->ring];

	/* set to DX10/11 mode */
	radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
	radeon_ring_write(ring, 1);
	radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
	radeon_ring_write(ring,
#ifdef __BIG_ENDIAN
			  (2 << 0) |
#endif
			  (ib->gpu_addr & 0xFFFFFFFC));
	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
	radeon_ring_write(ring, ib->length_dw | (ib->vm_id << 24));

	/* flush read cache over gart for this vmid */
	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
	radeon_ring_write(ring, ib->vm_id);
	radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
	radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | PACKET3_SH_ACTION_ENA);
	radeon_ring_write(ring, 0xFFFFFFFF);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 10); /* poll interval */
}

static void cayman_cp_enable(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32(CP_ME_CNTL, 0);
	else {
		radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
		WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
		WREG32(SCRATCH_UMSK, 0);
	}
}

static int cayman_cp_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	int i;

	if (!rdev->me_fw || !rdev->pfp_fw)
		return -EINVAL;

	cayman_cp_enable(rdev, false);

	fw_data = (const __be32 *)rdev->pfp_fw->data;
	WREG32(CP_PFP_UCODE_ADDR, 0);
	for (i = 0; i < CAYMAN_PFP_UCODE_SIZE; i++)
		WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
	WREG32(CP_PFP_UCODE_ADDR, 0);

	fw_data = (const __be32 *)rdev->me_fw->data;
	WREG32(CP_ME_RAM_WADDR, 0);
	for (i = 0; i < CAYMAN_PM4_UCODE_SIZE; i++)
		WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));

	WREG32(CP_PFP_UCODE_ADDR, 0);
	WREG32(CP_ME_RAM_WADDR, 0);
	WREG32(CP_ME_RAM_RADDR, 0);
	return 0;
}
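
/*
 * cayman_cp_start - initialize the gfx ring
 *
 * Emits ME_INITIALIZE, enables the CP, then emits the clear-state
 * context (cayman_default_state) followed by a few defaults the clear
 * state does not cover.  The compute-only rings are not initialized
 * here yet (see the XXX below).
 */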
static int cayman_cp_start(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	int r, i;

	r = radeon_ring_lock(rdev, ring, 7);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}
	radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
	radeon_ring_write(ring, 0x1);
	radeon_ring_write(ring, 0x0);
	radeon_ring_write(ring, rdev->config.cayman.max_hw_contexts - 1);
	radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 0);
	radeon_ring_unlock_commit(rdev, ring);

	cayman_cp_enable(rdev, true);

	r = radeon_ring_lock(rdev, ring, cayman_default_size + 19);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}

	/* setup clear context state */
	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	for (i = 0; i < cayman_default_size; i++)
		radeon_ring_write(ring, cayman_default_state[i]);

	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);

	/* set clear context state */
	radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
	radeon_ring_write(ring, 0);

	/* SQ_VTX_BASE_VTX_LOC */
	radeon_ring_write(ring, 0xc0026f00);
	radeon_ring_write(ring, 0x00000000);
	radeon_ring_write(ring, 0x00000000);
	radeon_ring_write(ring, 0x00000000);

	/* Clear consts */
	radeon_ring_write(ring, 0xc0036f00);
	radeon_ring_write(ring, 0x00000bc4);
	radeon_ring_write(ring, 0xffffffff);
	radeon_ring_write(ring, 0xffffffff);
	radeon_ring_write(ring, 0xffffffff);

	radeon_ring_write(ring, 0xc0026900);
	radeon_ring_write(ring, 0x00000316);
	radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
	radeon_ring_write(ring, 0x00000010); /*  */

	radeon_ring_unlock_commit(rdev, ring);

	/* XXX init other rings */

	return 0;
}

static void cayman_cp_fini(struct radeon_device *rdev)
{
	cayman_cp_enable(rdev, false);
	radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
}
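
/*
 * cayman_cp_resume - soft-reset the CP and bring up all three rings
 *
 * Resets the CP and related gfx blocks, then programs ring buffer
 * size, read/write pointers and writeback addresses for rings 0-2,
 * starts the rings, and runs a ring test on the gfx ring (cp0) only.
 */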
int cayman_cp_resume(struct radeon_device *rdev)
{
	struct radeon_ring *ring;
	u32 tmp;
	u32 rb_bufsz;
	int r;

	/* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
	WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
				 SOFT_RESET_PA |
				 SOFT_RESET_SH |
				 SOFT_RESET_VGT |
				 SOFT_RESET_SPI |
				 SOFT_RESET_SX));
	RREG32(GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);
	RREG32(GRBM_SOFT_RESET);

	WREG32(CP_SEM_WAIT_TIMER, 0x0);
	WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);

	/* Set the write pointer delay */
	WREG32(CP_RB_WPTR_DELAY, 0);

	WREG32(CP_DEBUG, (1 << 27));

	/* ring 0 - compute and gfx */
	/* Set ring buffer size */
	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	rb_bufsz = drm_order(ring->ring_size / 8);
	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
	tmp |= BUF_SWAP_32BIT;
#endif
	WREG32(CP_RB0_CNTL, tmp);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA);
	ring->wptr = 0;
	WREG32(CP_RB0_WPTR, ring->wptr);
	/* set the wb address whether it's enabled or not */
	WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
	WREG32(CP_RB0_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);

	if (rdev->wb.enabled)
		WREG32(SCRATCH_UMSK, 0xff);
	else {
		tmp |= RB_NO_UPDATE;
		WREG32(SCRATCH_UMSK, 0);
	}

	mdelay(1);
	WREG32(CP_RB0_CNTL, tmp);

	WREG32(CP_RB0_BASE, ring->gpu_addr >> 8);

	ring->rptr = RREG32(CP_RB0_RPTR);

	/* ring1 - compute only */
	/* Set ring buffer size */
	ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
	rb_bufsz = drm_order(ring->ring_size / 8);
	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
	tmp |= BUF_SWAP_32BIT;
#endif
	WREG32(CP_RB1_CNTL, tmp);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA);
	ring->wptr = 0;
	WREG32(CP_RB1_WPTR, ring->wptr);
	/* set the wb address whether it's enabled or not */
	WREG32(CP_RB1_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC);
	WREG32(CP_RB1_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFF);

	mdelay(1);
	WREG32(CP_RB1_CNTL, tmp);

	WREG32(CP_RB1_BASE, ring->gpu_addr >> 8);

	ring->rptr = RREG32(CP_RB1_RPTR);

	/* ring2 - compute only */
	/* Set ring buffer size */
	ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
	rb_bufsz = drm_order(ring->ring_size / 8);
	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
	tmp |= BUF_SWAP_32BIT;
#endif
	WREG32(CP_RB2_CNTL, tmp);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA);
	ring->wptr = 0;
	WREG32(CP_RB2_WPTR, ring->wptr);
	/* set the wb address whether it's enabled or not */
	WREG32(CP_RB2_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC);
	WREG32(CP_RB2_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFF);

	mdelay(1);
	WREG32(CP_RB2_CNTL, tmp);

	WREG32(CP_RB2_BASE, ring->gpu_addr >> 8);

	ring->rptr = RREG32(CP_RB2_RPTR);

	/* start the rings */
	cayman_cp_start(rdev);
	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true;
	rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
	rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
	/* this only tests cp0 */
	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
	if (r) {
		rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
		rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
		rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
		return r;
	}

	return 0;
}
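
/*
 * cayman_gpu_soft_reset - reset the gfx blocks
 *
 * Dumps the GRBM/SRBM status and VM fault registers, stops the MC,
 * halts CP parsing/prefetching, pulses GRBM_SOFT_RESET across all the
 * gfx blocks, and restores the MC.  A no-op when the GUI is idle.
 */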
static int cayman_gpu_soft_reset(struct radeon_device *rdev)
{
	struct evergreen_mc_save save;
	u32 grbm_reset = 0;

	if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
		return 0;
	dev_info(rdev->dev, "GPU softreset\n");
	dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
		 RREG32(GRBM_STATUS));
	dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
		 RREG32(GRBM_STATUS_SE0));
	dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
		 RREG32(GRBM_STATUS_SE1));
	dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
		 RREG32(SRBM_STATUS));
	dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_ADDR 0x%08X\n",
		 RREG32(0x14F8));
	dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_STATUS 0x%08X\n",
		 RREG32(0x14D8));
	dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
		 RREG32(0x14FC));
	dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
		 RREG32(0x14DC));
	evergreen_mc_stop(rdev, &save);
	if (evergreen_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	/* Disable CP parsing/prefetching */
	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);

	/* reset all the gfx blocks */
	grbm_reset = (SOFT_RESET_CP |
		      SOFT_RESET_CB |
		      SOFT_RESET_DB |
		      SOFT_RESET_GDS |
		      SOFT_RESET_PA |
		      SOFT_RESET_SC |
		      SOFT_RESET_SPI |
		      SOFT_RESET_SH |
		      SOFT_RESET_SX |
		      SOFT_RESET_TC |
		      SOFT_RESET_TA |
		      SOFT_RESET_VGT |
		      SOFT_RESET_IA);

	dev_info(rdev->dev, " GRBM_SOFT_RESET=0x%08X\n", grbm_reset);
	WREG32(GRBM_SOFT_RESET, grbm_reset);
	(void)RREG32(GRBM_SOFT_RESET);
	udelay(50);
	WREG32(GRBM_SOFT_RESET, 0);
	(void)RREG32(GRBM_SOFT_RESET);
	/* Wait a little for things to settle down */
	udelay(50);

	dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
		 RREG32(GRBM_STATUS));
	dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
		 RREG32(GRBM_STATUS_SE0));
	dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
		 RREG32(GRBM_STATUS_SE1));
	dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
		 RREG32(SRBM_STATUS));
	evergreen_mc_resume(rdev, &save);
	return 0;
}

int cayman_asic_reset(struct radeon_device *rdev)
{
	return cayman_gpu_soft_reset(rdev);
}
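
/*
 * cayman_startup - bring the asic up to a working state
 *
 * Common init path used by both cayman_init() and cayman_resume():
 * loads firmware (plus the MC ucode on dGPUs), programs the MC and
 * GART, initializes the gpu, blitter, writeback and fence rings,
 * enables interrupts, and finally starts the CP, IB pool and VM
 * manager.
 */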
static int cayman_startup(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	int r;

	/* enable pcie gen2 link */
	evergreen_pcie_gen2_enable(rdev);

	if (rdev->flags & RADEON_IS_IGP) {
		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
			r = ni_init_microcode(rdev);
			if (r) {
				DRM_ERROR("Failed to load firmware!\n");
				return r;
			}
		}
	} else {
		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
			r = ni_init_microcode(rdev);
			if (r) {
				DRM_ERROR("Failed to load firmware!\n");
				return r;
			}
		}
		r = ni_mc_load_microcode(rdev);
		if (r) {
			DRM_ERROR("Failed to load MC firmware!\n");
			return r;
		}
	}

	r = r600_vram_scratch_init(rdev);
	if (r)
		return r;

	evergreen_mc_program(rdev);
	r = cayman_pcie_gart_enable(rdev);
	if (r)
		return r;
	cayman_gpu_init(rdev);

	r = evergreen_blit_init(rdev);
	if (r) {
		r600_blit_fini(rdev);
		rdev->asic->copy.copy = NULL;
		dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
	}

	/* allocate rlc buffers */
	if (rdev->flags & RADEON_IS_IGP) {
		r = si_rlc_init(rdev);
		if (r) {
			DRM_ERROR("Failed to init rlc BOs!\n");
			return r;
		}
	}

	/* allocate wb buffer */
	r = radeon_wb_init(rdev);
	if (r)
		return r;

	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	/* Enable IRQ */
	r = r600_irq_init(rdev);
	if (r) {
		DRM_ERROR("radeon: IH init failed (%d).\n", r);
		radeon_irq_kms_fini(rdev);
		return r;
	}
	evergreen_irq_set(rdev);

	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
			     CP_RB0_RPTR, CP_RB0_WPTR,
			     0, 0xfffff, RADEON_CP_PACKET2);
	if (r)
		return r;
	r = cayman_cp_load_microcode(rdev);
	if (r)
		return r;
	r = cayman_cp_resume(rdev);
	if (r)
		return r;

	r = radeon_ib_pool_start(rdev);
	if (r)
		return r;

	r = radeon_ib_ring_tests(rdev);
	if (r)
		return r;

	r = radeon_vm_manager_start(rdev);
	if (r)
		return r;

	return 0;
}
int cayman_resume(struct radeon_device *rdev)
{
	int r;

	/* Do not reset the GPU before posting; on rv770 hw, unlike r500 hw,
	 * posting will perform the necessary tasks to bring the GPU back
	 * into good shape.
	 */
	/* post card */
	atom_asic_init(rdev->mode_info.atom_context);

	rdev->accel_working = true;
	r = cayman_startup(rdev);
	if (r) {
		DRM_ERROR("cayman startup failed on resume\n");
		rdev->accel_working = false;
		return r;
	}
	return r;
}
int cayman_suspend(struct radeon_device *rdev)
{
	/* FIXME: we should wait for ring to be empty */
	radeon_ib_pool_suspend(rdev);
	radeon_vm_manager_suspend(rdev);
	r600_blit_suspend(rdev);
	cayman_cp_enable(rdev, false);
	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
	evergreen_irq_suspend(rdev);
	radeon_wb_disable(rdev);
	cayman_pcie_gart_disable(rdev);
	return 0;
}
/* The plan is to move initialization into this function and to use
 * helper functions so that radeon_device_init does little more than
 * call asic-specific functions.  This should also allow us to remove
 * a bunch of callbacks like vram_info.
 */
int cayman_init(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	int r;

	/* Read BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	/* Must be an ATOMBIOS */
	if (!rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting atombios for cayman GPU\n");
		return -EINVAL;
	}
	r = radeon_atombios_init(rdev);
	if (r)
		return r;

	/* Post card if necessary */
	if (!radeon_card_posted(rdev)) {
		if (!rdev->bios) {
			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
			return -EINVAL;
		}
		DRM_INFO("GPU not posted. posting now...\n");
		atom_asic_init(rdev->mode_info.atom_context);
	}
	/* Initialize scratch registers */
	r600_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	/* initialize memory controller */
	r = evergreen_mc_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;

	r = radeon_irq_kms_init(rdev);
	if (r)
		return r;

	ring->ring_obj = NULL;
	r600_ring_init(rdev, ring, 1024 * 1024);

	rdev->ih.ring_obj = NULL;
	r600_ih_ring_init(rdev, 64 * 1024);

	r = r600_pcie_gart_init(rdev);
	if (r)
		return r;

	r = radeon_ib_pool_init(rdev);
	rdev->accel_working = true;
	if (r) {
		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
		rdev->accel_working = false;
	}
	r = radeon_vm_manager_init(rdev);
	if (r) {
		dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r);
	}

	r = cayman_startup(rdev);
	if (r) {
		dev_err(rdev->dev, "disabling GPU acceleration\n");
		cayman_cp_fini(rdev);
		r600_irq_fini(rdev);
		if (rdev->flags & RADEON_IS_IGP)
			si_rlc_fini(rdev);
		radeon_wb_fini(rdev);
		r100_ib_fini(rdev);
		radeon_vm_manager_fini(rdev);
		radeon_irq_kms_fini(rdev);
		cayman_pcie_gart_fini(rdev);
		rdev->accel_working = false;
	}

	/* Don't start up if the MC ucode is missing.
	 * The default clocks and voltages before the MC ucode
	 * is loaded are not sufficient for advanced operations.
	 *
	 * We can skip this check for TN, because there is no MC
	 * ucode.
	 */
	if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) {
		DRM_ERROR("radeon: MC ucode required for NI+.\n");
		return -EINVAL;
	}

	return 0;
}

void cayman_fini(struct radeon_device *rdev)
{
	r600_blit_fini(rdev);
	cayman_cp_fini(rdev);
	r600_irq_fini(rdev);
	if (rdev->flags & RADEON_IS_IGP)
		si_rlc_fini(rdev);
	radeon_wb_fini(rdev);
	radeon_vm_manager_fini(rdev);
	r100_ib_fini(rdev);
	radeon_irq_kms_fini(rdev);
	cayman_pcie_gart_fini(rdev);
	r600_vram_scratch_fini(rdev);
	radeon_gem_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
}
/*
 * vm
 */
int cayman_vm_init(struct radeon_device *rdev)
{
	/* number of VMs */
	rdev->vm_manager.nvm = 8;
	/* base offset of vram pages */
	if (rdev->flags & RADEON_IS_IGP) {
		u64 tmp = RREG32(FUS_MC_VM_FB_OFFSET);
		tmp <<= 22;
		rdev->vm_manager.vram_base_offset = tmp;
	} else
		rdev->vm_manager.vram_base_offset = 0;
	return 0;
}

void cayman_vm_fini(struct radeon_device *rdev)
{
}
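
/*
 * The eight VM contexts have their page table start/end/base registers
 * laid out as consecutive dwords, so (id << 2) selects the register set
 * for a given VM.  Binding or unbinding ends with an HDP cache flush
 * and a TLB invalidate for that context (bits 0-7 of
 * VM_INVALIDATE_REQUEST map to contexts 0-7).
 */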
int cayman_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm, int id)
{
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (id << 2), 0);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (id << 2), vm->last_pfn);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (id << 2), vm->pt_gpu_addr >> 12);
	/* flush hdp cache */
	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
	/* bits 0-7 are the VM contexts0-7 */
	WREG32(VM_INVALIDATE_REQUEST, 1 << id);
	return 0;
}

void cayman_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm)
{
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (vm->id << 2), 0);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (vm->id << 2), 0);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2), 0);
	/* flush hdp cache */
	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
	/* bits 0-7 are the VM contexts0-7 */
	WREG32(VM_INVALIDATE_REQUEST, 1 << vm->id);
}

void cayman_vm_tlb_flush(struct radeon_device *rdev, struct radeon_vm *vm)
{
	if (vm->id == -1)
		return;

	/* flush hdp cache */
	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
	/* bits 0-7 are the VM contexts0-7 */
	WREG32(VM_INVALIDATE_REQUEST, 1 << vm->id);
}
#define R600_PTE_VALID     (1 << 0)
#define R600_PTE_SYSTEM    (1 << 1)
#define R600_PTE_SNOOPED   (1 << 2)
#define R600_PTE_READABLE  (1 << 5)
#define R600_PTE_WRITEABLE (1 << 6)
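
/*
 * cayman_vm_page_flags - convert RADEON_VM_PAGE_* flags to R600 PTE bits
 *
 * Builds a PTE flag word from the R600_PTE_* bits defined above.
 * SNOOPED is only meaningful for pages backed by system memory.
 */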
uint32_t cayman_vm_page_flags(struct radeon_device *rdev,
			      struct radeon_vm *vm,
			      uint32_t flags)
{
	uint32_t r600_flags = 0;

	r600_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_PTE_VALID : 0;
	r600_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
	r600_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
	if (flags & RADEON_VM_PAGE_SYSTEM) {
		r600_flags |= R600_PTE_SYSTEM;
		r600_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0;
	}
	return r600_flags;
}
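
/*
 * cayman_vm_set_page - write one 64-bit page table entry
 *
 * Masks the address down to a 4KB-aligned value, ORs in the PTE flags,
 * and writes the entry into the CPU mapping of the page table at the
 * given page frame number.
 */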
void cayman_vm_set_page(struct radeon_device *rdev, struct radeon_vm *vm,
			unsigned pfn, uint64_t addr, uint32_t flags)
{
	void __iomem *ptr = (void *)vm->pt;

	addr = addr & 0xFFFFFFFFFFFFF000ULL;
	addr |= flags;
	writeq(addr, ptr + (pfn * 8));
}