evergreen_blit_kms.c

/*
 * Copyright 2010 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *     Alex Deucher <alexander.deucher@amd.com>
 */

#include "drmP.h"
#include "drm.h"
#include "radeon_drm.h"
#include "radeon.h"

#include "evergreend.h"
#include "evergreen_blit_shaders.h"

#define DI_PT_RECTLIST        0x11
#define DI_INDEX_SIZE_16_BIT  0x0
#define DI_SRC_SEL_AUTO_INDEX 0x2

#define FMT_8                 0x1
#define FMT_5_6_5             0x8
#define FMT_8_8_8_8           0x1a
#define COLOR_8               0x1
#define COLOR_5_6_5           0x8
#define COLOR_8_8_8_8         0x1a
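
/*
 * Note: FMT_* are SQ texture-resource data formats and COLOR_* are CB
 * color-buffer formats.  evergreen_kms_blit_copy() below pairs FMT_8
 * with COLOR_8 (byte-granular path) and FMT_8_8_8_8 with COLOR_8_8_8_8
 * (dword path) so that source texel and destination pixel sizes match.
 */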
/* emits 17 */
static void
set_render_target(struct radeon_device *rdev, int format,
                  int w, int h, u64 gpu_addr)
{
        u32 cb_color_info;
        int pitch, slice;

        h = ALIGN(h, 8);
        if (h < 8)
                h = 8;

        cb_color_info = ((format << 2) | (1 << 24));
        pitch = (w / 8) - 1;
        slice = ((w * h) / 64) - 1;

        radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 15));
        radeon_ring_write(rdev, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_START) >> 2);
        radeon_ring_write(rdev, gpu_addr >> 8);
        radeon_ring_write(rdev, pitch);
        radeon_ring_write(rdev, slice);
        radeon_ring_write(rdev, 0);
        radeon_ring_write(rdev, cb_color_info);
        radeon_ring_write(rdev, (1 << 4));
        radeon_ring_write(rdev, (w - 1) | ((h - 1) << 16));
        radeon_ring_write(rdev, 0);
        radeon_ring_write(rdev, 0);
        radeon_ring_write(rdev, 0);
        radeon_ring_write(rdev, 0);
        radeon_ring_write(rdev, 0);
        radeon_ring_write(rdev, 0);
        radeon_ring_write(rdev, 0);
        radeon_ring_write(rdev, 0);
}
/* emits 5dw */
static void
cp_set_surface_sync(struct radeon_device *rdev,
                    u32 sync_type, u32 size,
                    u64 mc_addr)
{
        u32 cp_coher_size;

        if (size == 0xffffffff)
                cp_coher_size = 0xffffffff;
        else
                cp_coher_size = ((size + 255) >> 8); /* round up to 256-byte units */

        radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_SYNC, 3));
        radeon_ring_write(rdev, sync_type);
        radeon_ring_write(rdev, cp_coher_size);
        radeon_ring_write(rdev, mc_addr >> 8);
        radeon_ring_write(rdev, 10); /* poll interval */
}
/* emits 11dw + 1 surface sync = 16dw */
static void
set_shaders(struct radeon_device *rdev)
{
        u64 gpu_addr;

        /* VS */
        gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
        radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 3));
        radeon_ring_write(rdev, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_START) >> 2);
        radeon_ring_write(rdev, gpu_addr >> 8);
        radeon_ring_write(rdev, 2);
        radeon_ring_write(rdev, 0);

        /* PS */
        gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.ps_offset;
        radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 4));
        radeon_ring_write(rdev, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_START) >> 2);
        radeon_ring_write(rdev, gpu_addr >> 8);
        radeon_ring_write(rdev, 1);
        radeon_ring_write(rdev, 0);
        radeon_ring_write(rdev, 2);

        gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
        cp_set_surface_sync(rdev, PACKET3_SH_ACTION_ENA, 512, gpu_addr);
}
/* emits 10 + 1 sync (5) = 15 */
static void
set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
{
        u32 sq_vtx_constant_word2, sq_vtx_constant_word3;

        /* high addr, stride */
        sq_vtx_constant_word2 = ((upper_32_bits(gpu_addr) & 0xff) | (16 << 8));
        /* xyzw swizzles */
        sq_vtx_constant_word3 = (0 << 3) | (1 << 6) | (2 << 9) | (3 << 12);

        radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 8));
        radeon_ring_write(rdev, 0x580);
        radeon_ring_write(rdev, gpu_addr & 0xffffffff);
        radeon_ring_write(rdev, 48 - 1); /* size */
        radeon_ring_write(rdev, sq_vtx_constant_word2);
        radeon_ring_write(rdev, sq_vtx_constant_word3);
        radeon_ring_write(rdev, 0);
        radeon_ring_write(rdev, 0);
        radeon_ring_write(rdev, 0);
        radeon_ring_write(rdev, SQ_TEX_VTX_VALID_BUFFER << 30);

        /* CEDAR and PALM have no vertex cache; vertex fetches go through
         * the texture cache on those parts, so flush the TC rather than
         * the VC here.
         */
        if ((rdev->family == CHIP_CEDAR) ||
            (rdev->family == CHIP_PALM))
                cp_set_surface_sync(rdev,
                                    PACKET3_TC_ACTION_ENA, 48, gpu_addr);
        else
                cp_set_surface_sync(rdev,
                                    PACKET3_VC_ACTION_ENA, 48, gpu_addr);
}
/* emits 10 */
static void
set_tex_resource(struct radeon_device *rdev,
                 int format, int w, int h, int pitch,
                 u64 gpu_addr)
{
        u32 sq_tex_resource_word0, sq_tex_resource_word1;
        u32 sq_tex_resource_word4, sq_tex_resource_word7;

        if (h < 1)
                h = 1;

        sq_tex_resource_word0 = (1 << 0); /* 2D */
        sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 6) |
                                  ((w - 1) << 18));
        sq_tex_resource_word1 = ((h - 1) << 0);
        /* xyzw swizzles */
        sq_tex_resource_word4 = (0 << 16) | (1 << 19) | (2 << 22) | (3 << 25);

        sq_tex_resource_word7 = format | (SQ_TEX_VTX_VALID_TEXTURE << 30);

        radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 8));
        radeon_ring_write(rdev, 0);
        radeon_ring_write(rdev, sq_tex_resource_word0);
        radeon_ring_write(rdev, sq_tex_resource_word1);
        radeon_ring_write(rdev, gpu_addr >> 8); /* base address */
        radeon_ring_write(rdev, gpu_addr >> 8); /* mip address */
        radeon_ring_write(rdev, sq_tex_resource_word4);
        radeon_ring_write(rdev, 0);
        radeon_ring_write(rdev, 0);
        radeon_ring_write(rdev, sq_tex_resource_word7);
}
/* emits 12 */
static void
set_scissors(struct radeon_device *rdev, int x1, int y1,
             int x2, int y2)
{
        radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
        radeon_ring_write(rdev, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
        radeon_ring_write(rdev, (x1 << 0) | (y1 << 16));
        radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));

        radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
        radeon_ring_write(rdev, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
        radeon_ring_write(rdev, (x1 << 0) | (y1 << 16) | (1 << 31));
        radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));

        radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
        radeon_ring_write(rdev, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
        radeon_ring_write(rdev, (x1 << 0) | (y1 << 16) | (1 << 31));
        radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));
}
/* emits 10 */
static void
draw_auto(struct radeon_device *rdev)
{
        radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
        radeon_ring_write(rdev, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_START) >> 2);
        radeon_ring_write(rdev, DI_PT_RECTLIST);

        radeon_ring_write(rdev, PACKET3(PACKET3_INDEX_TYPE, 0));
        radeon_ring_write(rdev, DI_INDEX_SIZE_16_BIT);

        radeon_ring_write(rdev, PACKET3(PACKET3_NUM_INSTANCES, 0));
        radeon_ring_write(rdev, 1);

        radeon_ring_write(rdev, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1));
        radeon_ring_write(rdev, 3);
        radeon_ring_write(rdev, DI_SRC_SEL_AUTO_INDEX);
}
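
/*
 * Note on the draw above: the three auto-generated indices select a
 * RECTLIST primitive whose vertices are the three 16-byte {x, y, u, v}
 * float entries that evergreen_kms_blit_copy() writes into the vertex
 * buffer (the stride of 16 and size of 48 programmed in
 * set_vtx_resource() match that layout).
 */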
/* emits 30 */
static void
set_default_state(struct radeon_device *rdev)
{
        u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2, sq_gpr_resource_mgmt_3;
        u32 sq_thread_resource_mgmt, sq_thread_resource_mgmt_2;
        u32 sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2, sq_stack_resource_mgmt_3;
        int num_ps_gprs, num_vs_gprs, num_temp_gprs;
        int num_gs_gprs, num_es_gprs, num_hs_gprs, num_ls_gprs;
        int num_ps_threads, num_vs_threads, num_gs_threads, num_es_threads;
        int num_hs_threads, num_ls_threads;
        int num_ps_stack_entries, num_vs_stack_entries, num_gs_stack_entries, num_es_stack_entries;
        int num_hs_stack_entries, num_ls_stack_entries;

        switch (rdev->family) {
        case CHIP_CEDAR:
        default:
                num_ps_gprs = 93;
                num_vs_gprs = 46;
                num_temp_gprs = 4;
                num_gs_gprs = 31;
                num_es_gprs = 31;
                num_hs_gprs = 23;
                num_ls_gprs = 23;
                num_ps_threads = 96;
                num_vs_threads = 16;
                num_gs_threads = 16;
                num_es_threads = 16;
                num_hs_threads = 16;
                num_ls_threads = 16;
                num_ps_stack_entries = 42;
                num_vs_stack_entries = 42;
                num_gs_stack_entries = 42;
                num_es_stack_entries = 42;
                num_hs_stack_entries = 42;
                num_ls_stack_entries = 42;
                break;
        case CHIP_REDWOOD:
                num_ps_gprs = 93;
                num_vs_gprs = 46;
                num_temp_gprs = 4;
                num_gs_gprs = 31;
                num_es_gprs = 31;
                num_hs_gprs = 23;
                num_ls_gprs = 23;
                num_ps_threads = 128;
                num_vs_threads = 20;
                num_gs_threads = 20;
                num_es_threads = 20;
                num_hs_threads = 20;
                num_ls_threads = 20;
                num_ps_stack_entries = 42;
                num_vs_stack_entries = 42;
                num_gs_stack_entries = 42;
                num_es_stack_entries = 42;
                num_hs_stack_entries = 42;
                num_ls_stack_entries = 42;
                break;
        case CHIP_JUNIPER:
                num_ps_gprs = 93;
                num_vs_gprs = 46;
                num_temp_gprs = 4;
                num_gs_gprs = 31;
                num_es_gprs = 31;
                num_hs_gprs = 23;
                num_ls_gprs = 23;
                num_ps_threads = 128;
                num_vs_threads = 20;
                num_gs_threads = 20;
                num_es_threads = 20;
                num_hs_threads = 20;
                num_ls_threads = 20;
                num_ps_stack_entries = 85;
                num_vs_stack_entries = 85;
                num_gs_stack_entries = 85;
                num_es_stack_entries = 85;
                num_hs_stack_entries = 85;
                num_ls_stack_entries = 85;
                break;
        case CHIP_CYPRESS:
        case CHIP_HEMLOCK:
                num_ps_gprs = 93;
                num_vs_gprs = 46;
                num_temp_gprs = 4;
                num_gs_gprs = 31;
                num_es_gprs = 31;
                num_hs_gprs = 23;
                num_ls_gprs = 23;
                num_ps_threads = 128;
                num_vs_threads = 20;
                num_gs_threads = 20;
                num_es_threads = 20;
                num_hs_threads = 20;
                num_ls_threads = 20;
                num_ps_stack_entries = 85;
                num_vs_stack_entries = 85;
                num_gs_stack_entries = 85;
                num_es_stack_entries = 85;
                num_hs_stack_entries = 85;
                num_ls_stack_entries = 85;
                break;
        case CHIP_PALM:
                num_ps_gprs = 93;
                num_vs_gprs = 46;
                num_temp_gprs = 4;
                num_gs_gprs = 31;
                num_es_gprs = 31;
                num_hs_gprs = 23;
                num_ls_gprs = 23;
                num_ps_threads = 96;
                num_vs_threads = 16;
                num_gs_threads = 16;
                num_es_threads = 16;
                num_hs_threads = 16;
                num_ls_threads = 16;
                num_ps_stack_entries = 42;
                num_vs_stack_entries = 42;
                num_gs_stack_entries = 42;
                num_es_stack_entries = 42;
                num_hs_stack_entries = 42;
                num_ls_stack_entries = 42;
                break;
        }

        if ((rdev->family == CHIP_CEDAR) ||
            (rdev->family == CHIP_PALM))
                sq_config = 0;
        else
                sq_config = VC_ENABLE;

        sq_config |= (EXPORT_SRC_C |
                      CS_PRIO(0) |
                      LS_PRIO(0) |
                      HS_PRIO(0) |
                      PS_PRIO(0) |
                      VS_PRIO(1) |
                      GS_PRIO(2) |
                      ES_PRIO(3));

        sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(num_ps_gprs) |
                                  NUM_VS_GPRS(num_vs_gprs) |
                                  NUM_CLAUSE_TEMP_GPRS(num_temp_gprs));
        sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(num_gs_gprs) |
                                  NUM_ES_GPRS(num_es_gprs));
        sq_gpr_resource_mgmt_3 = (NUM_HS_GPRS(num_hs_gprs) |
                                  NUM_LS_GPRS(num_ls_gprs));
        sq_thread_resource_mgmt = (NUM_PS_THREADS(num_ps_threads) |
                                   NUM_VS_THREADS(num_vs_threads) |
                                   NUM_GS_THREADS(num_gs_threads) |
                                   NUM_ES_THREADS(num_es_threads));
        sq_thread_resource_mgmt_2 = (NUM_HS_THREADS(num_hs_threads) |
                                     NUM_LS_THREADS(num_ls_threads));
        sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(num_ps_stack_entries) |
                                    NUM_VS_STACK_ENTRIES(num_vs_stack_entries));
        sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(num_gs_stack_entries) |
                                    NUM_ES_STACK_ENTRIES(num_es_stack_entries));
        sq_stack_resource_mgmt_3 = (NUM_HS_STACK_ENTRIES(num_hs_stack_entries) |
                                    NUM_LS_STACK_ENTRIES(num_ls_stack_entries));

        /* set clear context state */
        radeon_ring_write(rdev, PACKET3(PACKET3_CLEAR_STATE, 0));
        radeon_ring_write(rdev, 0);

        /* disable dyn gprs */
        radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
        radeon_ring_write(rdev, (SQ_DYN_GPR_CNTL_PS_FLUSH_REQ - PACKET3_SET_CONFIG_REG_START) >> 2);
        radeon_ring_write(rdev, 0);

        /* SQ config */
        radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 11));
        radeon_ring_write(rdev, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_START) >> 2);
        radeon_ring_write(rdev, sq_config);
        radeon_ring_write(rdev, sq_gpr_resource_mgmt_1);
        radeon_ring_write(rdev, sq_gpr_resource_mgmt_2);
        radeon_ring_write(rdev, sq_gpr_resource_mgmt_3);
        radeon_ring_write(rdev, 0);
        radeon_ring_write(rdev, 0);
        radeon_ring_write(rdev, sq_thread_resource_mgmt);
        radeon_ring_write(rdev, sq_thread_resource_mgmt_2);
        radeon_ring_write(rdev, sq_stack_resource_mgmt_1);
        radeon_ring_write(rdev, sq_stack_resource_mgmt_2);
        radeon_ring_write(rdev, sq_stack_resource_mgmt_3);

        /* CONTEXT_CONTROL */
        radeon_ring_write(rdev, 0xc0012800);
        radeon_ring_write(rdev, 0x80000000);
        radeon_ring_write(rdev, 0x80000000);

        /* SQ_VTX_BASE_VTX_LOC */
        radeon_ring_write(rdev, 0xc0026f00);
        radeon_ring_write(rdev, 0x00000000);
        radeon_ring_write(rdev, 0x00000000);
        radeon_ring_write(rdev, 0x00000000);

        /* SET_SAMPLER */
        radeon_ring_write(rdev, 0xc0036e00);
        radeon_ring_write(rdev, 0x00000000);
        radeon_ring_write(rdev, 0x00000012);
        radeon_ring_write(rdev, 0x00000000);
        radeon_ring_write(rdev, 0x00000000);
}
static inline uint32_t i2f(uint32_t input)
{
        u32 result, i, exponent, fraction;

        if ((input & 0x3fff) == 0)
                result = 0; /* 0 is a special case */
        else {
                exponent = 140; /* exponent biased by 127 */
                /* cheat and only handle numbers below 2^14
                 * (the input is masked to 14 bits) */
                fraction = (input & 0x3fff) << 10;
                for (i = 0; i < 14; i++) {
                        if (fraction & 0x800000)
                                break;
                        else {
                                /* keep shifting left until top bit = 1 */
                                fraction = fraction << 1;
                                exponent = exponent - 1;
                        }
                }
                /* mask off top bit; assumed 1 */
                result = exponent << 23 | (fraction & 0x7fffff);
        }
        return result;
}
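
/*
 * Worked example (illustrative): i2f(4).  fraction = 4 << 10 = 0x1000,
 * and eleven left shifts bring the top bit to position 23, so
 * exponent = 140 - 11 = 129 (bias 127, i.e. 2^2).  The result is
 * (129 << 23) | 0 = 0x40800000, the IEEE-754 single-precision
 * encoding of 4.0f.
 */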
int evergreen_blit_init(struct radeon_device *rdev)
{
        u32 obj_size;
        int r;
        void *ptr;

        /* pin copy shader into vram if already initialized */
        if (rdev->r600_blit.shader_obj)
                goto done;

        mutex_init(&rdev->r600_blit.mutex);
        rdev->r600_blit.state_offset = 0;
        rdev->r600_blit.state_len = 0;

        obj_size = 0;
        rdev->r600_blit.vs_offset = obj_size;
        obj_size += evergreen_vs_size * 4;
        obj_size = ALIGN(obj_size, 256);

        rdev->r600_blit.ps_offset = obj_size;
        obj_size += evergreen_ps_size * 4;
        obj_size = ALIGN(obj_size, 256);

        r = radeon_bo_create(rdev, NULL, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
                             &rdev->r600_blit.shader_obj);
        if (r) {
                DRM_ERROR("evergreen failed to allocate shader\n");
                return r;
        }

        DRM_DEBUG("evergreen blit allocated bo %08x vs %08x ps %08x\n",
                  obj_size,
                  rdev->r600_blit.vs_offset, rdev->r600_blit.ps_offset);

        r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
        if (unlikely(r != 0))
                return r;
        r = radeon_bo_kmap(rdev->r600_blit.shader_obj, &ptr);
        if (r) {
                DRM_ERROR("failed to map blit object %d\n", r);
                /* don't leak the reservation taken above */
                radeon_bo_unreserve(rdev->r600_blit.shader_obj);
                return r;
        }

        memcpy(ptr + rdev->r600_blit.vs_offset, evergreen_vs, evergreen_vs_size * 4);
        memcpy(ptr + rdev->r600_blit.ps_offset, evergreen_ps, evergreen_ps_size * 4);
        radeon_bo_kunmap(rdev->r600_blit.shader_obj);
        radeon_bo_unreserve(rdev->r600_blit.shader_obj);

done:
        r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
        if (unlikely(r != 0))
                return r;
        r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
                          &rdev->r600_blit.shader_gpu_addr);
        radeon_bo_unreserve(rdev->r600_blit.shader_obj);
        if (r) {
                dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
                return r;
        }
        rdev->mc.active_vram_size = rdev->mc.real_vram_size;
        return 0;
}
void evergreen_blit_fini(struct radeon_device *rdev)
{
        int r;

        rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
        if (rdev->r600_blit.shader_obj == NULL)
                return;
        /* If we can't reserve the bo, unref should be enough to destroy
         * it when it becomes idle.
         */
        r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
        if (!r) {
                radeon_bo_unpin(rdev->r600_blit.shader_obj);
                radeon_bo_unreserve(rdev->r600_blit.shader_obj);
        }
        radeon_bo_unref(&rdev->r600_blit.shader_obj);
}

static int evergreen_vb_ib_get(struct radeon_device *rdev)
{
        int r;

        r = radeon_ib_get(rdev, &rdev->r600_blit.vb_ib);
        if (r) {
                DRM_ERROR("failed to get IB for vertex buffer\n");
                return r;
        }

        rdev->r600_blit.vb_total = 64*1024;
        rdev->r600_blit.vb_used = 0;
        return 0;
}

static void evergreen_vb_ib_put(struct radeon_device *rdev)
{
        radeon_fence_emit(rdev, rdev->r600_blit.vb_ib->fence);
        radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
}
int evergreen_blit_prepare_copy(struct radeon_device *rdev, int size_bytes)
{
        int r;
        int ring_size, line_size;
        int max_size;
        /* loops of emits + fence emit possible */
        int dwords_per_loop = 74, num_loops;

        r = evergreen_vb_ib_get(rdev);
        if (r)
                return r;

        /* 8 bpp vs 32 bpp for xfer unit */
        if (size_bytes & 3)
                line_size = 8192;
        else
                line_size = 8192 * 4;

        max_size = 8192 * line_size;

        /* major loops cover the max size transfer */
        num_loops = ((size_bytes + max_size) / max_size);
        /* minor loops cover the extra non aligned bits */
        num_loops += ((size_bytes % line_size) ? 1 : 0);
        /* calculate number of loops correctly */
        ring_size = num_loops * dwords_per_loop;
        /* set default + shaders */
        ring_size += 46; /* shaders + def state */
        ring_size += 10; /* fence emit for VB IB */
        ring_size += 5; /* done copy */
        ring_size += 10; /* fence emit for done copy */
        r = radeon_ring_lock(rdev, ring_size);
        if (r)
                return r;

        set_default_state(rdev); /* 30 */
        set_shaders(rdev); /* 16 */
        return 0;
}
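
/*
 * Ring-size accounting above, derived from the per-function "emits"
 * comments: one blit loop costs 10 (tex resource) + 5 (sync) +
 * 17 (render target) + 12 (scissors) + 15 (vtx resource) + 10 (draw) +
 * 5 (sync) = 74 dwords, and the one-time setup is 30 (default state) +
 * 16 (shaders) = 46 dwords.
 */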
void evergreen_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence)
{
        int r;

        if (rdev->r600_blit.vb_ib)
                evergreen_vb_ib_put(rdev);

        if (fence)
                r = radeon_fence_emit(rdev, fence);

        radeon_ring_unlock_commit(rdev);
}
void evergreen_kms_blit_copy(struct radeon_device *rdev,
                             u64 src_gpu_addr, u64 dst_gpu_addr,
                             int size_bytes)
{
        int max_bytes;
        u64 vb_gpu_addr;
        u32 *vb;

        DRM_DEBUG("emitting copy %16llx %16llx %d %d\n", src_gpu_addr, dst_gpu_addr,
                  size_bytes, rdev->r600_blit.vb_used);
        vb = (u32 *)(rdev->r600_blit.vb_ib->ptr + rdev->r600_blit.vb_used);
        if ((size_bytes & 3) || (src_gpu_addr & 3) || (dst_gpu_addr & 3)) {
                max_bytes = 8192;

                while (size_bytes) {
                        int cur_size = size_bytes;
                        int src_x = src_gpu_addr & 255;
                        int dst_x = dst_gpu_addr & 255;
                        int h = 1;
                        src_gpu_addr = src_gpu_addr & ~255ULL;
                        dst_gpu_addr = dst_gpu_addr & ~255ULL;

                        if (!src_x && !dst_x) {
                                h = (cur_size / max_bytes);
                                if (h > 8192)
                                        h = 8192;
                                if (h == 0)
                                        h = 1;
                                else
                                        cur_size = max_bytes;
                        } else {
                                if (cur_size > max_bytes)
                                        cur_size = max_bytes;
                                if (cur_size > (max_bytes - dst_x))
                                        cur_size = (max_bytes - dst_x);
                                if (cur_size > (max_bytes - src_x))
                                        cur_size = (max_bytes - src_x);
                        }

                        if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) {
                                WARN_ON(1);
                        }

                        vb[0] = i2f(dst_x);
                        vb[1] = 0;
                        vb[2] = i2f(src_x);
                        vb[3] = 0;

                        vb[4] = i2f(dst_x);
                        vb[5] = i2f(h);
                        vb[6] = i2f(src_x);
                        vb[7] = i2f(h);

                        vb[8] = i2f(dst_x + cur_size);
                        vb[9] = i2f(h);
                        vb[10] = i2f(src_x + cur_size);
                        vb[11] = i2f(h);

                        /* src 10 */
                        set_tex_resource(rdev, FMT_8,
                                         src_x + cur_size, h, src_x + cur_size,
                                         src_gpu_addr);

                        /* 5 */
                        cp_set_surface_sync(rdev,
                                            PACKET3_TC_ACTION_ENA, (src_x + cur_size * h), src_gpu_addr);

                        /* dst 17 */
                        set_render_target(rdev, COLOR_8,
                                          dst_x + cur_size, h,
                                          dst_gpu_addr);

                        /* scissors 12 */
                        set_scissors(rdev, dst_x, 0, dst_x + cur_size, h);

                        /* 15 */
                        vb_gpu_addr = rdev->r600_blit.vb_ib->gpu_addr + rdev->r600_blit.vb_used;
                        set_vtx_resource(rdev, vb_gpu_addr);

                        /* draw 10 */
                        draw_auto(rdev);

                        /* 5 */
                        cp_set_surface_sync(rdev,
                                            PACKET3_CB_ACTION_ENA | PACKET3_CB0_DEST_BASE_ENA,
                                            cur_size * h, dst_gpu_addr);

                        vb += 12;
                        rdev->r600_blit.vb_used += 12 * 4;

                        src_gpu_addr += cur_size * h;
                        dst_gpu_addr += cur_size * h;
                        size_bytes -= cur_size * h;
                }
        } else {
                max_bytes = 8192 * 4;

                while (size_bytes) {
                        int cur_size = size_bytes;
                        int src_x = (src_gpu_addr & 255);
                        int dst_x = (dst_gpu_addr & 255);
                        int h = 1;
                        src_gpu_addr = src_gpu_addr & ~255ULL;
                        dst_gpu_addr = dst_gpu_addr & ~255ULL;

                        if (!src_x && !dst_x) {
                                h = (cur_size / max_bytes);
                                if (h > 8192)
                                        h = 8192;
                                if (h == 0)
                                        h = 1;
                                else
                                        cur_size = max_bytes;
                        } else {
                                if (cur_size > max_bytes)
                                        cur_size = max_bytes;
                                if (cur_size > (max_bytes - dst_x))
                                        cur_size = (max_bytes - dst_x);
                                if (cur_size > (max_bytes - src_x))
                                        cur_size = (max_bytes - src_x);
                        }

                        if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) {
                                WARN_ON(1);
                        }

                        vb[0] = i2f(dst_x / 4);
                        vb[1] = 0;
                        vb[2] = i2f(src_x / 4);
                        vb[3] = 0;

                        vb[4] = i2f(dst_x / 4);
                        vb[5] = i2f(h);
                        vb[6] = i2f(src_x / 4);
                        vb[7] = i2f(h);

                        vb[8] = i2f((dst_x + cur_size) / 4);
                        vb[9] = i2f(h);
                        vb[10] = i2f((src_x + cur_size) / 4);
                        vb[11] = i2f(h);

                        /* src 10 */
                        set_tex_resource(rdev, FMT_8_8_8_8,
                                         (src_x + cur_size) / 4,
                                         h, (src_x + cur_size) / 4,
                                         src_gpu_addr);
                        /* 5 */
                        cp_set_surface_sync(rdev,
                                            PACKET3_TC_ACTION_ENA, (src_x + cur_size * h), src_gpu_addr);

                        /* dst 17 */
                        set_render_target(rdev, COLOR_8_8_8_8,
                                          (dst_x + cur_size) / 4, h,
                                          dst_gpu_addr);

                        /* scissors 12; right edge is in pixels, so divide
                         * the whole byte extent by 4 to match the render
                         * target width above */
                        set_scissors(rdev, (dst_x / 4), 0, ((dst_x + cur_size) / 4), h);

                        /* Vertex buffer setup 15 */
                        vb_gpu_addr = rdev->r600_blit.vb_ib->gpu_addr + rdev->r600_blit.vb_used;
                        set_vtx_resource(rdev, vb_gpu_addr);

                        /* draw 10 */
                        draw_auto(rdev);

                        /* 5 */
                        cp_set_surface_sync(rdev,
                                            PACKET3_CB_ACTION_ENA | PACKET3_CB0_DEST_BASE_ENA,
                                            cur_size * h, dst_gpu_addr);

                        /* 74 ring dwords per loop */
                        vb += 12;
                        rdev->r600_blit.vb_used += 12 * 4;

                        src_gpu_addr += cur_size * h;
                        dst_gpu_addr += cur_size * h;
                        size_bytes -= cur_size * h;
                }
        }
}
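
/*
 * Typical call sequence (a sketch; the driver's actual copy entry point,
 * e.g. evergreen_copy_blit(), lives outside this file, and the names
 * num_pages, src_offset, dst_offset and fence here are illustrative):
 *
 *      r = evergreen_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
 *      if (r)
 *              return r;
 *      evergreen_kms_blit_copy(rdev, src_offset, dst_offset,
 *                              num_pages * RADEON_GPU_PAGE_SIZE);
 *      evergreen_blit_done_copy(rdev, fence);
 */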