r600_blit_kms.c

/*
 * Copyright 2009 Advanced Micro Devices, Inc.
 * Copyright 2009 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 */

#include "drmP.h"
#include "drm.h"
#include "radeon_drm.h"
#include "radeon.h"

#include "r600d.h"
#include "r600_blit_shaders.h"

#define DI_PT_RECTLIST        0x11
#define DI_INDEX_SIZE_16_BIT  0x0
#define DI_SRC_SEL_AUTO_INDEX 0x2

#define FMT_8                 0x1
#define FMT_5_6_5             0x8
#define FMT_8_8_8_8           0x1a
#define COLOR_8               0x1
#define COLOR_5_6_5           0x8
#define COLOR_8_8_8_8         0x1a
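
/* FMT_* are SQ texture data formats and COLOR_* are CB color formats; the
 * numeric encodings follow the r6xx register layout.  Of these, the copy
 * paths below only select the 8-bit and 8_8_8_8 variants.
 */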
/* emits 21 on rv770+, 23 on r600 */
static void
set_render_target(struct radeon_device *rdev, int format,
                  int w, int h, u64 gpu_addr)
{
        u32 cb_color_info;
        int pitch, slice;

        h = ALIGN(h, 8);
        if (h < 8)
                h = 8;

        cb_color_info = ((format << 2) | (1 << 27) | (1 << 8));
        pitch = (w / 8) - 1;
        slice = ((w * h) / 64) - 1;

        radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
        radeon_ring_write(rdev, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
        radeon_ring_write(rdev, gpu_addr >> 8);

        if (rdev->family > CHIP_R600 && rdev->family < CHIP_RV770) {
                radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_BASE_UPDATE, 0));
                radeon_ring_write(rdev, 2 << 0);
        }

        radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
        radeon_ring_write(rdev, (CB_COLOR0_SIZE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
        radeon_ring_write(rdev, (pitch << 0) | (slice << 10));

        radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
        radeon_ring_write(rdev, (CB_COLOR0_VIEW - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
        radeon_ring_write(rdev, 0);

        radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
        radeon_ring_write(rdev, (CB_COLOR0_INFO - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
        radeon_ring_write(rdev, cb_color_info);

        radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
        radeon_ring_write(rdev, (CB_COLOR0_TILE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
        radeon_ring_write(rdev, 0);

        radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
        radeon_ring_write(rdev, (CB_COLOR0_FRAG - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
        radeon_ring_write(rdev, 0);

        radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
        radeon_ring_write(rdev, (CB_COLOR0_MASK - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
        radeon_ring_write(rdev, 0);
}
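
/* CP_COHER_SIZE is specified in units of 256 bytes, so the helper below
 * rounds the byte count up accordingly before emitting the SURFACE_SYNC
 * packet.
 */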
/* emits 5dw */
static void
cp_set_surface_sync(struct radeon_device *rdev,
                    u32 sync_type, u32 size,
                    u64 mc_addr)
{
        u32 cp_coher_size;

        if (size == 0xffffffff)
                cp_coher_size = 0xffffffff;
        else
                cp_coher_size = ((size + 255) >> 8);

        radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_SYNC, 3));
        radeon_ring_write(rdev, sync_type);
        radeon_ring_write(rdev, cp_coher_size);
        radeon_ring_write(rdev, mc_addr >> 8);
        radeon_ring_write(rdev, 10); /* poll interval */
}
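
/* Point SQ_PGM_START_VS/PS at the blit vertex and pixel shaders uploaded by
 * r600_blit_init(), then flush the shader cache so the SQ fetches the new
 * program code.
 */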
/* emits 21dw + 1 surface sync = 26dw */
static void
set_shaders(struct radeon_device *rdev)
{
        u64 gpu_addr;
        u32 sq_pgm_resources;

        /* setup shader regs */
        sq_pgm_resources = (1 << 0);

        /* VS */
        gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
        radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
        radeon_ring_write(rdev, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
        radeon_ring_write(rdev, gpu_addr >> 8);

        radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
        radeon_ring_write(rdev, (SQ_PGM_RESOURCES_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
        radeon_ring_write(rdev, sq_pgm_resources);

        radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
        radeon_ring_write(rdev, (SQ_PGM_CF_OFFSET_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
        radeon_ring_write(rdev, 0);

        /* PS */
        gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.ps_offset;
        radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
        radeon_ring_write(rdev, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
        radeon_ring_write(rdev, gpu_addr >> 8);

        radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
        radeon_ring_write(rdev, (SQ_PGM_RESOURCES_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
        radeon_ring_write(rdev, sq_pgm_resources | (1 << 28));

        radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
        radeon_ring_write(rdev, (SQ_PGM_EXPORTS_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
        radeon_ring_write(rdev, 2);

        radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
        radeon_ring_write(rdev, (SQ_PGM_CF_OFFSET_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
        radeon_ring_write(rdev, 0);

        gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
        cp_set_surface_sync(rdev, PACKET3_SH_ACTION_ENA, 512, gpu_addr);
}
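
/* The blit vertex buffer holds three vertices of four floats each
 * (x, y, u, v), i.e. 48 bytes at a 16-byte stride, which is what the
 * RECTLIST primitive emitted by draw_auto() consumes.
 */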
/* emits 9 + 1 sync (5) = 14 */
static void
set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
{
        u32 sq_vtx_constant_word2;

        sq_vtx_constant_word2 = ((upper_32_bits(gpu_addr) & 0xff) | (16 << 8));
#ifdef __BIG_ENDIAN
        sq_vtx_constant_word2 |= (2 << 30);
#endif

        radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 7));
        radeon_ring_write(rdev, 0x460);
        radeon_ring_write(rdev, gpu_addr & 0xffffffff);
        radeon_ring_write(rdev, 48 - 1);
        radeon_ring_write(rdev, sq_vtx_constant_word2);
        radeon_ring_write(rdev, 1 << 0);
        radeon_ring_write(rdev, 0);
        radeon_ring_write(rdev, 0);
        radeon_ring_write(rdev, SQ_TEX_VTX_VALID_BUFFER << 30);

        if ((rdev->family == CHIP_RV610) ||
            (rdev->family == CHIP_RV620) ||
            (rdev->family == CHIP_RS780) ||
            (rdev->family == CHIP_RS880) ||
            (rdev->family == CHIP_RV710))
                cp_set_surface_sync(rdev,
                                    PACKET3_TC_ACTION_ENA, 48, gpu_addr);
        else
                cp_set_surface_sync(rdev,
                                    PACKET3_VC_ACTION_ENA, 48, gpu_addr);
}
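
/* Texture pitch is programmed in units of 8 pixels, and width/height are
 * biased by -1, per the r6xx texture resource word layout.
 */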
/* emits 9 */
static void
set_tex_resource(struct radeon_device *rdev,
                 int format, int w, int h, int pitch,
                 u64 gpu_addr)
{
        uint32_t sq_tex_resource_word0, sq_tex_resource_word1, sq_tex_resource_word4;

        if (h < 1)
                h = 1;

        sq_tex_resource_word0 = (1 << 0) | (1 << 3);
        sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 8) |
                                  ((w - 1) << 19));

        sq_tex_resource_word1 = (format << 26);
        sq_tex_resource_word1 |= ((h - 1) << 0);

        sq_tex_resource_word4 = ((1 << 14) |
                                 (0 << 16) |
                                 (1 << 19) |
                                 (2 << 22) |
                                 (3 << 25));

        radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 7));
        radeon_ring_write(rdev, 0);
        radeon_ring_write(rdev, sq_tex_resource_word0);
        radeon_ring_write(rdev, sq_tex_resource_word1);
        radeon_ring_write(rdev, gpu_addr >> 8);
        radeon_ring_write(rdev, gpu_addr >> 8);
        radeon_ring_write(rdev, sq_tex_resource_word4);
        radeon_ring_write(rdev, 0);
        radeon_ring_write(rdev, SQ_TEX_VTX_VALID_TEXTURE << 30);
}

/* emits 12 */
static void
set_scissors(struct radeon_device *rdev, int x1, int y1,
             int x2, int y2)
{
        radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
        radeon_ring_write(rdev, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
        radeon_ring_write(rdev, (x1 << 0) | (y1 << 16));
        radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));

        radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
        radeon_ring_write(rdev, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
        radeon_ring_write(rdev, (x1 << 0) | (y1 << 16) | (1 << 31));
        radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));

        radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
        radeon_ring_write(rdev, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
        radeon_ring_write(rdev, (x1 << 0) | (y1 << 16) | (1 << 31));
        radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));
}
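
/* Draw a single rectangle with auto-generated indices; no index buffer is
 * needed since the three RECTLIST vertices come straight from the vertex
 * buffer.
 */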
/* emits 10 */
static void
draw_auto(struct radeon_device *rdev)
{
        radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
        radeon_ring_write(rdev, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
        radeon_ring_write(rdev, DI_PT_RECTLIST);

        radeon_ring_write(rdev, PACKET3(PACKET3_INDEX_TYPE, 0));
        radeon_ring_write(rdev,
#ifdef __BIG_ENDIAN
                          (2 << 2) |
#endif
                          DI_INDEX_SIZE_16_BIT);

        radeon_ring_write(rdev, PACKET3(PACKET3_NUM_INSTANCES, 0));
        radeon_ring_write(rdev, 1);

        radeon_ring_write(rdev, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1));
        radeon_ring_write(rdev, 3);
        radeon_ring_write(rdev, DI_SRC_SEL_AUTO_INDEX);
}
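
/* Program a per-family split of GPRs, threads and stack entries between the
 * shader stages, then emit an indirect buffer that replays the canned
 * r6xx/r7xx default register state.
 */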
/* emits 14 */
static void
set_default_state(struct radeon_device *rdev)
{
        u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2;
        u32 sq_thread_resource_mgmt, sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2;
        int num_ps_gprs, num_vs_gprs, num_temp_gprs, num_gs_gprs, num_es_gprs;
        int num_ps_threads, num_vs_threads, num_gs_threads, num_es_threads;
        int num_ps_stack_entries, num_vs_stack_entries, num_gs_stack_entries, num_es_stack_entries;
        u64 gpu_addr;
        int dwords;

        switch (rdev->family) {
        case CHIP_R600:
                num_ps_gprs = 192;
                num_vs_gprs = 56;
                num_temp_gprs = 4;
                num_gs_gprs = 0;
                num_es_gprs = 0;
                num_ps_threads = 136;
                num_vs_threads = 48;
                num_gs_threads = 4;
                num_es_threads = 4;
                num_ps_stack_entries = 128;
                num_vs_stack_entries = 128;
                num_gs_stack_entries = 0;
                num_es_stack_entries = 0;
                break;
        case CHIP_RV630:
        case CHIP_RV635:
                num_ps_gprs = 84;
                num_vs_gprs = 36;
                num_temp_gprs = 4;
                num_gs_gprs = 0;
                num_es_gprs = 0;
                num_ps_threads = 144;
                num_vs_threads = 40;
                num_gs_threads = 4;
                num_es_threads = 4;
                num_ps_stack_entries = 40;
                num_vs_stack_entries = 40;
                num_gs_stack_entries = 32;
                num_es_stack_entries = 16;
                break;
        case CHIP_RV610:
        case CHIP_RV620:
        case CHIP_RS780:
        case CHIP_RS880:
        default:
                num_ps_gprs = 84;
                num_vs_gprs = 36;
                num_temp_gprs = 4;
                num_gs_gprs = 0;
                num_es_gprs = 0;
                num_ps_threads = 136;
                num_vs_threads = 48;
                num_gs_threads = 4;
                num_es_threads = 4;
                num_ps_stack_entries = 40;
                num_vs_stack_entries = 40;
                num_gs_stack_entries = 32;
                num_es_stack_entries = 16;
                break;
        case CHIP_RV670:
                num_ps_gprs = 144;
                num_vs_gprs = 40;
                num_temp_gprs = 4;
                num_gs_gprs = 0;
                num_es_gprs = 0;
                num_ps_threads = 136;
                num_vs_threads = 48;
                num_gs_threads = 4;
                num_es_threads = 4;
                num_ps_stack_entries = 40;
                num_vs_stack_entries = 40;
                num_gs_stack_entries = 32;
                num_es_stack_entries = 16;
                break;
        case CHIP_RV770:
                num_ps_gprs = 192;
                num_vs_gprs = 56;
                num_temp_gprs = 4;
                num_gs_gprs = 0;
                num_es_gprs = 0;
                num_ps_threads = 188;
                num_vs_threads = 60;
                num_gs_threads = 0;
                num_es_threads = 0;
                num_ps_stack_entries = 256;
                num_vs_stack_entries = 256;
                num_gs_stack_entries = 0;
                num_es_stack_entries = 0;
                break;
        case CHIP_RV730:
        case CHIP_RV740:
                num_ps_gprs = 84;
                num_vs_gprs = 36;
                num_temp_gprs = 4;
                num_gs_gprs = 0;
                num_es_gprs = 0;
                num_ps_threads = 188;
                num_vs_threads = 60;
                num_gs_threads = 0;
                num_es_threads = 0;
                num_ps_stack_entries = 128;
                num_vs_stack_entries = 128;
                num_gs_stack_entries = 0;
                num_es_stack_entries = 0;
                break;
        case CHIP_RV710:
                num_ps_gprs = 192;
                num_vs_gprs = 56;
                num_temp_gprs = 4;
                num_gs_gprs = 0;
                num_es_gprs = 0;
                num_ps_threads = 144;
                num_vs_threads = 48;
                num_gs_threads = 0;
                num_es_threads = 0;
                num_ps_stack_entries = 128;
                num_vs_stack_entries = 128;
                num_gs_stack_entries = 0;
                num_es_stack_entries = 0;
                break;
        }

        if ((rdev->family == CHIP_RV610) ||
            (rdev->family == CHIP_RV620) ||
            (rdev->family == CHIP_RS780) ||
            (rdev->family == CHIP_RS880) ||
            (rdev->family == CHIP_RV710))
                sq_config = 0;
        else
                sq_config = VC_ENABLE;

        sq_config |= (DX9_CONSTS |
                      ALU_INST_PREFER_VECTOR |
                      PS_PRIO(0) |
                      VS_PRIO(1) |
                      GS_PRIO(2) |
                      ES_PRIO(3));

        sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(num_ps_gprs) |
                                  NUM_VS_GPRS(num_vs_gprs) |
                                  NUM_CLAUSE_TEMP_GPRS(num_temp_gprs));
        sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(num_gs_gprs) |
                                  NUM_ES_GPRS(num_es_gprs));
        sq_thread_resource_mgmt = (NUM_PS_THREADS(num_ps_threads) |
                                   NUM_VS_THREADS(num_vs_threads) |
                                   NUM_GS_THREADS(num_gs_threads) |
                                   NUM_ES_THREADS(num_es_threads));
        sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(num_ps_stack_entries) |
                                    NUM_VS_STACK_ENTRIES(num_vs_stack_entries));
        sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(num_gs_stack_entries) |
                                    NUM_ES_STACK_ENTRIES(num_es_stack_entries));

        /* emit an IB pointing at default state */
        dwords = ALIGN(rdev->r600_blit.state_len, 0x10);
        gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset;
        radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
        radeon_ring_write(rdev,
#ifdef __BIG_ENDIAN
                          (2 << 0) |
#endif
                          (gpu_addr & 0xFFFFFFFC));
        radeon_ring_write(rdev, upper_32_bits(gpu_addr) & 0xFF);
        radeon_ring_write(rdev, dwords);

        /* SQ config */
        radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 6));
        radeon_ring_write(rdev, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
        radeon_ring_write(rdev, sq_config);
        radeon_ring_write(rdev, sq_gpr_resource_mgmt_1);
        radeon_ring_write(rdev, sq_gpr_resource_mgmt_2);
        radeon_ring_write(rdev, sq_thread_resource_mgmt);
        radeon_ring_write(rdev, sq_stack_resource_mgmt_1);
        radeon_ring_write(rdev, sq_stack_resource_mgmt_2);
}
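
/* Convert an unsigned integer to its IEEE 754 single-precision bit pattern,
 * e.g. i2f(1) == 0x3f800000 (1.0f) and i2f(3) == 0x40400000 (3.0f).  The
 * vertex coordinates are consumed as floats by the shader, and the kernel
 * cannot use floating point arithmetic, so the encoding is done by hand.
 */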
static inline uint32_t i2f(uint32_t input)
{
        u32 result, i, exponent, fraction;

        if ((input & 0x3fff) == 0)
                result = 0; /* 0 is a special case */
        else {
                exponent = 140; /* exponent biased by 127; */
                fraction = (input & 0x3fff) << 10; /* cheat and only
                                                      handle numbers below 2^14 */
                for (i = 0; i < 14; i++) {
                        if (fraction & 0x800000)
                                break;
                        else {
                                fraction = fraction << 1; /* keep
                                                             shifting left until top bit = 1 */
                                exponent = exponent - 1;
                        }
                }
                result = exponent << 23 | (fraction & 0x7fffff); /* mask
                                                                    off top bit; assumed 1 */
        }
        return result;
}
int r600_blit_init(struct radeon_device *rdev)
{
        u32 obj_size;
        int i, r, dwords;
        void *ptr;
        u32 packet2s[16];
        int num_packet2s = 0;

        /* pin copy shader into vram if already initialized */
        if (rdev->r600_blit.shader_obj)
                goto done;

        mutex_init(&rdev->r600_blit.mutex);
        rdev->r600_blit.state_offset = 0;

        if (rdev->family >= CHIP_RV770)
                rdev->r600_blit.state_len = r7xx_default_size;
        else
                rdev->r600_blit.state_len = r6xx_default_size;

        dwords = rdev->r600_blit.state_len;
        while (dwords & 0xf) {
                packet2s[num_packet2s++] = cpu_to_le32(PACKET2(0));
                dwords++;
        }

        obj_size = dwords * 4;
        obj_size = ALIGN(obj_size, 256);

        rdev->r600_blit.vs_offset = obj_size;
        obj_size += r6xx_vs_size * 4;
        obj_size = ALIGN(obj_size, 256);

        rdev->r600_blit.ps_offset = obj_size;
        obj_size += r6xx_ps_size * 4;
        obj_size = ALIGN(obj_size, 256);

        r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
                             &rdev->r600_blit.shader_obj);
        if (r) {
                DRM_ERROR("r600 failed to allocate shader\n");
                return r;
        }

        DRM_DEBUG("r6xx blit allocated bo %08x vs %08x ps %08x\n",
                  obj_size,
                  rdev->r600_blit.vs_offset, rdev->r600_blit.ps_offset);

        r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
        if (unlikely(r != 0))
                return r;
        r = radeon_bo_kmap(rdev->r600_blit.shader_obj, &ptr);
        if (r) {
                DRM_ERROR("failed to map blit object %d\n", r);
                /* drop the reservation taken above before bailing out */
                radeon_bo_unreserve(rdev->r600_blit.shader_obj);
                return r;
        }
        if (rdev->family >= CHIP_RV770)
                memcpy_toio(ptr + rdev->r600_blit.state_offset,
                            r7xx_default_state, rdev->r600_blit.state_len * 4);
        else
                memcpy_toio(ptr + rdev->r600_blit.state_offset,
                            r6xx_default_state, rdev->r600_blit.state_len * 4);
        if (num_packet2s)
                memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4),
                            packet2s, num_packet2s * 4);
        for (i = 0; i < r6xx_vs_size; i++)
                *(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + i * 4) = cpu_to_le32(r6xx_vs[i]);
        for (i = 0; i < r6xx_ps_size; i++)
                *(u32 *)((unsigned long)ptr + rdev->r600_blit.ps_offset + i * 4) = cpu_to_le32(r6xx_ps[i]);
        radeon_bo_kunmap(rdev->r600_blit.shader_obj);
        radeon_bo_unreserve(rdev->r600_blit.shader_obj);

done:
        r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
        if (unlikely(r != 0))
                return r;
        r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
                          &rdev->r600_blit.shader_gpu_addr);
        radeon_bo_unreserve(rdev->r600_blit.shader_obj);
        if (r) {
                dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
                return r;
        }
        radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
        return 0;
}
void r600_blit_fini(struct radeon_device *rdev)
{
        int r;

        radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
        if (rdev->r600_blit.shader_obj == NULL)
                return;
        /* If we can't reserve the bo, unref should be enough to destroy
         * it when it becomes idle.
         */
        r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
        if (!r) {
                radeon_bo_unpin(rdev->r600_blit.shader_obj);
                radeon_bo_unreserve(rdev->r600_blit.shader_obj);
        }
        radeon_bo_unref(&rdev->r600_blit.shader_obj);
}

static int r600_vb_ib_get(struct radeon_device *rdev)
{
        int r;

        r = radeon_ib_get(rdev, &rdev->r600_blit.vb_ib);
        if (r) {
                DRM_ERROR("failed to get IB for vertex buffer\n");
                return r;
        }

        rdev->r600_blit.vb_total = 64*1024;
        rdev->r600_blit.vb_used = 0;
        return 0;
}

static void r600_vb_ib_put(struct radeon_device *rdev)
{
        radeon_fence_emit(rdev, rdev->r600_blit.vb_ib->fence);
        radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
}
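
/* Reserve worst-case ring space up front: the per-rectangle dword cost times
 * the maximum number of rectangles, plus the fixed cost of the default
 * state, shaders and fences, so the copy loop cannot run out of ring space.
 */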
int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes)
{
        int r;
        int ring_size, line_size;
        int max_size;
        /* loops of emits 64 + fence emit possible */
        int dwords_per_loop = 76, num_loops;

        r = r600_vb_ib_get(rdev);
        if (r)
                return r;

        /* set_render_target emits 2 extra dwords on rv6xx */
        if (rdev->family > CHIP_R600 && rdev->family < CHIP_RV770)
                dwords_per_loop += 2;

        /* 8 bpp vs 32 bpp for xfer unit */
        if (size_bytes & 3)
                line_size = 8192;
        else
                line_size = 8192 * 4;

        max_size = 8192 * line_size;

        /* major loops cover the max size transfer */
        num_loops = ((size_bytes + max_size) / max_size);
        /* minor loops cover the extra non aligned bits */
        num_loops += ((size_bytes % line_size) ? 1 : 0);
        /* calculate number of loops correctly */
        ring_size = num_loops * dwords_per_loop;
        /* set default + shaders */
        ring_size += 40; /* shaders + def state */
        ring_size += 10; /* fence emit for VB IB */
        ring_size += 5; /* done copy */
        ring_size += 10; /* fence emit for done copy */
        r = radeon_ring_lock(rdev, ring_size);
        if (r)
                return r;

        set_default_state(rdev); /* 14 */
        set_shaders(rdev); /* 26 */
        return 0;
}
void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence)
{
        int r;

        if (rdev->r600_blit.vb_ib)
                r600_vb_ib_put(rdev);

        if (fence)
                r = radeon_fence_emit(rdev, fence);

        radeon_ring_unlock_commit(rdev);
}

void r600_kms_blit_copy(struct radeon_device *rdev,
                        u64 src_gpu_addr, u64 dst_gpu_addr,
                        int size_bytes)
{
        int max_bytes;
        u64 vb_gpu_addr;
        u32 *vb;

        DRM_DEBUG("emitting copy %16llx %16llx %d %d\n", src_gpu_addr, dst_gpu_addr,
                  size_bytes, rdev->r600_blit.vb_used);
        vb = (u32 *)(rdev->r600_blit.vb_ib->ptr + rdev->r600_blit.vb_used);
        if ((size_bytes & 3) || (src_gpu_addr & 3) || (dst_gpu_addr & 3)) {
                max_bytes = 8192;

                while (size_bytes) {
                        int cur_size = size_bytes;
                        int src_x = src_gpu_addr & 255;
                        int dst_x = dst_gpu_addr & 255;
                        int h = 1;
                        src_gpu_addr = src_gpu_addr & ~255ULL;
                        dst_gpu_addr = dst_gpu_addr & ~255ULL;

                        if (!src_x && !dst_x) {
                                h = (cur_size / max_bytes);
                                if (h > 8192)
                                        h = 8192;
                                if (h == 0)
                                        h = 1;
                                else
                                        cur_size = max_bytes;
                        } else {
                                if (cur_size > max_bytes)
                                        cur_size = max_bytes;
                                if (cur_size > (max_bytes - dst_x))
                                        cur_size = (max_bytes - dst_x);
                                if (cur_size > (max_bytes - src_x))
                                        cur_size = (max_bytes - src_x);
                        }

                        if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) {
                                WARN_ON(1);
                        }
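                        /* three vertices per rectangle: position (x, y)
                         * followed by texture coordinate (u, v), encoded
                         * as floats via i2f()
                         */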
                        vb[0] = i2f(dst_x);
                        vb[1] = 0;
                        vb[2] = i2f(src_x);
                        vb[3] = 0;

                        vb[4] = i2f(dst_x);
                        vb[5] = i2f(h);
                        vb[6] = i2f(src_x);
                        vb[7] = i2f(h);

                        vb[8] = i2f(dst_x + cur_size);
                        vb[9] = i2f(h);
                        vb[10] = i2f(src_x + cur_size);
                        vb[11] = i2f(h);

                        /* src 9 */
                        set_tex_resource(rdev, FMT_8,
                                         src_x + cur_size, h, src_x + cur_size,
                                         src_gpu_addr);

                        /* 5 */
                        cp_set_surface_sync(rdev,
                                            PACKET3_TC_ACTION_ENA, (src_x + cur_size * h), src_gpu_addr);

                        /* dst 23 */
                        set_render_target(rdev, COLOR_8,
                                          dst_x + cur_size, h,
                                          dst_gpu_addr);

                        /* scissors 12 */
                        set_scissors(rdev, dst_x, 0, dst_x + cur_size, h);

                        /* 14 */
                        vb_gpu_addr = rdev->r600_blit.vb_ib->gpu_addr + rdev->r600_blit.vb_used;
                        set_vtx_resource(rdev, vb_gpu_addr);

                        /* draw 10 */
                        draw_auto(rdev);

                        /* 5 */
                        cp_set_surface_sync(rdev,
                                            PACKET3_CB_ACTION_ENA | PACKET3_CB0_DEST_BASE_ENA,
                                            cur_size * h, dst_gpu_addr);

                        vb += 12;
                        rdev->r600_blit.vb_used += 12 * 4;

                        src_gpu_addr += cur_size * h;
                        dst_gpu_addr += cur_size * h;
                        size_bytes -= cur_size * h;
                }
        } else {
                max_bytes = 8192 * 4;

                while (size_bytes) {
                        int cur_size = size_bytes;
                        int src_x = (src_gpu_addr & 255);
                        int dst_x = (dst_gpu_addr & 255);
                        int h = 1;
                        src_gpu_addr = src_gpu_addr & ~255ULL;
                        dst_gpu_addr = dst_gpu_addr & ~255ULL;

                        if (!src_x && !dst_x) {
                                h = (cur_size / max_bytes);
                                if (h > 8192)
                                        h = 8192;
                                if (h == 0)
                                        h = 1;
                                else
                                        cur_size = max_bytes;
                        } else {
                                if (cur_size > max_bytes)
                                        cur_size = max_bytes;
                                if (cur_size > (max_bytes - dst_x))
                                        cur_size = (max_bytes - dst_x);
                                if (cur_size > (max_bytes - src_x))
                                        cur_size = (max_bytes - src_x);
                        }

                        if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) {
                                WARN_ON(1);
                        }
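                        /* aligned path: the blit runs at 32 bpp, so byte
                         * offsets are divided by 4 to get pixel coordinates
                         */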
                        vb[0] = i2f(dst_x / 4);
                        vb[1] = 0;
                        vb[2] = i2f(src_x / 4);
                        vb[3] = 0;

                        vb[4] = i2f(dst_x / 4);
                        vb[5] = i2f(h);
                        vb[6] = i2f(src_x / 4);
                        vb[7] = i2f(h);

                        vb[8] = i2f((dst_x + cur_size) / 4);
                        vb[9] = i2f(h);
                        vb[10] = i2f((src_x + cur_size) / 4);
                        vb[11] = i2f(h);

                        /* src 9 */
                        set_tex_resource(rdev, FMT_8_8_8_8,
                                         (src_x + cur_size) / 4,
                                         h, (src_x + cur_size) / 4,
                                         src_gpu_addr);

                        /* 5 */
                        cp_set_surface_sync(rdev,
                                            PACKET3_TC_ACTION_ENA, (src_x + cur_size * h), src_gpu_addr);

                        /* dst 23 */
                        set_render_target(rdev, COLOR_8_8_8_8,
                                          (dst_x + cur_size) / 4, h,
                                          dst_gpu_addr);

                        /* scissors 12 */
                        set_scissors(rdev, (dst_x / 4), 0, (dst_x + cur_size) / 4, h);

                        /* Vertex buffer setup 14 */
                        vb_gpu_addr = rdev->r600_blit.vb_ib->gpu_addr + rdev->r600_blit.vb_used;
                        set_vtx_resource(rdev, vb_gpu_addr);

                        /* draw 10 */
                        draw_auto(rdev);

                        /* 5 */
                        cp_set_surface_sync(rdev,
                                            PACKET3_CB_ACTION_ENA | PACKET3_CB0_DEST_BASE_ENA,
                                            cur_size * h, dst_gpu_addr);

                        /* 78 ring dwords per loop */
                        vb += 12;
                        rdev->r600_blit.vb_used += 12 * 4;

                        src_gpu_addr += cur_size * h;
                        dst_gpu_addr += cur_size * h;
                        size_bytes -= cur_size * h;
                }
        }
}