r600_cs.c

  1. /*
  2. * Copyright 2008 Advanced Micro Devices, Inc.
  3. * Copyright 2008 Red Hat Inc.
  4. * Copyright 2009 Jerome Glisse.
  5. *
  6. * Permission is hereby granted, free of charge, to any person obtaining a
  7. * copy of this software and associated documentation files (the "Software"),
  8. * to deal in the Software without restriction, including without limitation
  9. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  10. * and/or sell copies of the Software, and to permit persons to whom the
  11. * Software is furnished to do so, subject to the following conditions:
  12. *
  13. * The above copyright notice and this permission notice shall be included in
  14. * all copies or substantial portions of the Software.
  15. *
  16. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  19. * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  20. * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  21. * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  22. * OTHER DEALINGS IN THE SOFTWARE.
  23. *
  24. * Authors: Dave Airlie
  25. * Alex Deucher
  26. * Jerome Glisse
  27. */
  28. #include <linux/kernel.h>
  29. #include "drmP.h"
  30. #include "radeon.h"
  31. #include "r600d.h"
  32. #include "r600_reg_safe.h"
  33. static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
  34. struct radeon_cs_reloc **cs_reloc);
  35. static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
  36. struct radeon_cs_reloc **cs_reloc);
  37. typedef int (*next_reloc_t)(struct radeon_cs_parser*, struct radeon_cs_reloc**);
  38. static next_reloc_t r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_mm;
  39. extern void r600_cs_legacy_get_tiling_conf(struct drm_device *dev, u32 *npipes, u32 *nbanks, u32 *group_size);
  40. struct r600_cs_track {
  41. /* configuration we mirror so that we use the same code between KMS and UMS */
  42. u32 group_size;
  43. u32 nbanks;
  44. u32 npipes;
  45. /* value we track */
  46. u32 sq_config;
  47. u32 nsamples;
  48. u32 cb_color_base_last[8];
  49. struct radeon_bo *cb_color_bo[8];
  50. u64 cb_color_bo_mc[8];
  51. u32 cb_color_bo_offset[8];
  52. struct radeon_bo *cb_color_frag_bo[8];
  53. struct radeon_bo *cb_color_tile_bo[8];
  54. u32 cb_color_info[8];
  55. u32 cb_color_view[8];
  56. u32 cb_color_size_idx[8];
  57. u32 cb_target_mask;
  58. u32 cb_shader_mask;
  59. u32 cb_color_size[8];
  60. u32 vgt_strmout_en;
  61. u32 vgt_strmout_buffer_en;
  62. struct radeon_bo *vgt_strmout_bo[4];
  63. u64 vgt_strmout_bo_mc[4];
  64. u32 vgt_strmout_bo_offset[4];
  65. u32 vgt_strmout_size[4];
  66. u32 db_depth_control;
  67. u32 db_depth_info;
  68. u32 db_depth_size_idx;
  69. u32 db_depth_view;
  70. u32 db_depth_size;
  71. u32 db_offset;
  72. struct radeon_bo *db_bo;
  73. u64 db_bo_mc;
  74. bool sx_misc_kill_all_prims;
  75. };
  76. #define FMT_8_BIT(fmt, vc) [fmt] = { 1, 1, 1, vc, CHIP_R600 }
  77. #define FMT_16_BIT(fmt, vc) [fmt] = { 1, 1, 2, vc, CHIP_R600 }
  78. #define FMT_24_BIT(fmt) [fmt] = { 1, 1, 4, 0, CHIP_R600 }
  79. #define FMT_32_BIT(fmt, vc) [fmt] = { 1, 1, 4, vc, CHIP_R600 }
  80. #define FMT_48_BIT(fmt) [fmt] = { 1, 1, 8, 0, CHIP_R600 }
  81. #define FMT_64_BIT(fmt, vc) [fmt] = { 1, 1, 8, vc, CHIP_R600 }
  82. #define FMT_96_BIT(fmt) [fmt] = { 1, 1, 12, 0, CHIP_R600 }
  83. #define FMT_128_BIT(fmt, vc) [fmt] = { 1, 1, 16, vc, CHIP_R600 }
  84. struct gpu_formats {
  85. unsigned blockwidth;
  86. unsigned blockheight;
  87. unsigned blocksize;
  88. unsigned valid_color;
  89. enum radeon_family min_family;
  90. };
  91. static const struct gpu_formats color_formats_table[] = {
  92. /* 8-bit */
  93. FMT_8_BIT(V_038004_COLOR_8, 1),
  94. FMT_8_BIT(V_038004_COLOR_4_4, 1),
  95. FMT_8_BIT(V_038004_COLOR_3_3_2, 1),
  96. FMT_8_BIT(V_038004_FMT_1, 0),
  97. /* 16-bit */
  98. FMT_16_BIT(V_038004_COLOR_16, 1),
  99. FMT_16_BIT(V_038004_COLOR_16_FLOAT, 1),
  100. FMT_16_BIT(V_038004_COLOR_8_8, 1),
  101. FMT_16_BIT(V_038004_COLOR_5_6_5, 1),
  102. FMT_16_BIT(V_038004_COLOR_6_5_5, 1),
  103. FMT_16_BIT(V_038004_COLOR_1_5_5_5, 1),
  104. FMT_16_BIT(V_038004_COLOR_4_4_4_4, 1),
  105. FMT_16_BIT(V_038004_COLOR_5_5_5_1, 1),
  106. /* 24-bit */
  107. FMT_24_BIT(V_038004_FMT_8_8_8),
  108. /* 32-bit */
  109. FMT_32_BIT(V_038004_COLOR_32, 1),
  110. FMT_32_BIT(V_038004_COLOR_32_FLOAT, 1),
  111. FMT_32_BIT(V_038004_COLOR_16_16, 1),
  112. FMT_32_BIT(V_038004_COLOR_16_16_FLOAT, 1),
  113. FMT_32_BIT(V_038004_COLOR_8_24, 1),
  114. FMT_32_BIT(V_038004_COLOR_8_24_FLOAT, 1),
  115. FMT_32_BIT(V_038004_COLOR_24_8, 1),
  116. FMT_32_BIT(V_038004_COLOR_24_8_FLOAT, 1),
  117. FMT_32_BIT(V_038004_COLOR_10_11_11, 1),
  118. FMT_32_BIT(V_038004_COLOR_10_11_11_FLOAT, 1),
  119. FMT_32_BIT(V_038004_COLOR_11_11_10, 1),
  120. FMT_32_BIT(V_038004_COLOR_11_11_10_FLOAT, 1),
  121. FMT_32_BIT(V_038004_COLOR_2_10_10_10, 1),
  122. FMT_32_BIT(V_038004_COLOR_8_8_8_8, 1),
  123. FMT_32_BIT(V_038004_COLOR_10_10_10_2, 1),
  124. FMT_32_BIT(V_038004_FMT_5_9_9_9_SHAREDEXP, 0),
  125. FMT_32_BIT(V_038004_FMT_32_AS_8, 0),
  126. FMT_32_BIT(V_038004_FMT_32_AS_8_8, 0),
  127. /* 48-bit */
  128. FMT_48_BIT(V_038004_FMT_16_16_16),
  129. FMT_48_BIT(V_038004_FMT_16_16_16_FLOAT),
  130. /* 64-bit */
  131. FMT_64_BIT(V_038004_COLOR_X24_8_32_FLOAT, 1),
  132. FMT_64_BIT(V_038004_COLOR_32_32, 1),
  133. FMT_64_BIT(V_038004_COLOR_32_32_FLOAT, 1),
  134. FMT_64_BIT(V_038004_COLOR_16_16_16_16, 1),
  135. FMT_64_BIT(V_038004_COLOR_16_16_16_16_FLOAT, 1),
  136. FMT_96_BIT(V_038004_FMT_32_32_32),
  137. FMT_96_BIT(V_038004_FMT_32_32_32_FLOAT),
  138. /* 128-bit */
  139. FMT_128_BIT(V_038004_COLOR_32_32_32_32, 1),
  140. FMT_128_BIT(V_038004_COLOR_32_32_32_32_FLOAT, 1),
  141. [V_038004_FMT_GB_GR] = { 2, 1, 4, 0 },
  142. [V_038004_FMT_BG_RG] = { 2, 1, 4, 0 },
  143. /* block compressed formats */
  144. [V_038004_FMT_BC1] = { 4, 4, 8, 0 },
  145. [V_038004_FMT_BC2] = { 4, 4, 16, 0 },
  146. [V_038004_FMT_BC3] = { 4, 4, 16, 0 },
  147. [V_038004_FMT_BC4] = { 4, 4, 8, 0 },
  148. [V_038004_FMT_BC5] = { 4, 4, 16, 0},
  149. [V_038004_FMT_BC6] = { 4, 4, 16, 0, CHIP_CEDAR}, /* Evergreen-only */
  150. [V_038004_FMT_BC7] = { 4, 4, 16, 0, CHIP_CEDAR}, /* Evergreen-only */
  151. /* The other Evergreen formats */
  152. [V_038004_FMT_32_AS_32_32_32_32] = { 1, 1, 4, 0, CHIP_CEDAR},
  153. };
  154. bool r600_fmt_is_valid_color(u32 format)
  155. {
  156. if (format >= ARRAY_SIZE(color_formats_table))
  157. return false;
  158. if (color_formats_table[format].valid_color)
  159. return true;
  160. return false;
  161. }
  162. bool r600_fmt_is_valid_texture(u32 format, enum radeon_family family)
  163. {
  164. if (format >= ARRAY_SIZE(color_formats_table))
  165. return false;
  166. if (family < color_formats_table[format].min_family)
  167. return false;
  168. if (color_formats_table[format].blockwidth > 0)
  169. return true;
  170. return false;
  171. }
  172. int r600_fmt_get_blocksize(u32 format)
  173. {
  174. if (format >= ARRAY_SIZE(color_formats_table))
  175. return 0;
  176. return color_formats_table[format].blocksize;
  177. }
  178. int r600_fmt_get_nblocksx(u32 format, u32 w)
  179. {
  180. unsigned bw;
  181. if (format >= ARRAY_SIZE(color_formats_table))
  182. return 0;
  183. bw = color_formats_table[format].blockwidth;
  184. if (bw == 0)
  185. return 0;
  186. return (w + bw - 1) / bw;
  187. }
  188. int r600_fmt_get_nblocksy(u32 format, u32 h)
  189. {
  190. unsigned bh;
  191. if (format >= ARRAY_SIZE(color_formats_table))
  192. return 0;
  193. bh = color_formats_table[format].blockheight;
  194. if (bh == 0)
  195. return 0;
  196. return (h + bh - 1) / bh;
  197. }
  198. struct array_mode_checker {
  199. int array_mode;
  200. u32 group_size;
  201. u32 nbanks;
  202. u32 npipes;
  203. u32 nsamples;
  204. u32 blocksize;
  205. };
  206. /* returns alignment in pixels for pitch/height/depth and bytes for base */
  207. static int r600_get_array_mode_alignment(struct array_mode_checker *values,
  208. u32 *pitch_align,
  209. u32 *height_align,
  210. u32 *depth_align,
  211. u64 *base_align)
  212. {
  213. u32 tile_width = 8;
  214. u32 tile_height = 8;
  215. u32 macro_tile_width = values->nbanks;
  216. u32 macro_tile_height = values->npipes;
  217. u32 tile_bytes = tile_width * tile_height * values->blocksize * values->nsamples;
  218. u32 macro_tile_bytes = macro_tile_width * macro_tile_height * tile_bytes;
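/* for a typical 32 bpp, single-sample surface this works out to
 * tile_bytes = 8 * 8 * 4 * 1 = 256 bytes per 8x8 micro tile, and
 * macro_tile_bytes = nbanks * npipes * 256
 */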
  219. switch (values->array_mode) {
  220. case ARRAY_LINEAR_GENERAL:
  221. /* technically tile_width/_height for pitch/height */
  222. *pitch_align = 1; /* tile_width */
  223. *height_align = 1; /* tile_height */
  224. *depth_align = 1;
  225. *base_align = 1;
  226. break;
  227. case ARRAY_LINEAR_ALIGNED:
  228. *pitch_align = max((u32)64, (u32)(values->group_size / values->blocksize));
  229. *height_align = 1;
  230. *depth_align = 1;
  231. *base_align = values->group_size;
  232. break;
  233. case ARRAY_1D_TILED_THIN1:
  234. *pitch_align = max((u32)tile_width,
  235. (u32)(values->group_size /
  236. (tile_height * values->blocksize * values->nsamples)));
  237. *height_align = tile_height;
  238. *depth_align = 1;
  239. *base_align = values->group_size;
  240. break;
  241. case ARRAY_2D_TILED_THIN1:
  242. *pitch_align = max((u32)macro_tile_width * tile_width,
  243. (u32)((values->group_size * values->nbanks) /
  244. (values->blocksize * values->nsamples * tile_width)));
  245. *height_align = macro_tile_height * tile_height;
  246. *depth_align = 1;
  247. *base_align = max(macro_tile_bytes,
  248. (*pitch_align) * values->blocksize * (*height_align) * values->nsamples);
  249. break;
  250. default:
  251. return -EINVAL;
  252. }
  253. return 0;
  254. }
  255. static void r600_cs_track_init(struct r600_cs_track *track)
  256. {
  257. int i;
  258. /* assume DX9 mode */
  259. track->sq_config = DX9_CONSTS;
  260. for (i = 0; i < 8; i++) {
  261. track->cb_color_base_last[i] = 0;
  262. track->cb_color_size[i] = 0;
  263. track->cb_color_size_idx[i] = 0;
  264. track->cb_color_info[i] = 0;
  265. track->cb_color_view[i] = 0xFFFFFFFF;
  266. track->cb_color_bo[i] = NULL;
  267. track->cb_color_bo_offset[i] = 0xFFFFFFFF;
  268. track->cb_color_bo_mc[i] = 0xFFFFFFFF;
  269. }
  270. track->cb_target_mask = 0xFFFFFFFF;
  271. track->cb_shader_mask = 0xFFFFFFFF;
  272. track->db_bo = NULL;
  273. track->db_bo_mc = 0xFFFFFFFF;
  274. /* assume the biggest format and that htile is enabled */
  275. track->db_depth_info = 7 | (1 << 25);
  276. track->db_depth_view = 0xFFFFC000;
  277. track->db_depth_size = 0xFFFFFFFF;
  278. track->db_depth_size_idx = 0;
  279. track->db_depth_control = 0xFFFFFFFF;
  280. for (i = 0; i < 4; i++) {
  281. track->vgt_strmout_size[i] = 0;
  282. track->vgt_strmout_bo[i] = NULL;
  283. track->vgt_strmout_bo_offset[i] = 0xFFFFFFFF;
  284. track->vgt_strmout_bo_mc[i] = 0xFFFFFFFF;
  285. }
  286. track->sx_misc_kill_all_prims = false;
  287. }
  288. static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
  289. {
  290. struct r600_cs_track *track = p->track;
  291. u32 slice_tile_max, size, tmp;
  292. u32 height, height_align, pitch, pitch_align, depth_align;
  293. u64 base_offset, base_align;
  294. struct array_mode_checker array_check;
  295. volatile u32 *ib = p->ib->ptr;
  296. unsigned array_mode;
  297. u32 format;
  298. if (G_0280A0_TILE_MODE(track->cb_color_info[i])) {
  299. dev_warn(p->dev, "FMASK or CMASK buffer are not supported by this kernel\n");
  300. return -EINVAL;
  301. }
  302. size = radeon_bo_size(track->cb_color_bo[i]) - track->cb_color_bo_offset[i];
  303. format = G_0280A0_FORMAT(track->cb_color_info[i]);
  304. if (!r600_fmt_is_valid_color(format)) {
  305. dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08X)\n",
  306. __func__, __LINE__, format,
  307. i, track->cb_color_info[i]);
  308. return -EINVAL;
  309. }
  310. /* pitch in pixels */
  311. pitch = (G_028060_PITCH_TILE_MAX(track->cb_color_size[i]) + 1) * 8;
  312. slice_tile_max = G_028060_SLICE_TILE_MAX(track->cb_color_size[i]) + 1;
  313. slice_tile_max *= 64;
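/* SLICE_TILE_MAX is expressed in 8x8 tiles (64 pixels each), so the
 * multiply by 64 converts it to pixels; dividing by the pitch below
 * gives the surface height in pixels
 */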
  314. height = slice_tile_max / pitch;
  315. if (height > 8192)
  316. height = 8192;
  317. array_mode = G_0280A0_ARRAY_MODE(track->cb_color_info[i]);
  318. base_offset = track->cb_color_bo_mc[i] + track->cb_color_bo_offset[i];
  319. array_check.array_mode = array_mode;
  320. array_check.group_size = track->group_size;
  321. array_check.nbanks = track->nbanks;
  322. array_check.npipes = track->npipes;
  323. array_check.nsamples = track->nsamples;
  324. array_check.blocksize = r600_fmt_get_blocksize(format);
  325. if (r600_get_array_mode_alignment(&array_check,
  326. &pitch_align, &height_align, &depth_align, &base_align)) {
  327. dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
  328. G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i,
  329. track->cb_color_info[i]);
  330. return -EINVAL;
  331. }
  332. switch (array_mode) {
  333. case V_0280A0_ARRAY_LINEAR_GENERAL:
  334. break;
  335. case V_0280A0_ARRAY_LINEAR_ALIGNED:
  336. break;
  337. case V_0280A0_ARRAY_1D_TILED_THIN1:
  338. /* avoid breaking userspace */
  339. if (height > 7)
  340. height &= ~0x7;
  341. break;
  342. case V_0280A0_ARRAY_2D_TILED_THIN1:
  343. break;
  344. default:
  345. dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
  346. G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i,
  347. track->cb_color_info[i]);
  348. return -EINVAL;
  349. }
  350. if (!IS_ALIGNED(pitch, pitch_align)) {
  351. dev_warn(p->dev, "%s:%d cb pitch (%d, 0x%x, %d) invalid\n",
  352. __func__, __LINE__, pitch, pitch_align, array_mode);
  353. return -EINVAL;
  354. }
  355. if (!IS_ALIGNED(height, height_align)) {
  356. dev_warn(p->dev, "%s:%d cb height (%d, 0x%x, %d) invalid\n",
  357. __func__, __LINE__, height, height_align, array_mode);
  358. return -EINVAL;
  359. }
  360. if (!IS_ALIGNED(base_offset, base_align)) {
  361. dev_warn(p->dev, "%s offset[%d] 0x%llx 0x%llx, %d not aligned\n", __func__, i,
  362. base_offset, base_align, array_mode);
  363. return -EINVAL;
  364. }
  365. /* check offset */
  366. tmp = r600_fmt_get_nblocksy(format, height) * r600_fmt_get_nblocksx(format, pitch) * r600_fmt_get_blocksize(format);
  367. switch (array_mode) {
  368. default:
  369. case V_0280A0_ARRAY_LINEAR_GENERAL:
  370. case V_0280A0_ARRAY_LINEAR_ALIGNED:
  371. tmp += track->cb_color_view[i] & 0xFF;
  372. break;
  373. case V_0280A0_ARRAY_1D_TILED_THIN1:
  374. case V_0280A0_ARRAY_2D_TILED_THIN1:
  375. tmp += G_028080_SLICE_MAX(track->cb_color_view[i]) * tmp;
  376. break;
  377. }
  378. if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) {
  379. if (array_mode == V_0280A0_ARRAY_LINEAR_GENERAL) {
  380. /* the initial DDX does bad things with the CB size occasionally */
  381. /* it rounds up height too far for slice tile max but the BO is smaller */
  382. /* r600c,g also seem to flush at bad times in some apps resulting in
  383. * bogus values here. So for linear just allow anything to avoid breaking
  384. * broken userspace.
  385. */
  386. } else {
  387. dev_warn(p->dev, "%s offset[%d] %d %d %d %lu too big (%d %d) (%d %d %d)\n",
  388. __func__, i, array_mode,
  389. track->cb_color_bo_offset[i], tmp,
  390. radeon_bo_size(track->cb_color_bo[i]),
  391. pitch, height, r600_fmt_get_nblocksx(format, pitch),
  392. r600_fmt_get_nblocksy(format, height),
  393. r600_fmt_get_blocksize(format));
  394. return -EINVAL;
  395. }
  396. }
  397. /* limit max tile */
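/* height * pitch is a pixel count; >> 6 converts it back into 8x8
 * (64 pixel) tile units so it can be compared with slice_tile_max
 */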
  398. tmp = (height * pitch) >> 6;
  399. if (tmp < slice_tile_max)
  400. slice_tile_max = tmp;
  401. tmp = S_028060_PITCH_TILE_MAX((pitch / 8) - 1) |
  402. S_028060_SLICE_TILE_MAX(slice_tile_max - 1);
  403. ib[track->cb_color_size_idx[i]] = tmp;
  404. return 0;
  405. }
  406. static int r600_cs_track_check(struct radeon_cs_parser *p)
  407. {
  408. struct r600_cs_track *track = p->track;
  409. u32 tmp;
  410. int r, i;
  411. volatile u32 *ib = p->ib->ptr;
  412. /* on legacy kernel we don't perform advanced check */
  413. if (p->rdev == NULL)
  414. return 0;
  415. /* check streamout */
  416. if (track->vgt_strmout_en) {
  417. for (i = 0; i < 4; i++) {
  418. if (track->vgt_strmout_buffer_en & (1 << i)) {
  419. if (track->vgt_strmout_bo[i]) {
  420. u64 offset = (u64)track->vgt_strmout_bo_offset[i] +
  421. (u64)track->vgt_strmout_size[i];
  422. if (offset > radeon_bo_size(track->vgt_strmout_bo[i])) {
  423. DRM_ERROR("streamout %d bo too small: 0x%llx, 0x%lx\n",
  424. i, offset,
  425. radeon_bo_size(track->vgt_strmout_bo[i]));
  426. return -EINVAL;
  427. }
  428. } else {
  429. dev_warn(p->dev, "No buffer for streamout %d\n", i);
  430. return -EINVAL;
  431. }
  432. }
  433. }
  434. }
  435. if (track->sx_misc_kill_all_prims)
  436. return 0;
  437. /* check that we have a cb for each enabled target, we don't check
  438. * shader_mask because it seems mesa isn't always setting it :(
  439. */
  440. tmp = track->cb_target_mask;
  441. for (i = 0; i < 8; i++) {
  442. if ((tmp >> (i * 4)) & 0xF) {
  443. /* at least one component is enabled */
  444. if (track->cb_color_bo[i] == NULL) {
  445. dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
  446. __func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i);
  447. return -EINVAL;
  448. }
  449. /* perform rewrite of CB_COLOR[0-7]_SIZE */
  450. r = r600_cs_track_validate_cb(p, i);
  451. if (r)
  452. return r;
  453. }
  454. }
  455. /* Check depth buffer */
  456. if (G_028800_STENCIL_ENABLE(track->db_depth_control) ||
  457. G_028800_Z_ENABLE(track->db_depth_control)) {
  458. u32 nviews, bpe, ntiles, size, slice_tile_max;
  459. u32 height, height_align, pitch, pitch_align, depth_align;
  460. u64 base_offset, base_align;
  461. struct array_mode_checker array_check;
  462. int array_mode;
  463. if (track->db_bo == NULL) {
  464. dev_warn(p->dev, "z/stencil with no depth buffer\n");
  465. return -EINVAL;
  466. }
  467. if (G_028010_TILE_SURFACE_ENABLE(track->db_depth_info)) {
  468. dev_warn(p->dev, "this kernel doesn't support z/stencil htile\n");
  469. return -EINVAL;
  470. }
  471. switch (G_028010_FORMAT(track->db_depth_info)) {
  472. case V_028010_DEPTH_16:
  473. bpe = 2;
  474. break;
  475. case V_028010_DEPTH_X8_24:
  476. case V_028010_DEPTH_8_24:
  477. case V_028010_DEPTH_X8_24_FLOAT:
  478. case V_028010_DEPTH_8_24_FLOAT:
  479. case V_028010_DEPTH_32_FLOAT:
  480. bpe = 4;
  481. break;
  482. case V_028010_DEPTH_X24_8_32_FLOAT:
  483. bpe = 8;
  484. break;
  485. default:
  486. dev_warn(p->dev, "z/stencil with invalid format %d\n", G_028010_FORMAT(track->db_depth_info));
  487. return -EINVAL;
  488. }
  489. if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) {
  490. if (!track->db_depth_size_idx) {
  491. dev_warn(p->dev, "z/stencil buffer size not set\n");
  492. return -EINVAL;
  493. }
  494. tmp = radeon_bo_size(track->db_bo) - track->db_offset;
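/* convert the remaining bytes to depth elements (/ bpe), then to 8x8
 * (64 element) tiles (>> 6)
 */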
  495. tmp = (tmp / bpe) >> 6;
  496. if (!tmp) {
  497. dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %ld)\n",
  498. track->db_depth_size, bpe, track->db_offset,
  499. radeon_bo_size(track->db_bo));
  500. return -EINVAL;
  501. }
  502. ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF);
  503. } else {
  504. size = radeon_bo_size(track->db_bo);
  505. /* pitch in pixels */
  506. pitch = (G_028000_PITCH_TILE_MAX(track->db_depth_size) + 1) * 8;
  507. slice_tile_max = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
  508. slice_tile_max *= 64;
  509. height = slice_tile_max / pitch;
  510. if (height > 8192)
  511. height = 8192;
  512. base_offset = track->db_bo_mc + track->db_offset;
  513. array_mode = G_028010_ARRAY_MODE(track->db_depth_info);
  514. array_check.array_mode = array_mode;
  515. array_check.group_size = track->group_size;
  516. array_check.nbanks = track->nbanks;
  517. array_check.npipes = track->npipes;
  518. array_check.nsamples = track->nsamples;
  519. array_check.blocksize = bpe;
  520. if (r600_get_array_mode_alignment(&array_check,
  521. &pitch_align, &height_align, &depth_align, &base_align)) {
  522. dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
  523. G_028010_ARRAY_MODE(track->db_depth_info),
  524. track->db_depth_info);
  525. return -EINVAL;
  526. }
  527. switch (array_mode) {
  528. case V_028010_ARRAY_1D_TILED_THIN1:
  529. /* don't break userspace */
  530. height &= ~0x7;
  531. break;
  532. case V_028010_ARRAY_2D_TILED_THIN1:
  533. break;
  534. default:
  535. dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
  536. G_028010_ARRAY_MODE(track->db_depth_info),
  537. track->db_depth_info);
  538. return -EINVAL;
  539. }
  540. if (!IS_ALIGNED(pitch, pitch_align)) {
  541. dev_warn(p->dev, "%s:%d db pitch (%d, 0x%x, %d) invalid\n",
  542. __func__, __LINE__, pitch, pitch_align, array_mode);
  543. return -EINVAL;
  544. }
  545. if (!IS_ALIGNED(height, height_align)) {
  546. dev_warn(p->dev, "%s:%d db height (%d, 0x%x, %d) invalid\n",
  547. __func__, __LINE__, height, height_align, array_mode);
  548. return -EINVAL;
  549. }
  550. if (!IS_ALIGNED(base_offset, base_align)) {
  551. dev_warn(p->dev, "%s offset[%d] 0x%llx, 0x%llx, %d not aligned\n", __func__, i,
  552. base_offset, base_align, array_mode);
  553. return -EINVAL;
  554. }
  555. ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
  556. nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1;
  557. tmp = ntiles * bpe * 64 * nviews;
  558. if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) {
  559. dev_warn(p->dev, "z/stencil buffer (%d) too small (0x%08X %d %d %d -> %u have %lu)\n",
  560. array_mode,
  561. track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset,
  562. radeon_bo_size(track->db_bo));
  563. return -EINVAL;
  564. }
  565. }
  566. }
  567. return 0;
  568. }
  569. /**
  570. * r600_cs_packet_parse() - parse cp packet and point ib index to next packet
  571. * @parser: parser structure holding parsing context.
  572. * @pkt: where to store packet information
  573. *
  574. * Assumes that chunk_ib_index is properly set. Will return -EINVAL
  575. * if the packet is bigger than the remaining ib size, or if the packet type is unknown.
  576. **/
  577. int r600_cs_packet_parse(struct radeon_cs_parser *p,
  578. struct radeon_cs_packet *pkt,
  579. unsigned idx)
  580. {
  581. struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
  582. uint32_t header;
  583. if (idx >= ib_chunk->length_dw) {
  584. DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
  585. idx, ib_chunk->length_dw);
  586. return -EINVAL;
  587. }
  588. header = radeon_get_ib_value(p, idx);
  589. pkt->idx = idx;
  590. pkt->type = CP_PACKET_GET_TYPE(header);
  591. pkt->count = CP_PACKET_GET_COUNT(header);
  592. pkt->one_reg_wr = 0;
  593. switch (pkt->type) {
  594. case PACKET_TYPE0:
  595. pkt->reg = CP_PACKET0_GET_REG(header);
  596. break;
  597. case PACKET_TYPE3:
  598. pkt->opcode = CP_PACKET3_GET_OPCODE(header);
  599. break;
  600. case PACKET_TYPE2:
  601. pkt->count = -1;
  602. break;
  603. default:
  604. DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
  605. return -EINVAL;
  606. }
  607. if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
  608. DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
  609. pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
  610. return -EINVAL;
  611. }
  612. return 0;
  613. }
  614. /**
  615. * r600_cs_packet_next_reloc_mm() - parse next packet which should be reloc packet3
  616. * @parser: parser structure holding parsing context.
  617. * @data: pointer to relocation data
  618. * @offset_start: starting offset
  619. * @offset_mask: offset mask (to align start offset on)
  620. * @reloc: reloc information
  621. *
  622. * Check that the next packet is a relocation packet3, do BO validation and compute
  623. * the GPU offset using the provided start.
  624. **/
  625. static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
  626. struct radeon_cs_reloc **cs_reloc)
  627. {
  628. struct radeon_cs_chunk *relocs_chunk;
  629. struct radeon_cs_packet p3reloc;
  630. unsigned idx;
  631. int r;
  632. if (p->chunk_relocs_idx == -1) {
  633. DRM_ERROR("No relocation chunk !\n");
  634. return -EINVAL;
  635. }
  636. *cs_reloc = NULL;
  637. relocs_chunk = &p->chunks[p->chunk_relocs_idx];
  638. r = r600_cs_packet_parse(p, &p3reloc, p->idx);
  639. if (r) {
  640. return r;
  641. }
  642. p->idx += p3reloc.count + 2;
  643. if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
  644. DRM_ERROR("No packet3 for relocation for packet at %d.\n",
  645. p3reloc.idx);
  646. return -EINVAL;
  647. }
  648. idx = radeon_get_ib_value(p, p3reloc.idx + 1);
  649. if (idx >= relocs_chunk->length_dw) {
  650. DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
  651. idx, relocs_chunk->length_dw);
  652. return -EINVAL;
  653. }
  654. /* FIXME: we assume reloc size is 4 dwords */
  655. *cs_reloc = p->relocs_ptr[(idx / 4)];
  656. return 0;
  657. }
  658. /**
  659. * r600_cs_packet_next_reloc_nomm() - parse next packet which should be reloc packet3
  660. * @parser: parser structure holding parsing context.
  661. * @data: pointer to relocation data
  662. * @offset_start: starting offset
  663. * @offset_mask: offset mask (to align start offset on)
  664. * @reloc: reloc information
  665. *
  666. * Check that the next packet is a relocation packet3, do BO validation and compute
  667. * the GPU offset using the provided start.
  668. **/
  669. static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
  670. struct radeon_cs_reloc **cs_reloc)
  671. {
  672. struct radeon_cs_chunk *relocs_chunk;
  673. struct radeon_cs_packet p3reloc;
  674. unsigned idx;
  675. int r;
  676. if (p->chunk_relocs_idx == -1) {
  677. DRM_ERROR("No relocation chunk !\n");
  678. return -EINVAL;
  679. }
  680. *cs_reloc = NULL;
  681. relocs_chunk = &p->chunks[p->chunk_relocs_idx];
  682. r = r600_cs_packet_parse(p, &p3reloc, p->idx);
  683. if (r) {
  684. return r;
  685. }
  686. p->idx += p3reloc.count + 2;
  687. if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
  688. DRM_ERROR("No packet3 for relocation for packet at %d.\n",
  689. p3reloc.idx);
  690. return -EINVAL;
  691. }
  692. idx = radeon_get_ib_value(p, p3reloc.idx + 1);
  693. if (idx >= relocs_chunk->length_dw) {
  694. DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
  695. idx, relocs_chunk->length_dw);
  696. return -EINVAL;
  697. }
  698. *cs_reloc = p->relocs;
  699. (*cs_reloc)->lobj.gpu_offset = (u64)relocs_chunk->kdata[idx + 3] << 32;
  700. (*cs_reloc)->lobj.gpu_offset |= relocs_chunk->kdata[idx + 0];
  701. return 0;
  702. }
  703. /**
  704. * r600_cs_packet_next_is_pkt3_nop() - test if next packet is packet3 nop for reloc
  705. * @parser: parser structure holding parsing context.
  706. *
  707. * Check whether the next packet is a relocation packet3 (a PACKET3 NOP);
  708. * returns 1 if it is, 0 otherwise.
  709. **/
  710. static int r600_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
  711. {
  712. struct radeon_cs_packet p3reloc;
  713. int r;
  714. r = r600_cs_packet_parse(p, &p3reloc, p->idx);
  715. if (r) {
  716. return 0;
  717. }
  718. if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
  719. return 0;
  720. }
  721. return 1;
  722. }
  723. /**
  724. * r600_cs_packet_next_vline() - parse userspace VLINE packet
  725. * @parser: parser structure holding parsing context.
  726. *
  727. * Userspace sends a special sequence for VLINE waits.
  728. * PACKET0 - VLINE_START_END + value
  729. * PACKET3 - WAIT_REG_MEM poll vline status reg
  730. * RELOC (P3) - crtc_id in reloc.
  731. *
  732. * This function parses this and relocates the VLINE START END
  733. * and WAIT_REG_MEM packets to the correct crtc.
  734. * It also detects a switched off crtc and nulls out the
  735. * wait in that case.
  736. */
  737. static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
  738. {
  739. struct drm_mode_object *obj;
  740. struct drm_crtc *crtc;
  741. struct radeon_crtc *radeon_crtc;
  742. struct radeon_cs_packet p3reloc, wait_reg_mem;
  743. int crtc_id;
  744. int r;
  745. uint32_t header, h_idx, reg, wait_reg_mem_info;
  746. volatile uint32_t *ib;
  747. ib = p->ib->ptr;
  748. /* parse the WAIT_REG_MEM */
  749. r = r600_cs_packet_parse(p, &wait_reg_mem, p->idx);
  750. if (r)
  751. return r;
  752. /* check it's a WAIT_REG_MEM */
  753. if (wait_reg_mem.type != PACKET_TYPE3 ||
  754. wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) {
  755. DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n");
  756. return -EINVAL;
  757. }
  758. wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1);
  759. /* bit 4 is reg (0) or mem (1) */
  760. if (wait_reg_mem_info & 0x10) {
  761. DRM_ERROR("vline WAIT_REG_MEM waiting on MEM rather than REG\n");
  762. return -EINVAL;
  763. }
  764. /* waiting for value to be equal */
  765. if ((wait_reg_mem_info & 0x7) != 0x3) {
  766. DRM_ERROR("vline WAIT_REG_MEM function not equal\n");
  767. return -EINVAL;
  768. }
  769. if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != AVIVO_D1MODE_VLINE_STATUS) {
  770. DRM_ERROR("vline WAIT_REG_MEM bad reg\n");
  771. return -EINVAL;
  772. }
  773. if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != AVIVO_D1MODE_VLINE_STAT) {
  774. DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n");
  775. return -EINVAL;
  776. }
  777. /* jump over the NOP */
  778. r = r600_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2);
  779. if (r)
  780. return r;
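/* p->idx now points just past the 2-dword PACKET0(VLINE_START_END), so
 * h_idx below is that PACKET0 header; the crtc_id is fetched from the
 * reloc NOP that follows the 2-dword PACKET0, the 7-dword WAIT_REG_MEM
 * and the NOP header (hence the + 2 + 7 + 1)
 */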
  781. h_idx = p->idx - 2;
  782. p->idx += wait_reg_mem.count + 2;
  783. p->idx += p3reloc.count + 2;
  784. header = radeon_get_ib_value(p, h_idx);
  785. crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
  786. reg = CP_PACKET0_GET_REG(header);
  787. obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
  788. if (!obj) {
  789. DRM_ERROR("cannot find crtc %d\n", crtc_id);
  790. return -EINVAL;
  791. }
  792. crtc = obj_to_crtc(obj);
  793. radeon_crtc = to_radeon_crtc(crtc);
  794. crtc_id = radeon_crtc->crtc_id;
  795. if (!crtc->enabled) {
  796. /* if the CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */
  797. ib[h_idx + 2] = PACKET2(0);
  798. ib[h_idx + 3] = PACKET2(0);
  799. ib[h_idx + 4] = PACKET2(0);
  800. ib[h_idx + 5] = PACKET2(0);
  801. ib[h_idx + 6] = PACKET2(0);
  802. ib[h_idx + 7] = PACKET2(0);
  803. ib[h_idx + 8] = PACKET2(0);
  804. } else if (crtc_id == 1) {
  805. switch (reg) {
  806. case AVIVO_D1MODE_VLINE_START_END:
  807. header &= ~R600_CP_PACKET0_REG_MASK;
  808. header |= AVIVO_D2MODE_VLINE_START_END >> 2;
  809. break;
  810. default:
  811. DRM_ERROR("unknown crtc reloc\n");
  812. return -EINVAL;
  813. }
  814. ib[h_idx] = header;
  815. ib[h_idx + 4] = AVIVO_D2MODE_VLINE_STATUS >> 2;
  816. }
  817. return 0;
  818. }
  819. static int r600_packet0_check(struct radeon_cs_parser *p,
  820. struct radeon_cs_packet *pkt,
  821. unsigned idx, unsigned reg)
  822. {
  823. int r;
  824. switch (reg) {
  825. case AVIVO_D1MODE_VLINE_START_END:
  826. r = r600_cs_packet_parse_vline(p);
  827. if (r) {
  828. DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
  829. idx, reg);
  830. return r;
  831. }
  832. break;
  833. default:
  834. printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
  835. reg, idx);
  836. return -EINVAL;
  837. }
  838. return 0;
  839. }
  840. static int r600_cs_parse_packet0(struct radeon_cs_parser *p,
  841. struct radeon_cs_packet *pkt)
  842. {
  843. unsigned reg, i;
  844. unsigned idx;
  845. int r;
  846. idx = pkt->idx + 1;
  847. reg = pkt->reg;
  848. for (i = 0; i <= pkt->count; i++, idx++, reg += 4) {
  849. r = r600_packet0_check(p, pkt, idx, reg);
  850. if (r) {
  851. return r;
  852. }
  853. }
  854. return 0;
  855. }
  856. /**
  857. * r600_cs_check_reg() - check if register is authorized or not
  858. * @parser: parser structure holding parsing context
  859. * @reg: register we are testing
  860. * @idx: index into the cs buffer
  861. *
  862. * This function will test against r600_reg_safe_bm and return 0
  863. * if the register is safe. If the register is not flagged as safe, this function
  864. * will test it against a list of registers needing special handling.
  865. */
  866. static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
  867. {
  868. struct r600_cs_track *track = (struct r600_cs_track *)p->track;
  869. struct radeon_cs_reloc *reloc;
  870. u32 m, i, tmp, *ib;
  871. int r;
  872. i = (reg >> 7);
  873. if (i >= ARRAY_SIZE(r600_reg_safe_bm)) {
  874. dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
  875. return -EINVAL;
  876. }
  877. m = 1 << ((reg >> 2) & 31);
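/* registers are dword aligned, so reg >> 2 is the register index; each
 * 32-bit word of r600_reg_safe_bm covers 32 registers, hence the word
 * index is reg >> 7 and the bit within it is (reg >> 2) & 31
 */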
  878. if (!(r600_reg_safe_bm[i] & m))
  879. return 0;
  880. ib = p->ib->ptr;
  881. switch (reg) {
  882. /* force the following regs to 0 in an attempt to disable the out buffer;
  883. * we would need to better understand how it works to perform a proper
  884. * security check on it (Jerome)
  885. */
  886. case R_0288A8_SQ_ESGS_RING_ITEMSIZE:
  887. case R_008C44_SQ_ESGS_RING_SIZE:
  888. case R_0288B0_SQ_ESTMP_RING_ITEMSIZE:
  889. case R_008C54_SQ_ESTMP_RING_SIZE:
  890. case R_0288C0_SQ_FBUF_RING_ITEMSIZE:
  891. case R_008C74_SQ_FBUF_RING_SIZE:
  892. case R_0288B4_SQ_GSTMP_RING_ITEMSIZE:
  893. case R_008C5C_SQ_GSTMP_RING_SIZE:
  894. case R_0288AC_SQ_GSVS_RING_ITEMSIZE:
  895. case R_008C4C_SQ_GSVS_RING_SIZE:
  896. case R_0288BC_SQ_PSTMP_RING_ITEMSIZE:
  897. case R_008C6C_SQ_PSTMP_RING_SIZE:
  898. case R_0288C4_SQ_REDUC_RING_ITEMSIZE:
  899. case R_008C7C_SQ_REDUC_RING_SIZE:
  900. case R_0288B8_SQ_VSTMP_RING_ITEMSIZE:
  901. case R_008C64_SQ_VSTMP_RING_SIZE:
  902. case R_0288C8_SQ_GS_VERT_ITEMSIZE:
  903. /* get value to populate the IB, don't remove */
  904. tmp = radeon_get_ib_value(p, idx);
  905. ib[idx] = 0;
  906. break;
  907. case SQ_CONFIG:
  908. track->sq_config = radeon_get_ib_value(p, idx);
  909. break;
  910. case R_028800_DB_DEPTH_CONTROL:
  911. track->db_depth_control = radeon_get_ib_value(p, idx);
  912. break;
  913. case R_028010_DB_DEPTH_INFO:
  914. if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) &&
  915. r600_cs_packet_next_is_pkt3_nop(p)) {
  916. r = r600_cs_packet_next_reloc(p, &reloc);
  917. if (r) {
  918. dev_warn(p->dev, "bad SET_CONTEXT_REG "
  919. "0x%04X\n", reg);
  920. return -EINVAL;
  921. }
  922. track->db_depth_info = radeon_get_ib_value(p, idx);
  923. ib[idx] &= C_028010_ARRAY_MODE;
  924. track->db_depth_info &= C_028010_ARRAY_MODE;
  925. if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
  926. ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1);
  927. track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1);
  928. } else {
  929. ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1);
  930. track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1);
  931. }
  932. } else
  933. track->db_depth_info = radeon_get_ib_value(p, idx);
  934. break;
  935. case R_028004_DB_DEPTH_VIEW:
  936. track->db_depth_view = radeon_get_ib_value(p, idx);
  937. break;
  938. case R_028000_DB_DEPTH_SIZE:
  939. track->db_depth_size = radeon_get_ib_value(p, idx);
  940. track->db_depth_size_idx = idx;
  941. break;
  942. case R_028AB0_VGT_STRMOUT_EN:
  943. track->vgt_strmout_en = radeon_get_ib_value(p, idx);
  944. break;
  945. case R_028B20_VGT_STRMOUT_BUFFER_EN:
  946. track->vgt_strmout_buffer_en = radeon_get_ib_value(p, idx);
  947. break;
  948. case VGT_STRMOUT_BUFFER_BASE_0:
  949. case VGT_STRMOUT_BUFFER_BASE_1:
  950. case VGT_STRMOUT_BUFFER_BASE_2:
  951. case VGT_STRMOUT_BUFFER_BASE_3:
  952. r = r600_cs_packet_next_reloc(p, &reloc);
  953. if (r) {
  954. dev_warn(p->dev, "bad SET_CONTEXT_REG "
  955. "0x%04X\n", reg);
  956. return -EINVAL;
  957. }
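/* the per-buffer stream-out registers are spaced 16 bytes apart, so
 * this gives the buffer index (0-3)
 */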
  958. tmp = (reg - VGT_STRMOUT_BUFFER_BASE_0) / 16;
  959. track->vgt_strmout_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
  960. ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
  961. track->vgt_strmout_bo[tmp] = reloc->robj;
  962. track->vgt_strmout_bo_mc[tmp] = reloc->lobj.gpu_offset;
  963. break;
  964. case VGT_STRMOUT_BUFFER_SIZE_0:
  965. case VGT_STRMOUT_BUFFER_SIZE_1:
  966. case VGT_STRMOUT_BUFFER_SIZE_2:
  967. case VGT_STRMOUT_BUFFER_SIZE_3:
  968. tmp = (reg - VGT_STRMOUT_BUFFER_SIZE_0) / 16;
  969. /* size in register is DWs, convert to bytes */
  970. track->vgt_strmout_size[tmp] = radeon_get_ib_value(p, idx) * 4;
  971. break;
  972. case CP_COHER_BASE:
  973. r = r600_cs_packet_next_reloc(p, &reloc);
  974. if (r) {
  975. dev_warn(p->dev, "missing reloc for CP_COHER_BASE "
  976. "0x%04X\n", reg);
  977. return -EINVAL;
  978. }
  979. ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
  980. break;
  981. case R_028238_CB_TARGET_MASK:
  982. track->cb_target_mask = radeon_get_ib_value(p, idx);
  983. break;
  984. case R_02823C_CB_SHADER_MASK:
  985. track->cb_shader_mask = radeon_get_ib_value(p, idx);
  986. break;
  987. case R_028C04_PA_SC_AA_CONFIG:
  988. tmp = G_028C04_MSAA_NUM_SAMPLES(radeon_get_ib_value(p, idx));
  989. track->nsamples = 1 << tmp;
  990. break;
  991. case R_0280A0_CB_COLOR0_INFO:
  992. case R_0280A4_CB_COLOR1_INFO:
  993. case R_0280A8_CB_COLOR2_INFO:
  994. case R_0280AC_CB_COLOR3_INFO:
  995. case R_0280B0_CB_COLOR4_INFO:
  996. case R_0280B4_CB_COLOR5_INFO:
  997. case R_0280B8_CB_COLOR6_INFO:
  998. case R_0280BC_CB_COLOR7_INFO:
  999. if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) &&
  1000. r600_cs_packet_next_is_pkt3_nop(p)) {
  1001. r = r600_cs_packet_next_reloc(p, &reloc);
  1002. if (r) {
  1003. dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
  1004. return -EINVAL;
  1005. }
  1006. tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4;
  1007. track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
  1008. if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
  1009. ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1);
  1010. track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1);
  1011. } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
  1012. ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1);
  1013. track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1);
  1014. }
  1015. } else {
  1016. tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4;
  1017. track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
  1018. }
  1019. break;
  1020. case R_028080_CB_COLOR0_VIEW:
  1021. case R_028084_CB_COLOR1_VIEW:
  1022. case R_028088_CB_COLOR2_VIEW:
  1023. case R_02808C_CB_COLOR3_VIEW:
  1024. case R_028090_CB_COLOR4_VIEW:
  1025. case R_028094_CB_COLOR5_VIEW:
  1026. case R_028098_CB_COLOR6_VIEW:
  1027. case R_02809C_CB_COLOR7_VIEW:
  1028. tmp = (reg - R_028080_CB_COLOR0_VIEW) / 4;
  1029. track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
  1030. break;
  1031. case R_028060_CB_COLOR0_SIZE:
  1032. case R_028064_CB_COLOR1_SIZE:
  1033. case R_028068_CB_COLOR2_SIZE:
  1034. case R_02806C_CB_COLOR3_SIZE:
  1035. case R_028070_CB_COLOR4_SIZE:
  1036. case R_028074_CB_COLOR5_SIZE:
  1037. case R_028078_CB_COLOR6_SIZE:
  1038. case R_02807C_CB_COLOR7_SIZE:
  1039. tmp = (reg - R_028060_CB_COLOR0_SIZE) / 4;
  1040. track->cb_color_size[tmp] = radeon_get_ib_value(p, idx);
  1041. track->cb_color_size_idx[tmp] = idx;
  1042. break;
  1043. /* These registers were added late; there is userspace
  1044. * which does provide a relocation for them but sets a
  1045. * 0 offset. In order to avoid breaking old userspace
  1046. * we detect this and set the address to point to the last
  1047. * CB_COLOR0_BASE. Note that if userspace doesn't set
  1048. * CB_COLOR0_BASE before this register we will report an
  1049. * error. Old userspace always sets CB_COLOR0_BASE
  1050. * before any of this.
  1051. */
  1052. case R_0280E0_CB_COLOR0_FRAG:
  1053. case R_0280E4_CB_COLOR1_FRAG:
  1054. case R_0280E8_CB_COLOR2_FRAG:
  1055. case R_0280EC_CB_COLOR3_FRAG:
  1056. case R_0280F0_CB_COLOR4_FRAG:
  1057. case R_0280F4_CB_COLOR5_FRAG:
  1058. case R_0280F8_CB_COLOR6_FRAG:
  1059. case R_0280FC_CB_COLOR7_FRAG:
  1060. tmp = (reg - R_0280E0_CB_COLOR0_FRAG) / 4;
  1061. if (!r600_cs_packet_next_is_pkt3_nop(p)) {
  1062. if (!track->cb_color_base_last[tmp]) {
  1063. dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
  1064. return -EINVAL;
  1065. }
  1066. ib[idx] = track->cb_color_base_last[tmp];
  1067. track->cb_color_frag_bo[tmp] = track->cb_color_bo[tmp];
  1068. } else {
  1069. r = r600_cs_packet_next_reloc(p, &reloc);
  1070. if (r) {
  1071. dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
  1072. return -EINVAL;
  1073. }
  1074. ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
  1075. track->cb_color_frag_bo[tmp] = reloc->robj;
  1076. }
  1077. break;
  1078. case R_0280C0_CB_COLOR0_TILE:
  1079. case R_0280C4_CB_COLOR1_TILE:
  1080. case R_0280C8_CB_COLOR2_TILE:
  1081. case R_0280CC_CB_COLOR3_TILE:
  1082. case R_0280D0_CB_COLOR4_TILE:
  1083. case R_0280D4_CB_COLOR5_TILE:
  1084. case R_0280D8_CB_COLOR6_TILE:
  1085. case R_0280DC_CB_COLOR7_TILE:
  1086. tmp = (reg - R_0280C0_CB_COLOR0_TILE) / 4;
  1087. if (!r600_cs_packet_next_is_pkt3_nop(p)) {
  1088. if (!track->cb_color_base_last[tmp]) {
  1089. dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
  1090. return -EINVAL;
  1091. }
  1092. ib[idx] = track->cb_color_base_last[tmp];
  1093. track->cb_color_tile_bo[tmp] = track->cb_color_bo[tmp];
  1094. } else {
  1095. r = r600_cs_packet_next_reloc(p, &reloc);
  1096. if (r) {
  1097. dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
  1098. return -EINVAL;
  1099. }
  1100. ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
  1101. track->cb_color_tile_bo[tmp] = reloc->robj;
  1102. }
  1103. break;
  1104. case CB_COLOR0_BASE:
  1105. case CB_COLOR1_BASE:
  1106. case CB_COLOR2_BASE:
  1107. case CB_COLOR3_BASE:
  1108. case CB_COLOR4_BASE:
  1109. case CB_COLOR5_BASE:
  1110. case CB_COLOR6_BASE:
  1111. case CB_COLOR7_BASE:
  1112. r = r600_cs_packet_next_reloc(p, &reloc);
  1113. if (r) {
  1114. dev_warn(p->dev, "bad SET_CONTEXT_REG "
  1115. "0x%04X\n", reg);
  1116. return -EINVAL;
  1117. }
  1118. tmp = (reg - CB_COLOR0_BASE) / 4;
  1119. track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
  1120. ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
  1121. track->cb_color_base_last[tmp] = ib[idx];
  1122. track->cb_color_bo[tmp] = reloc->robj;
  1123. track->cb_color_bo_mc[tmp] = reloc->lobj.gpu_offset;
  1124. break;
  1125. case DB_DEPTH_BASE:
  1126. r = r600_cs_packet_next_reloc(p, &reloc);
  1127. if (r) {
  1128. dev_warn(p->dev, "bad SET_CONTEXT_REG "
  1129. "0x%04X\n", reg);
  1130. return -EINVAL;
  1131. }
  1132. track->db_offset = radeon_get_ib_value(p, idx) << 8;
  1133. ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
  1134. track->db_bo = reloc->robj;
  1135. track->db_bo_mc = reloc->lobj.gpu_offset;
  1136. break;
  1137. case DB_HTILE_DATA_BASE:
  1138. case SQ_PGM_START_FS:
  1139. case SQ_PGM_START_ES:
  1140. case SQ_PGM_START_VS:
  1141. case SQ_PGM_START_GS:
  1142. case SQ_PGM_START_PS:
  1143. case SQ_ALU_CONST_CACHE_GS_0:
  1144. case SQ_ALU_CONST_CACHE_GS_1:
  1145. case SQ_ALU_CONST_CACHE_GS_2:
  1146. case SQ_ALU_CONST_CACHE_GS_3:
  1147. case SQ_ALU_CONST_CACHE_GS_4:
  1148. case SQ_ALU_CONST_CACHE_GS_5:
  1149. case SQ_ALU_CONST_CACHE_GS_6:
  1150. case SQ_ALU_CONST_CACHE_GS_7:
  1151. case SQ_ALU_CONST_CACHE_GS_8:
  1152. case SQ_ALU_CONST_CACHE_GS_9:
  1153. case SQ_ALU_CONST_CACHE_GS_10:
  1154. case SQ_ALU_CONST_CACHE_GS_11:
  1155. case SQ_ALU_CONST_CACHE_GS_12:
  1156. case SQ_ALU_CONST_CACHE_GS_13:
  1157. case SQ_ALU_CONST_CACHE_GS_14:
  1158. case SQ_ALU_CONST_CACHE_GS_15:
  1159. case SQ_ALU_CONST_CACHE_PS_0:
  1160. case SQ_ALU_CONST_CACHE_PS_1:
  1161. case SQ_ALU_CONST_CACHE_PS_2:
  1162. case SQ_ALU_CONST_CACHE_PS_3:
  1163. case SQ_ALU_CONST_CACHE_PS_4:
  1164. case SQ_ALU_CONST_CACHE_PS_5:
  1165. case SQ_ALU_CONST_CACHE_PS_6:
  1166. case SQ_ALU_CONST_CACHE_PS_7:
  1167. case SQ_ALU_CONST_CACHE_PS_8:
  1168. case SQ_ALU_CONST_CACHE_PS_9:
  1169. case SQ_ALU_CONST_CACHE_PS_10:
  1170. case SQ_ALU_CONST_CACHE_PS_11:
  1171. case SQ_ALU_CONST_CACHE_PS_12:
  1172. case SQ_ALU_CONST_CACHE_PS_13:
  1173. case SQ_ALU_CONST_CACHE_PS_14:
  1174. case SQ_ALU_CONST_CACHE_PS_15:
  1175. case SQ_ALU_CONST_CACHE_VS_0:
  1176. case SQ_ALU_CONST_CACHE_VS_1:
  1177. case SQ_ALU_CONST_CACHE_VS_2:
  1178. case SQ_ALU_CONST_CACHE_VS_3:
  1179. case SQ_ALU_CONST_CACHE_VS_4:
  1180. case SQ_ALU_CONST_CACHE_VS_5:
  1181. case SQ_ALU_CONST_CACHE_VS_6:
  1182. case SQ_ALU_CONST_CACHE_VS_7:
  1183. case SQ_ALU_CONST_CACHE_VS_8:
  1184. case SQ_ALU_CONST_CACHE_VS_9:
  1185. case SQ_ALU_CONST_CACHE_VS_10:
  1186. case SQ_ALU_CONST_CACHE_VS_11:
  1187. case SQ_ALU_CONST_CACHE_VS_12:
  1188. case SQ_ALU_CONST_CACHE_VS_13:
  1189. case SQ_ALU_CONST_CACHE_VS_14:
  1190. case SQ_ALU_CONST_CACHE_VS_15:
  1191. r = r600_cs_packet_next_reloc(p, &reloc);
  1192. if (r) {
  1193. dev_warn(p->dev, "bad SET_CONTEXT_REG "
  1194. "0x%04X\n", reg);
  1195. return -EINVAL;
  1196. }
  1197. ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
  1198. break;
  1199. case SX_MEMORY_EXPORT_BASE:
  1200. r = r600_cs_packet_next_reloc(p, &reloc);
  1201. if (r) {
  1202. dev_warn(p->dev, "bad SET_CONFIG_REG "
  1203. "0x%04X\n", reg);
  1204. return -EINVAL;
  1205. }
  1206. ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
  1207. break;
  1208. case SX_MISC:
  1209. track->sx_misc_kill_all_prims = (radeon_get_ib_value(p, idx) & 0x1) != 0;
  1210. break;
  1211. default:
  1212. dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
  1213. return -EINVAL;
  1214. }
  1215. return 0;
  1216. }
  1217. unsigned r600_mip_minify(unsigned size, unsigned level)
  1218. {
  1219. unsigned val;
  1220. val = max(1U, size >> level);
  1221. if (level > 0)
  1222. val = roundup_pow_of_two(val);
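/* e.g. a 13-texel base level minified to level 1 gives 13 >> 1 = 6,
 * which is then rounded up to the next power of two, i.e. 8
 */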
  1223. return val;
  1224. }
  1225. static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned llevel,
  1226. unsigned w0, unsigned h0, unsigned d0, unsigned format,
  1227. unsigned block_align, unsigned height_align, unsigned base_align,
  1228. unsigned *l0_size, unsigned *mipmap_size)
  1229. {
  1230. unsigned offset, i, level;
  1231. unsigned width, height, depth, size;
  1232. unsigned blocksize;
  1233. unsigned nbx, nby;
  1234. unsigned nlevels = llevel - blevel + 1;
  1235. *l0_size = -1;
  1236. blocksize = r600_fmt_get_blocksize(format);
  1237. w0 = r600_mip_minify(w0, 0);
  1238. h0 = r600_mip_minify(h0, 0);
  1239. d0 = r600_mip_minify(d0, 0);
  1240. for(i = 0, offset = 0, level = blevel; i < nlevels; i++, level++) {
  1241. width = r600_mip_minify(w0, i);
  1242. nbx = r600_fmt_get_nblocksx(format, width);
  1243. nbx = round_up(nbx, block_align);
  1244. height = r600_mip_minify(h0, i);
  1245. nby = r600_fmt_get_nblocksy(format, height);
  1246. nby = round_up(nby, height_align);
  1247. depth = r600_mip_minify(d0, i);
  1248. size = nbx * nby * blocksize;
  1249. if (nfaces)
  1250. size *= nfaces;
  1251. else
  1252. size *= depth;
  1253. if (i == 0)
  1254. *l0_size = size;
  1255. if (i == 0 || i == 1)
  1256. offset = round_up(offset, base_align);
  1257. offset += size;
  1258. }
  1259. *mipmap_size = offset;
  1260. if (llevel == 0)
  1261. *mipmap_size = *l0_size;
  1262. if (!blevel)
  1263. *mipmap_size -= *l0_size;
  1264. }
  1265. /**
  1266. * r600_check_texture_resource() - check if the texture resource is valid
  1267. * @p: parser structure holding parsing context
  1268. * @idx: index into the cs buffer
  1269. * @texture: texture's bo structure
  1270. * @mipmap: mipmap's bo structure
  1271. *
  1272. * This function will check that the resource has valid fields and that
  1273. * the texture and mipmap bo objects are big enough to cover this resource.
  1274. */
static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
					struct radeon_bo *texture,
					struct radeon_bo *mipmap,
					u64 base_offset,
					u64 mip_offset,
					u32 tiling_flags)
{
	struct r600_cs_track *track = p->track;
	u32 nfaces, llevel, blevel, w0, h0, d0;
	u32 word0, word1, l0_size, mipmap_size, word2, word3;
	u32 height_align, pitch, pitch_align, depth_align;
	u32 array, barray, larray;
	u64 base_align;
	struct array_mode_checker array_check;
	u32 format;

	/* on legacy kernel we don't perform advanced check */
	if (p->rdev == NULL)
		return 0;

	/* convert to bytes */
	base_offset <<= 8;
	mip_offset <<= 8;

	word0 = radeon_get_ib_value(p, idx + 0);
	if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
		if (tiling_flags & RADEON_TILING_MACRO)
			word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
		else if (tiling_flags & RADEON_TILING_MICRO)
			word0 |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
	}
	word1 = radeon_get_ib_value(p, idx + 1);
	w0 = G_038000_TEX_WIDTH(word0) + 1;
	h0 = G_038004_TEX_HEIGHT(word1) + 1;
	d0 = G_038004_TEX_DEPTH(word1);
	nfaces = 1;
	array = 0;
	switch (G_038000_DIM(word0)) {
	case V_038000_SQ_TEX_DIM_1D:
	case V_038000_SQ_TEX_DIM_2D:
	case V_038000_SQ_TEX_DIM_3D:
		break;
	case V_038000_SQ_TEX_DIM_CUBEMAP:
		if (p->family >= CHIP_RV770)
			nfaces = 8;
		else
			nfaces = 6;
		break;
	case V_038000_SQ_TEX_DIM_1D_ARRAY:
	case V_038000_SQ_TEX_DIM_2D_ARRAY:
		array = 1;
		break;
	case V_038000_SQ_TEX_DIM_2D_MSAA:
	case V_038000_SQ_TEX_DIM_2D_ARRAY_MSAA:
	default:
		dev_warn(p->dev, "this kernel doesn't support %d texture dim\n", G_038000_DIM(word0));
		return -EINVAL;
	}
	format = G_038004_DATA_FORMAT(word1);
	if (!r600_fmt_is_valid_texture(format, p->family)) {
		dev_warn(p->dev, "%s:%d texture invalid format %d\n",
			 __func__, __LINE__, format);
		return -EINVAL;
	}

	/* pitch in texels */
	pitch = (G_038000_PITCH(word0) + 1) * 8;
	array_check.array_mode = G_038000_TILE_MODE(word0);
	array_check.group_size = track->group_size;
	array_check.nbanks = track->nbanks;
	array_check.npipes = track->npipes;
	array_check.nsamples = 1;
	array_check.blocksize = r600_fmt_get_blocksize(format);
	if (r600_get_array_mode_alignment(&array_check,
					  &pitch_align, &height_align, &depth_align, &base_align)) {
		dev_warn(p->dev, "%s:%d tex array mode (%d) invalid\n",
			 __func__, __LINE__, G_038000_TILE_MODE(word0));
		return -EINVAL;
	}

	/* XXX check height as well... */

	if (!IS_ALIGNED(pitch, pitch_align)) {
		dev_warn(p->dev, "%s:%d tex pitch (%d, 0x%x, %d) invalid\n",
			 __func__, __LINE__, pitch, pitch_align, G_038000_TILE_MODE(word0));
		return -EINVAL;
	}
	if (!IS_ALIGNED(base_offset, base_align)) {
		dev_warn(p->dev, "%s:%d tex base offset (0x%llx, 0x%llx, %d) invalid\n",
			 __func__, __LINE__, base_offset, base_align, G_038000_TILE_MODE(word0));
		return -EINVAL;
	}
	if (!IS_ALIGNED(mip_offset, base_align)) {
		dev_warn(p->dev, "%s:%d tex mip offset (0x%llx, 0x%llx, %d) invalid\n",
			 __func__, __LINE__, mip_offset, base_align, G_038000_TILE_MODE(word0));
		return -EINVAL;
	}

	word2 = radeon_get_ib_value(p, idx + 2) << 8;
	word3 = radeon_get_ib_value(p, idx + 3) << 8;

	word0 = radeon_get_ib_value(p, idx + 4);
	word1 = radeon_get_ib_value(p, idx + 5);
	blevel = G_038010_BASE_LEVEL(word0);
	llevel = G_038014_LAST_LEVEL(word1);
	if (blevel > llevel) {
		dev_warn(p->dev, "texture blevel %d > llevel %d\n",
			 blevel, llevel);
	}
	if (array == 1) {
		barray = G_038014_BASE_ARRAY(word1);
		larray = G_038014_LAST_ARRAY(word1);
		nfaces = larray - barray + 1;
	}
	r600_texture_size(nfaces, blevel, llevel, w0, h0, d0, format,
			  pitch_align, height_align, base_align,
			  &l0_size, &mipmap_size);
	/* using get ib will give us the offset into the texture bo */
	if ((l0_size + word2) > radeon_bo_size(texture)) {
		dev_warn(p->dev, "texture bo too small ((%d %d) (%d %d) %d %d %d -> %d have %ld)\n",
			 w0, h0, pitch_align, height_align,
			 array_check.array_mode, format, word2,
			 l0_size, radeon_bo_size(texture));
		dev_warn(p->dev, "alignments %d %d %d %lld\n", pitch, pitch_align, height_align, base_align);
		return -EINVAL;
	}
	/* using get ib will give us the offset into the mipmap bo */
	word3 = radeon_get_ib_value(p, idx + 3) << 8;
	if ((mipmap_size + word3) > radeon_bo_size(mipmap)) {
		/*dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n",
		  w0, h0, format, blevel, nlevels, word3, mipmap_size, radeon_bo_size(texture));*/
	}
	return 0;
}
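
/*
 * Check a register offset against the r600_reg_safe_bm bitmap: registers
 * whose bit is clear may be read/written directly from a userspace
 * command stream, anything else is rejected.
 */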
static bool r600_is_safe_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
{
	u32 m, i;

	i = (reg >> 7);
	if (i >= ARRAY_SIZE(r600_reg_safe_bm)) {
		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
		return false;
	}
	m = 1 << ((reg >> 2) & 31);
	if (!(r600_reg_safe_bm[i] & m))
		return true;
	dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
	return false;
}
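
/*
 * Validate a single type-3 (PACKET3) packet: verify its dword count,
 * fetch the relocations it references and patch the GPU addresses into
 * the IB, and range-check any register offsets it programs.
 */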
static int r600_packet3_check(struct radeon_cs_parser *p,
				struct radeon_cs_packet *pkt)
{
	struct radeon_cs_reloc *reloc;
	struct r600_cs_track *track;
	volatile u32 *ib;
	unsigned idx;
	unsigned i;
	unsigned start_reg, end_reg, reg;
	int r;
	u32 idx_value;

	track = (struct r600_cs_track *)p->track;
	ib = p->ib->ptr;
	idx = pkt->idx + 1;
	idx_value = radeon_get_ib_value(p, idx);
	switch (pkt->opcode) {
	case PACKET3_SET_PREDICATION:
	{
		int pred_op;
		int tmp;
		uint64_t offset;

		if (pkt->count != 1) {
			DRM_ERROR("bad SET PREDICATION\n");
			return -EINVAL;
		}
		tmp = radeon_get_ib_value(p, idx + 1);
		pred_op = (tmp >> 16) & 0x7;
		/* for the clear predicate operation */
		if (pred_op == 0)
			return 0;
		if (pred_op > 2) {
			DRM_ERROR("bad SET PREDICATION operation %d\n", pred_op);
			return -EINVAL;
		}
		r = r600_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad SET PREDICATION\n");
			return -EINVAL;
		}
		offset = reloc->lobj.gpu_offset +
			 (idx_value & 0xfffffff0) +
			 ((u64)(tmp & 0xff) << 32);
		ib[idx + 0] = offset;
		ib[idx + 1] = (tmp & 0xffffff00) | (upper_32_bits(offset) & 0xff);
	}
	break;
	case PACKET3_START_3D_CMDBUF:
		if (p->family >= CHIP_RV770 || pkt->count) {
			DRM_ERROR("bad START_3D\n");
			return -EINVAL;
		}
		break;
	case PACKET3_CONTEXT_CONTROL:
		if (pkt->count != 1) {
			DRM_ERROR("bad CONTEXT_CONTROL\n");
			return -EINVAL;
		}
		break;
	case PACKET3_INDEX_TYPE:
	case PACKET3_NUM_INSTANCES:
		if (pkt->count) {
			DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES\n");
			return -EINVAL;
		}
		break;
	case PACKET3_DRAW_INDEX:
	{
		uint64_t offset;
		if (pkt->count != 3) {
			DRM_ERROR("bad DRAW_INDEX\n");
			return -EINVAL;
		}
		r = r600_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad DRAW_INDEX\n");
			return -EINVAL;
		}
		offset = reloc->lobj.gpu_offset +
			 idx_value +
			 ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
		ib[idx+0] = offset;
		ib[idx+1] = upper_32_bits(offset) & 0xff;
		r = r600_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	}
	case PACKET3_DRAW_INDEX_AUTO:
		if (pkt->count != 1) {
			DRM_ERROR("bad DRAW_INDEX_AUTO\n");
			return -EINVAL;
		}
		r = r600_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX_IMMD_BE:
	case PACKET3_DRAW_INDEX_IMMD:
		if (pkt->count < 2) {
			DRM_ERROR("bad DRAW_INDEX_IMMD\n");
			return -EINVAL;
		}
		r = r600_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	case PACKET3_WAIT_REG_MEM:
		if (pkt->count != 5) {
			DRM_ERROR("bad WAIT_REG_MEM\n");
			return -EINVAL;
		}
		/* bit 4 is reg (0) or mem (1) */
		if (idx_value & 0x10) {
			uint64_t offset;
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad WAIT_REG_MEM\n");
				return -EINVAL;
			}
			offset = reloc->lobj.gpu_offset +
				 (radeon_get_ib_value(p, idx+1) & 0xfffffff0) +
				 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
			ib[idx+1] = (ib[idx+1] & 0x3) | (offset & 0xfffffff0);
			ib[idx+2] = upper_32_bits(offset) & 0xff;
		}
		break;
	case PACKET3_SURFACE_SYNC:
		if (pkt->count != 3) {
			DRM_ERROR("bad SURFACE_SYNC\n");
			return -EINVAL;
		}
		/* 0xffffffff/0x0 is flush all cache flag */
		if (radeon_get_ib_value(p, idx + 1) != 0xffffffff ||
		    radeon_get_ib_value(p, idx + 2) != 0) {
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad SURFACE_SYNC\n");
				return -EINVAL;
			}
			ib[idx+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		}
		break;
	case PACKET3_EVENT_WRITE:
		if (pkt->count != 2 && pkt->count != 0) {
			DRM_ERROR("bad EVENT_WRITE\n");
			return -EINVAL;
		}
		if (pkt->count) {
			uint64_t offset;
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad EVENT_WRITE\n");
				return -EINVAL;
			}
			offset = reloc->lobj.gpu_offset +
				 (radeon_get_ib_value(p, idx+1) & 0xfffffff8) +
				 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
			ib[idx+1] = offset & 0xfffffff8;
			ib[idx+2] = upper_32_bits(offset) & 0xff;
		}
		break;
	case PACKET3_EVENT_WRITE_EOP:
	{
		uint64_t offset;
		if (pkt->count != 4) {
			DRM_ERROR("bad EVENT_WRITE_EOP\n");
			return -EINVAL;
		}
		r = r600_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad EVENT_WRITE\n");
			return -EINVAL;
		}
		offset = reloc->lobj.gpu_offset +
			 (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
			 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
		ib[idx+1] = offset & 0xfffffffc;
		ib[idx+2] = (ib[idx+2] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
		break;
	}
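
	/*
	 * For the SET_*_REG packets the header dword gives an offset into
	 * the register block; each following data dword programs one
	 * register, which is vetted individually by r600_cs_check_reg()
	 * or by a range check on the allowed register window.
	 */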
	case PACKET3_SET_CONFIG_REG:
		start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CONFIG_REG_OFFSET) ||
		    (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
		    (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
			DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
			return -EINVAL;
		}
		for (i = 0; i < pkt->count; i++) {
			reg = start_reg + (4 * i);
			r = r600_cs_check_reg(p, reg, idx+1+i);
			if (r)
				return r;
		}
		break;
	case PACKET3_SET_CONTEXT_REG:
		start_reg = (idx_value << 2) + PACKET3_SET_CONTEXT_REG_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CONTEXT_REG_OFFSET) ||
		    (start_reg >= PACKET3_SET_CONTEXT_REG_END) ||
		    (end_reg >= PACKET3_SET_CONTEXT_REG_END)) {
			DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n");
			return -EINVAL;
		}
		for (i = 0; i < pkt->count; i++) {
			reg = start_reg + (4 * i);
			r = r600_cs_check_reg(p, reg, idx+1+i);
			if (r)
				return r;
		}
		break;
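
	/*
	 * SET_RESOURCE carries one or more 7-dword resource descriptors.
	 * Texture resources consume two relocations (base and mipmap) and
	 * are bounds-checked by r600_check_texture_resource(); vertex
	 * buffers consume one relocation and have their size clamped to
	 * the backing bo.
	 */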
	case PACKET3_SET_RESOURCE:
		if (pkt->count % 7) {
			DRM_ERROR("bad SET_RESOURCE\n");
			return -EINVAL;
		}
		start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_RESOURCE_OFFSET) ||
		    (start_reg >= PACKET3_SET_RESOURCE_END) ||
		    (end_reg >= PACKET3_SET_RESOURCE_END)) {
			DRM_ERROR("bad SET_RESOURCE\n");
			return -EINVAL;
		}
		for (i = 0; i < (pkt->count / 7); i++) {
			struct radeon_bo *texture, *mipmap;
			u32 size, offset, base_offset, mip_offset;
			switch (G__SQ_VTX_CONSTANT_TYPE(radeon_get_ib_value(p, idx+(i*7)+6+1))) {
			case SQ_TEX_VTX_VALID_TEXTURE:
				/* tex base */
				r = r600_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE\n");
					return -EINVAL;
				}
				base_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
				if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
					if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
						ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
					else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
						ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
				}
				texture = reloc->robj;
				/* tex mip base */
				r = r600_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE\n");
					return -EINVAL;
				}
				mip_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
				mipmap = reloc->robj;
				r = r600_check_texture_resource(p, idx+(i*7)+1,
								texture, mipmap,
								base_offset + radeon_get_ib_value(p, idx+1+(i*7)+2),
								mip_offset + radeon_get_ib_value(p, idx+1+(i*7)+3),
								reloc->lobj.tiling_flags);
				if (r)
					return r;
				ib[idx+1+(i*7)+2] += base_offset;
				ib[idx+1+(i*7)+3] += mip_offset;
				break;
			case SQ_TEX_VTX_VALID_BUFFER:
			{
				uint64_t offset64;
				/* vtx base */
				r = r600_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE\n");
					return -EINVAL;
				}
				offset = radeon_get_ib_value(p, idx+1+(i*7)+0);
				size = radeon_get_ib_value(p, idx+1+(i*7)+1) + 1;
				if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) {
					/* force size to size of the buffer */
					dev_warn(p->dev, "vbo resource seems too big (%d) for the bo (%ld)\n",
						 size + offset, radeon_bo_size(reloc->robj));
					ib[idx+1+(i*7)+1] = radeon_bo_size(reloc->robj) - offset;
				}
				offset64 = reloc->lobj.gpu_offset + offset;
				ib[idx+1+(i*8)+0] = offset64;
				ib[idx+1+(i*8)+2] = (ib[idx+1+(i*8)+2] & 0xffffff00) |
						    (upper_32_bits(offset64) & 0xff);
				break;
			}
			case SQ_TEX_VTX_INVALID_TEXTURE:
			case SQ_TEX_VTX_INVALID_BUFFER:
			default:
				DRM_ERROR("bad SET_RESOURCE\n");
				return -EINVAL;
			}
		}
		break;
	case PACKET3_SET_ALU_CONST:
		if (track->sq_config & DX9_CONSTS) {
			start_reg = (idx_value << 2) + PACKET3_SET_ALU_CONST_OFFSET;
			end_reg = 4 * pkt->count + start_reg - 4;
			if ((start_reg < PACKET3_SET_ALU_CONST_OFFSET) ||
			    (start_reg >= PACKET3_SET_ALU_CONST_END) ||
			    (end_reg >= PACKET3_SET_ALU_CONST_END)) {
				DRM_ERROR("bad SET_ALU_CONST\n");
				return -EINVAL;
			}
		}
		break;
	case PACKET3_SET_BOOL_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_BOOL_CONST_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_BOOL_CONST_OFFSET) ||
		    (start_reg >= PACKET3_SET_BOOL_CONST_END) ||
		    (end_reg >= PACKET3_SET_BOOL_CONST_END)) {
			DRM_ERROR("bad SET_BOOL_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_LOOP_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_LOOP_CONST_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_LOOP_CONST_OFFSET) ||
		    (start_reg >= PACKET3_SET_LOOP_CONST_END) ||
		    (end_reg >= PACKET3_SET_LOOP_CONST_END)) {
			DRM_ERROR("bad SET_LOOP_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_CTL_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_CTL_CONST_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CTL_CONST_OFFSET) ||
		    (start_reg >= PACKET3_SET_CTL_CONST_END) ||
		    (end_reg >= PACKET3_SET_CTL_CONST_END)) {
			DRM_ERROR("bad SET_CTL_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_SAMPLER:
		if (pkt->count % 3) {
			DRM_ERROR("bad SET_SAMPLER\n");
			return -EINVAL;
		}
		start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_SAMPLER_OFFSET) ||
		    (start_reg >= PACKET3_SET_SAMPLER_END) ||
		    (end_reg >= PACKET3_SET_SAMPLER_END)) {
			DRM_ERROR("bad SET_SAMPLER\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SURFACE_BASE_UPDATE:
		if (p->family >= CHIP_RV770 || p->family == CHIP_R600) {
			DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
			return -EINVAL;
		}
		if (pkt->count) {
			DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
			return -EINVAL;
		}
		break;
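
	/*
	 * STRMOUT_BUFFER_UPDATE: bit 0 of the control dword selects a
	 * memory destination (DST_ADDRESS) and a source select of 2 a
	 * memory source (SRC_ADDRESS); each memory operand needs its own
	 * relocation and a 4-byte bounds check against the backing bo.
	 */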
	case PACKET3_STRMOUT_BUFFER_UPDATE:
		if (pkt->count != 4) {
			DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (invalid count)\n");
			return -EINVAL;
		}
		/* Updating memory at DST_ADDRESS. */
		if (idx_value & 0x1) {
			u64 offset;
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n");
				return -EINVAL;
			}
			offset = radeon_get_ib_value(p, idx+1);
			offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE dst bo too small: 0x%llx, 0x%lx\n",
					  offset + 4, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}
			offset += reloc->lobj.gpu_offset;
			ib[idx+1] = offset;
			ib[idx+2] = upper_32_bits(offset) & 0xff;
		}
		/* Reading data from SRC_ADDRESS. */
		if (((idx_value >> 1) & 0x3) == 2) {
			u64 offset;
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n");
				return -EINVAL;
			}
			offset = radeon_get_ib_value(p, idx+3);
			offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE src bo too small: 0x%llx, 0x%lx\n",
					  offset + 4, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}
			offset += reloc->lobj.gpu_offset;
			ib[idx+3] = offset;
			ib[idx+4] = upper_32_bits(offset) & 0xff;
		}
		break;
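
	/*
	 * COPY_DW copies a single dword between registers and/or memory;
	 * memory operands get a relocation plus a bounds check, register
	 * operands must pass r600_is_safe_reg().
	 */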
	case PACKET3_COPY_DW:
		if (pkt->count != 4) {
			DRM_ERROR("bad COPY_DW (invalid count)\n");
			return -EINVAL;
		}
		if (idx_value & 0x1) {
			u64 offset;
			/* SRC is memory. */
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad COPY_DW (missing src reloc)\n");
				return -EINVAL;
			}
			offset = radeon_get_ib_value(p, idx+1);
			offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
				DRM_ERROR("bad COPY_DW src bo too small: 0x%llx, 0x%lx\n",
					  offset + 4, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}
			offset += reloc->lobj.gpu_offset;
			ib[idx+1] = offset;
			ib[idx+2] = upper_32_bits(offset) & 0xff;
		} else {
			/* SRC is a reg. */
			reg = radeon_get_ib_value(p, idx+1) << 2;
			if (!r600_is_safe_reg(p, reg, idx+1))
				return -EINVAL;
		}
		if (idx_value & 0x2) {
			u64 offset;
			/* DST is memory. */
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad COPY_DW (missing dst reloc)\n");
				return -EINVAL;
			}
			offset = radeon_get_ib_value(p, idx+3);
			offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
				DRM_ERROR("bad COPY_DW dst bo too small: 0x%llx, 0x%lx\n",
					  offset + 4, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}
			offset += reloc->lobj.gpu_offset;
			ib[idx+3] = offset;
			ib[idx+4] = upper_32_bits(offset) & 0xff;
		} else {
			/* DST is a reg. */
			reg = radeon_get_ib_value(p, idx+3) << 2;
			if (!r600_is_safe_reg(p, reg, idx+3))
				return -EINVAL;
		}
		break;
	case PACKET3_NOP:
		break;
	default:
		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
		return -EINVAL;
	}
	return 0;
}
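
/*
 * Top-level command stream parser for the KMS path: set up the state
 * tracker with the chip's tiling configuration, then walk the IB one
 * packet at a time, dispatching type-0 and type-3 packets to their
 * checkers and skipping type-2 padding.  The tracker is freed on both
 * the error and the success paths.
 */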
int r600_cs_parse(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet pkt;
	struct r600_cs_track *track;
	int r;

	if (p->track == NULL) {
		/* initialize tracker, we are in kms */
		track = kzalloc(sizeof(*track), GFP_KERNEL);
		if (track == NULL)
			return -ENOMEM;
		r600_cs_track_init(track);
		if (p->rdev->family < CHIP_RV770) {
			track->npipes = p->rdev->config.r600.tiling_npipes;
			track->nbanks = p->rdev->config.r600.tiling_nbanks;
			track->group_size = p->rdev->config.r600.tiling_group_size;
		} else if (p->rdev->family <= CHIP_RV740) {
			track->npipes = p->rdev->config.rv770.tiling_npipes;
			track->nbanks = p->rdev->config.rv770.tiling_nbanks;
			track->group_size = p->rdev->config.rv770.tiling_group_size;
		}
		p->track = track;
	}
	do {
		r = r600_cs_packet_parse(p, &pkt, p->idx);
		if (r) {
			kfree(p->track);
			p->track = NULL;
			return r;
		}
		p->idx += pkt.count + 2;
		switch (pkt.type) {
		case PACKET_TYPE0:
			r = r600_cs_parse_packet0(p, &pkt);
			break;
		case PACKET_TYPE2:
			break;
		case PACKET_TYPE3:
			r = r600_packet3_check(p, &pkt);
			break;
		default:
			DRM_ERROR("Unknown packet type %d !\n", pkt.type);
			kfree(p->track);
			p->track = NULL;
			return -EINVAL;
		}
		if (r) {
			kfree(p->track);
			p->track = NULL;
			return r;
		}
	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
#if 0
	for (r = 0; r < p->ib->length_dw; r++) {
		printk(KERN_INFO "%05d 0x%08X\n", r, p->ib->ptr[r]);
		mdelay(1);
	}
#endif
	kfree(p->track);
	p->track = NULL;
	return 0;
}
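
/*
 * The legacy (non-KMS) path has no memory manager, so a single scratch
 * relocation entry is enough for r600_cs_packet_next_reloc_nomm() to
 * fill in as packets are parsed.
 */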
static int r600_cs_parser_relocs_legacy(struct radeon_cs_parser *p)
{
	if (p->chunk_relocs_idx == -1) {
		return 0;
	}
	p->relocs = kzalloc(sizeof(struct radeon_cs_reloc), GFP_KERNEL);
	if (p->relocs == NULL) {
		return -ENOMEM;
	}
	return 0;
}

/**
 * r600_cs_parser_fini() - clean parser states
 * @parser: parser structure holding parsing context.
 * @error: error number
 *
 * If error is set, then unvalidate the buffer; otherwise just free the
 * memory used by the parsing context.
 **/
static void r600_cs_parser_fini(struct radeon_cs_parser *parser, int error)
{
	unsigned i;

	kfree(parser->relocs);
	for (i = 0; i < parser->nchunks; i++) {
		kfree(parser->chunks[i].kdata);
		kfree(parser->chunks[i].kpage[0]);
		kfree(parser->chunks[i].kpage[1]);
	}
	kfree(parser->chunks);
	kfree(parser->chunks_array);
}
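
/*
 * Entry point for the legacy (UMS) command submission path: build a
 * throwaway parser around the caller-supplied IB buffer, run the same
 * r600_cs_parse() checks over it, and return the stream length in *l.
 */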
int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
			unsigned family, u32 *ib, int *l)
{
	struct radeon_cs_parser parser;
	struct radeon_cs_chunk *ib_chunk;
	struct radeon_ib fake_ib;
	struct r600_cs_track *track;
	int r;

	/* initialize tracker */
	track = kzalloc(sizeof(*track), GFP_KERNEL);
	if (track == NULL)
		return -ENOMEM;
	r600_cs_track_init(track);
	r600_cs_legacy_get_tiling_conf(dev, &track->npipes, &track->nbanks, &track->group_size);
	/* initialize parser */
	memset(&parser, 0, sizeof(struct radeon_cs_parser));
	parser.filp = filp;
	parser.dev = &dev->pdev->dev;
	parser.rdev = NULL;
	parser.family = family;
	parser.ib = &fake_ib;
	parser.track = track;
	fake_ib.ptr = ib;
	r = radeon_cs_parser_init(&parser, data);
	if (r) {
		DRM_ERROR("Failed to initialize parser !\n");
		r600_cs_parser_fini(&parser, r);
		return r;
	}
	r = r600_cs_parser_relocs_legacy(&parser);
	if (r) {
		DRM_ERROR("Failed to parse relocation !\n");
		r600_cs_parser_fini(&parser, r);
		return r;
	}
	/* Copy the packet into the IB, the parser will read from the
	 * input memory (cached) and write to the IB (which can be
	 * uncached). */
	ib_chunk = &parser.chunks[parser.chunk_ib_idx];
	parser.ib->length_dw = ib_chunk->length_dw;
	*l = parser.ib->length_dw;
	r = r600_cs_parse(&parser);
	if (r) {
		DRM_ERROR("Invalid command stream !\n");
		r600_cs_parser_fini(&parser, r);
		return r;
	}
	r = radeon_cs_finish_pages(&parser);
	if (r) {
		DRM_ERROR("Invalid command stream !\n");
		r600_cs_parser_fini(&parser, r);
		return r;
	}
	r600_cs_parser_fini(&parser, r);
	return r;
}
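
/*
 * The legacy path has no memory manager, so hook the reloc helper up to
 * the no-MM variant, which takes GPU addresses from the relocation chunk
 * instead of validated buffer objects.
 */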
void r600_cs_legacy_init(void)
{
	r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_nomm;
}