r600_cs.c

/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/kernel.h>
#include "drmP.h"
#include "radeon.h"
#include "r600d.h"
#include "r600_reg_safe.h"

static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
					struct radeon_cs_reloc **cs_reloc);
static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
					  struct radeon_cs_reloc **cs_reloc);
typedef int (*next_reloc_t)(struct radeon_cs_parser*, struct radeon_cs_reloc**);
static next_reloc_t r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_mm;
extern void r600_cs_legacy_get_tiling_conf(struct drm_device *dev, u32 *npipes, u32 *nbanks, u32 *group_size);
struct r600_cs_track {
	/* configuration we mirror so that the same code works for KMS and UMS */
	u32 group_size;
	u32 nbanks;
	u32 npipes;
	/* values we track */
	u32 sq_config;
	u32 log_nsamples;
	u32 nsamples;
	u32 cb_color_base_last[8];
	struct radeon_bo *cb_color_bo[8];
	u64 cb_color_bo_mc[8];
	u64 cb_color_bo_offset[8];
	struct radeon_bo *cb_color_frag_bo[8];
	u64 cb_color_frag_offset[8];
	struct radeon_bo *cb_color_tile_bo[8];
	u64 cb_color_tile_offset[8];
	u32 cb_color_mask[8];
	u32 cb_color_info[8];
	u32 cb_color_view[8];
	u32 cb_color_size_idx[8]; /* unused */
	u32 cb_target_mask;
	u32 cb_shader_mask; /* unused */
	u32 cb_color_size[8];
	u32 vgt_strmout_en;
	u32 vgt_strmout_buffer_en;
	struct radeon_bo *vgt_strmout_bo[4];
	u64 vgt_strmout_bo_mc[4]; /* unused */
	u32 vgt_strmout_bo_offset[4];
	u32 vgt_strmout_size[4];
	u32 db_depth_control;
	u32 db_depth_info;
	u32 db_depth_size_idx;
	u32 db_depth_view;
	u32 db_depth_size;
	u32 db_offset;
	struct radeon_bo *db_bo;
	u64 db_bo_mc;
	bool sx_misc_kill_all_prims;
	bool cb_dirty;
	bool db_dirty;
	bool streamout_dirty;
	struct radeon_bo *htile_bo;
	u64 htile_offset;
	u32 htile_surface;
};
#define FMT_8_BIT(fmt, vc)   [fmt] = { 1, 1, 1, vc, CHIP_R600 }
#define FMT_16_BIT(fmt, vc)  [fmt] = { 1, 1, 2, vc, CHIP_R600 }
#define FMT_24_BIT(fmt)      [fmt] = { 1, 1, 4, 0, CHIP_R600 }
#define FMT_32_BIT(fmt, vc)  [fmt] = { 1, 1, 4, vc, CHIP_R600 }
#define FMT_48_BIT(fmt)      [fmt] = { 1, 1, 8, 0, CHIP_R600 }
#define FMT_64_BIT(fmt, vc)  [fmt] = { 1, 1, 8, vc, CHIP_R600 }
#define FMT_96_BIT(fmt)      [fmt] = { 1, 1, 12, 0, CHIP_R600 }
#define FMT_128_BIT(fmt, vc) [fmt] = { 1, 1, 16, vc, CHIP_R600 }

struct gpu_formats {
	unsigned blockwidth;
	unsigned blockheight;
	unsigned blocksize;
	unsigned valid_color;
	enum radeon_family min_family;
};
static const struct gpu_formats color_formats_table[] = {
	/* 8 bit */
	FMT_8_BIT(V_038004_COLOR_8, 1),
	FMT_8_BIT(V_038004_COLOR_4_4, 1),
	FMT_8_BIT(V_038004_COLOR_3_3_2, 1),
	FMT_8_BIT(V_038004_FMT_1, 0),

	/* 16-bit */
	FMT_16_BIT(V_038004_COLOR_16, 1),
	FMT_16_BIT(V_038004_COLOR_16_FLOAT, 1),
	FMT_16_BIT(V_038004_COLOR_8_8, 1),
	FMT_16_BIT(V_038004_COLOR_5_6_5, 1),
	FMT_16_BIT(V_038004_COLOR_6_5_5, 1),
	FMT_16_BIT(V_038004_COLOR_1_5_5_5, 1),
	FMT_16_BIT(V_038004_COLOR_4_4_4_4, 1),
	FMT_16_BIT(V_038004_COLOR_5_5_5_1, 1),

	/* 24-bit */
	FMT_24_BIT(V_038004_FMT_8_8_8),

	/* 32-bit */
	FMT_32_BIT(V_038004_COLOR_32, 1),
	FMT_32_BIT(V_038004_COLOR_32_FLOAT, 1),
	FMT_32_BIT(V_038004_COLOR_16_16, 1),
	FMT_32_BIT(V_038004_COLOR_16_16_FLOAT, 1),
	FMT_32_BIT(V_038004_COLOR_8_24, 1),
	FMT_32_BIT(V_038004_COLOR_8_24_FLOAT, 1),
	FMT_32_BIT(V_038004_COLOR_24_8, 1),
	FMT_32_BIT(V_038004_COLOR_24_8_FLOAT, 1),
	FMT_32_BIT(V_038004_COLOR_10_11_11, 1),
	FMT_32_BIT(V_038004_COLOR_10_11_11_FLOAT, 1),
	FMT_32_BIT(V_038004_COLOR_11_11_10, 1),
	FMT_32_BIT(V_038004_COLOR_11_11_10_FLOAT, 1),
	FMT_32_BIT(V_038004_COLOR_2_10_10_10, 1),
	FMT_32_BIT(V_038004_COLOR_8_8_8_8, 1),
	FMT_32_BIT(V_038004_COLOR_10_10_10_2, 1),
	FMT_32_BIT(V_038004_FMT_5_9_9_9_SHAREDEXP, 0),
	FMT_32_BIT(V_038004_FMT_32_AS_8, 0),
	FMT_32_BIT(V_038004_FMT_32_AS_8_8, 0),

	/* 48-bit */
	FMT_48_BIT(V_038004_FMT_16_16_16),
	FMT_48_BIT(V_038004_FMT_16_16_16_FLOAT),

	/* 64-bit */
	FMT_64_BIT(V_038004_COLOR_X24_8_32_FLOAT, 1),
	FMT_64_BIT(V_038004_COLOR_32_32, 1),
	FMT_64_BIT(V_038004_COLOR_32_32_FLOAT, 1),
	FMT_64_BIT(V_038004_COLOR_16_16_16_16, 1),
	FMT_64_BIT(V_038004_COLOR_16_16_16_16_FLOAT, 1),

	/* 96-bit */
	FMT_96_BIT(V_038004_FMT_32_32_32),
	FMT_96_BIT(V_038004_FMT_32_32_32_FLOAT),

	/* 128-bit */
	FMT_128_BIT(V_038004_COLOR_32_32_32_32, 1),
	FMT_128_BIT(V_038004_COLOR_32_32_32_32_FLOAT, 1),

	[V_038004_FMT_GB_GR] = { 2, 1, 4, 0 },
	[V_038004_FMT_BG_RG] = { 2, 1, 4, 0 },

	/* block compressed formats */
	[V_038004_FMT_BC1] = { 4, 4, 8, 0 },
	[V_038004_FMT_BC2] = { 4, 4, 16, 0 },
	[V_038004_FMT_BC3] = { 4, 4, 16, 0 },
	[V_038004_FMT_BC4] = { 4, 4, 8, 0 },
	[V_038004_FMT_BC5] = { 4, 4, 16, 0 },
	[V_038004_FMT_BC6] = { 4, 4, 16, 0, CHIP_CEDAR }, /* Evergreen-only */
	[V_038004_FMT_BC7] = { 4, 4, 16, 0, CHIP_CEDAR }, /* Evergreen-only */

	/* The other Evergreen formats */
	[V_038004_FMT_32_AS_32_32_32_32] = { 1, 1, 4, 0, CHIP_CEDAR },
};
bool r600_fmt_is_valid_color(u32 format)
{
	if (format >= ARRAY_SIZE(color_formats_table))
		return false;

	if (color_formats_table[format].valid_color)
		return true;

	return false;
}

bool r600_fmt_is_valid_texture(u32 format, enum radeon_family family)
{
	if (format >= ARRAY_SIZE(color_formats_table))
		return false;

	if (family < color_formats_table[format].min_family)
		return false;

	if (color_formats_table[format].blockwidth > 0)
		return true;

	return false;
}

int r600_fmt_get_blocksize(u32 format)
{
	if (format >= ARRAY_SIZE(color_formats_table))
		return 0;

	return color_formats_table[format].blocksize;
}

int r600_fmt_get_nblocksx(u32 format, u32 w)
{
	unsigned bw;

	if (format >= ARRAY_SIZE(color_formats_table))
		return 0;

	bw = color_formats_table[format].blockwidth;
	if (bw == 0)
		return 0;

	return (w + bw - 1) / bw;
}

int r600_fmt_get_nblocksy(u32 format, u32 h)
{
	unsigned bh;

	if (format >= ARRAY_SIZE(color_formats_table))
		return 0;

	bh = color_formats_table[format].blockheight;
	if (bh == 0)
		return 0;

	return (h + bh - 1) / bh;
}
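
/*
 * Worked example (illustrative, not from the original source): for a
 * 100x60 BC1 texture the table entry is { 4, 4, 8, 0 }, so
 *   r600_fmt_get_nblocksx(BC1, 100) = (100 + 3) / 4 = 25
 *   r600_fmt_get_nblocksy(BC1, 60)  = (60 + 3) / 4  = 15
 * giving 25 * 15 * 8 = 3000 bytes for that mip level. A plain 8_8_8_8
 * surface is { 1, 1, 4, 1 }: one pixel per block at 4 bytes each.
 */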
struct array_mode_checker {
	int array_mode;
	u32 group_size;
	u32 nbanks;
	u32 npipes;
	u32 nsamples;
	u32 blocksize;
};

/* returns alignment in pixels for pitch/height/depth and bytes for base */
static int r600_get_array_mode_alignment(struct array_mode_checker *values,
					 u32 *pitch_align,
					 u32 *height_align,
					 u32 *depth_align,
					 u64 *base_align)
{
	u32 tile_width = 8;
	u32 tile_height = 8;
	u32 macro_tile_width = values->nbanks;
	u32 macro_tile_height = values->npipes;
	u32 tile_bytes = tile_width * tile_height * values->blocksize * values->nsamples;
	u32 macro_tile_bytes = macro_tile_width * macro_tile_height * tile_bytes;

	switch (values->array_mode) {
	case ARRAY_LINEAR_GENERAL:
		/* technically tile_width/_height for pitch/height */
		*pitch_align = 1; /* tile_width */
		*height_align = 1; /* tile_height */
		*depth_align = 1;
		*base_align = 1;
		break;
	case ARRAY_LINEAR_ALIGNED:
		*pitch_align = max((u32)64, (u32)(values->group_size / values->blocksize));
		*height_align = 1;
		*depth_align = 1;
		*base_align = values->group_size;
		break;
	case ARRAY_1D_TILED_THIN1:
		*pitch_align = max((u32)tile_width,
				   (u32)(values->group_size /
					 (tile_height * values->blocksize * values->nsamples)));
		*height_align = tile_height;
		*depth_align = 1;
		*base_align = values->group_size;
		break;
	case ARRAY_2D_TILED_THIN1:
		*pitch_align = max((u32)macro_tile_width * tile_width,
				   (u32)((values->group_size * values->nbanks) /
					 (values->blocksize * values->nsamples * tile_width)));
		*height_align = macro_tile_height * tile_height;
		*depth_align = 1;
		*base_align = max(macro_tile_bytes,
				  (*pitch_align) * values->blocksize * (*height_align) * values->nsamples);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
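
/*
 * Illustrative example of the ARRAY_2D_TILED_THIN1 case above, assuming
 * group_size = 256, nbanks = 8, npipes = 8, blocksize = 4, nsamples = 1
 * (typical values, not taken from the source):
 *   tile_bytes       = 8 * 8 * 4 * 1 = 256
 *   macro_tile_bytes = 8 * 8 * 256   = 16384
 *   pitch_align      = max(8 * 8, (256 * 8) / (4 * 1 * 8)) = 64 pixels
 *   height_align     = 8 * 8 = 64 pixels
 *   base_align       = max(16384, 64 * 4 * 64 * 1) = 16384 bytes
 */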
static void r600_cs_track_init(struct r600_cs_track *track)
{
	int i;

	/* assume DX9 mode */
	track->sq_config = DX9_CONSTS;
	for (i = 0; i < 8; i++) {
		track->cb_color_base_last[i] = 0;
		track->cb_color_size[i] = 0;
		track->cb_color_size_idx[i] = 0;
		track->cb_color_info[i] = 0;
		track->cb_color_view[i] = 0xFFFFFFFF;
		track->cb_color_bo[i] = NULL;
		track->cb_color_bo_offset[i] = 0xFFFFFFFF;
		track->cb_color_bo_mc[i] = 0xFFFFFFFF;
	}
	track->cb_target_mask = 0xFFFFFFFF;
	track->cb_shader_mask = 0xFFFFFFFF;
	track->cb_dirty = true;
	track->db_bo = NULL;
	track->db_bo_mc = 0xFFFFFFFF;
	/* assume the biggest format and that htile is enabled */
	track->db_depth_info = 7 | (1 << 25);
	track->db_depth_view = 0xFFFFC000;
	track->db_depth_size = 0xFFFFFFFF;
	track->db_depth_size_idx = 0;
	track->db_depth_control = 0xFFFFFFFF;
	track->db_dirty = true;
	track->htile_bo = NULL;
	track->htile_offset = 0xFFFFFFFF;
	track->htile_surface = 0;

	for (i = 0; i < 4; i++) {
		track->vgt_strmout_size[i] = 0;
		track->vgt_strmout_bo[i] = NULL;
		track->vgt_strmout_bo_offset[i] = 0xFFFFFFFF;
		track->vgt_strmout_bo_mc[i] = 0xFFFFFFFF;
	}
	track->streamout_dirty = true;
	track->sx_misc_kill_all_prims = false;
}
static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
{
	struct r600_cs_track *track = p->track;
	u32 slice_tile_max, size, tmp;
	u32 height, height_align, pitch, pitch_align, depth_align;
	u64 base_offset, base_align;
	struct array_mode_checker array_check;
	volatile u32 *ib = p->ib.ptr;
	unsigned array_mode;
	u32 format;

	size = radeon_bo_size(track->cb_color_bo[i]) - track->cb_color_bo_offset[i];
	format = G_0280A0_FORMAT(track->cb_color_info[i]);
	if (!r600_fmt_is_valid_color(format)) {
		dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08X)\n",
			 __func__, __LINE__, format,
			 i, track->cb_color_info[i]);
		return -EINVAL;
	}
	/* pitch in pixels */
	pitch = (G_028060_PITCH_TILE_MAX(track->cb_color_size[i]) + 1) * 8;
	slice_tile_max = G_028060_SLICE_TILE_MAX(track->cb_color_size[i]) + 1;
	slice_tile_max *= 64;
	height = slice_tile_max / pitch;
	if (height > 8192)
		height = 8192;
	array_mode = G_0280A0_ARRAY_MODE(track->cb_color_info[i]);
	base_offset = track->cb_color_bo_mc[i] + track->cb_color_bo_offset[i];
	array_check.array_mode = array_mode;
	array_check.group_size = track->group_size;
	array_check.nbanks = track->nbanks;
	array_check.npipes = track->npipes;
	array_check.nsamples = track->nsamples;
	array_check.blocksize = r600_fmt_get_blocksize(format);
	if (r600_get_array_mode_alignment(&array_check,
					  &pitch_align, &height_align, &depth_align, &base_align)) {
		dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
			 G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i,
			 track->cb_color_info[i]);
		return -EINVAL;
	}
	switch (array_mode) {
	case V_0280A0_ARRAY_LINEAR_GENERAL:
		break;
	case V_0280A0_ARRAY_LINEAR_ALIGNED:
		break;
	case V_0280A0_ARRAY_1D_TILED_THIN1:
		/* avoid breaking userspace */
		if (height > 7)
			height &= ~0x7;
		break;
	case V_0280A0_ARRAY_2D_TILED_THIN1:
		break;
	default:
		dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
			 G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i,
			 track->cb_color_info[i]);
		return -EINVAL;
	}
	if (!IS_ALIGNED(pitch, pitch_align)) {
		dev_warn(p->dev, "%s:%d cb pitch (%d, 0x%x, %d) invalid\n",
			 __func__, __LINE__, pitch, pitch_align, array_mode);
		return -EINVAL;
	}
	if (!IS_ALIGNED(height, height_align)) {
		dev_warn(p->dev, "%s:%d cb height (%d, 0x%x, %d) invalid\n",
			 __func__, __LINE__, height, height_align, array_mode);
		return -EINVAL;
	}
	if (!IS_ALIGNED(base_offset, base_align)) {
		dev_warn(p->dev, "%s offset[%d] 0x%llx 0x%llx, %d not aligned\n", __func__, i,
			 base_offset, base_align, array_mode);
		return -EINVAL;
	}
	/* check offset */
	tmp = r600_fmt_get_nblocksy(format, height) * r600_fmt_get_nblocksx(format, pitch) *
	      r600_fmt_get_blocksize(format) * track->nsamples;
	switch (array_mode) {
	default:
	case V_0280A0_ARRAY_LINEAR_GENERAL:
	case V_0280A0_ARRAY_LINEAR_ALIGNED:
		tmp += track->cb_color_view[i] & 0xFF;
		break;
	case V_0280A0_ARRAY_1D_TILED_THIN1:
	case V_0280A0_ARRAY_2D_TILED_THIN1:
		tmp += G_028080_SLICE_MAX(track->cb_color_view[i]) * tmp;
		break;
	}
	if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) {
		if (array_mode == V_0280A0_ARRAY_LINEAR_GENERAL) {
			/* the initial DDX does bad things with the CB size occasionally:
			 * it rounds height up too far for slice tile max, but the BO is
			 * smaller. r600c,g also seem to flush at bad times in some apps,
			 * resulting in bogus values here. So for linear just allow
			 * anything to avoid breaking broken userspace.
			 */
		} else {
			dev_warn(p->dev, "%s offset[%d] %d %llu %d %lu too big (%d %d) (%d %d %d)\n",
				 __func__, i, array_mode,
				 track->cb_color_bo_offset[i], tmp,
				 radeon_bo_size(track->cb_color_bo[i]),
				 pitch, height, r600_fmt_get_nblocksx(format, pitch),
				 r600_fmt_get_nblocksy(format, height),
				 r600_fmt_get_blocksize(format));
			return -EINVAL;
		}
	}
	/* limit max tile */
	tmp = (height * pitch) >> 6;
	if (tmp < slice_tile_max)
		slice_tile_max = tmp;
	tmp = S_028060_PITCH_TILE_MAX((pitch / 8) - 1) |
	      S_028060_SLICE_TILE_MAX(slice_tile_max - 1);
	ib[track->cb_color_size_idx[i]] = tmp;

	/* FMASK/CMASK */
	switch (G_0280A0_TILE_MODE(track->cb_color_info[i])) {
	case V_0280A0_TILE_DISABLE:
		break;
	case V_0280A0_FRAG_ENABLE:
		if (track->nsamples > 1) {
			uint32_t tile_max = G_028100_FMASK_TILE_MAX(track->cb_color_mask[i]);
			/* the tile size is 8x8, but the size is in units of bits;
			 * for bytes, do just * 8. */
			uint32_t bytes = track->nsamples * track->log_nsamples * 8 * (tile_max + 1);

			if (bytes + track->cb_color_frag_offset[i] >
			    radeon_bo_size(track->cb_color_frag_bo[i])) {
				dev_warn(p->dev, "%s FMASK_TILE_MAX too large "
					 "(tile_max=%u, bytes=%u, offset=%llu, bo_size=%lu)\n",
					 __func__, tile_max, bytes,
					 track->cb_color_frag_offset[i],
					 radeon_bo_size(track->cb_color_frag_bo[i]));
				return -EINVAL;
			}
		}
		/* fall through */
	case V_0280A0_CLEAR_ENABLE:
	{
		uint32_t block_max = G_028100_CMASK_BLOCK_MAX(track->cb_color_mask[i]);
		/* One block = 128x128 pixels, one 8x8 tile has 4 bits,
		 * so (128*128) / (8*8) / 2 = 128 bytes per block. */
		uint32_t bytes = (block_max + 1) * 128;

		if (bytes + track->cb_color_tile_offset[i] >
		    radeon_bo_size(track->cb_color_tile_bo[i])) {
			dev_warn(p->dev, "%s CMASK_BLOCK_MAX too large "
				 "(block_max=%u, bytes=%u, offset=%llu, bo_size=%lu)\n",
				 __func__, block_max, bytes,
				 track->cb_color_tile_offset[i],
				 radeon_bo_size(track->cb_color_tile_bo[i]));
			return -EINVAL;
		}
		break;
	}
	default:
		dev_warn(p->dev, "%s invalid tile mode\n", __func__);
		return -EINVAL;
	}
	return 0;
}
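
/*
 * Size-math note (illustrative): CB_COLOR*_SIZE packs PITCH_TILE_MAX in
 * units of 8 pixels and SLICE_TILE_MAX in units of 64 pixels. For a
 * hypothetical 1024x768 buffer the rewrite above would encode
 *   PITCH_TILE_MAX = 1024 / 8 - 1 = 127
 *   SLICE_TILE_MAX = (768 * 1024) / 64 - 1 = 12287
 * after slice_tile_max has been clamped to (height * pitch) >> 6.
 */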
static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
{
	struct r600_cs_track *track = p->track;
	u32 nviews, bpe, ntiles, size, slice_tile_max, tmp;
	u32 height_align, pitch_align, depth_align;
	u32 pitch = 8192;
	u32 height = 8192;
	u64 base_offset, base_align;
	struct array_mode_checker array_check;
	int array_mode;
	volatile u32 *ib = p->ib.ptr;

	if (track->db_bo == NULL) {
		dev_warn(p->dev, "z/stencil with no depth buffer\n");
		return -EINVAL;
	}
	switch (G_028010_FORMAT(track->db_depth_info)) {
	case V_028010_DEPTH_16:
		bpe = 2;
		break;
	case V_028010_DEPTH_X8_24:
	case V_028010_DEPTH_8_24:
	case V_028010_DEPTH_X8_24_FLOAT:
	case V_028010_DEPTH_8_24_FLOAT:
	case V_028010_DEPTH_32_FLOAT:
		bpe = 4;
		break;
	case V_028010_DEPTH_X24_8_32_FLOAT:
		bpe = 8;
		break;
	default:
		dev_warn(p->dev, "z/stencil with invalid format %d\n", G_028010_FORMAT(track->db_depth_info));
		return -EINVAL;
	}
	if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) {
		if (!track->db_depth_size_idx) {
			dev_warn(p->dev, "z/stencil buffer size not set\n");
			return -EINVAL;
		}
		tmp = radeon_bo_size(track->db_bo) - track->db_offset;
		tmp = (tmp / bpe) >> 6;
		if (!tmp) {
			dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %ld)\n",
				 track->db_depth_size, bpe, track->db_offset,
				 radeon_bo_size(track->db_bo));
			return -EINVAL;
		}
		ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF);
	} else {
		size = radeon_bo_size(track->db_bo);
		/* pitch in pixels */
		pitch = (G_028000_PITCH_TILE_MAX(track->db_depth_size) + 1) * 8;
		slice_tile_max = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
		slice_tile_max *= 64;
		height = slice_tile_max / pitch;
		if (height > 8192)
			height = 8192;
		base_offset = track->db_bo_mc + track->db_offset;
		array_mode = G_028010_ARRAY_MODE(track->db_depth_info);
		array_check.array_mode = array_mode;
		array_check.group_size = track->group_size;
		array_check.nbanks = track->nbanks;
		array_check.npipes = track->npipes;
		array_check.nsamples = track->nsamples;
		array_check.blocksize = bpe;
		if (r600_get_array_mode_alignment(&array_check,
						  &pitch_align, &height_align, &depth_align, &base_align)) {
			dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
				 G_028010_ARRAY_MODE(track->db_depth_info),
				 track->db_depth_info);
			return -EINVAL;
		}
		switch (array_mode) {
		case V_028010_ARRAY_1D_TILED_THIN1:
			/* don't break userspace */
			height &= ~0x7;
			break;
		case V_028010_ARRAY_2D_TILED_THIN1:
			break;
		default:
			dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
				 G_028010_ARRAY_MODE(track->db_depth_info),
				 track->db_depth_info);
			return -EINVAL;
		}
		if (!IS_ALIGNED(pitch, pitch_align)) {
			dev_warn(p->dev, "%s:%d db pitch (%d, 0x%x, %d) invalid\n",
				 __func__, __LINE__, pitch, pitch_align, array_mode);
			return -EINVAL;
		}
		if (!IS_ALIGNED(height, height_align)) {
			dev_warn(p->dev, "%s:%d db height (%d, 0x%x, %d) invalid\n",
				 __func__, __LINE__, height, height_align, array_mode);
			return -EINVAL;
		}
		if (!IS_ALIGNED(base_offset, base_align)) {
			dev_warn(p->dev, "%s offset 0x%llx, 0x%llx, %d not aligned\n", __func__,
				 base_offset, base_align, array_mode);
			return -EINVAL;
		}
		ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
		nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1;
		tmp = ntiles * bpe * 64 * nviews * track->nsamples;
		if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) {
			dev_warn(p->dev, "z/stencil buffer (%d) too small (0x%08X %d %d %d -> %u have %lu)\n",
				 array_mode,
				 track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset,
				 radeon_bo_size(track->db_bo));
			return -EINVAL;
		}
	}

	/* hyperz */
	if (G_028010_TILE_SURFACE_ENABLE(track->db_depth_info)) {
		unsigned long size;
		unsigned nbx, nby;

		if (track->htile_bo == NULL) {
			dev_warn(p->dev, "%s:%d htile enabled without htile surface 0x%08x\n",
				 __func__, __LINE__, track->db_depth_info);
			return -EINVAL;
		}
		if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) {
			dev_warn(p->dev, "%s:%d htile can't be enabled with bogus db_depth_size 0x%08x\n",
				 __func__, __LINE__, track->db_depth_size);
			return -EINVAL;
		}
		nbx = pitch;
		nby = height;
		if (G_028D24_LINEAR(track->htile_surface)) {
			/* nbx must be 16 htiles aligned == 16 * 8 pixel aligned */
			nbx = round_up(nbx, 16 * 8);
			/* nby is npipes htiles aligned == npipes * 8 pixel aligned */
			nby = round_up(nby, track->npipes * 8);
		} else {
			/* htile width & height (each 8 or 4) form a 2-bit number */
			tmp = track->htile_surface & 3;
			/* alignment is htile align * 8; htile align varies with
			 * the number of pipes and the htile width/height
			 */
			switch (track->npipes) {
			case 8:
				switch (tmp) {
				case 3:	/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8 */
					nbx = round_up(nbx, 64 * 8);
					nby = round_up(nby, 64 * 8);
					break;
				case 2:	/* HTILE_WIDTH = 4 & HTILE_HEIGHT = 8 */
				case 1:	/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 4 */
					nbx = round_up(nbx, 64 * 8);
					nby = round_up(nby, 32 * 8);
					break;
				case 0:	/* HTILE_WIDTH = 4 & HTILE_HEIGHT = 4 */
					nbx = round_up(nbx, 32 * 8);
					nby = round_up(nby, 32 * 8);
					break;
				default:
					return -EINVAL;
				}
				break;
			case 4:
				switch (tmp) {
				case 3:	/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8 */
					nbx = round_up(nbx, 64 * 8);
					nby = round_up(nby, 32 * 8);
					break;
				case 2:	/* HTILE_WIDTH = 4 & HTILE_HEIGHT = 8 */
				case 1:	/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 4 */
					nbx = round_up(nbx, 32 * 8);
					nby = round_up(nby, 32 * 8);
					break;
				case 0:	/* HTILE_WIDTH = 4 & HTILE_HEIGHT = 4 */
					nbx = round_up(nbx, 32 * 8);
					nby = round_up(nby, 16 * 8);
					break;
				default:
					return -EINVAL;
				}
				break;
			case 2:
				switch (tmp) {
				case 3:	/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8 */
					nbx = round_up(nbx, 32 * 8);
					nby = round_up(nby, 32 * 8);
					break;
				case 2:	/* HTILE_WIDTH = 4 & HTILE_HEIGHT = 8 */
				case 1:	/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 4 */
					nbx = round_up(nbx, 32 * 8);
					nby = round_up(nby, 16 * 8);
					break;
				case 0:	/* HTILE_WIDTH = 4 & HTILE_HEIGHT = 4 */
					nbx = round_up(nbx, 16 * 8);
					nby = round_up(nby, 16 * 8);
					break;
				default:
					return -EINVAL;
				}
				break;
			case 1:
				switch (tmp) {
				case 3:	/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8 */
					nbx = round_up(nbx, 32 * 8);
					nby = round_up(nby, 16 * 8);
					break;
				case 2:	/* HTILE_WIDTH = 4 & HTILE_HEIGHT = 8 */
				case 1:	/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 4 */
					nbx = round_up(nbx, 16 * 8);
					nby = round_up(nby, 16 * 8);
					break;
				case 0:	/* HTILE_WIDTH = 4 & HTILE_HEIGHT = 4 */
					nbx = round_up(nbx, 16 * 8);
					nby = round_up(nby, 8 * 8);
					break;
				default:
					return -EINVAL;
				}
				break;
			default:
				dev_warn(p->dev, "%s:%d invalid num pipes %d\n",
					 __func__, __LINE__, track->npipes);
				return -EINVAL;
			}
		}
		/* compute the number of htiles */
		nbx = G_028D24_HTILE_WIDTH(track->htile_surface) ? nbx / 8 : nbx / 4;
		nby = G_028D24_HTILE_HEIGHT(track->htile_surface) ? nby / 8 : nby / 4;
		size = nbx * nby * 4;
		size += track->htile_offset;
		if (size > radeon_bo_size(track->htile_bo)) {
			dev_warn(p->dev, "%s:%d htile surface too small %ld for %ld (%d %d)\n",
				 __func__, __LINE__, radeon_bo_size(track->htile_bo),
				 size, nbx, nby);
			return -EINVAL;
		}
	}

	track->db_dirty = false;
	return 0;
}
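
/*
 * HTILE sizing example (illustrative): with HTILE_WIDTH = HTILE_HEIGHT = 8,
 * each htile covers an 8x8 pixel tile and costs 4 bytes, so a 2048x2048
 * depth buffer needs (2048 / 8) * (2048 / 8) * 4 = 256 KiB of htile memory
 * before the npipes-dependent rounding applied above.
 */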
static int r600_cs_track_check(struct radeon_cs_parser *p)
{
	struct r600_cs_track *track = p->track;
	u32 tmp;
	int r, i;

	/* on legacy kernels we don't perform advanced checks */
	if (p->rdev == NULL)
		return 0;

	/* check streamout */
	if (track->streamout_dirty && track->vgt_strmout_en) {
		for (i = 0; i < 4; i++) {
			if (track->vgt_strmout_buffer_en & (1 << i)) {
				if (track->vgt_strmout_bo[i]) {
					u64 offset = (u64)track->vgt_strmout_bo_offset[i] +
						     (u64)track->vgt_strmout_size[i];
					if (offset > radeon_bo_size(track->vgt_strmout_bo[i])) {
						DRM_ERROR("streamout %d bo too small: 0x%llx, 0x%lx\n",
							  i, offset,
							  radeon_bo_size(track->vgt_strmout_bo[i]));
						return -EINVAL;
					}
				} else {
					dev_warn(p->dev, "No buffer for streamout %d\n", i);
					return -EINVAL;
				}
			}
		}
		track->streamout_dirty = false;
	}

	if (track->sx_misc_kill_all_prims)
		return 0;

	/* check that we have a cb for each enabled target; we don't check
	 * shader_mask because it seems mesa isn't always setting it :(
	 */
	if (track->cb_dirty) {
		tmp = track->cb_target_mask;
		for (i = 0; i < 8; i++) {
			if ((tmp >> (i * 4)) & 0xF) {
				/* at least one component is enabled */
				if (track->cb_color_bo[i] == NULL) {
					dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
						 __func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i);
					return -EINVAL;
				}
				/* perform rewrite of CB_COLOR[0-7]_SIZE */
				r = r600_cs_track_validate_cb(p, i);
				if (r)
					return r;
			}
		}
		track->cb_dirty = false;
	}

	/* Check depth buffer */
	if (track->db_dirty &&
	    G_028010_FORMAT(track->db_depth_info) != V_028010_DEPTH_INVALID &&
	    (G_028800_STENCIL_ENABLE(track->db_depth_control) ||
	     G_028800_Z_ENABLE(track->db_depth_control))) {
		r = r600_cs_track_validate_db(p);
		if (r)
			return r;
	}
	return 0;
}
/**
 * r600_cs_packet_parse() - parse cp packet and point ib index to next packet
 * @p:		parser structure holding parsing context
 * @pkt:	where to store packet information
 * @idx:	index into the ib at which the packet starts
 *
 * Assumes that chunk_ib_index is properly set. Returns -EINVAL if the
 * packet is bigger than the remaining ib size or if the packet type is
 * unknown.
 */
int r600_cs_packet_parse(struct radeon_cs_parser *p,
			 struct radeon_cs_packet *pkt,
			 unsigned idx)
{
	struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
	uint32_t header;

	if (idx >= ib_chunk->length_dw) {
		DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
			  idx, ib_chunk->length_dw);
		return -EINVAL;
	}
	header = radeon_get_ib_value(p, idx);
	pkt->idx = idx;
	pkt->type = CP_PACKET_GET_TYPE(header);
	pkt->count = CP_PACKET_GET_COUNT(header);
	pkt->one_reg_wr = 0;
	switch (pkt->type) {
	case PACKET_TYPE0:
		pkt->reg = CP_PACKET0_GET_REG(header);
		break;
	case PACKET_TYPE3:
		pkt->opcode = CP_PACKET3_GET_OPCODE(header);
		break;
	case PACKET_TYPE2:
		pkt->count = -1;
		break;
	default:
		DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
		return -EINVAL;
	}
	if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
		DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
			  pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
		return -EINVAL;
	}
	return 0;
}
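
/*
 * Header layout decoded above (for reference): a type-3 header packs
 * [31:30] = type, [29:16] = count (payload dwords minus one) and
 * [15:8] = opcode. E.g. the made-up header 0xC0032200 decodes as
 * type 3, count 3, opcode 0x22.
 */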
/**
 * r600_cs_packet_next_reloc_mm() - parse next packet which should be a reloc packet3
 * @p:		parser structure holding parsing context
 * @cs_reloc:	where to store the relocation information
 *
 * Checks that the next packet is a relocation packet3, does BO validation
 * and computes the GPU offset using the provided start.
 */
static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
					struct radeon_cs_reloc **cs_reloc)
{
	struct radeon_cs_chunk *relocs_chunk;
	struct radeon_cs_packet p3reloc;
	unsigned idx;
	int r;

	if (p->chunk_relocs_idx == -1) {
		DRM_ERROR("No relocation chunk !\n");
		return -EINVAL;
	}
	*cs_reloc = NULL;
	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
	r = r600_cs_packet_parse(p, &p3reloc, p->idx);
	if (r) {
		return r;
	}
	p->idx += p3reloc.count + 2;
	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
			  p3reloc.idx);
		return -EINVAL;
	}
	idx = radeon_get_ib_value(p, p3reloc.idx + 1);
	if (idx >= relocs_chunk->length_dw) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
			  idx, relocs_chunk->length_dw);
		return -EINVAL;
	}
	/* FIXME: we assume reloc size is 4 dwords */
	*cs_reloc = p->relocs_ptr[(idx / 4)];
	return 0;
}
/**
 * r600_cs_packet_next_reloc_nomm() - parse next packet which should be a reloc packet3
 * @p:		parser structure holding parsing context
 * @cs_reloc:	where to store the relocation information
 *
 * Checks that the next packet is a relocation packet3, does BO validation
 * and computes the GPU offset using the provided start.
 */
static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
					  struct radeon_cs_reloc **cs_reloc)
{
	struct radeon_cs_chunk *relocs_chunk;
	struct radeon_cs_packet p3reloc;
	unsigned idx;
	int r;

	if (p->chunk_relocs_idx == -1) {
		DRM_ERROR("No relocation chunk !\n");
		return -EINVAL;
	}
	*cs_reloc = NULL;
	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
	r = r600_cs_packet_parse(p, &p3reloc, p->idx);
	if (r) {
		return r;
	}
	p->idx += p3reloc.count + 2;
	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
			  p3reloc.idx);
		return -EINVAL;
	}
	idx = radeon_get_ib_value(p, p3reloc.idx + 1);
	if (idx >= relocs_chunk->length_dw) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
			  idx, relocs_chunk->length_dw);
		return -EINVAL;
	}
	*cs_reloc = p->relocs;
	(*cs_reloc)->lobj.gpu_offset = (u64)relocs_chunk->kdata[idx + 3] << 32;
	(*cs_reloc)->lobj.gpu_offset |= relocs_chunk->kdata[idx + 0];
	return 0;
}
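
/*
 * Note: the _mm and _nomm variants above walk the same NOP+reloc sequence.
 * The mm (KMS) path looks the relocation up in p->relocs_ptr, while the
 * nomm (legacy/UMS) path reads the GPU offset directly out of the chunk
 * data. r600_cs_packet_next_reloc points at whichever variant matches how
 * the parser was set up.
 */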
/**
 * r600_cs_packet_next_is_pkt3_nop() - test if next packet is packet3 nop for reloc
 * @p:	parser structure holding parsing context
 *
 * Returns 1 if the next packet in the command stream is a packet3 NOP
 * (the packet userspace uses to carry a relocation), 0 otherwise.
 */
static int r600_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet p3reloc;
	int r;

	r = r600_cs_packet_parse(p, &p3reloc, p->idx);
	if (r) {
		return 0;
	}
	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
		return 0;
	}
	return 1;
}
/**
 * r600_cs_packet_parse_vline() - parse userspace VLINE packet
 * @p:	parser structure holding parsing context
 *
 * Userspace sends a special sequence for VLINE waits:
 *   PACKET0    - VLINE_START_END + value
 *   PACKET3    - WAIT_REG_MEM poll vline status reg
 *   RELOC (P3) - crtc_id in reloc.
 *
 * This function parses this sequence and relocates the VLINE START END
 * and WAIT_REG_MEM packets to the correct crtc.
 * It also detects a switched-off crtc and nops out the wait in that case.
 */
static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
{
	struct drm_mode_object *obj;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	struct radeon_cs_packet p3reloc, wait_reg_mem;
	int crtc_id;
	int r;
	uint32_t header, h_idx, reg, wait_reg_mem_info;
	volatile uint32_t *ib;

	ib = p->ib.ptr;

	/* parse the WAIT_REG_MEM */
	r = r600_cs_packet_parse(p, &wait_reg_mem, p->idx);
	if (r)
		return r;

	/* check it's a WAIT_REG_MEM */
	if (wait_reg_mem.type != PACKET_TYPE3 ||
	    wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) {
		DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n");
		return -EINVAL;
	}

	wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1);
	/* bit 4 is reg (0) or mem (1) */
	if (wait_reg_mem_info & 0x10) {
		DRM_ERROR("vline WAIT_REG_MEM waiting on MEM rather than REG\n");
		return -EINVAL;
	}
	/* waiting for value to be equal */
	if ((wait_reg_mem_info & 0x7) != 0x3) {
		DRM_ERROR("vline WAIT_REG_MEM function not equal\n");
		return -EINVAL;
	}
	if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != AVIVO_D1MODE_VLINE_STATUS) {
		DRM_ERROR("vline WAIT_REG_MEM bad reg\n");
		return -EINVAL;
	}
	if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != AVIVO_D1MODE_VLINE_STAT) {
		DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n");
		return -EINVAL;
	}

	/* jump over the NOP */
	r = r600_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2);
	if (r)
		return r;

	h_idx = p->idx - 2;
	p->idx += wait_reg_mem.count + 2;
	p->idx += p3reloc.count + 2;

	header = radeon_get_ib_value(p, h_idx);
	crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
	reg = CP_PACKET0_GET_REG(header);

	obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
	if (!obj) {
		DRM_ERROR("cannot find crtc %d\n", crtc_id);
		return -EINVAL;
	}
	crtc = obj_to_crtc(obj);
	radeon_crtc = to_radeon_crtc(crtc);
	crtc_id = radeon_crtc->crtc_id;

	if (!crtc->enabled) {
		/* if the CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */
		ib[h_idx + 2] = PACKET2(0);
		ib[h_idx + 3] = PACKET2(0);
		ib[h_idx + 4] = PACKET2(0);
		ib[h_idx + 5] = PACKET2(0);
		ib[h_idx + 6] = PACKET2(0);
		ib[h_idx + 7] = PACKET2(0);
		ib[h_idx + 8] = PACKET2(0);
	} else if (crtc_id == 1) {
		switch (reg) {
		case AVIVO_D1MODE_VLINE_START_END:
			header &= ~R600_CP_PACKET0_REG_MASK;
			header |= AVIVO_D2MODE_VLINE_START_END >> 2;
			break;
		default:
			DRM_ERROR("unknown crtc reloc\n");
			return -EINVAL;
		}
		ib[h_idx] = header;
		ib[h_idx + 4] = AVIVO_D2MODE_VLINE_STATUS >> 2;
	}
	return 0;
}
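
/*
 * Dword layout of the sequence handled above, with h_idx pointing at the
 * PACKET0 header (illustrative summary of the parsing logic):
 *   h_idx + 0 : PACKET0 VLINE_START_END header
 *   h_idx + 1 : start/end value
 *   h_idx + 2 : PACKET3 WAIT_REG_MEM header (6 payload dwords follow)
 *   h_idx + 9 : PACKET3 NOP header
 *   h_idx + 10: reloc handle carrying crtc_id (read as h_idx + 2 + 7 + 1)
 */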
static int r600_packet0_check(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt,
			      unsigned idx, unsigned reg)
{
	int r;

	switch (reg) {
	case AVIVO_D1MODE_VLINE_START_END:
		r = r600_cs_packet_parse_vline(p);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			return r;
		}
		break;
	default:
		printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
		       reg, idx);
		return -EINVAL;
	}
	return 0;
}

static int r600_cs_parse_packet0(struct radeon_cs_parser *p,
				 struct radeon_cs_packet *pkt)
{
	unsigned reg, i;
	unsigned idx;
	int r;

	idx = pkt->idx + 1;
	reg = pkt->reg;
	for (i = 0; i <= pkt->count; i++, idx++, reg += 4) {
		r = r600_packet0_check(p, pkt, idx, reg);
		if (r) {
			return r;
		}
	}
	return 0;
}
/**
 * r600_cs_check_reg() - check if register is authorized or not
 * @p:		parser structure holding parsing context
 * @reg:	register we are testing
 * @idx:	index into the cs buffer
 *
 * This function will test against r600_reg_safe_bm and return 0 if the
 * register is safe. If the register is not flagged as safe, this function
 * will test it against a list of registers needing special handling.
 */
static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
{
	struct r600_cs_track *track = (struct r600_cs_track *)p->track;
	struct radeon_cs_reloc *reloc;
	u32 m, i, tmp, *ib;
	int r;

	i = (reg >> 7);
	if (i >= ARRAY_SIZE(r600_reg_safe_bm)) {
		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
		return -EINVAL;
	}
	m = 1 << ((reg >> 2) & 31);
	if (!(r600_reg_safe_bm[i] & m))
		return 0;
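	/*
	 * Illustrative bitmap arithmetic: registers are dword-addressed and
	 * there are 32 flag bits per bitmap word, so e.g. reg 0x028800 gives
	 *   i = 0x028800 >> 7               = 0x510
	 *   m = 1 << ((0x028800 >> 2) & 31) = 1 << 0
	 * If the bit is clear, the write was just allowed as-is above;
	 * otherwise we fall through to the special-case handling below.
	 */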
	ib = p->ib.ptr;
	switch (reg) {
	/* force the following regs to 0 in an attempt to disable the out
	 * buffer; we need to understand better how it works before we can
	 * perform security checks on it (Jerome)
	 */
	case R_0288A8_SQ_ESGS_RING_ITEMSIZE:
	case R_008C44_SQ_ESGS_RING_SIZE:
	case R_0288B0_SQ_ESTMP_RING_ITEMSIZE:
	case R_008C54_SQ_ESTMP_RING_SIZE:
	case R_0288C0_SQ_FBUF_RING_ITEMSIZE:
	case R_008C74_SQ_FBUF_RING_SIZE:
	case R_0288B4_SQ_GSTMP_RING_ITEMSIZE:
	case R_008C5C_SQ_GSTMP_RING_SIZE:
	case R_0288AC_SQ_GSVS_RING_ITEMSIZE:
	case R_008C4C_SQ_GSVS_RING_SIZE:
	case R_0288BC_SQ_PSTMP_RING_ITEMSIZE:
	case R_008C6C_SQ_PSTMP_RING_SIZE:
	case R_0288C4_SQ_REDUC_RING_ITEMSIZE:
	case R_008C7C_SQ_REDUC_RING_SIZE:
	case R_0288B8_SQ_VSTMP_RING_ITEMSIZE:
	case R_008C64_SQ_VSTMP_RING_SIZE:
	case R_0288C8_SQ_GS_VERT_ITEMSIZE:
		/* get value to populate the IB, don't remove */
		tmp = radeon_get_ib_value(p, idx);
		ib[idx] = 0;
		break;
	case SQ_CONFIG:
		track->sq_config = radeon_get_ib_value(p, idx);
		break;
	case R_028800_DB_DEPTH_CONTROL:
		track->db_depth_control = radeon_get_ib_value(p, idx);
		track->db_dirty = true;
		break;
	case R_028010_DB_DEPTH_INFO:
		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) &&
		    r600_cs_packet_next_is_pkt3_nop(p)) {
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				dev_warn(p->dev, "bad SET_CONTEXT_REG "
					 "0x%04X\n", reg);
				return -EINVAL;
			}
			track->db_depth_info = radeon_get_ib_value(p, idx);
			ib[idx] &= C_028010_ARRAY_MODE;
			track->db_depth_info &= C_028010_ARRAY_MODE;
			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
				ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1);
				track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1);
			} else {
				ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1);
				track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1);
			}
		} else {
			track->db_depth_info = radeon_get_ib_value(p, idx);
		}
		track->db_dirty = true;
		break;
	case R_028004_DB_DEPTH_VIEW:
		track->db_depth_view = radeon_get_ib_value(p, idx);
		track->db_dirty = true;
		break;
	case R_028000_DB_DEPTH_SIZE:
		track->db_depth_size = radeon_get_ib_value(p, idx);
		track->db_depth_size_idx = idx;
		track->db_dirty = true;
		break;
	case R_028AB0_VGT_STRMOUT_EN:
		track->vgt_strmout_en = radeon_get_ib_value(p, idx);
		track->streamout_dirty = true;
		break;
	case R_028B20_VGT_STRMOUT_BUFFER_EN:
		track->vgt_strmout_buffer_en = radeon_get_ib_value(p, idx);
		track->streamout_dirty = true;
		break;
	case VGT_STRMOUT_BUFFER_BASE_0:
	case VGT_STRMOUT_BUFFER_BASE_1:
	case VGT_STRMOUT_BUFFER_BASE_2:
	case VGT_STRMOUT_BUFFER_BASE_3:
		r = r600_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		tmp = (reg - VGT_STRMOUT_BUFFER_BASE_0) / 16;
		track->vgt_strmout_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->vgt_strmout_bo[tmp] = reloc->robj;
		track->vgt_strmout_bo_mc[tmp] = reloc->lobj.gpu_offset;
		track->streamout_dirty = true;
		break;
	case VGT_STRMOUT_BUFFER_SIZE_0:
	case VGT_STRMOUT_BUFFER_SIZE_1:
	case VGT_STRMOUT_BUFFER_SIZE_2:
	case VGT_STRMOUT_BUFFER_SIZE_3:
		tmp = (reg - VGT_STRMOUT_BUFFER_SIZE_0) / 16;
		/* size in register is DWs, convert to bytes */
		track->vgt_strmout_size[tmp] = radeon_get_ib_value(p, idx) * 4;
		track->streamout_dirty = true;
		break;
	case CP_COHER_BASE:
		r = r600_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "missing reloc for CP_COHER_BASE "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		break;
	case R_028238_CB_TARGET_MASK:
		track->cb_target_mask = radeon_get_ib_value(p, idx);
		track->cb_dirty = true;
		break;
	case R_02823C_CB_SHADER_MASK:
		track->cb_shader_mask = radeon_get_ib_value(p, idx);
		break;
	case R_028C04_PA_SC_AA_CONFIG:
		tmp = G_028C04_MSAA_NUM_SAMPLES(radeon_get_ib_value(p, idx));
		track->log_nsamples = tmp;
		track->nsamples = 1 << tmp;
		track->cb_dirty = true;
		break;
	case R_0280A0_CB_COLOR0_INFO:
	case R_0280A4_CB_COLOR1_INFO:
	case R_0280A8_CB_COLOR2_INFO:
	case R_0280AC_CB_COLOR3_INFO:
	case R_0280B0_CB_COLOR4_INFO:
	case R_0280B4_CB_COLOR5_INFO:
	case R_0280B8_CB_COLOR6_INFO:
	case R_0280BC_CB_COLOR7_INFO:
		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) &&
		    r600_cs_packet_next_is_pkt3_nop(p)) {
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
				return -EINVAL;
			}
			tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4;
			track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
				ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1);
				track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1);
			} else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
				ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1);
				track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1);
			}
		} else {
			tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4;
			track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
		}
		track->cb_dirty = true;
		break;
	case R_028080_CB_COLOR0_VIEW:
	case R_028084_CB_COLOR1_VIEW:
	case R_028088_CB_COLOR2_VIEW:
	case R_02808C_CB_COLOR3_VIEW:
	case R_028090_CB_COLOR4_VIEW:
	case R_028094_CB_COLOR5_VIEW:
	case R_028098_CB_COLOR6_VIEW:
	case R_02809C_CB_COLOR7_VIEW:
		tmp = (reg - R_028080_CB_COLOR0_VIEW) / 4;
		track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
		track->cb_dirty = true;
		break;
	case R_028060_CB_COLOR0_SIZE:
	case R_028064_CB_COLOR1_SIZE:
	case R_028068_CB_COLOR2_SIZE:
	case R_02806C_CB_COLOR3_SIZE:
	case R_028070_CB_COLOR4_SIZE:
	case R_028074_CB_COLOR5_SIZE:
	case R_028078_CB_COLOR6_SIZE:
	case R_02807C_CB_COLOR7_SIZE:
		tmp = (reg - R_028060_CB_COLOR0_SIZE) / 4;
		track->cb_color_size[tmp] = radeon_get_ib_value(p, idx);
		track->cb_color_size_idx[tmp] = idx;
		track->cb_dirty = true;
		break;
	/* These registers were added late; there is userspace which does
	 * provide relocations for them but sets a 0 offset. To avoid
	 * breaking old userspace we detect this and set the address to
	 * point to the last CB_COLOR0_BASE. Note that if userspace doesn't
	 * set CB_COLOR0_BASE before this register, we report an error.
	 * Old userspace always sets CB_COLOR0_BASE before any of these.
	 */
	case R_0280E0_CB_COLOR0_FRAG:
	case R_0280E4_CB_COLOR1_FRAG:
	case R_0280E8_CB_COLOR2_FRAG:
	case R_0280EC_CB_COLOR3_FRAG:
	case R_0280F0_CB_COLOR4_FRAG:
	case R_0280F4_CB_COLOR5_FRAG:
	case R_0280F8_CB_COLOR6_FRAG:
	case R_0280FC_CB_COLOR7_FRAG:
		tmp = (reg - R_0280E0_CB_COLOR0_FRAG) / 4;
		if (!r600_cs_packet_next_is_pkt3_nop(p)) {
			if (!track->cb_color_base_last[tmp]) {
				dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
				return -EINVAL;
			}
			track->cb_color_frag_bo[tmp] = track->cb_color_bo[tmp];
			track->cb_color_frag_offset[tmp] = track->cb_color_bo_offset[tmp];
			ib[idx] = track->cb_color_base_last[tmp];
		} else {
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
				return -EINVAL;
			}
			track->cb_color_frag_bo[tmp] = reloc->robj;
			track->cb_color_frag_offset[tmp] = (u64)ib[idx] << 8;
			ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		}
		if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) {
			track->cb_dirty = true;
		}
		break;
	case R_0280C0_CB_COLOR0_TILE:
	case R_0280C4_CB_COLOR1_TILE:
	case R_0280C8_CB_COLOR2_TILE:
	case R_0280CC_CB_COLOR3_TILE:
	case R_0280D0_CB_COLOR4_TILE:
	case R_0280D4_CB_COLOR5_TILE:
	case R_0280D8_CB_COLOR6_TILE:
	case R_0280DC_CB_COLOR7_TILE:
		tmp = (reg - R_0280C0_CB_COLOR0_TILE) / 4;
		if (!r600_cs_packet_next_is_pkt3_nop(p)) {
			if (!track->cb_color_base_last[tmp]) {
				dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
				return -EINVAL;
			}
			track->cb_color_tile_bo[tmp] = track->cb_color_bo[tmp];
			track->cb_color_tile_offset[tmp] = track->cb_color_bo_offset[tmp];
			ib[idx] = track->cb_color_base_last[tmp];
		} else {
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
				return -EINVAL;
			}
			track->cb_color_tile_bo[tmp] = reloc->robj;
			track->cb_color_tile_offset[tmp] = (u64)ib[idx] << 8;
			ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		}
		if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) {
			track->cb_dirty = true;
		}
		break;
	case R_028100_CB_COLOR0_MASK:
	case R_028104_CB_COLOR1_MASK:
	case R_028108_CB_COLOR2_MASK:
	case R_02810C_CB_COLOR3_MASK:
	case R_028110_CB_COLOR4_MASK:
	case R_028114_CB_COLOR5_MASK:
	case R_028118_CB_COLOR6_MASK:
	case R_02811C_CB_COLOR7_MASK:
		tmp = (reg - R_028100_CB_COLOR0_MASK) / 4;
		track->cb_color_mask[tmp] = radeon_get_ib_value(p, idx);
		if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) {
			track->cb_dirty = true;
		}
		break;
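	/* CB_COLORn_BASE: color buffer base addresses.  The relocated
	 * value is remembered in cb_color_base_last[] so that the
	 * FRAG/TILE fallback above has a valid address to reuse.
	 */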
	case CB_COLOR0_BASE:
	case CB_COLOR1_BASE:
	case CB_COLOR2_BASE:
	case CB_COLOR3_BASE:
	case CB_COLOR4_BASE:
	case CB_COLOR5_BASE:
	case CB_COLOR6_BASE:
	case CB_COLOR7_BASE:
		r = r600_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
			return -EINVAL;
		}
		tmp = (reg - CB_COLOR0_BASE) / 4;
		track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->cb_color_base_last[tmp] = ib[idx];
		track->cb_color_bo[tmp] = reloc->robj;
		track->cb_color_bo_mc[tmp] = reloc->lobj.gpu_offset;
		track->cb_dirty = true;
		break;
	case DB_DEPTH_BASE:
		r = r600_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
			return -EINVAL;
		}
		track->db_offset = radeon_get_ib_value(p, idx) << 8;
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->db_bo = reloc->robj;
		track->db_bo_mc = reloc->lobj.gpu_offset;
		track->db_dirty = true;
		break;
	case DB_HTILE_DATA_BASE:
		r = r600_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
			return -EINVAL;
		}
		track->htile_offset = radeon_get_ib_value(p, idx) << 8;
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->htile_bo = reloc->robj;
		track->db_dirty = true;
		break;
	case DB_HTILE_SURFACE:
		track->htile_surface = radeon_get_ib_value(p, idx);
		track->db_dirty = true;
		break;
	case SQ_PGM_START_FS:
	case SQ_PGM_START_ES:
	case SQ_PGM_START_VS:
	case SQ_PGM_START_GS:
	case SQ_PGM_START_PS:
	case SQ_ALU_CONST_CACHE_GS_0:
	case SQ_ALU_CONST_CACHE_GS_1:
	case SQ_ALU_CONST_CACHE_GS_2:
	case SQ_ALU_CONST_CACHE_GS_3:
	case SQ_ALU_CONST_CACHE_GS_4:
	case SQ_ALU_CONST_CACHE_GS_5:
	case SQ_ALU_CONST_CACHE_GS_6:
	case SQ_ALU_CONST_CACHE_GS_7:
	case SQ_ALU_CONST_CACHE_GS_8:
	case SQ_ALU_CONST_CACHE_GS_9:
	case SQ_ALU_CONST_CACHE_GS_10:
	case SQ_ALU_CONST_CACHE_GS_11:
	case SQ_ALU_CONST_CACHE_GS_12:
	case SQ_ALU_CONST_CACHE_GS_13:
	case SQ_ALU_CONST_CACHE_GS_14:
	case SQ_ALU_CONST_CACHE_GS_15:
	case SQ_ALU_CONST_CACHE_PS_0:
	case SQ_ALU_CONST_CACHE_PS_1:
	case SQ_ALU_CONST_CACHE_PS_2:
	case SQ_ALU_CONST_CACHE_PS_3:
	case SQ_ALU_CONST_CACHE_PS_4:
	case SQ_ALU_CONST_CACHE_PS_5:
	case SQ_ALU_CONST_CACHE_PS_6:
	case SQ_ALU_CONST_CACHE_PS_7:
	case SQ_ALU_CONST_CACHE_PS_8:
	case SQ_ALU_CONST_CACHE_PS_9:
	case SQ_ALU_CONST_CACHE_PS_10:
	case SQ_ALU_CONST_CACHE_PS_11:
	case SQ_ALU_CONST_CACHE_PS_12:
	case SQ_ALU_CONST_CACHE_PS_13:
	case SQ_ALU_CONST_CACHE_PS_14:
	case SQ_ALU_CONST_CACHE_PS_15:
	case SQ_ALU_CONST_CACHE_VS_0:
	case SQ_ALU_CONST_CACHE_VS_1:
	case SQ_ALU_CONST_CACHE_VS_2:
	case SQ_ALU_CONST_CACHE_VS_3:
	case SQ_ALU_CONST_CACHE_VS_4:
	case SQ_ALU_CONST_CACHE_VS_5:
	case SQ_ALU_CONST_CACHE_VS_6:
	case SQ_ALU_CONST_CACHE_VS_7:
	case SQ_ALU_CONST_CACHE_VS_8:
	case SQ_ALU_CONST_CACHE_VS_9:
	case SQ_ALU_CONST_CACHE_VS_10:
	case SQ_ALU_CONST_CACHE_VS_11:
	case SQ_ALU_CONST_CACHE_VS_12:
	case SQ_ALU_CONST_CACHE_VS_13:
	case SQ_ALU_CONST_CACHE_VS_14:
	case SQ_ALU_CONST_CACHE_VS_15:
		r = r600_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		break;
	case SX_MEMORY_EXPORT_BASE:
		r = r600_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONFIG_REG 0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		break;
	case SX_MISC:
		track->sx_misc_kill_all_prims = (radeon_get_ib_value(p, idx) & 0x1) != 0;
		break;
	default:
		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
		return -EINVAL;
	}
	return 0;
}
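
/* Size of one axis of a texture mip level: the base size shifted right
 * by the level, clamped to at least one texel.  Levels above the base
 * are rounded up to a power of two (e.g. a 100-texel dimension at
 * level 2 gives max(1, 100 >> 2) = 25, rounded up to 32), which is how
 * the mip chain sizes are computed throughout this checker.
 */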
unsigned r600_mip_minify(unsigned size, unsigned level)
{
	unsigned val;

	val = max(1U, size >> level);
	if (level > 0)
		val = roundup_pow_of_two(val);
	return val;
}
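
/* Walk the mip chain from blevel to llevel, accumulating the footprint
 * of each level: blocks-x * blocks-y * blocksize * nsamples, multiplied
 * by the face count (cubemaps/arrays) or by the depth (3D textures).
 * Level 0's size is returned separately in *l0_size; when the base
 * level is part of the chain, *mipmap_size covers the remaining levels
 * only.
 */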
static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned llevel,
			      unsigned w0, unsigned h0, unsigned d0, unsigned nsamples, unsigned format,
			      unsigned block_align, unsigned height_align, unsigned base_align,
			      unsigned *l0_size, unsigned *mipmap_size)
{
	unsigned offset, i, level;
	unsigned width, height, depth, size;
	unsigned blocksize;
	unsigned nbx, nby;
	unsigned nlevels = llevel - blevel + 1;

	*l0_size = -1;
	blocksize = r600_fmt_get_blocksize(format);

	w0 = r600_mip_minify(w0, 0);
	h0 = r600_mip_minify(h0, 0);
	d0 = r600_mip_minify(d0, 0);
	for (i = 0, offset = 0, level = blevel; i < nlevels; i++, level++) {
		width = r600_mip_minify(w0, i);
		nbx = r600_fmt_get_nblocksx(format, width);

		nbx = round_up(nbx, block_align);

		height = r600_mip_minify(h0, i);
		nby = r600_fmt_get_nblocksy(format, height);
		nby = round_up(nby, height_align);

		depth = r600_mip_minify(d0, i);

		size = nbx * nby * blocksize * nsamples;
		if (nfaces)
			size *= nfaces;
		else
			size *= depth;

		if (i == 0)
			*l0_size = size;

		if (i == 0 || i == 1)
			offset = round_up(offset, base_align);

		offset += size;
	}
	*mipmap_size = offset;
	if (llevel == 0)
		*mipmap_size = *l0_size;
	if (!blevel)
		*mipmap_size -= *l0_size;
}

/**
 * r600_check_texture_resource() - check if a texture resource is valid
 * @p: parser structure holding parsing context
 * @idx: index into the cs buffer
 * @texture: texture's bo structure
 * @mipmap: mipmap's bo structure
 * @base_offset: base offset of the texture (in 256-byte units)
 * @mip_offset: base offset of the mipmap chain (in 256-byte units)
 * @tiling_flags: tiling flags from the relocation
 *
 * This function checks that the resource has valid fields and that
 * the texture and mipmap bo objects are big enough to cover this
 * resource.
 */
static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
				       struct radeon_bo *texture,
				       struct radeon_bo *mipmap,
				       u64 base_offset,
				       u64 mip_offset,
				       u32 tiling_flags)
{
	struct r600_cs_track *track = p->track;
	u32 dim, nfaces, llevel, blevel, w0, h0, d0;
	u32 word0, word1, l0_size, mipmap_size, word2, word3, word4, word5;
	u32 height_align, pitch, pitch_align, depth_align;
	u32 barray, larray;
	u64 base_align;
	struct array_mode_checker array_check;
	u32 format;
	bool is_array;

	/* on legacy kernel we don't perform advanced check */
	if (p->rdev == NULL)
		return 0;

	/* convert to bytes */
	base_offset <<= 8;
	mip_offset <<= 8;

	word0 = radeon_get_ib_value(p, idx + 0);
	if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
		if (tiling_flags & RADEON_TILING_MACRO)
			word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
		else if (tiling_flags & RADEON_TILING_MICRO)
			word0 |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
	}
	word1 = radeon_get_ib_value(p, idx + 1);
	word2 = radeon_get_ib_value(p, idx + 2) << 8;
	word3 = radeon_get_ib_value(p, idx + 3) << 8;
	word4 = radeon_get_ib_value(p, idx + 4);
	word5 = radeon_get_ib_value(p, idx + 5);
	dim = G_038000_DIM(word0);
	w0 = G_038000_TEX_WIDTH(word0) + 1;
	pitch = (G_038000_PITCH(word0) + 1) * 8;
	h0 = G_038004_TEX_HEIGHT(word1) + 1;
	d0 = G_038004_TEX_DEPTH(word1);
	format = G_038004_DATA_FORMAT(word1);
	blevel = G_038010_BASE_LEVEL(word4);
	llevel = G_038014_LAST_LEVEL(word5);
	/* pitch in texels */
	array_check.array_mode = G_038000_TILE_MODE(word0);
	array_check.group_size = track->group_size;
	array_check.nbanks = track->nbanks;
	array_check.npipes = track->npipes;
	array_check.nsamples = 1;
	array_check.blocksize = r600_fmt_get_blocksize(format);
	nfaces = 1;
	is_array = false;
	switch (dim) {
	case V_038000_SQ_TEX_DIM_1D:
	case V_038000_SQ_TEX_DIM_2D:
	case V_038000_SQ_TEX_DIM_3D:
		break;
	case V_038000_SQ_TEX_DIM_CUBEMAP:
		if (p->family >= CHIP_RV770)
			nfaces = 8;
		else
			nfaces = 6;
		break;
	case V_038000_SQ_TEX_DIM_1D_ARRAY:
	case V_038000_SQ_TEX_DIM_2D_ARRAY:
		is_array = true;
		break;
	case V_038000_SQ_TEX_DIM_2D_ARRAY_MSAA:
		is_array = true;
		/* fall through */
	case V_038000_SQ_TEX_DIM_2D_MSAA:
		array_check.nsamples = 1 << llevel;
		llevel = 0;
		break;
	default:
		dev_warn(p->dev, "this kernel doesn't support %d texture dim\n", G_038000_DIM(word0));
		return -EINVAL;
	}
	if (!r600_fmt_is_valid_texture(format, p->family)) {
		dev_warn(p->dev, "%s:%d texture invalid format %d\n",
			 __func__, __LINE__, format);
		return -EINVAL;
	}

	if (r600_get_array_mode_alignment(&array_check,
					  &pitch_align, &height_align, &depth_align, &base_align)) {
		dev_warn(p->dev, "%s:%d tex array mode (%d) invalid\n",
			 __func__, __LINE__, G_038000_TILE_MODE(word0));
		return -EINVAL;
	}

	/* XXX check height as well... */

	if (!IS_ALIGNED(pitch, pitch_align)) {
		dev_warn(p->dev, "%s:%d tex pitch (%d, 0x%x, %d) invalid\n",
			 __func__, __LINE__, pitch, pitch_align, G_038000_TILE_MODE(word0));
		return -EINVAL;
	}
	if (!IS_ALIGNED(base_offset, base_align)) {
		dev_warn(p->dev, "%s:%d tex base offset (0x%llx, 0x%llx, %d) invalid\n",
			 __func__, __LINE__, base_offset, base_align, G_038000_TILE_MODE(word0));
		return -EINVAL;
	}
	if (!IS_ALIGNED(mip_offset, base_align)) {
		dev_warn(p->dev, "%s:%d tex mip offset (0x%llx, 0x%llx, %d) invalid\n",
			 __func__, __LINE__, mip_offset, base_align, G_038000_TILE_MODE(word0));
		return -EINVAL;
	}

	if (blevel > llevel) {
		dev_warn(p->dev, "texture blevel %d > llevel %d\n",
			 blevel, llevel);
	}
	if (is_array) {
		barray = G_038014_BASE_ARRAY(word5);
		larray = G_038014_LAST_ARRAY(word5);

		nfaces = larray - barray + 1;
	}
	r600_texture_size(nfaces, blevel, llevel, w0, h0, d0, array_check.nsamples, format,
			  pitch_align, height_align, base_align,
			  &l0_size, &mipmap_size);
	/* using get ib will give us the offset into the texture bo */
	if ((l0_size + word2) > radeon_bo_size(texture)) {
		dev_warn(p->dev, "texture bo too small ((%d %d) (%d %d) %d %d %d -> %d have %ld)\n",
			 w0, h0, pitch_align, height_align,
			 array_check.array_mode, format, word2,
			 l0_size, radeon_bo_size(texture));
		dev_warn(p->dev, "alignments %d %d %d %lld\n", pitch, pitch_align, height_align, base_align);
		return -EINVAL;
	}
	/* using get ib will give us the offset into the mipmap bo */
	if ((mipmap_size + word3) > radeon_bo_size(mipmap)) {
		/*dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n",
		  w0, h0, format, blevel, nlevels, word3, mipmap_size, radeon_bo_size(texture));*/
	}
	return 0;
}
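
/* A register may be copied to/from memory (e.g. by COPY_DW) only when
 * its bit is clear in r600_reg_safe_bm: each 32-bit word of the bitmap
 * covers a 128-byte register window, one bit per dword register.
 */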
static bool r600_is_safe_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
{
	u32 m, i;

	i = (reg >> 7);
	if (i >= ARRAY_SIZE(r600_reg_safe_bm)) {
		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
		return false;
	}
	m = 1 << ((reg >> 2) & 31);
	if (!(r600_reg_safe_bm[i] & m))
		return true;
	dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
	return false;
}
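
/* Validate a single type-3 packet: check its dword count, patch any
 * embedded addresses through their relocations, and reject register
 * ranges or opcodes that userspace must not touch.
 */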
static int r600_packet3_check(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt)
{
	struct radeon_cs_reloc *reloc;
	struct r600_cs_track *track;
	volatile u32 *ib;
	unsigned idx;
	unsigned i;
	unsigned start_reg, end_reg, reg;
	int r;
	u32 idx_value;

	track = (struct r600_cs_track *)p->track;
	ib = p->ib.ptr;
	idx = pkt->idx + 1;
	idx_value = radeon_get_ib_value(p, idx);

	switch (pkt->opcode) {
	case PACKET3_SET_PREDICATION:
	{
		int pred_op;
		int tmp;
		uint64_t offset;

		if (pkt->count != 1) {
			DRM_ERROR("bad SET PREDICATION\n");
			return -EINVAL;
		}

		tmp = radeon_get_ib_value(p, idx + 1);
		pred_op = (tmp >> 16) & 0x7;

		/* for the clear predicate operation */
		if (pred_op == 0)
			return 0;

		if (pred_op > 2) {
			DRM_ERROR("bad SET PREDICATION operation %d\n", pred_op);
			return -EINVAL;
		}

		r = r600_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad SET PREDICATION\n");
			return -EINVAL;
		}

		offset = reloc->lobj.gpu_offset +
			 (idx_value & 0xfffffff0) +
			 ((u64)(tmp & 0xff) << 32);

		ib[idx + 0] = offset;
		ib[idx + 1] = (tmp & 0xffffff00) | (upper_32_bits(offset) & 0xff);
	}
	break;
	case PACKET3_START_3D_CMDBUF:
		if (p->family >= CHIP_RV770 || pkt->count) {
			DRM_ERROR("bad START_3D\n");
			return -EINVAL;
		}
		break;
	case PACKET3_CONTEXT_CONTROL:
		if (pkt->count != 1) {
			DRM_ERROR("bad CONTEXT_CONTROL\n");
			return -EINVAL;
		}
		break;
	case PACKET3_INDEX_TYPE:
	case PACKET3_NUM_INSTANCES:
		if (pkt->count) {
			DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES\n");
			return -EINVAL;
		}
		break;
	case PACKET3_DRAW_INDEX:
	{
		uint64_t offset;

		if (pkt->count != 3) {
			DRM_ERROR("bad DRAW_INDEX\n");
			return -EINVAL;
		}
		r = r600_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad DRAW_INDEX\n");
			return -EINVAL;
		}

		offset = reloc->lobj.gpu_offset +
			 idx_value +
			 ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);

		ib[idx+0] = offset;
		ib[idx+1] = upper_32_bits(offset) & 0xff;

		r = r600_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	}
	case PACKET3_DRAW_INDEX_AUTO:
		if (pkt->count != 1) {
			DRM_ERROR("bad DRAW_INDEX_AUTO\n");
			return -EINVAL;
		}
		r = r600_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX_IMMD_BE:
	case PACKET3_DRAW_INDEX_IMMD:
		if (pkt->count < 2) {
			DRM_ERROR("bad DRAW_INDEX_IMMD\n");
			return -EINVAL;
		}
		r = r600_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	case PACKET3_WAIT_REG_MEM:
		if (pkt->count != 5) {
			DRM_ERROR("bad WAIT_REG_MEM\n");
			return -EINVAL;
		}
		/* bit 4 is reg (0) or mem (1) */
		if (idx_value & 0x10) {
			uint64_t offset;

			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad WAIT_REG_MEM\n");
				return -EINVAL;
			}

			offset = reloc->lobj.gpu_offset +
				 (radeon_get_ib_value(p, idx+1) & 0xfffffff0) +
				 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);

			ib[idx+1] = (ib[idx+1] & 0x3) | (offset & 0xfffffff0);
			ib[idx+2] = upper_32_bits(offset) & 0xff;
		}
		break;
	case PACKET3_SURFACE_SYNC:
		if (pkt->count != 3) {
			DRM_ERROR("bad SURFACE_SYNC\n");
			return -EINVAL;
		}
		/* 0xffffffff/0x0 is flush all cache flag */
		if (radeon_get_ib_value(p, idx + 1) != 0xffffffff ||
		    radeon_get_ib_value(p, idx + 2) != 0) {
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad SURFACE_SYNC\n");
				return -EINVAL;
			}
			ib[idx+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		}
		break;
	case PACKET3_EVENT_WRITE:
		if (pkt->count != 2 && pkt->count != 0) {
			DRM_ERROR("bad EVENT_WRITE\n");
			return -EINVAL;
		}
		if (pkt->count) {
			uint64_t offset;

			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad EVENT_WRITE\n");
				return -EINVAL;
			}
			offset = reloc->lobj.gpu_offset +
				 (radeon_get_ib_value(p, idx+1) & 0xfffffff8) +
				 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);

			ib[idx+1] = offset & 0xfffffff8;
			ib[idx+2] = upper_32_bits(offset) & 0xff;
		}
		break;
	case PACKET3_EVENT_WRITE_EOP:
	{
		uint64_t offset;

		if (pkt->count != 4) {
			DRM_ERROR("bad EVENT_WRITE_EOP\n");
			return -EINVAL;
		}
		r = r600_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad EVENT_WRITE_EOP\n");
			return -EINVAL;
		}

		offset = reloc->lobj.gpu_offset +
			 (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
			 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);

		ib[idx+1] = offset & 0xfffffffc;
		ib[idx+2] = (ib[idx+2] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
		break;
	}
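	/* The SET_*_REG packets below share one pattern: verify that the
	 * whole register window written by the packet stays inside the
	 * legal range for that register type, then (for config/context
	 * registers) check each register individually.
	 */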
	case PACKET3_SET_CONFIG_REG:
		start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CONFIG_REG_OFFSET) ||
		    (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
		    (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
			DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
			return -EINVAL;
		}
		for (i = 0; i < pkt->count; i++) {
			reg = start_reg + (4 * i);
			r = r600_cs_check_reg(p, reg, idx+1+i);
			if (r)
				return r;
		}
		break;
	case PACKET3_SET_CONTEXT_REG:
		start_reg = (idx_value << 2) + PACKET3_SET_CONTEXT_REG_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CONTEXT_REG_OFFSET) ||
		    (start_reg >= PACKET3_SET_CONTEXT_REG_END) ||
		    (end_reg >= PACKET3_SET_CONTEXT_REG_END)) {
			DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n");
			return -EINVAL;
		}
		for (i = 0; i < pkt->count; i++) {
			reg = start_reg + (4 * i);
			r = r600_cs_check_reg(p, reg, idx+1+i);
			if (r)
				return r;
		}
		break;
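	/* SET_RESOURCE: r600 resource descriptors are 7 dwords each;
	 * dword 6 says whether a slot holds a texture or a vertex
	 * buffer, and each valid slot consumes its relocation(s).
	 */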
	case PACKET3_SET_RESOURCE:
		if (pkt->count % 7) {
			DRM_ERROR("bad SET_RESOURCE\n");
			return -EINVAL;
		}
		start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_RESOURCE_OFFSET) ||
		    (start_reg >= PACKET3_SET_RESOURCE_END) ||
		    (end_reg >= PACKET3_SET_RESOURCE_END)) {
			DRM_ERROR("bad SET_RESOURCE\n");
			return -EINVAL;
		}
		for (i = 0; i < (pkt->count / 7); i++) {
			struct radeon_bo *texture, *mipmap;
			u32 size, offset, base_offset, mip_offset;

			switch (G__SQ_VTX_CONSTANT_TYPE(radeon_get_ib_value(p, idx+(i*7)+6+1))) {
			case SQ_TEX_VTX_VALID_TEXTURE:
				/* tex base */
				r = r600_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE\n");
					return -EINVAL;
				}
				base_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
				if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
					if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
						ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
					else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
						ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
				}
				texture = reloc->robj;
				/* tex mip base */
				r = r600_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE\n");
					return -EINVAL;
				}
				mip_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
				mipmap = reloc->robj;
				r = r600_check_texture_resource(p, idx+(i*7)+1,
								texture, mipmap,
								base_offset + radeon_get_ib_value(p, idx+1+(i*7)+2),
								mip_offset + radeon_get_ib_value(p, idx+1+(i*7)+3),
								reloc->lobj.tiling_flags);
				if (r)
					return r;
				ib[idx+1+(i*7)+2] += base_offset;
				ib[idx+1+(i*7)+3] += mip_offset;
				break;
			case SQ_TEX_VTX_VALID_BUFFER:
			{
				uint64_t offset64;
				/* vtx base */
				r = r600_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE\n");
					return -EINVAL;
				}
				offset = radeon_get_ib_value(p, idx+1+(i*7)+0);
				size = radeon_get_ib_value(p, idx+1+(i*7)+1) + 1;
				if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) {
					/* force size to size of the buffer */
					dev_warn(p->dev, "vbo resource seems too big (%d) for the bo (%ld)\n",
						 size + offset, radeon_bo_size(reloc->robj));
					ib[idx+1+(i*7)+1] = radeon_bo_size(reloc->robj) - offset;
				}

				offset64 = reloc->lobj.gpu_offset + offset;
				/* write back with the same 7-dword stride used for the
				 * reads above (this originally used i*8, which would
				 * patch the wrong slot for any resource after the first)
				 */
				ib[idx+1+(i*7)+0] = offset64;
				ib[idx+1+(i*7)+2] = (ib[idx+1+(i*7)+2] & 0xffffff00) |
						    (upper_32_bits(offset64) & 0xff);
				break;
			}
			case SQ_TEX_VTX_INVALID_TEXTURE:
			case SQ_TEX_VTX_INVALID_BUFFER:
			default:
				DRM_ERROR("bad SET_RESOURCE\n");
				return -EINVAL;
			}
		}
		break;
	case PACKET3_SET_ALU_CONST:
		if (track->sq_config & DX9_CONSTS) {
			start_reg = (idx_value << 2) + PACKET3_SET_ALU_CONST_OFFSET;
			end_reg = 4 * pkt->count + start_reg - 4;
			if ((start_reg < PACKET3_SET_ALU_CONST_OFFSET) ||
			    (start_reg >= PACKET3_SET_ALU_CONST_END) ||
			    (end_reg >= PACKET3_SET_ALU_CONST_END)) {
				DRM_ERROR("bad SET_ALU_CONST\n");
				return -EINVAL;
			}
		}
		break;
	case PACKET3_SET_BOOL_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_BOOL_CONST_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_BOOL_CONST_OFFSET) ||
		    (start_reg >= PACKET3_SET_BOOL_CONST_END) ||
		    (end_reg >= PACKET3_SET_BOOL_CONST_END)) {
			DRM_ERROR("bad SET_BOOL_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_LOOP_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_LOOP_CONST_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_LOOP_CONST_OFFSET) ||
		    (start_reg >= PACKET3_SET_LOOP_CONST_END) ||
		    (end_reg >= PACKET3_SET_LOOP_CONST_END)) {
			DRM_ERROR("bad SET_LOOP_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_CTL_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_CTL_CONST_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CTL_CONST_OFFSET) ||
		    (start_reg >= PACKET3_SET_CTL_CONST_END) ||
		    (end_reg >= PACKET3_SET_CTL_CONST_END)) {
			DRM_ERROR("bad SET_CTL_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_SAMPLER:
		if (pkt->count % 3) {
			DRM_ERROR("bad SET_SAMPLER\n");
			return -EINVAL;
		}
		start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_SAMPLER_OFFSET) ||
		    (start_reg >= PACKET3_SET_SAMPLER_END) ||
		    (end_reg >= PACKET3_SET_SAMPLER_END)) {
			DRM_ERROR("bad SET_SAMPLER\n");
			return -EINVAL;
		}
		break;
	case PACKET3_STRMOUT_BASE_UPDATE:
		if (p->family < CHIP_RV770) {
			DRM_ERROR("STRMOUT_BASE_UPDATE only supported on 7xx\n");
			return -EINVAL;
		}
		if (pkt->count != 1) {
			DRM_ERROR("bad STRMOUT_BASE_UPDATE packet count\n");
			return -EINVAL;
		}
		if (idx_value > 3) {
			DRM_ERROR("bad STRMOUT_BASE_UPDATE index\n");
			return -EINVAL;
		}
		{
			u64 offset;

			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad STRMOUT_BASE_UPDATE reloc\n");
				return -EINVAL;
			}

			if (reloc->robj != track->vgt_strmout_bo[idx_value]) {
				DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo does not match\n");
				return -EINVAL;
			}

			offset = radeon_get_ib_value(p, idx+1) << 8;
			if (offset != track->vgt_strmout_bo_offset[idx_value]) {
				DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo offset does not match: 0x%llx, 0x%x\n",
					  offset, track->vgt_strmout_bo_offset[idx_value]);
				return -EINVAL;
			}

			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
				DRM_ERROR("bad STRMOUT_BASE_UPDATE bo too small: 0x%llx, 0x%lx\n",
					  offset + 4, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}
			ib[idx+1] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		}
		break;
	case PACKET3_SURFACE_BASE_UPDATE:
		if (p->family >= CHIP_RV770 || p->family == CHIP_R600) {
			DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
			return -EINVAL;
		}
		if (pkt->count) {
			DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
			return -EINVAL;
		}
		break;
	case PACKET3_STRMOUT_BUFFER_UPDATE:
		if (pkt->count != 4) {
			DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (invalid count)\n");
			return -EINVAL;
		}
		/* Updating memory at DST_ADDRESS. */
		if (idx_value & 0x1) {
			u64 offset;
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n");
				return -EINVAL;
			}
			offset = radeon_get_ib_value(p, idx+1);
			offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE dst bo too small: 0x%llx, 0x%lx\n",
					  offset + 4, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}
			offset += reloc->lobj.gpu_offset;
			ib[idx+1] = offset;
			ib[idx+2] = upper_32_bits(offset) & 0xff;
		}
		/* Reading data from SRC_ADDRESS. */
		if (((idx_value >> 1) & 0x3) == 2) {
			u64 offset;
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n");
				return -EINVAL;
			}
			offset = radeon_get_ib_value(p, idx+3);
			offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE src bo too small: 0x%llx, 0x%lx\n",
					  offset + 4, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}
			offset += reloc->lobj.gpu_offset;
			ib[idx+3] = offset;
			ib[idx+4] = upper_32_bits(offset) & 0xff;
		}
		break;
	case PACKET3_COPY_DW:
		if (pkt->count != 4) {
			DRM_ERROR("bad COPY_DW (invalid count)\n");
			return -EINVAL;
		}
		if (idx_value & 0x1) {
			u64 offset;
			/* SRC is memory. */
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad COPY_DW (missing src reloc)\n");
				return -EINVAL;
			}
			offset = radeon_get_ib_value(p, idx+1);
			offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
				DRM_ERROR("bad COPY_DW src bo too small: 0x%llx, 0x%lx\n",
					  offset + 4, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}
			offset += reloc->lobj.gpu_offset;
			ib[idx+1] = offset;
			ib[idx+2] = upper_32_bits(offset) & 0xff;
		} else {
			/* SRC is a reg. */
			reg = radeon_get_ib_value(p, idx+1) << 2;
			if (!r600_is_safe_reg(p, reg, idx+1))
				return -EINVAL;
		}
		if (idx_value & 0x2) {
			u64 offset;
			/* DST is memory. */
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad COPY_DW (missing dst reloc)\n");
				return -EINVAL;
			}
			offset = radeon_get_ib_value(p, idx+3);
			offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
				DRM_ERROR("bad COPY_DW dst bo too small: 0x%llx, 0x%lx\n",
					  offset + 4, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}
			offset += reloc->lobj.gpu_offset;
			ib[idx+3] = offset;
			ib[idx+4] = upper_32_bits(offset) & 0xff;
		} else {
			/* DST is a reg. */
			reg = radeon_get_ib_value(p, idx+3) << 2;
			if (!r600_is_safe_reg(p, reg, idx+3))
				return -EINVAL;
		}
		break;
	case PACKET3_NOP:
		break;
	default:
		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
		return -EINVAL;
	}
	return 0;
}
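
/* Top-level CS parse loop: walk the IB packet by packet, dispatching
 * type-0 register writes and type-3 commands to their checkers.
 * Type-2 packets are padding and are skipped.
 */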
int r600_cs_parse(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet pkt;
	struct r600_cs_track *track;
	int r;

	if (p->track == NULL) {
		/* initialize tracker, we are in kms */
		track = kzalloc(sizeof(*track), GFP_KERNEL);
		if (track == NULL)
			return -ENOMEM;
		r600_cs_track_init(track);
		if (p->rdev->family < CHIP_RV770) {
			track->npipes = p->rdev->config.r600.tiling_npipes;
			track->nbanks = p->rdev->config.r600.tiling_nbanks;
			track->group_size = p->rdev->config.r600.tiling_group_size;
		} else if (p->rdev->family <= CHIP_RV740) {
			track->npipes = p->rdev->config.rv770.tiling_npipes;
			track->nbanks = p->rdev->config.rv770.tiling_nbanks;
			track->group_size = p->rdev->config.rv770.tiling_group_size;
		}
		p->track = track;
	}
	do {
		r = r600_cs_packet_parse(p, &pkt, p->idx);
		if (r) {
			kfree(p->track);
			p->track = NULL;
			return r;
		}
		p->idx += pkt.count + 2;
		switch (pkt.type) {
		case PACKET_TYPE0:
			r = r600_cs_parse_packet0(p, &pkt);
			break;
		case PACKET_TYPE2:
			break;
		case PACKET_TYPE3:
			r = r600_packet3_check(p, &pkt);
			break;
		default:
			DRM_ERROR("Unknown packet type %d !\n", pkt.type);
			kfree(p->track);
			p->track = NULL;
			return -EINVAL;
		}
		if (r) {
			kfree(p->track);
			p->track = NULL;
			return r;
		}
	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
#if 0
	for (r = 0; r < p->ib.length_dw; r++) {
		printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]);
		mdelay(1);
	}
#endif
	kfree(p->track);
	p->track = NULL;
	return 0;
}
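
/* The legacy (non-KMS) path has no buffer objects to look up, so a
 * single zeroed radeon_cs_reloc is allocated here; the nomm reloc
 * handler is expected to reuse it for every reloc lookup.
 */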
static int r600_cs_parser_relocs_legacy(struct radeon_cs_parser *p)
{
	if (p->chunk_relocs_idx == -1) {
		return 0;
	}
	p->relocs = kzalloc(sizeof(struct radeon_cs_reloc), GFP_KERNEL);
	if (p->relocs == NULL) {
		return -ENOMEM;
	}
	return 0;
}

/**
 * r600_cs_parser_fini() - clean parser states
 * @parser: parser structure holding parsing context.
 * @error: error number
 *
 * If error is set, then unvalidate the buffers; otherwise just free the
 * memory used by the parsing context.
 **/
static void r600_cs_parser_fini(struct radeon_cs_parser *parser, int error)
{
	unsigned i;

	kfree(parser->relocs);
	for (i = 0; i < parser->nchunks; i++) {
		kfree(parser->chunks[i].kdata);
		kfree(parser->chunks[i].kpage[0]);
		kfree(parser->chunks[i].kpage[1]);
	}
	kfree(parser->chunks);
	kfree(parser->chunks_array);
}
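
/* Entry point for the legacy UMS checker: run the same parser over a
 * user-supplied command buffer with parser.rdev left NULL, so the
 * bo-size based checks are skipped, and with the tiling configuration
 * taken from the legacy drm state.
 */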
int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
		   unsigned family, u32 *ib, int *l)
{
	struct radeon_cs_parser parser;
	struct radeon_cs_chunk *ib_chunk;
	struct r600_cs_track *track;
	int r;

	/* initialize tracker */
	track = kzalloc(sizeof(*track), GFP_KERNEL);
	if (track == NULL)
		return -ENOMEM;
	r600_cs_track_init(track);
	r600_cs_legacy_get_tiling_conf(dev, &track->npipes, &track->nbanks, &track->group_size);
	/* initialize parser */
	memset(&parser, 0, sizeof(struct radeon_cs_parser));
	parser.filp = filp;
	parser.dev = &dev->pdev->dev;
	parser.rdev = NULL;
	parser.family = family;
	parser.track = track;
	parser.ib.ptr = ib;
	r = radeon_cs_parser_init(&parser, data);
	if (r) {
		DRM_ERROR("Failed to initialize parser !\n");
		r600_cs_parser_fini(&parser, r);
		return r;
	}
	r = r600_cs_parser_relocs_legacy(&parser);
	if (r) {
		DRM_ERROR("Failed to parse relocation !\n");
		r600_cs_parser_fini(&parser, r);
		return r;
	}
	/* Copy the packet into the IB, the parser will read from the
	 * input memory (cached) and write to the IB (which can be
	 * uncached). */
	ib_chunk = &parser.chunks[parser.chunk_ib_idx];
	parser.ib.length_dw = ib_chunk->length_dw;
	*l = parser.ib.length_dw;
	r = r600_cs_parse(&parser);
	if (r) {
		DRM_ERROR("Invalid command stream !\n");
		r600_cs_parser_fini(&parser, r);
		return r;
	}
	r = radeon_cs_finish_pages(&parser);
	if (r) {
		DRM_ERROR("Invalid command stream !\n");
		r600_cs_parser_fini(&parser, r);
		return r;
	}
	r600_cs_parser_fini(&parser, r);
	return r;
}
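
/* Legacy setup: route all reloc lookups through the no-memory-manager
 * variant, which synthesizes relocation info instead of reading a
 * reloc chunk (presumably called once by the UMS driver at init).
 */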
void r600_cs_legacy_init(void)
{
	r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_nomm;
}