/*
 * Copyright 2010 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */

#include "drmP.h"
#include "radeon.h"
#include "evergreend.h"
#include "evergreen_reg_safe.h"
#include "cayman_reg_safe.h"

#define MAX(a,b) (((a)>(b))?(a):(b))
#define MIN(a,b) (((a)<(b))?(a):(b))

static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p,
					  struct radeon_cs_reloc **cs_reloc);

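/*
 * State gathered while walking a command stream, so that colour,
 * depth/stencil and streamout bindings can be validated against the size
 * and tiling of their backing buffer objects once the relevant registers
 * and relocations have been seen.
 */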
struct evergreen_cs_track {
	u32			group_size;
	u32			nbanks;
	u32			npipes;
	u32			row_size;
	/* value we track */
	u32			nsamples;
	u32			cb_color_base_last[12];
	struct radeon_bo	*cb_color_bo[12];
	u32			cb_color_bo_offset[12];
	struct radeon_bo	*cb_color_fmask_bo[8];
	struct radeon_bo	*cb_color_cmask_bo[8];
	u32			cb_color_info[12];
	u32			cb_color_view[12];
	u32			cb_color_pitch_idx[12];
	u32			cb_color_slice_idx[12];
	u32			cb_color_dim_idx[12];
	u32			cb_color_dim[12];
	u32			cb_color_pitch[12];
	u32			cb_color_slice[12];
	u32			cb_color_attrib[12];
	u32			cb_color_cmask_slice[8];
	u32			cb_color_fmask_slice[8];
	u32			cb_target_mask;
	u32			cb_shader_mask;
	u32			vgt_strmout_config;
	u32			vgt_strmout_buffer_config;
	struct radeon_bo	*vgt_strmout_bo[4];
	u64			vgt_strmout_bo_mc[4];
	u32			vgt_strmout_bo_offset[4];
	u32			vgt_strmout_size[4];
	u32			db_depth_control;
	u32			db_depth_view;
	u32			db_depth_slice;
	u32			db_depth_size;
	u32			db_depth_size_idx;
	u32			db_z_info;
	u32			db_z_idx;
	u32			db_z_read_offset;
	u32			db_z_write_offset;
	struct radeon_bo	*db_z_read_bo;
	struct radeon_bo	*db_z_write_bo;
	u32			db_s_info;
	u32			db_s_idx;
	u32			db_s_read_offset;
	u32			db_s_write_offset;
	struct radeon_bo	*db_s_read_bo;
	struct radeon_bo	*db_s_write_bo;
};

static u32 evergreen_cs_get_aray_mode(u32 tiling_flags)
{
	if (tiling_flags & RADEON_TILING_MACRO)
		return ARRAY_2D_TILED_THIN1;
	else if (tiling_flags & RADEON_TILING_MICRO)
		return ARRAY_1D_TILED_THIN1;
	else
		return ARRAY_LINEAR_GENERAL;
}

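/*
 * Map a bank count to the ADDR_SURF_*_BANK encoding expected in the
 * tiling fields (e.g. DB_NUM_BANKS); unrecognised counts fall back to
 * the 8 bank encoding.
 */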
static u32 evergreen_cs_get_num_banks(u32 nbanks)
{
	switch (nbanks) {
	case 2:
		return ADDR_SURF_2_BANK;
	case 4:
		return ADDR_SURF_4_BANK;
	case 8:
	default:
		return ADDR_SURF_8_BANK;
	case 16:
		return ADDR_SURF_16_BANK;
	}
}

static void evergreen_cs_track_init(struct evergreen_cs_track *track)
{
	int i;

	for (i = 0; i < 8; i++) {
		track->cb_color_fmask_bo[i] = NULL;
		track->cb_color_cmask_bo[i] = NULL;
		track->cb_color_cmask_slice[i] = 0;
		track->cb_color_fmask_slice[i] = 0;
	}

	for (i = 0; i < 12; i++) {
		track->cb_color_base_last[i] = 0;
		track->cb_color_bo[i] = NULL;
		track->cb_color_bo_offset[i] = 0xFFFFFFFF;
		track->cb_color_info[i] = 0;
		track->cb_color_view[i] = 0xFFFFFFFF;
		track->cb_color_pitch_idx[i] = 0;
		track->cb_color_slice_idx[i] = 0;
		track->cb_color_dim_idx[i] = 0;
		track->cb_color_dim[i] = 0;
		track->cb_color_pitch[i] = 0;
		track->cb_color_slice[i] = 0;
	}
	track->cb_target_mask = 0xFFFFFFFF;
	track->cb_shader_mask = 0xFFFFFFFF;

	track->db_depth_view = 0xFFFFC000;
	track->db_depth_size = 0xFFFFFFFF;
	track->db_depth_size_idx = 0;
	track->db_depth_control = 0xFFFFFFFF;
	track->db_z_info = 0xFFFFFFFF;
	track->db_z_idx = 0xFFFFFFFF;
	track->db_z_read_offset = 0xFFFFFFFF;
	track->db_z_write_offset = 0xFFFFFFFF;
	track->db_z_read_bo = NULL;
	track->db_z_write_bo = NULL;
	track->db_s_info = 0xFFFFFFFF;
	track->db_s_idx = 0xFFFFFFFF;
	track->db_s_read_offset = 0xFFFFFFFF;
	track->db_s_write_offset = 0xFFFFFFFF;
	track->db_s_read_bo = NULL;
	track->db_s_write_bo = NULL;

	for (i = 0; i < 4; i++) {
		track->vgt_strmout_size[i] = 0;
		track->vgt_strmout_bo[i] = NULL;
		track->vgt_strmout_bo_offset[i] = 0xFFFFFFFF;
		track->vgt_strmout_bo_mc[i] = 0xFFFFFFFF;
	}
}

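/*
 * Decoded description of one surface: nbx/nby are the pitch and height in
 * elements (blocks for compressed formats), bpe is bytes per element.
 * The check helpers below fill in layer_size and the pitch/height/base
 * alignment required by the given array mode.
 */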
struct eg_surface {
	/* value gathered from cs */
	unsigned	nbx;
	unsigned	nby;
	unsigned	format;
	unsigned	mode;
	unsigned	nbanks;
	unsigned	bankw;
	unsigned	bankh;
	unsigned	tsplit;
	unsigned	mtilea;
	unsigned	nsamples;
	/* output value */
	unsigned	bpe;
	unsigned	layer_size;
	unsigned	palign;
	unsigned	halign;
	unsigned long	base_align;
};

static int evergreen_surface_check_linear(struct radeon_cs_parser *p,
					  struct eg_surface *surf,
					  const char *prefix)
{
	surf->layer_size = surf->nbx * surf->nby * surf->bpe * surf->nsamples;
	surf->base_align = surf->bpe;
	surf->palign = 1;
	surf->halign = 1;
	return 0;
}

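/*
 * For ARRAY_LINEAR_ALIGNED the pitch must span a whole group: e.g. with a
 * 256 byte group size and 4 bytes per element, nbx must be a multiple of
 * MAX(64, 256 / 4) = 64 elements.
 */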
static int evergreen_surface_check_linear_aligned(struct radeon_cs_parser *p,
						  struct eg_surface *surf,
						  const char *prefix)
{
	struct evergreen_cs_track *track = p->track;
	unsigned palign;

	palign = MAX(64, track->group_size / surf->bpe);
	surf->layer_size = surf->nbx * surf->nby * surf->bpe * surf->nsamples;
	surf->base_align = track->group_size;
	surf->palign = palign;
	surf->halign = 1;
	if (surf->nbx & (palign - 1)) {
		if (prefix) {
			dev_warn(p->dev, "%s:%d %s pitch %d invalid must be aligned with %d\n",
				 __func__, __LINE__, prefix, surf->nbx, palign);
		}
		return -EINVAL;
	}
	return 0;
}

static int evergreen_surface_check_1d(struct radeon_cs_parser *p,
				      struct eg_surface *surf,
				      const char *prefix)
{
	struct evergreen_cs_track *track = p->track;
	unsigned palign;

	palign = track->group_size / (8 * surf->bpe * surf->nsamples);
	palign = MAX(8, palign);
	surf->layer_size = surf->nbx * surf->nby * surf->bpe;
	surf->base_align = track->group_size;
	surf->palign = palign;
	surf->halign = 8;
	if ((surf->nbx & (palign - 1))) {
		if (prefix) {
			dev_warn(p->dev, "%s:%d %s pitch %d invalid must be aligned with %d (%d %d %d)\n",
				 __func__, __LINE__, prefix, surf->nbx, palign,
				 track->group_size, surf->bpe, surf->nsamples);
		}
		return -EINVAL;
	}
	if ((surf->nby & (8 - 1))) {
		if (prefix) {
			dev_warn(p->dev, "%s:%d %s height %d invalid must be aligned with 8\n",
				 __func__, __LINE__, prefix, surf->nby);
		}
		return -EINVAL;
	}
	return 0;
}

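/*
 * 2D tiling example: npipes = 8, nbanks = 8, bankw = bankh = mtilea = 1
 * gives a 64x64 element macro tile, so the pitch and height must be
 * multiples of 64 and the base address must be aligned to one full macro
 * tile (64 micro tiles of tileb bytes each).
 */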
static int evergreen_surface_check_2d(struct radeon_cs_parser *p,
				      struct eg_surface *surf,
				      const char *prefix)
{
	struct evergreen_cs_track *track = p->track;
	unsigned palign, halign, tileb, slice_pt;

	tileb = 64 * surf->bpe * surf->nsamples;
	palign = track->group_size / (8 * surf->bpe * surf->nsamples);
	palign = MAX(8, palign);
	slice_pt = 1;
	if (tileb > surf->tsplit) {
		slice_pt = tileb / surf->tsplit;
	}
	tileb = tileb / slice_pt;
	/* macro tile width & height */
	palign = (8 * surf->bankw * track->npipes) * surf->mtilea;
	halign = (8 * surf->bankh * surf->nbanks) / surf->mtilea;
	surf->layer_size = surf->nbx * surf->nby * surf->bpe * slice_pt;
	surf->base_align = (palign / 8) * (halign / 8) * tileb;
	surf->palign = palign;
	surf->halign = halign;
	if ((surf->nbx & (palign - 1))) {
		if (prefix) {
			dev_warn(p->dev, "%s:%d %s pitch %d invalid must be aligned with %d\n",
				 __func__, __LINE__, prefix, surf->nbx, palign);
		}
		return -EINVAL;
	}
	if ((surf->nby & (halign - 1))) {
		if (prefix) {
			dev_warn(p->dev, "%s:%d %s height %d invalid must be aligned with %d\n",
				 __func__, __LINE__, prefix, surf->nby, halign);
		}
		return -EINVAL;
	}
	return 0;
}

static int evergreen_surface_check(struct radeon_cs_parser *p,
				   struct eg_surface *surf,
				   const char *prefix)
{
	/* some common values computed here */
	surf->bpe = r600_fmt_get_blocksize(surf->format);

	switch (surf->mode) {
	case ARRAY_LINEAR_GENERAL:
		return evergreen_surface_check_linear(p, surf, prefix);
	case ARRAY_LINEAR_ALIGNED:
		return evergreen_surface_check_linear_aligned(p, surf, prefix);
	case ARRAY_1D_TILED_THIN1:
		return evergreen_surface_check_1d(p, surf, prefix);
	case ARRAY_2D_TILED_THIN1:
		return evergreen_surface_check_2d(p, surf, prefix);
	default:
		dev_warn(p->dev, "%s:%d invalid array mode %d\n",
			 __func__, __LINE__, surf->mode);
		return -EINVAL;
	}
	return -EINVAL;
}

static int evergreen_surface_value_conv_check(struct radeon_cs_parser *p,
					      struct eg_surface *surf,
					      const char *prefix)
{
	switch (surf->mode) {
	case ARRAY_2D_TILED_THIN1:
		break;
	case ARRAY_LINEAR_GENERAL:
	case ARRAY_LINEAR_ALIGNED:
	case ARRAY_1D_TILED_THIN1:
		return 0;
	default:
		dev_warn(p->dev, "%s:%d invalid array mode %d\n",
			 __func__, __LINE__, surf->mode);
		return -EINVAL;
	}

	switch (surf->nbanks) {
	case 0: surf->nbanks = 2; break;
	case 1: surf->nbanks = 4; break;
	case 2: surf->nbanks = 8; break;
	case 3: surf->nbanks = 16; break;
	default:
		dev_warn(p->dev, "%s:%d %s invalid number of banks %d\n",
			 __func__, __LINE__, prefix, surf->nbanks);
		return -EINVAL;
	}
	switch (surf->bankw) {
	case 0: surf->bankw = 1; break;
	case 1: surf->bankw = 2; break;
	case 2: surf->bankw = 4; break;
	case 3: surf->bankw = 8; break;
	default:
		dev_warn(p->dev, "%s:%d %s invalid bankw %d\n",
			 __func__, __LINE__, prefix, surf->bankw);
		return -EINVAL;
	}
	switch (surf->bankh) {
	case 0: surf->bankh = 1; break;
	case 1: surf->bankh = 2; break;
	case 2: surf->bankh = 4; break;
	case 3: surf->bankh = 8; break;
	default:
		dev_warn(p->dev, "%s:%d %s invalid bankh %d\n",
			 __func__, __LINE__, prefix, surf->bankh);
		return -EINVAL;
	}
	switch (surf->mtilea) {
	case 0: surf->mtilea = 1; break;
	case 1: surf->mtilea = 2; break;
	case 2: surf->mtilea = 4; break;
	case 3: surf->mtilea = 8; break;
	default:
		dev_warn(p->dev, "%s:%d %s invalid macro tile aspect %d\n",
			 __func__, __LINE__, prefix, surf->mtilea);
		return -EINVAL;
	}
	switch (surf->tsplit) {
	case 0: surf->tsplit = 64; break;
	case 1: surf->tsplit = 128; break;
	case 2: surf->tsplit = 256; break;
	case 3: surf->tsplit = 512; break;
	case 4: surf->tsplit = 1024; break;
	case 5: surf->tsplit = 2048; break;
	case 6: surf->tsplit = 4096; break;
	default:
		dev_warn(p->dev, "%s:%d %s invalid tile split %d\n",
			 __func__, __LINE__, prefix, surf->tsplit);
		return -EINVAL;
	}
	return 0;
}

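/*
 * CB/DB base registers hold a 256 byte aligned address shifted right by
 * 8 bits, hence the `offset << 8' conversions in the validation below.
 */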
static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned id)
{
	struct evergreen_cs_track *track = p->track;
	struct eg_surface surf;
	unsigned pitch, slice, mslice;
	unsigned long offset;
	int r;

	mslice = G_028C6C_SLICE_MAX(track->cb_color_view[id]) + 1;
	pitch = track->cb_color_pitch[id];
	slice = track->cb_color_slice[id];
	surf.nbx = (pitch + 1) * 8;
	surf.nby = ((slice + 1) * 64) / surf.nbx;
	surf.mode = G_028C70_ARRAY_MODE(track->cb_color_info[id]);
	surf.format = G_028C70_FORMAT(track->cb_color_info[id]);
	surf.tsplit = G_028C74_TILE_SPLIT(track->cb_color_attrib[id]);
	surf.nbanks = G_028C74_NUM_BANKS(track->cb_color_attrib[id]);
	surf.bankw = G_028C74_BANK_WIDTH(track->cb_color_attrib[id]);
	surf.bankh = G_028C74_BANK_HEIGHT(track->cb_color_attrib[id]);
	surf.mtilea = G_028C74_MACRO_TILE_ASPECT(track->cb_color_attrib[id]);
	surf.nsamples = 1;

	if (!r600_fmt_is_valid_color(surf.format)) {
		dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08x)\n",
			 __func__, __LINE__, surf.format,
			 id, track->cb_color_info[id]);
		return -EINVAL;
	}

	r = evergreen_surface_value_conv_check(p, &surf, "cb");
	if (r) {
		return r;
	}

	r = evergreen_surface_check(p, &surf, "cb");
	if (r) {
		dev_warn(p->dev, "%s:%d cb[%d] invalid (0x%08x 0x%08x 0x%08x 0x%08x)\n",
			 __func__, __LINE__, id, track->cb_color_pitch[id],
			 track->cb_color_slice[id], track->cb_color_attrib[id],
			 track->cb_color_info[id]);
		return r;
	}

	offset = track->cb_color_bo_offset[id] << 8;
	if (offset & (surf.base_align - 1)) {
		dev_warn(p->dev, "%s:%d cb[%d] bo base %ld not aligned with %ld\n",
			 __func__, __LINE__, id, offset, surf.base_align);
		return -EINVAL;
	}

	offset += surf.layer_size * mslice;
	if (offset > radeon_bo_size(track->cb_color_bo[id])) {
		dev_warn(p->dev, "%s:%d cb[%d] bo too small (layer size %d, "
			 "offset %d, max layer %d, bo size %ld, slice %d)\n",
			 __func__, __LINE__, id, surf.layer_size,
			 track->cb_color_bo_offset[id] << 8, mslice,
			 radeon_bo_size(track->cb_color_bo[id]), slice);
		dev_warn(p->dev, "%s:%d problematic surf: (%d %d) (%d %d %d %d %d %d %d)\n",
			 __func__, __LINE__, surf.nbx, surf.nby,
			 surf.mode, surf.bpe, surf.nsamples,
			 surf.bankw, surf.bankh,
			 surf.tsplit, surf.mtilea);
		return -EINVAL;
	}
	return 0;
}

static int evergreen_cs_track_validate_stencil(struct radeon_cs_parser *p)
{
	struct evergreen_cs_track *track = p->track;
	struct eg_surface surf;
	unsigned pitch, slice, mslice;
	unsigned long offset;
	int r;

	mslice = G_028008_SLICE_MAX(track->db_depth_view) + 1;
	pitch = G_028058_PITCH_TILE_MAX(track->db_depth_size);
	slice = track->db_depth_slice;
	surf.nbx = (pitch + 1) * 8;
	surf.nby = ((slice + 1) * 64) / surf.nbx;
	surf.mode = G_028040_ARRAY_MODE(track->db_z_info);
	surf.format = G_028044_FORMAT(track->db_s_info);
	surf.tsplit = G_028044_TILE_SPLIT(track->db_s_info);
	surf.nbanks = G_028040_NUM_BANKS(track->db_z_info);
	surf.bankw = G_028040_BANK_WIDTH(track->db_z_info);
	surf.bankh = G_028040_BANK_HEIGHT(track->db_z_info);
	surf.mtilea = G_028040_MACRO_TILE_ASPECT(track->db_z_info);
	surf.nsamples = 1;

	if (surf.format != 1) {
		dev_warn(p->dev, "%s:%d stencil invalid format %d\n",
			 __func__, __LINE__, surf.format);
		return -EINVAL;
	}
	/* replace by color format so we can use same code */
	surf.format = V_028C70_COLOR_8;

	r = evergreen_surface_value_conv_check(p, &surf, "stencil");
	if (r) {
		return r;
	}

	r = evergreen_surface_check(p, &surf, NULL);
	if (r) {
		/* old userspace doesn't compute proper depth/stencil alignment;
		 * check that alignment against a bigger bytes-per-element value
		 * and only report an error if that alignment is wrong too.
		 */
		surf.format = V_028C70_COLOR_8_8_8_8;
		r = evergreen_surface_check(p, &surf, "stencil");
		if (r) {
			dev_warn(p->dev, "%s:%d stencil invalid (0x%08x 0x%08x 0x%08x 0x%08x)\n",
				 __func__, __LINE__, track->db_depth_size,
				 track->db_depth_slice, track->db_s_info, track->db_z_info);
		}
		return r;
	}

	offset = track->db_s_read_offset << 8;
	if (offset & (surf.base_align - 1)) {
		dev_warn(p->dev, "%s:%d stencil read bo base %ld not aligned with %ld\n",
			 __func__, __LINE__, offset, surf.base_align);
		return -EINVAL;
	}
	offset += surf.layer_size * mslice;
	if (offset > radeon_bo_size(track->db_s_read_bo)) {
		dev_warn(p->dev, "%s:%d stencil read bo too small (layer size %d, "
			 "offset %ld, max layer %d, bo size %ld)\n",
			 __func__, __LINE__, surf.layer_size,
			 (unsigned long)track->db_s_read_offset << 8, mslice,
			 radeon_bo_size(track->db_s_read_bo));
		dev_warn(p->dev, "%s:%d stencil invalid (0x%08x 0x%08x 0x%08x 0x%08x)\n",
			 __func__, __LINE__, track->db_depth_size,
			 track->db_depth_slice, track->db_s_info, track->db_z_info);
		return -EINVAL;
	}

	offset = track->db_s_write_offset << 8;
	if (offset & (surf.base_align - 1)) {
		dev_warn(p->dev, "%s:%d stencil write bo base %ld not aligned with %ld\n",
			 __func__, __LINE__, offset, surf.base_align);
		return -EINVAL;
	}
	offset += surf.layer_size * mslice;
	if (offset > radeon_bo_size(track->db_s_write_bo)) {
		dev_warn(p->dev, "%s:%d stencil write bo too small (layer size %d, "
			 "offset %ld, max layer %d, bo size %ld)\n",
			 __func__, __LINE__, surf.layer_size,
			 (unsigned long)track->db_s_write_offset << 8, mslice,
			 radeon_bo_size(track->db_s_write_bo));
		return -EINVAL;
	}
	return 0;
}

static int evergreen_cs_track_validate_depth(struct radeon_cs_parser *p)
{
	struct evergreen_cs_track *track = p->track;
	struct eg_surface surf;
	unsigned pitch, slice, mslice;
	unsigned long offset;
	int r;

	mslice = G_028008_SLICE_MAX(track->db_depth_view) + 1;
	pitch = G_028058_PITCH_TILE_MAX(track->db_depth_size);
	slice = track->db_depth_slice;
	surf.nbx = (pitch + 1) * 8;
	surf.nby = ((slice + 1) * 64) / surf.nbx;
	surf.mode = G_028040_ARRAY_MODE(track->db_z_info);
	surf.format = G_028040_FORMAT(track->db_z_info);
	surf.tsplit = G_028040_TILE_SPLIT(track->db_z_info);
	surf.nbanks = G_028040_NUM_BANKS(track->db_z_info);
	surf.bankw = G_028040_BANK_WIDTH(track->db_z_info);
	surf.bankh = G_028040_BANK_HEIGHT(track->db_z_info);
	surf.mtilea = G_028040_MACRO_TILE_ASPECT(track->db_z_info);
	surf.nsamples = 1;

	switch (surf.format) {
	case V_028040_Z_16:
		surf.format = V_028C70_COLOR_16;
		break;
	case V_028040_Z_24:
	case V_028040_Z_32_FLOAT:
		surf.format = V_028C70_COLOR_8_8_8_8;
		break;
	default:
		dev_warn(p->dev, "%s:%d depth invalid format %d\n",
			 __func__, __LINE__, surf.format);
		return -EINVAL;
	}

	r = evergreen_surface_value_conv_check(p, &surf, "depth");
	if (r) {
		dev_warn(p->dev, "%s:%d depth invalid (0x%08x 0x%08x 0x%08x)\n",
			 __func__, __LINE__, track->db_depth_size,
			 track->db_depth_slice, track->db_z_info);
		return r;
	}

	r = evergreen_surface_check(p, &surf, "depth");
	if (r) {
		dev_warn(p->dev, "%s:%d depth invalid (0x%08x 0x%08x 0x%08x)\n",
			 __func__, __LINE__, track->db_depth_size,
			 track->db_depth_slice, track->db_z_info);
		return r;
	}

	offset = track->db_z_read_offset << 8;
	if (offset & (surf.base_align - 1)) {
		dev_warn(p->dev, "%s:%d depth read bo base %ld not aligned with %ld\n",
			 __func__, __LINE__, offset, surf.base_align);
		return -EINVAL;
	}
	offset += surf.layer_size * mslice;
	if (offset > radeon_bo_size(track->db_z_read_bo)) {
		dev_warn(p->dev, "%s:%d depth read bo too small (layer size %d, "
			 "offset %ld, max layer %d, bo size %ld)\n",
			 __func__, __LINE__, surf.layer_size,
			 (unsigned long)track->db_z_read_offset << 8, mslice,
			 radeon_bo_size(track->db_z_read_bo));
		return -EINVAL;
	}

	offset = track->db_z_write_offset << 8;
	if (offset & (surf.base_align - 1)) {
		dev_warn(p->dev, "%s:%d depth write bo base %ld not aligned with %ld\n",
			 __func__, __LINE__, offset, surf.base_align);
		return -EINVAL;
	}
	offset += surf.layer_size * mslice;
	if (offset > radeon_bo_size(track->db_z_write_bo)) {
		dev_warn(p->dev, "%s:%d depth write bo too small (layer size %d, "
			 "offset %ld, max layer %d, bo size %ld)\n",
			 __func__, __LINE__, surf.layer_size,
			 (unsigned long)track->db_z_write_offset << 8, mslice,
			 radeon_bo_size(track->db_z_write_bo));
		return -EINVAL;
	}
	return 0;
}

static int evergreen_cs_track_validate_texture(struct radeon_cs_parser *p,
					       struct radeon_bo *texture,
					       struct radeon_bo *mipmap,
					       unsigned idx)
{
	struct eg_surface surf;
	unsigned long toffset, moffset;
	unsigned dim, llevel, mslice, width, height, depth, i;
	u32 texdw[8];
	int r;

	texdw[0] = radeon_get_ib_value(p, idx + 0);
	texdw[1] = radeon_get_ib_value(p, idx + 1);
	texdw[2] = radeon_get_ib_value(p, idx + 2);
	texdw[3] = radeon_get_ib_value(p, idx + 3);
	texdw[4] = radeon_get_ib_value(p, idx + 4);
	texdw[5] = radeon_get_ib_value(p, idx + 5);
	texdw[6] = radeon_get_ib_value(p, idx + 6);
	texdw[7] = radeon_get_ib_value(p, idx + 7);
	dim = G_030000_DIM(texdw[0]);
	llevel = G_030014_LAST_LEVEL(texdw[5]);
	mslice = G_030014_LAST_ARRAY(texdw[5]) + 1;
	width = G_030000_TEX_WIDTH(texdw[0]) + 1;
	height = G_030004_TEX_HEIGHT(texdw[1]) + 1;
	depth = G_030004_TEX_DEPTH(texdw[1]) + 1;
	surf.format = G_03001C_DATA_FORMAT(texdw[7]);
	surf.nbx = (G_030000_PITCH(texdw[0]) + 1) * 8;
	surf.nbx = r600_fmt_get_nblocksx(surf.format, surf.nbx);
	surf.nby = r600_fmt_get_nblocksy(surf.format, height);
	surf.mode = G_030004_ARRAY_MODE(texdw[1]);
	surf.tsplit = G_030018_TILE_SPLIT(texdw[6]);
	surf.nbanks = G_03001C_NUM_BANKS(texdw[7]);
	surf.bankw = G_03001C_BANK_WIDTH(texdw[7]);
	surf.bankh = G_03001C_BANK_HEIGHT(texdw[7]);
	surf.mtilea = G_03001C_MACRO_TILE_ASPECT(texdw[7]);
	surf.nsamples = 1;
	toffset = texdw[2] << 8;
	moffset = texdw[3] << 8;

	if (!r600_fmt_is_valid_texture(surf.format, p->family)) {
		dev_warn(p->dev, "%s:%d texture invalid format %d\n",
			 __func__, __LINE__, surf.format);
		return -EINVAL;
	}
	switch (dim) {
	case V_030000_SQ_TEX_DIM_1D:
	case V_030000_SQ_TEX_DIM_2D:
	case V_030000_SQ_TEX_DIM_CUBEMAP:
	case V_030000_SQ_TEX_DIM_1D_ARRAY:
	case V_030000_SQ_TEX_DIM_2D_ARRAY:
		depth = 1;
		break;
	case V_030000_SQ_TEX_DIM_3D:
		break;
	default:
		dev_warn(p->dev, "%s:%d texture invalid dimension %d\n",
			 __func__, __LINE__, dim);
		return -EINVAL;
	}

	r = evergreen_surface_value_conv_check(p, &surf, "texture");
	if (r) {
		return r;
	}

	/* align height */
	evergreen_surface_check(p, &surf, NULL);
	surf.nby = ALIGN(surf.nby, surf.halign);

	r = evergreen_surface_check(p, &surf, "texture");
	if (r) {
		dev_warn(p->dev, "%s:%d texture invalid 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
			 __func__, __LINE__, texdw[0], texdw[1], texdw[4],
			 texdw[5], texdw[6], texdw[7]);
		return r;
	}

	/* check texture size */
	if (toffset & (surf.base_align - 1)) {
		dev_warn(p->dev, "%s:%d texture bo base %ld not aligned with %ld\n",
			 __func__, __LINE__, toffset, surf.base_align);
		return -EINVAL;
	}
	if (moffset & (surf.base_align - 1)) {
		dev_warn(p->dev, "%s:%d mipmap bo base %ld not aligned with %ld\n",
			 __func__, __LINE__, moffset, surf.base_align);
		return -EINVAL;
	}
	if (dim == SQ_TEX_DIM_3D) {
		toffset += surf.layer_size * depth;
	} else {
		toffset += surf.layer_size * mslice;
	}
	if (toffset > radeon_bo_size(texture)) {
		dev_warn(p->dev, "%s:%d texture bo too small (layer size %d, "
			 "offset %ld, max layer %d, depth %d, bo size %ld) (%d %d)\n",
			 __func__, __LINE__, surf.layer_size,
			 (unsigned long)texdw[2] << 8, mslice,
			 depth, radeon_bo_size(texture),
			 surf.nbx, surf.nby);
		return -EINVAL;
	}

	/* check mipmap size */
	for (i = 1; i <= llevel; i++) {
		unsigned w, h, d;

		w = r600_mip_minify(width, i);
		h = r600_mip_minify(height, i);
		d = r600_mip_minify(depth, i);
		surf.nbx = r600_fmt_get_nblocksx(surf.format, w);
		surf.nby = r600_fmt_get_nblocksy(surf.format, h);

		switch (surf.mode) {
		case ARRAY_2D_TILED_THIN1:
			if (surf.nbx < surf.palign || surf.nby < surf.halign) {
				surf.mode = ARRAY_1D_TILED_THIN1;
			}
			/* recompute alignment */
			evergreen_surface_check(p, &surf, NULL);
			break;
		case ARRAY_LINEAR_GENERAL:
		case ARRAY_LINEAR_ALIGNED:
		case ARRAY_1D_TILED_THIN1:
			break;
		default:
			dev_warn(p->dev, "%s:%d invalid array mode %d\n",
				 __func__, __LINE__, surf.mode);
			return -EINVAL;
		}

		surf.nbx = ALIGN(surf.nbx, surf.palign);
		surf.nby = ALIGN(surf.nby, surf.halign);

		r = evergreen_surface_check(p, &surf, "mipmap");
		if (r) {
			return r;
		}

		if (dim == SQ_TEX_DIM_3D) {
			moffset += surf.layer_size * d;
		} else {
			moffset += surf.layer_size * mslice;
		}
		if (moffset > radeon_bo_size(mipmap)) {
			dev_warn(p->dev, "%s:%d mipmap [%d] bo too small (layer size %d, "
				 "offset %ld, coffset %ld, max layer %d, depth %d, "
				 "bo size %ld) level0 (%d %d %d)\n",
				 __func__, __LINE__, i, surf.layer_size,
				 (unsigned long)texdw[3] << 8, moffset, mslice,
				 d, radeon_bo_size(mipmap),
				 width, height, depth);
			dev_warn(p->dev, "%s:%d problematic surf: (%d %d) (%d %d %d %d %d %d %d)\n",
				 __func__, __LINE__, surf.nbx, surf.nby,
				 surf.mode, surf.bpe, surf.nsamples,
				 surf.bankw, surf.bankh,
				 surf.tsplit, surf.mtilea);
			return -EINVAL;
		}
	}
	return 0;
}

static int evergreen_cs_track_check(struct radeon_cs_parser *p)
{
	struct evergreen_cs_track *track = p->track;
	unsigned tmp, i, j;
	int r;

	/* check streamout */
	for (i = 0; i < 4; i++) {
		if (track->vgt_strmout_config & (1 << i)) {
			for (j = 0; j < 4; j++) {
				if ((track->vgt_strmout_buffer_config >> (i * 4)) & (1 << j)) {
					if (track->vgt_strmout_bo[j]) {
						u64 offset = (u64)track->vgt_strmout_bo_offset[j] +
							     (u64)track->vgt_strmout_size[j];
						if (offset > radeon_bo_size(track->vgt_strmout_bo[j])) {
							DRM_ERROR("streamout %d bo too small: 0x%llx, 0x%lx\n",
								  j, offset,
								  radeon_bo_size(track->vgt_strmout_bo[j]));
							return -EINVAL;
						}
					} else {
						dev_warn(p->dev, "No buffer for streamout %d\n", j);
						return -EINVAL;
					}
				}
			}
		}
	}

	/* check that we have a cb for each enabled target */
	tmp = track->cb_target_mask;
	for (i = 0; i < 8; i++) {
		if ((tmp >> (i * 4)) & 0xF) {
			/* at least one component is enabled */
			if (track->cb_color_bo[i] == NULL) {
				dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
					 __func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i);
				return -EINVAL;
			}
			/* check cb */
			r = evergreen_cs_track_validate_cb(p, i);
			if (r) {
				return r;
			}
		}
	}

	/* Check stencil buffer */
	if (G_028800_STENCIL_ENABLE(track->db_depth_control)) {
		r = evergreen_cs_track_validate_stencil(p);
		if (r)
			return r;
	}
	/* Check depth buffer */
	if (G_028800_Z_WRITE_ENABLE(track->db_depth_control)) {
		r = evergreen_cs_track_validate_depth(p);
		if (r)
			return r;
	}
	return 0;
}

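/*
 * PM4 packet headers as decoded below: bits [31:30] hold the packet type,
 * bits [29:16] the body dword count minus one; type-0 packets keep the
 * base register dword index in bits [15:0] while type-3 packets carry
 * their opcode in bits [15:8].
 */
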
/**
 * evergreen_cs_packet_parse() - parse cp packet and point ib index to next packet
 * @p: parser structure holding parsing context
 * @pkt: where to store packet information
 * @idx: index of the packet header in the ib chunk
 *
 * Assumes that chunk_ib_index is properly set.  Returns -EINVAL if the
 * packet is bigger than the remaining ib size or if the packet type is
 * unknown.
 **/
int evergreen_cs_packet_parse(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt,
			      unsigned idx)
{
	struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
	uint32_t header;

	if (idx >= ib_chunk->length_dw) {
		DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
			  idx, ib_chunk->length_dw);
		return -EINVAL;
	}
	header = radeon_get_ib_value(p, idx);
	pkt->idx = idx;
	pkt->type = CP_PACKET_GET_TYPE(header);
	pkt->count = CP_PACKET_GET_COUNT(header);
	pkt->one_reg_wr = 0;
	switch (pkt->type) {
	case PACKET_TYPE0:
		pkt->reg = CP_PACKET0_GET_REG(header);
		break;
	case PACKET_TYPE3:
		pkt->opcode = CP_PACKET3_GET_OPCODE(header);
		break;
	case PACKET_TYPE2:
		pkt->count = -1;
		break;
	default:
		DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
		return -EINVAL;
	}
	if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
		DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
			  pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
		return -EINVAL;
	}
	return 0;
}

/**
 * evergreen_cs_packet_next_reloc() - parse next packet which should be a reloc packet3
 * @p: parser structure holding parsing context
 * @cs_reloc: where to store the relocation information
 *
 * Check that the next packet is a relocation packet3 and look the
 * relocation up in the relocation chunk.
 **/
static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p,
					  struct radeon_cs_reloc **cs_reloc)
{
	struct radeon_cs_chunk *relocs_chunk;
	struct radeon_cs_packet p3reloc;
	unsigned idx;
	int r;

	if (p->chunk_relocs_idx == -1) {
		DRM_ERROR("No relocation chunk !\n");
		return -EINVAL;
	}
	*cs_reloc = NULL;
	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
	r = evergreen_cs_packet_parse(p, &p3reloc, p->idx);
	if (r) {
		return r;
	}
	p->idx += p3reloc.count + 2;
	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
			  p3reloc.idx);
		return -EINVAL;
	}
	idx = radeon_get_ib_value(p, p3reloc.idx + 1);
	if (idx >= relocs_chunk->length_dw) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
			  idx, relocs_chunk->length_dw);
		return -EINVAL;
	}
	/* FIXME: we assume reloc size is 4 dwords */
	*cs_reloc = p->relocs_ptr[(idx / 4)];
	return 0;
}

/**
 * evergreen_cs_packet_parse_vline() - parse userspace VLINE packet
 * @p: parser structure holding parsing context
 *
 * Userspace sends a special sequence for VLINE waits:
 * PACKET0 - VLINE_START_END + value
 * PACKET3 - WAIT_REG_MEM poll vline status reg
 * RELOC (P3) - crtc_id in reloc.
 *
 * This function parses this sequence and relocates the VLINE START END
 * and WAIT_REG_MEM packets to the correct crtc.  It also detects a
 * switched-off crtc and nops out the wait in that case.
 */
static int evergreen_cs_packet_parse_vline(struct radeon_cs_parser *p)
{
	struct drm_mode_object *obj;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	struct radeon_cs_packet p3reloc, wait_reg_mem;
	int crtc_id;
	int r;
	uint32_t header, h_idx, reg, wait_reg_mem_info;
	volatile uint32_t *ib;

	ib = p->ib->ptr;

	/* parse the WAIT_REG_MEM */
	r = evergreen_cs_packet_parse(p, &wait_reg_mem, p->idx);
	if (r)
		return r;

	/* check it's a WAIT_REG_MEM */
	if (wait_reg_mem.type != PACKET_TYPE3 ||
	    wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) {
		DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n");
		return -EINVAL;
	}

	wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1);
	/* bit 4 is reg (0) or mem (1) */
	if (wait_reg_mem_info & 0x10) {
		DRM_ERROR("vline WAIT_REG_MEM waiting on MEM rather than REG\n");
		return -EINVAL;
	}
	/* waiting for value to be equal */
	if ((wait_reg_mem_info & 0x7) != 0x3) {
		DRM_ERROR("vline WAIT_REG_MEM function not equal\n");
		return -EINVAL;
	}
	if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != EVERGREEN_VLINE_STATUS) {
		DRM_ERROR("vline WAIT_REG_MEM bad reg\n");
		return -EINVAL;
	}
	if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != EVERGREEN_VLINE_STAT) {
		DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n");
		return -EINVAL;
	}

	/* jump over the NOP */
	r = evergreen_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2);
	if (r)
		return r;

	h_idx = p->idx - 2;
	p->idx += wait_reg_mem.count + 2;
	p->idx += p3reloc.count + 2;

	header = radeon_get_ib_value(p, h_idx);
	crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
	reg = CP_PACKET0_GET_REG(header);
	obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
	if (!obj) {
		DRM_ERROR("cannot find crtc %d\n", crtc_id);
		return -EINVAL;
	}
	crtc = obj_to_crtc(obj);
	radeon_crtc = to_radeon_crtc(crtc);
	crtc_id = radeon_crtc->crtc_id;

	if (!crtc->enabled) {
		/* if the CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */
		ib[h_idx + 2] = PACKET2(0);
		ib[h_idx + 3] = PACKET2(0);
		ib[h_idx + 4] = PACKET2(0);
		ib[h_idx + 5] = PACKET2(0);
		ib[h_idx + 6] = PACKET2(0);
		ib[h_idx + 7] = PACKET2(0);
		ib[h_idx + 8] = PACKET2(0);
	} else {
		switch (reg) {
		case EVERGREEN_VLINE_START_END:
			header &= ~R600_CP_PACKET0_REG_MASK;
			header |= (EVERGREEN_VLINE_START_END + radeon_crtc->crtc_offset) >> 2;
			ib[h_idx] = header;
			ib[h_idx + 4] = (EVERGREEN_VLINE_STATUS + radeon_crtc->crtc_offset) >> 2;
			break;
		default:
			DRM_ERROR("unknown crtc reloc\n");
			return -EINVAL;
		}
	}
	return 0;
}

static int evergreen_packet0_check(struct radeon_cs_parser *p,
				   struct radeon_cs_packet *pkt,
				   unsigned idx, unsigned reg)
{
	int r;

	switch (reg) {
	case EVERGREEN_VLINE_START_END:
		r = evergreen_cs_packet_parse_vline(p);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			return r;
		}
		break;
	default:
		printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
		       reg, idx);
		return -EINVAL;
	}
	return 0;
}

static int evergreen_cs_parse_packet0(struct radeon_cs_parser *p,
				      struct radeon_cs_packet *pkt)
{
	unsigned reg, i;
	unsigned idx;
	int r;

	idx = pkt->idx + 1;
	reg = pkt->reg;
	for (i = 0; i <= pkt->count; i++, idx++, reg += 4) {
		r = evergreen_packet0_check(p, pkt, idx, reg);
		if (r) {
			return r;
		}
	}
	return 0;
}

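/*
 * The *_reg_safe_bm tables carry one bit per register dword: a clear bit
 * means the register may be written freely, a set bit sends it to the
 * switch in evergreen_cs_check_reg() for special handling (or rejection).
 */
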
/**
 * evergreen_cs_check_reg() - check if register is authorized or not
 * @p: parser structure holding parsing context
 * @reg: register we are testing
 * @idx: index into the cs buffer
 *
 * This function will test against evergreen_reg_safe_bm and return 0
 * if the register is safe.  If the register is not flagged as safe this
 * function will test it against a list of registers needing special
 * handling.
 */
static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
{
	struct evergreen_cs_track *track = (struct evergreen_cs_track *)p->track;
	struct radeon_cs_reloc *reloc;
	u32 last_reg;
	u32 m, i, tmp, *ib;
	int r;

	if (p->rdev->family >= CHIP_CAYMAN)
		last_reg = ARRAY_SIZE(cayman_reg_safe_bm);
	else
		last_reg = ARRAY_SIZE(evergreen_reg_safe_bm);

	i = (reg >> 7);
	if (i >= last_reg) {
		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
		return -EINVAL;
	}
	m = 1 << ((reg >> 2) & 31);
	if (p->rdev->family >= CHIP_CAYMAN) {
		if (!(cayman_reg_safe_bm[i] & m))
			return 0;
	} else {
		if (!(evergreen_reg_safe_bm[i] & m))
			return 0;
	}
	ib = p->ib->ptr;
	switch (reg) {
	/* force the following regs to 0 in an attempt to disable the out
	 * buffer; we would need to understand how it works better to perform
	 * a proper security check on it (Jerome)
	 */
	case SQ_ESGS_RING_SIZE:
	case SQ_GSVS_RING_SIZE:
	case SQ_ESTMP_RING_SIZE:
	case SQ_GSTMP_RING_SIZE:
	case SQ_HSTMP_RING_SIZE:
	case SQ_LSTMP_RING_SIZE:
	case SQ_PSTMP_RING_SIZE:
	case SQ_VSTMP_RING_SIZE:
	case SQ_ESGS_RING_ITEMSIZE:
	case SQ_ESTMP_RING_ITEMSIZE:
	case SQ_GSTMP_RING_ITEMSIZE:
	case SQ_GSVS_RING_ITEMSIZE:
	case SQ_GS_VERT_ITEMSIZE:
	case SQ_GS_VERT_ITEMSIZE_1:
	case SQ_GS_VERT_ITEMSIZE_2:
	case SQ_GS_VERT_ITEMSIZE_3:
	case SQ_GSVS_RING_OFFSET_1:
	case SQ_GSVS_RING_OFFSET_2:
	case SQ_GSVS_RING_OFFSET_3:
	case SQ_HSTMP_RING_ITEMSIZE:
	case SQ_LSTMP_RING_ITEMSIZE:
	case SQ_PSTMP_RING_ITEMSIZE:
	case SQ_VSTMP_RING_ITEMSIZE:
	case VGT_TF_RING_SIZE:
		/* get value to populate the IB, don't remove */
		/*tmp = radeon_get_ib_value(p, idx);
		  ib[idx] = 0;*/
		break;
	case SQ_ESGS_RING_BASE:
	case SQ_GSVS_RING_BASE:
	case SQ_ESTMP_RING_BASE:
	case SQ_GSTMP_RING_BASE:
	case SQ_HSTMP_RING_BASE:
	case SQ_LSTMP_RING_BASE:
	case SQ_PSTMP_RING_BASE:
	case SQ_VSTMP_RING_BASE:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		break;
	case DB_DEPTH_CONTROL:
		track->db_depth_control = radeon_get_ib_value(p, idx);
		break;
	case CAYMAN_DB_EQAA:
		if (p->rdev->family < CHIP_CAYMAN) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		break;
	case CAYMAN_DB_DEPTH_INFO:
		if (p->rdev->family < CHIP_CAYMAN) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		break;
	case DB_Z_INFO:
		track->db_z_info = radeon_get_ib_value(p, idx);
		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
			r = evergreen_cs_packet_next_reloc(p, &reloc);
			if (r) {
				dev_warn(p->dev, "bad SET_CONTEXT_REG "
					 "0x%04X\n", reg);
				return -EINVAL;
			}
			ib[idx] &= ~Z_ARRAY_MODE(0xf);
			track->db_z_info &= ~Z_ARRAY_MODE(0xf);
			ib[idx] |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
			track->db_z_info |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
				unsigned bankw, bankh, mtaspect, tile_split;

				evergreen_tiling_fields(reloc->lobj.tiling_flags,
							&bankw, &bankh, &mtaspect,
							&tile_split);
				ib[idx] |= DB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
				ib[idx] |= DB_TILE_SPLIT(tile_split) |
					   DB_BANK_WIDTH(bankw) |
					   DB_BANK_HEIGHT(bankh) |
					   DB_MACRO_TILE_ASPECT(mtaspect);
			}
		}
		break;
	case DB_STENCIL_INFO:
		track->db_s_info = radeon_get_ib_value(p, idx);
		break;
	case DB_DEPTH_VIEW:
		track->db_depth_view = radeon_get_ib_value(p, idx);
		break;
	case DB_DEPTH_SIZE:
		track->db_depth_size = radeon_get_ib_value(p, idx);
		track->db_depth_size_idx = idx;
		break;
	case R_02805C_DB_DEPTH_SLICE:
		track->db_depth_slice = radeon_get_ib_value(p, idx);
		break;
	case DB_Z_READ_BASE:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		track->db_z_read_offset = radeon_get_ib_value(p, idx);
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->db_z_read_bo = reloc->robj;
		break;
	case DB_Z_WRITE_BASE:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		track->db_z_write_offset = radeon_get_ib_value(p, idx);
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->db_z_write_bo = reloc->robj;
		break;
	case DB_STENCIL_READ_BASE:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		track->db_s_read_offset = radeon_get_ib_value(p, idx);
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->db_s_read_bo = reloc->robj;
		break;
	case DB_STENCIL_WRITE_BASE:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		track->db_s_write_offset = radeon_get_ib_value(p, idx);
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->db_s_write_bo = reloc->robj;
		break;
	case VGT_STRMOUT_CONFIG:
		track->vgt_strmout_config = radeon_get_ib_value(p, idx);
		break;
	case VGT_STRMOUT_BUFFER_CONFIG:
		track->vgt_strmout_buffer_config = radeon_get_ib_value(p, idx);
		break;
	case VGT_STRMOUT_BUFFER_BASE_0:
	case VGT_STRMOUT_BUFFER_BASE_1:
	case VGT_STRMOUT_BUFFER_BASE_2:
	case VGT_STRMOUT_BUFFER_BASE_3:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		tmp = (reg - VGT_STRMOUT_BUFFER_BASE_0) / 16;
		track->vgt_strmout_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->vgt_strmout_bo[tmp] = reloc->robj;
		track->vgt_strmout_bo_mc[tmp] = reloc->lobj.gpu_offset;
		break;
	case VGT_STRMOUT_BUFFER_SIZE_0:
	case VGT_STRMOUT_BUFFER_SIZE_1:
	case VGT_STRMOUT_BUFFER_SIZE_2:
	case VGT_STRMOUT_BUFFER_SIZE_3:
		tmp = (reg - VGT_STRMOUT_BUFFER_SIZE_0) / 16;
		/* size in register is DWs, convert to bytes */
		track->vgt_strmout_size[tmp] = radeon_get_ib_value(p, idx) * 4;
		break;
	case CP_COHER_BASE:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "missing reloc for CP_COHER_BASE "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		break;
	case CB_TARGET_MASK:
		track->cb_target_mask = radeon_get_ib_value(p, idx);
		break;
	case CB_SHADER_MASK:
		track->cb_shader_mask = radeon_get_ib_value(p, idx);
		break;
	case PA_SC_AA_CONFIG:
		if (p->rdev->family >= CHIP_CAYMAN) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		tmp = radeon_get_ib_value(p, idx) & MSAA_NUM_SAMPLES_MASK;
		track->nsamples = 1 << tmp;
		break;
	case CAYMAN_PA_SC_AA_CONFIG:
		if (p->rdev->family < CHIP_CAYMAN) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		tmp = radeon_get_ib_value(p, idx) & CAYMAN_MSAA_NUM_SAMPLES_MASK;
		track->nsamples = 1 << tmp;
		break;
	case CB_COLOR0_VIEW:
	case CB_COLOR1_VIEW:
	case CB_COLOR2_VIEW:
	case CB_COLOR3_VIEW:
	case CB_COLOR4_VIEW:
	case CB_COLOR5_VIEW:
	case CB_COLOR6_VIEW:
	case CB_COLOR7_VIEW:
		tmp = (reg - CB_COLOR0_VIEW) / 0x3c;
		track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
		break;
	case CB_COLOR8_VIEW:
	case CB_COLOR9_VIEW:
	case CB_COLOR10_VIEW:
	case CB_COLOR11_VIEW:
		tmp = ((reg - CB_COLOR8_VIEW) / 0x1c) + 8;
		track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
		break;
	case CB_COLOR0_INFO:
	case CB_COLOR1_INFO:
	case CB_COLOR2_INFO:
	case CB_COLOR3_INFO:
	case CB_COLOR4_INFO:
	case CB_COLOR5_INFO:
	case CB_COLOR6_INFO:
	case CB_COLOR7_INFO:
		tmp = (reg - CB_COLOR0_INFO) / 0x3c;
		track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
			r = evergreen_cs_packet_next_reloc(p, &reloc);
			if (r) {
				dev_warn(p->dev, "bad SET_CONTEXT_REG "
					 "0x%04X\n", reg);
				return -EINVAL;
			}
			ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
			track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
		}
		break;
	case CB_COLOR8_INFO:
	case CB_COLOR9_INFO:
	case CB_COLOR10_INFO:
	case CB_COLOR11_INFO:
		tmp = ((reg - CB_COLOR8_INFO) / 0x1c) + 8;
		track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
			r = evergreen_cs_packet_next_reloc(p, &reloc);
			if (r) {
				dev_warn(p->dev, "bad SET_CONTEXT_REG "
					 "0x%04X\n", reg);
				return -EINVAL;
			}
			ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
			track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
		}
		break;
	case CB_COLOR0_PITCH:
	case CB_COLOR1_PITCH:
	case CB_COLOR2_PITCH:
	case CB_COLOR3_PITCH:
	case CB_COLOR4_PITCH:
	case CB_COLOR5_PITCH:
	case CB_COLOR6_PITCH:
	case CB_COLOR7_PITCH:
		tmp = (reg - CB_COLOR0_PITCH) / 0x3c;
		track->cb_color_pitch[tmp] = radeon_get_ib_value(p, idx);
		track->cb_color_pitch_idx[tmp] = idx;
		break;
	case CB_COLOR8_PITCH:
	case CB_COLOR9_PITCH:
	case CB_COLOR10_PITCH:
	case CB_COLOR11_PITCH:
		tmp = ((reg - CB_COLOR8_PITCH) / 0x1c) + 8;
		track->cb_color_pitch[tmp] = radeon_get_ib_value(p, idx);
		track->cb_color_pitch_idx[tmp] = idx;
		break;
	case CB_COLOR0_SLICE:
	case CB_COLOR1_SLICE:
	case CB_COLOR2_SLICE:
	case CB_COLOR3_SLICE:
	case CB_COLOR4_SLICE:
	case CB_COLOR5_SLICE:
	case CB_COLOR6_SLICE:
	case CB_COLOR7_SLICE:
		tmp = (reg - CB_COLOR0_SLICE) / 0x3c;
		track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx);
		track->cb_color_slice_idx[tmp] = idx;
		break;
	case CB_COLOR8_SLICE:
	case CB_COLOR9_SLICE:
	case CB_COLOR10_SLICE:
	case CB_COLOR11_SLICE:
		tmp = ((reg - CB_COLOR8_SLICE) / 0x1c) + 8;
		track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx);
		track->cb_color_slice_idx[tmp] = idx;
		break;
  1356. case CB_COLOR0_ATTRIB:
  1357. case CB_COLOR1_ATTRIB:
  1358. case CB_COLOR2_ATTRIB:
  1359. case CB_COLOR3_ATTRIB:
  1360. case CB_COLOR4_ATTRIB:
  1361. case CB_COLOR5_ATTRIB:
  1362. case CB_COLOR6_ATTRIB:
  1363. case CB_COLOR7_ATTRIB:
  1364. r = evergreen_cs_packet_next_reloc(p, &reloc);
  1365. if (r) {
  1366. dev_warn(p->dev, "bad SET_CONTEXT_REG "
  1367. "0x%04X\n", reg);
  1368. return -EINVAL;
  1369. }
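		/* unless userspace asked to keep its own tiling flags, fold
		 * the macro-tiling parameters from the BO into the ATTRIB
		 * word so the tracker sees the effective value */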
		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
				unsigned bankw, bankh, mtaspect, tile_split;
				evergreen_tiling_fields(reloc->lobj.tiling_flags,
							&bankw, &bankh, &mtaspect,
							&tile_split);
				ib[idx] |= CB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
				ib[idx] |= CB_TILE_SPLIT(tile_split) |
					   CB_BANK_WIDTH(bankw) |
					   CB_BANK_HEIGHT(bankh) |
					   CB_MACRO_TILE_ASPECT(mtaspect);
			}
		}
		tmp = ((reg - CB_COLOR0_ATTRIB) / 0x3c);
		track->cb_color_attrib[tmp] = ib[idx];
		break;
	case CB_COLOR8_ATTRIB:
	case CB_COLOR9_ATTRIB:
	case CB_COLOR10_ATTRIB:
	case CB_COLOR11_ATTRIB:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
				unsigned bankw, bankh, mtaspect, tile_split;
				evergreen_tiling_fields(reloc->lobj.tiling_flags,
							&bankw, &bankh, &mtaspect,
							&tile_split);
				ib[idx] |= CB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
				ib[idx] |= CB_TILE_SPLIT(tile_split) |
					   CB_BANK_WIDTH(bankw) |
					   CB_BANK_HEIGHT(bankh) |
					   CB_MACRO_TILE_ASPECT(mtaspect);
			}
		}
		tmp = ((reg - CB_COLOR8_ATTRIB) / 0x1c) + 8;
		track->cb_color_attrib[tmp] = ib[idx];
		break;
	case CB_COLOR0_DIM:
	case CB_COLOR1_DIM:
	case CB_COLOR2_DIM:
	case CB_COLOR3_DIM:
	case CB_COLOR4_DIM:
	case CB_COLOR5_DIM:
	case CB_COLOR6_DIM:
	case CB_COLOR7_DIM:
		tmp = (reg - CB_COLOR0_DIM) / 0x3c;
		track->cb_color_dim[tmp] = radeon_get_ib_value(p, idx);
		track->cb_color_dim_idx[tmp] = idx;
		break;
	case CB_COLOR8_DIM:
	case CB_COLOR9_DIM:
	case CB_COLOR10_DIM:
	case CB_COLOR11_DIM:
		tmp = ((reg - CB_COLOR8_DIM) / 0x1c) + 8;
		track->cb_color_dim[tmp] = radeon_get_ib_value(p, idx);
		track->cb_color_dim_idx[tmp] = idx;
		break;
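	/* FMASK/CMASK hold the MSAA colour/compression metadata; only the
	 * backing BO relocation is patched and tracked for them */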
	case CB_COLOR0_FMASK:
	case CB_COLOR1_FMASK:
	case CB_COLOR2_FMASK:
	case CB_COLOR3_FMASK:
	case CB_COLOR4_FMASK:
	case CB_COLOR5_FMASK:
	case CB_COLOR6_FMASK:
	case CB_COLOR7_FMASK:
		tmp = (reg - CB_COLOR0_FMASK) / 0x3c;
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->cb_color_fmask_bo[tmp] = reloc->robj;
		break;
	case CB_COLOR0_CMASK:
	case CB_COLOR1_CMASK:
	case CB_COLOR2_CMASK:
	case CB_COLOR3_CMASK:
	case CB_COLOR4_CMASK:
	case CB_COLOR5_CMASK:
	case CB_COLOR6_CMASK:
	case CB_COLOR7_CMASK:
		tmp = (reg - CB_COLOR0_CMASK) / 0x3c;
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->cb_color_cmask_bo[tmp] = reloc->robj;
		break;
	case CB_COLOR0_FMASK_SLICE:
	case CB_COLOR1_FMASK_SLICE:
	case CB_COLOR2_FMASK_SLICE:
	case CB_COLOR3_FMASK_SLICE:
	case CB_COLOR4_FMASK_SLICE:
	case CB_COLOR5_FMASK_SLICE:
	case CB_COLOR6_FMASK_SLICE:
	case CB_COLOR7_FMASK_SLICE:
		tmp = (reg - CB_COLOR0_FMASK_SLICE) / 0x3c;
		track->cb_color_fmask_slice[tmp] = radeon_get_ib_value(p, idx);
		break;
	case CB_COLOR0_CMASK_SLICE:
	case CB_COLOR1_CMASK_SLICE:
	case CB_COLOR2_CMASK_SLICE:
	case CB_COLOR3_CMASK_SLICE:
	case CB_COLOR4_CMASK_SLICE:
	case CB_COLOR5_CMASK_SLICE:
	case CB_COLOR6_CMASK_SLICE:
	case CB_COLOR7_CMASK_SLICE:
		tmp = (reg - CB_COLOR0_CMASK_SLICE) / 0x3c;
		track->cb_color_cmask_slice[tmp] = radeon_get_ib_value(p, idx);
		break;
	case CB_COLOR0_BASE:
	case CB_COLOR1_BASE:
	case CB_COLOR2_BASE:
	case CB_COLOR3_BASE:
	case CB_COLOR4_BASE:
	case CB_COLOR5_BASE:
	case CB_COLOR6_BASE:
	case CB_COLOR7_BASE:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		tmp = (reg - CB_COLOR0_BASE) / 0x3c;
		track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx);
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->cb_color_base_last[tmp] = ib[idx];
		track->cb_color_bo[tmp] = reloc->robj;
		break;
	case CB_COLOR8_BASE:
	case CB_COLOR9_BASE:
	case CB_COLOR10_BASE:
	case CB_COLOR11_BASE:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		tmp = ((reg - CB_COLOR8_BASE) / 0x1c) + 8;
		track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx);
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->cb_color_base_last[tmp] = ib[idx];
		track->cb_color_bo[tmp] = reloc->robj;
		break;
	case CB_IMMED0_BASE:
	case CB_IMMED1_BASE:
	case CB_IMMED2_BASE:
	case CB_IMMED3_BASE:
	case CB_IMMED4_BASE:
	case CB_IMMED5_BASE:
	case CB_IMMED6_BASE:
	case CB_IMMED7_BASE:
	case CB_IMMED8_BASE:
	case CB_IMMED9_BASE:
	case CB_IMMED10_BASE:
	case CB_IMMED11_BASE:
	case DB_HTILE_DATA_BASE:
	case SQ_PGM_START_FS:
	case SQ_PGM_START_ES:
	case SQ_PGM_START_VS:
	case SQ_PGM_START_GS:
	case SQ_PGM_START_PS:
	case SQ_PGM_START_HS:
	case SQ_PGM_START_LS:
	case SQ_CONST_MEM_BASE:
	case SQ_ALU_CONST_CACHE_GS_0:
	case SQ_ALU_CONST_CACHE_GS_1:
	case SQ_ALU_CONST_CACHE_GS_2:
	case SQ_ALU_CONST_CACHE_GS_3:
	case SQ_ALU_CONST_CACHE_GS_4:
	case SQ_ALU_CONST_CACHE_GS_5:
	case SQ_ALU_CONST_CACHE_GS_6:
	case SQ_ALU_CONST_CACHE_GS_7:
	case SQ_ALU_CONST_CACHE_GS_8:
	case SQ_ALU_CONST_CACHE_GS_9:
	case SQ_ALU_CONST_CACHE_GS_10:
	case SQ_ALU_CONST_CACHE_GS_11:
	case SQ_ALU_CONST_CACHE_GS_12:
	case SQ_ALU_CONST_CACHE_GS_13:
	case SQ_ALU_CONST_CACHE_GS_14:
	case SQ_ALU_CONST_CACHE_GS_15:
	case SQ_ALU_CONST_CACHE_PS_0:
	case SQ_ALU_CONST_CACHE_PS_1:
	case SQ_ALU_CONST_CACHE_PS_2:
	case SQ_ALU_CONST_CACHE_PS_3:
	case SQ_ALU_CONST_CACHE_PS_4:
	case SQ_ALU_CONST_CACHE_PS_5:
	case SQ_ALU_CONST_CACHE_PS_6:
	case SQ_ALU_CONST_CACHE_PS_7:
	case SQ_ALU_CONST_CACHE_PS_8:
	case SQ_ALU_CONST_CACHE_PS_9:
	case SQ_ALU_CONST_CACHE_PS_10:
	case SQ_ALU_CONST_CACHE_PS_11:
	case SQ_ALU_CONST_CACHE_PS_12:
	case SQ_ALU_CONST_CACHE_PS_13:
	case SQ_ALU_CONST_CACHE_PS_14:
	case SQ_ALU_CONST_CACHE_PS_15:
	case SQ_ALU_CONST_CACHE_VS_0:
	case SQ_ALU_CONST_CACHE_VS_1:
	case SQ_ALU_CONST_CACHE_VS_2:
	case SQ_ALU_CONST_CACHE_VS_3:
	case SQ_ALU_CONST_CACHE_VS_4:
	case SQ_ALU_CONST_CACHE_VS_5:
	case SQ_ALU_CONST_CACHE_VS_6:
	case SQ_ALU_CONST_CACHE_VS_7:
	case SQ_ALU_CONST_CACHE_VS_8:
	case SQ_ALU_CONST_CACHE_VS_9:
	case SQ_ALU_CONST_CACHE_VS_10:
	case SQ_ALU_CONST_CACHE_VS_11:
	case SQ_ALU_CONST_CACHE_VS_12:
	case SQ_ALU_CONST_CACHE_VS_13:
	case SQ_ALU_CONST_CACHE_VS_14:
	case SQ_ALU_CONST_CACHE_VS_15:
	case SQ_ALU_CONST_CACHE_HS_0:
	case SQ_ALU_CONST_CACHE_HS_1:
	case SQ_ALU_CONST_CACHE_HS_2:
	case SQ_ALU_CONST_CACHE_HS_3:
	case SQ_ALU_CONST_CACHE_HS_4:
	case SQ_ALU_CONST_CACHE_HS_5:
	case SQ_ALU_CONST_CACHE_HS_6:
	case SQ_ALU_CONST_CACHE_HS_7:
	case SQ_ALU_CONST_CACHE_HS_8:
	case SQ_ALU_CONST_CACHE_HS_9:
	case SQ_ALU_CONST_CACHE_HS_10:
	case SQ_ALU_CONST_CACHE_HS_11:
	case SQ_ALU_CONST_CACHE_HS_12:
	case SQ_ALU_CONST_CACHE_HS_13:
	case SQ_ALU_CONST_CACHE_HS_14:
	case SQ_ALU_CONST_CACHE_HS_15:
	case SQ_ALU_CONST_CACHE_LS_0:
	case SQ_ALU_CONST_CACHE_LS_1:
	case SQ_ALU_CONST_CACHE_LS_2:
	case SQ_ALU_CONST_CACHE_LS_3:
	case SQ_ALU_CONST_CACHE_LS_4:
	case SQ_ALU_CONST_CACHE_LS_5:
	case SQ_ALU_CONST_CACHE_LS_6:
	case SQ_ALU_CONST_CACHE_LS_7:
	case SQ_ALU_CONST_CACHE_LS_8:
	case SQ_ALU_CONST_CACHE_LS_9:
	case SQ_ALU_CONST_CACHE_LS_10:
	case SQ_ALU_CONST_CACHE_LS_11:
	case SQ_ALU_CONST_CACHE_LS_12:
	case SQ_ALU_CONST_CACHE_LS_13:
	case SQ_ALU_CONST_CACHE_LS_14:
	case SQ_ALU_CONST_CACHE_LS_15:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		break;
	case SX_MEMORY_EXPORT_BASE:
		if (p->rdev->family >= CHIP_CAYMAN) {
			dev_warn(p->dev, "bad SET_CONFIG_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONFIG_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		break;
	case CAYMAN_SX_SCATTER_EXPORT_BASE:
		if (p->rdev->family < CHIP_CAYMAN) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		break;
	default:
		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
		return -EINVAL;
	}
	return 0;
}

static bool evergreen_is_safe_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
{
	u32 last_reg, m, i;

	if (p->rdev->family >= CHIP_CAYMAN)
		last_reg = ARRAY_SIZE(cayman_reg_safe_bm);
	else
		last_reg = ARRAY_SIZE(evergreen_reg_safe_bm);
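	/* each u32 in the safe-register bitmap covers 32 dword registers,
	 * i.e. 128 bytes of register space: reg >> 7 selects the word,
	 * (reg >> 2) & 31 the bit within it */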
	i = (reg >> 7);
	if (i >= last_reg) {
		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
		return false;
	}
	m = 1 << ((reg >> 2) & 31);
	if (p->rdev->family >= CHIP_CAYMAN) {
		if (!(cayman_reg_safe_bm[i] & m))
			return true;
	} else {
		if (!(evergreen_reg_safe_bm[i] & m))
			return true;
	}
	dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
	return false;
}

static int evergreen_packet3_check(struct radeon_cs_parser *p,
				   struct radeon_cs_packet *pkt)
{
	struct radeon_cs_reloc *reloc;
	struct evergreen_cs_track *track;
	volatile u32 *ib;
	unsigned idx;
	unsigned i;
	unsigned start_reg, end_reg, reg;
	int r;
	u32 idx_value;

	track = (struct evergreen_cs_track *)p->track;
	ib = p->ib->ptr;
	idx = pkt->idx + 1;
	idx_value = radeon_get_ib_value(p, idx);

	switch (pkt->opcode) {
	case PACKET3_SET_PREDICATION:
	{
		int pred_op;
		int tmp;

		if (pkt->count != 1) {
			DRM_ERROR("bad SET PREDICATION\n");
			return -EINVAL;
		}

		tmp = radeon_get_ib_value(p, idx + 1);
		pred_op = (tmp >> 16) & 0x7;

		/* for the clear predicate operation */
		if (pred_op == 0)
			return 0;

		if (pred_op > 2) {
			DRM_ERROR("bad SET PREDICATION operation %d\n", pred_op);
			return -EINVAL;
		}

		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad SET PREDICATION\n");
			return -EINVAL;
		}

		ib[idx + 0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
		ib[idx + 1] = tmp + (upper_32_bits(reloc->lobj.gpu_offset) & 0xff);
	}
	break;
	case PACKET3_CONTEXT_CONTROL:
		if (pkt->count != 1) {
			DRM_ERROR("bad CONTEXT_CONTROL\n");
			return -EINVAL;
		}
		break;
	case PACKET3_INDEX_TYPE:
	case PACKET3_NUM_INSTANCES:
	case PACKET3_CLEAR_STATE:
		if (pkt->count) {
			DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES/CLEAR_STATE\n");
			return -EINVAL;
		}
		break;
	case CAYMAN_PACKET3_DEALLOC_STATE:
		if (p->rdev->family < CHIP_CAYMAN) {
			DRM_ERROR("bad PACKET3_DEALLOC_STATE\n");
			return -EINVAL;
		}
		if (pkt->count) {
			DRM_ERROR("bad PACKET3_DEALLOC_STATE (count must be 0)\n");
			return -EINVAL;
		}
		break;
	case PACKET3_INDEX_BASE:
		if (pkt->count != 1) {
			DRM_ERROR("bad INDEX_BASE\n");
			return -EINVAL;
		}
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad INDEX_BASE\n");
			return -EINVAL;
		}
		ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
		ib[idx+1] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX:
		if (pkt->count != 3) {
			DRM_ERROR("bad DRAW_INDEX\n");
			return -EINVAL;
		}
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad DRAW_INDEX\n");
			return -EINVAL;
		}
		ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
		ib[idx+1] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX_2:
		if (pkt->count != 4) {
			DRM_ERROR("bad DRAW_INDEX_2\n");
			return -EINVAL;
		}
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad DRAW_INDEX_2\n");
			return -EINVAL;
		}
		ib[idx+1] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
		ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX_AUTO:
		if (pkt->count != 1) {
			DRM_ERROR("bad DRAW_INDEX_AUTO\n");
			return -EINVAL;
		}
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX_MULTI_AUTO:
		if (pkt->count != 2) {
			DRM_ERROR("bad DRAW_INDEX_MULTI_AUTO\n");
			return -EINVAL;
		}
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX_IMMD:
		if (pkt->count < 2) {
			DRM_ERROR("bad DRAW_INDEX_IMMD\n");
			return -EINVAL;
		}
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX_OFFSET:
		if (pkt->count != 2) {
			DRM_ERROR("bad DRAW_INDEX_OFFSET\n");
			return -EINVAL;
		}
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX_OFFSET_2:
		if (pkt->count != 3) {
			DRM_ERROR("bad DRAW_INDEX_OFFSET_2\n");
			return -EINVAL;
		}
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	case PACKET3_DISPATCH_DIRECT:
		if (pkt->count != 3) {
			DRM_ERROR("bad DISPATCH_DIRECT\n");
			return -EINVAL;
		}
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
			return r;
		}
		break;
	case PACKET3_DISPATCH_INDIRECT:
		if (pkt->count != 1) {
			DRM_ERROR("bad DISPATCH_INDIRECT\n");
			return -EINVAL;
		}
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad DISPATCH_INDIRECT\n");
			return -EINVAL;
		}
		ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	case PACKET3_WAIT_REG_MEM:
		if (pkt->count != 5) {
			DRM_ERROR("bad WAIT_REG_MEM\n");
			return -EINVAL;
		}
		/* bit 4 is reg (0) or mem (1) */
		if (idx_value & 0x10) {
			r = evergreen_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad WAIT_REG_MEM\n");
				return -EINVAL;
			}
			ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
			ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		}
		break;
	case PACKET3_SURFACE_SYNC:
		if (pkt->count != 3) {
			DRM_ERROR("bad SURFACE_SYNC\n");
			return -EINVAL;
		}
		/* 0xffffffff/0x0 is flush all cache flag */
		if (radeon_get_ib_value(p, idx + 1) != 0xffffffff ||
		    radeon_get_ib_value(p, idx + 2) != 0) {
			r = evergreen_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad SURFACE_SYNC\n");
				return -EINVAL;
			}
			ib[idx+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		}
		break;
	case PACKET3_EVENT_WRITE:
		if (pkt->count != 2 && pkt->count != 0) {
			DRM_ERROR("bad EVENT_WRITE\n");
			return -EINVAL;
		}
		if (pkt->count) {
			r = evergreen_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad EVENT_WRITE\n");
				return -EINVAL;
			}
			ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
			ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		}
		break;
	case PACKET3_EVENT_WRITE_EOP:
		if (pkt->count != 4) {
			DRM_ERROR("bad EVENT_WRITE_EOP\n");
			return -EINVAL;
		}
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad EVENT_WRITE_EOP\n");
			return -EINVAL;
		}
		ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
		ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		break;
	case PACKET3_EVENT_WRITE_EOS:
		if (pkt->count != 3) {
			DRM_ERROR("bad EVENT_WRITE_EOS\n");
			return -EINVAL;
		}
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad EVENT_WRITE_EOS\n");
			return -EINVAL;
		}
		ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
		ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		break;
	case PACKET3_SET_CONFIG_REG:
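		/* the header value is a dword offset from the register
		 * aperture start; convert it to a byte address and make
		 * sure the whole write stays inside the aperture */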
		start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CONFIG_REG_START) ||
		    (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
		    (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
			DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
			return -EINVAL;
		}
		for (i = 0; i < pkt->count; i++) {
			reg = start_reg + (4 * i);
			r = evergreen_cs_check_reg(p, reg, idx+1+i);
			if (r)
				return r;
		}
		break;
	case PACKET3_SET_CONTEXT_REG:
		start_reg = (idx_value << 2) + PACKET3_SET_CONTEXT_REG_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CONTEXT_REG_START) ||
		    (start_reg >= PACKET3_SET_CONTEXT_REG_END) ||
		    (end_reg >= PACKET3_SET_CONTEXT_REG_END)) {
			DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n");
			return -EINVAL;
		}
		for (i = 0; i < pkt->count; i++) {
			reg = start_reg + (4 * i);
			r = evergreen_cs_check_reg(p, reg, idx+1+i);
			if (r)
				return r;
		}
		break;
	case PACKET3_SET_RESOURCE:
		if (pkt->count % 8) {
			DRM_ERROR("bad SET_RESOURCE\n");
			return -EINVAL;
		}
		start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_RESOURCE_START) ||
		    (start_reg >= PACKET3_SET_RESOURCE_END) ||
		    (end_reg >= PACKET3_SET_RESOURCE_END)) {
			DRM_ERROR("bad SET_RESOURCE\n");
			return -EINVAL;
		}
		for (i = 0; i < (pkt->count / 8); i++) {
			struct radeon_bo *texture, *mipmap;
			u32 toffset, moffset;
			u32 size, offset;
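			/* each SET_RESOURCE slot is 8 dwords; dword 7
			 * carries the resource type (texture vs buffer) */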
			switch (G__SQ_CONSTANT_TYPE(radeon_get_ib_value(p, idx+1+(i*8)+7))) {
			case SQ_TEX_VTX_VALID_TEXTURE:
				/* tex base */
				r = evergreen_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE (tex)\n");
					return -EINVAL;
				}
				if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
					ib[idx+1+(i*8)+1] |=
						TEX_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
					if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
						unsigned bankw, bankh, mtaspect, tile_split;
						evergreen_tiling_fields(reloc->lobj.tiling_flags,
									&bankw, &bankh, &mtaspect,
									&tile_split);
						ib[idx+1+(i*8)+6] |= TEX_TILE_SPLIT(tile_split);
						ib[idx+1+(i*8)+7] |=
							TEX_BANK_WIDTH(bankw) |
							TEX_BANK_HEIGHT(bankh) |
							MACRO_TILE_ASPECT(mtaspect) |
							TEX_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
					}
				}
				texture = reloc->robj;
				toffset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
				/* tex mip base */
				r = evergreen_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE (tex)\n");
					return -EINVAL;
				}
				moffset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
				mipmap = reloc->robj;
				r = evergreen_cs_track_validate_texture(p, texture, mipmap, idx+1+(i*8));
				if (r)
					return r;
				ib[idx+1+(i*8)+2] += toffset;
				ib[idx+1+(i*8)+3] += moffset;
				break;
			case SQ_TEX_VTX_VALID_BUFFER:
				/* vtx base */
				r = evergreen_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE (vtx)\n");
					return -EINVAL;
				}
				offset = radeon_get_ib_value(p, idx+1+(i*8)+0);
				size = radeon_get_ib_value(p, idx+1+(i*8)+1);
				if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) {
					/* force size to size of the buffer */
					dev_warn(p->dev, "vbo resource seems too big for the bo\n");
					ib[idx+1+(i*8)+1] = radeon_bo_size(reloc->robj);
				}
				ib[idx+1+(i*8)+0] += (u32)((reloc->lobj.gpu_offset) & 0xffffffff);
				ib[idx+1+(i*8)+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
				break;
			case SQ_TEX_VTX_INVALID_TEXTURE:
			case SQ_TEX_VTX_INVALID_BUFFER:
			default:
				DRM_ERROR("bad SET_RESOURCE\n");
				return -EINVAL;
			}
		}
		break;
	case PACKET3_SET_ALU_CONST:
		/* XXX fix me ALU const buffers only */
		break;
	case PACKET3_SET_BOOL_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_BOOL_CONST_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_BOOL_CONST_START) ||
		    (start_reg >= PACKET3_SET_BOOL_CONST_END) ||
		    (end_reg >= PACKET3_SET_BOOL_CONST_END)) {
			DRM_ERROR("bad SET_BOOL_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_LOOP_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_LOOP_CONST_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_LOOP_CONST_START) ||
		    (start_reg >= PACKET3_SET_LOOP_CONST_END) ||
		    (end_reg >= PACKET3_SET_LOOP_CONST_END)) {
			DRM_ERROR("bad SET_LOOP_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_CTL_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_CTL_CONST_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CTL_CONST_START) ||
		    (start_reg >= PACKET3_SET_CTL_CONST_END) ||
		    (end_reg >= PACKET3_SET_CTL_CONST_END)) {
			DRM_ERROR("bad SET_CTL_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_SAMPLER:
		if (pkt->count % 3) {
			DRM_ERROR("bad SET_SAMPLER\n");
			return -EINVAL;
		}
		start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_SAMPLER_START) ||
		    (start_reg >= PACKET3_SET_SAMPLER_END) ||
		    (end_reg >= PACKET3_SET_SAMPLER_END)) {
			DRM_ERROR("bad SET_SAMPLER\n");
			return -EINVAL;
		}
		break;
	case PACKET3_STRMOUT_BUFFER_UPDATE:
		if (pkt->count != 4) {
			DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (invalid count)\n");
			return -EINVAL;
		}
		/* Updating memory at DST_ADDRESS. */
		if (idx_value & 0x1) {
			u64 offset;
			r = evergreen_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n");
				return -EINVAL;
			}
			offset = radeon_get_ib_value(p, idx+1);
			offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE dst bo too small: 0x%llx, 0x%lx\n",
					  offset + 4, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}
			ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
			ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		}
		/* Reading data from SRC_ADDRESS. */
		if (((idx_value >> 1) & 0x3) == 2) {
			u64 offset;
			r = evergreen_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n");
				return -EINVAL;
			}
			offset = radeon_get_ib_value(p, idx+3);
			offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE src bo too small: 0x%llx, 0x%lx\n",
					  offset + 4, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}
			ib[idx+3] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
			ib[idx+4] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		}
		break;
	case PACKET3_COPY_DW:
		if (pkt->count != 4) {
			DRM_ERROR("bad COPY_DW (invalid count)\n");
			return -EINVAL;
		}
		if (idx_value & 0x1) {
			u64 offset;
			/* SRC is memory. */
			r = evergreen_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad COPY_DW (missing src reloc)\n");
				return -EINVAL;
			}
			offset = radeon_get_ib_value(p, idx+1);
			offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
				DRM_ERROR("bad COPY_DW src bo too small: 0x%llx, 0x%lx\n",
					  offset + 4, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}
			ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
			ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		} else {
			/* SRC is a reg. */
			reg = radeon_get_ib_value(p, idx+1) << 2;
			if (!evergreen_is_safe_reg(p, reg, idx+1))
				return -EINVAL;
		}
		if (idx_value & 0x2) {
			u64 offset;
			/* DST is memory. */
			r = evergreen_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad COPY_DW (missing dst reloc)\n");
				return -EINVAL;
			}
			offset = radeon_get_ib_value(p, idx+3);
			offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
				DRM_ERROR("bad COPY_DW dst bo too small: 0x%llx, 0x%lx\n",
					  offset + 4, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}
			ib[idx+3] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
			ib[idx+4] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		} else {
			/* DST is a reg. */
			reg = radeon_get_ib_value(p, idx+3) << 2;
			if (!evergreen_is_safe_reg(p, reg, idx+3))
				return -EINVAL;
		}
		break;
	case PACKET3_NOP:
		break;
	default:
		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
		return -EINVAL;
	}
	return 0;
}

int evergreen_cs_parse(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet pkt;
	struct evergreen_cs_track *track;
	u32 tmp;
	int r;

	if (p->track == NULL) {
		/* initialize tracker, we are in kms */
		track = kzalloc(sizeof(*track), GFP_KERNEL);
		if (track == NULL)
			return -ENOMEM;
		evergreen_cs_track_init(track);
		if (p->rdev->family >= CHIP_CAYMAN)
			tmp = p->rdev->config.cayman.tile_config;
		else
			tmp = p->rdev->config.evergreen.tile_config;
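		/* tile_config packs pipes (bits 0-3), banks (4-7),
		 * group size (8-11) and row size (12-15); decode each
		 * field for the tracker */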
		switch (tmp & 0xf) {
		case 0:
			track->npipes = 1;
			break;
		case 1:
		default:
			track->npipes = 2;
			break;
		case 2:
			track->npipes = 4;
			break;
		case 3:
			track->npipes = 8;
			break;
		}
		switch ((tmp & 0xf0) >> 4) {
		case 0:
			track->nbanks = 4;
			break;
		case 1:
		default:
			track->nbanks = 8;
			break;
		case 2:
			track->nbanks = 16;
			break;
		}
		switch ((tmp & 0xf00) >> 8) {
		case 0:
			track->group_size = 256;
			break;
		case 1:
		default:
			track->group_size = 512;
			break;
		}
		switch ((tmp & 0xf000) >> 12) {
		case 0:
			track->row_size = 1;
			break;
		case 1:
		default:
			track->row_size = 2;
			break;
		case 2:
			track->row_size = 4;
			break;
		}
		p->track = track;
	}
	do {
		r = evergreen_cs_packet_parse(p, &pkt, p->idx);
		if (r) {
			kfree(p->track);
			p->track = NULL;
			return r;
		}
		p->idx += pkt.count + 2;
		switch (pkt.type) {
		case PACKET_TYPE0:
			r = evergreen_cs_parse_packet0(p, &pkt);
			break;
		case PACKET_TYPE2:
			break;
		case PACKET_TYPE3:
			r = evergreen_packet3_check(p, &pkt);
			break;
		default:
			DRM_ERROR("Unknown packet type %d !\n", pkt.type);
			kfree(p->track);
			p->track = NULL;
			return -EINVAL;
		}
		if (r) {
			kfree(p->track);
			p->track = NULL;
			return r;
		}
	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
#if 0
	for (r = 0; r < p->ib->length_dw; r++) {
		printk(KERN_INFO "%05d 0x%08X\n", r, p->ib->ptr[r]);
		mdelay(1);
	}
#endif
	kfree(p->track);
	p->track = NULL;
	return 0;
}

/* vm parser */
static bool evergreen_vm_reg_valid(u32 reg)
{
	/* context regs are fine */
	if (reg >= 0x28000)
		return true;

	/* check config regs */
	switch (reg) {
	case GRBM_GFX_INDEX:
	case VGT_VTX_VECT_EJECT_REG:
	case VGT_CACHE_INVALIDATION:
	case VGT_GS_VERTEX_REUSE:
	case VGT_PRIMITIVE_TYPE:
	case VGT_INDEX_TYPE:
	case VGT_NUM_INDICES:
	case VGT_NUM_INSTANCES:
	case VGT_COMPUTE_DIM_X:
	case VGT_COMPUTE_DIM_Y:
	case VGT_COMPUTE_DIM_Z:
	case VGT_COMPUTE_START_X:
	case VGT_COMPUTE_START_Y:
	case VGT_COMPUTE_START_Z:
	case VGT_COMPUTE_INDEX:
	case VGT_COMPUTE_THREAD_GROUP_SIZE:
	case VGT_HS_OFFCHIP_PARAM:
	case PA_CL_ENHANCE:
	case PA_SU_LINE_STIPPLE_VALUE:
	case PA_SC_LINE_STIPPLE_STATE:
	case PA_SC_ENHANCE:
	case SQ_DYN_GPR_CNTL_PS_FLUSH_REQ:
	case SQ_DYN_GPR_SIMD_LOCK_EN:
	case SQ_CONFIG:
	case SQ_GPR_RESOURCE_MGMT_1:
	case SQ_GLOBAL_GPR_RESOURCE_MGMT_1:
	case SQ_GLOBAL_GPR_RESOURCE_MGMT_2:
	case SQ_CONST_MEM_BASE:
	case SQ_STATIC_THREAD_MGMT_1:
	case SQ_STATIC_THREAD_MGMT_2:
	case SQ_STATIC_THREAD_MGMT_3:
	case SPI_CONFIG_CNTL:
	case SPI_CONFIG_CNTL_1:
	case TA_CNTL_AUX:
	case DB_DEBUG:
	case DB_DEBUG2:
	case DB_DEBUG3:
	case DB_DEBUG4:
	case DB_WATERMARKS:
	case TD_PS_BORDER_COLOR_INDEX:
	case TD_PS_BORDER_COLOR_RED:
	case TD_PS_BORDER_COLOR_GREEN:
	case TD_PS_BORDER_COLOR_BLUE:
	case TD_PS_BORDER_COLOR_ALPHA:
	case TD_VS_BORDER_COLOR_INDEX:
	case TD_VS_BORDER_COLOR_RED:
	case TD_VS_BORDER_COLOR_GREEN:
	case TD_VS_BORDER_COLOR_BLUE:
	case TD_VS_BORDER_COLOR_ALPHA:
	case TD_GS_BORDER_COLOR_INDEX:
	case TD_GS_BORDER_COLOR_RED:
	case TD_GS_BORDER_COLOR_GREEN:
	case TD_GS_BORDER_COLOR_BLUE:
	case TD_GS_BORDER_COLOR_ALPHA:
	case TD_HS_BORDER_COLOR_INDEX:
	case TD_HS_BORDER_COLOR_RED:
	case TD_HS_BORDER_COLOR_GREEN:
	case TD_HS_BORDER_COLOR_BLUE:
	case TD_HS_BORDER_COLOR_ALPHA:
	case TD_LS_BORDER_COLOR_INDEX:
	case TD_LS_BORDER_COLOR_RED:
	case TD_LS_BORDER_COLOR_GREEN:
	case TD_LS_BORDER_COLOR_BLUE:
	case TD_LS_BORDER_COLOR_ALPHA:
	case TD_CS_BORDER_COLOR_INDEX:
	case TD_CS_BORDER_COLOR_RED:
	case TD_CS_BORDER_COLOR_GREEN:
	case TD_CS_BORDER_COLOR_BLUE:
	case TD_CS_BORDER_COLOR_ALPHA:
	case SQ_ESGS_RING_SIZE:
	case SQ_GSVS_RING_SIZE:
	case SQ_ESTMP_RING_SIZE:
	case SQ_GSTMP_RING_SIZE:
	case SQ_HSTMP_RING_SIZE:
	case SQ_LSTMP_RING_SIZE:
	case SQ_PSTMP_RING_SIZE:
	case SQ_VSTMP_RING_SIZE:
	case SQ_ESGS_RING_ITEMSIZE:
	case SQ_ESTMP_RING_ITEMSIZE:
	case SQ_GSTMP_RING_ITEMSIZE:
	case SQ_GSVS_RING_ITEMSIZE:
	case SQ_GS_VERT_ITEMSIZE:
	case SQ_GS_VERT_ITEMSIZE_1:
	case SQ_GS_VERT_ITEMSIZE_2:
	case SQ_GS_VERT_ITEMSIZE_3:
	case SQ_GSVS_RING_OFFSET_1:
	case SQ_GSVS_RING_OFFSET_2:
	case SQ_GSVS_RING_OFFSET_3:
	case SQ_HSTMP_RING_ITEMSIZE:
	case SQ_LSTMP_RING_ITEMSIZE:
	case SQ_PSTMP_RING_ITEMSIZE:
	case SQ_VSTMP_RING_ITEMSIZE:
	case VGT_TF_RING_SIZE:
	case SQ_ESGS_RING_BASE:
	case SQ_GSVS_RING_BASE:
	case SQ_ESTMP_RING_BASE:
	case SQ_GSTMP_RING_BASE:
	case SQ_HSTMP_RING_BASE:
	case SQ_LSTMP_RING_BASE:
	case SQ_PSTMP_RING_BASE:
	case SQ_VSTMP_RING_BASE:
	case CAYMAN_VGT_OFFCHIP_LDS_BASE:
	case CAYMAN_SQ_EX_ALLOC_TABLE_SLOTS:
		return true;
	default:
		return false;
	}
}

static int evergreen_vm_packet3_check(struct radeon_device *rdev,
				      u32 *ib, struct radeon_cs_packet *pkt)
{
	u32 idx = pkt->idx + 1;
	u32 idx_value = ib[idx];
	u32 start_reg, end_reg, reg, i;
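	/* with VM enabled most packets pass through untouched; only opcodes
	 * that name a register by index have their targets checked against
	 * evergreen_vm_reg_valid() */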
	switch (pkt->opcode) {
	case PACKET3_NOP:
	case PACKET3_SET_BASE:
	case PACKET3_CLEAR_STATE:
	case PACKET3_INDEX_BUFFER_SIZE:
	case PACKET3_DISPATCH_DIRECT:
	case PACKET3_DISPATCH_INDIRECT:
	case PACKET3_MODE_CONTROL:
	case PACKET3_SET_PREDICATION:
	case PACKET3_COND_EXEC:
	case PACKET3_PRED_EXEC:
	case PACKET3_DRAW_INDIRECT:
	case PACKET3_DRAW_INDEX_INDIRECT:
	case PACKET3_INDEX_BASE:
	case PACKET3_DRAW_INDEX_2:
	case PACKET3_CONTEXT_CONTROL:
	case PACKET3_DRAW_INDEX_OFFSET:
	case PACKET3_INDEX_TYPE:
	case PACKET3_DRAW_INDEX:
	case PACKET3_DRAW_INDEX_AUTO:
	case PACKET3_DRAW_INDEX_IMMD:
	case PACKET3_NUM_INSTANCES:
	case PACKET3_DRAW_INDEX_MULTI_AUTO:
	case PACKET3_STRMOUT_BUFFER_UPDATE:
	case PACKET3_DRAW_INDEX_OFFSET_2:
	case PACKET3_DRAW_INDEX_MULTI_ELEMENT:
	case PACKET3_MPEG_INDEX:
	case PACKET3_WAIT_REG_MEM:
	case PACKET3_MEM_WRITE:
	case PACKET3_SURFACE_SYNC:
	case PACKET3_EVENT_WRITE:
	case PACKET3_EVENT_WRITE_EOP:
	case PACKET3_EVENT_WRITE_EOS:
	case PACKET3_SET_CONTEXT_REG:
	case PACKET3_SET_BOOL_CONST:
	case PACKET3_SET_LOOP_CONST:
	case PACKET3_SET_RESOURCE:
	case PACKET3_SET_SAMPLER:
	case PACKET3_SET_CTL_CONST:
	case PACKET3_SET_RESOURCE_OFFSET:
	case PACKET3_SET_CONTEXT_REG_INDIRECT:
	case PACKET3_SET_RESOURCE_INDIRECT:
	case CAYMAN_PACKET3_DEALLOC_STATE:
		break;
	case PACKET3_COND_WRITE:
		if (idx_value & 0x100) {
			reg = ib[idx + 5] * 4;
			if (!evergreen_vm_reg_valid(reg))
				return -EINVAL;
		}
		break;
	case PACKET3_COPY_DW:
		if (idx_value & 0x2) {
			reg = ib[idx + 3] * 4;
			if (!evergreen_vm_reg_valid(reg))
				return -EINVAL;
		}
		break;
	case PACKET3_SET_CONFIG_REG:
		start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CONFIG_REG_START) ||
		    (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
		    (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
			DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
			return -EINVAL;
		}
		for (i = 0; i < pkt->count; i++) {
			reg = start_reg + (4 * i);
			if (!evergreen_vm_reg_valid(reg))
				return -EINVAL;
		}
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
{
	int ret = 0;
	u32 idx = 0;
	struct radeon_cs_packet pkt;

	do {
		pkt.idx = idx;
		pkt.type = CP_PACKET_GET_TYPE(ib->ptr[idx]);
		pkt.count = CP_PACKET_GET_COUNT(ib->ptr[idx]);
		pkt.one_reg_wr = 0;
		switch (pkt.type) {
		case PACKET_TYPE0:
			dev_err(rdev->dev, "Packet0 not allowed!\n");
			ret = -EINVAL;
			break;
		case PACKET_TYPE2:
			idx += 1;
			break;
		case PACKET_TYPE3:
			pkt.opcode = CP_PACKET3_GET_OPCODE(ib->ptr[idx]);
			ret = evergreen_vm_packet3_check(rdev, ib->ptr, &pkt);
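			/* a type-3 packet occupies its header plus
			 * count + 1 payload dwords */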
			idx += pkt.count + 2;
			break;
		default:
			dev_err(rdev->dev, "Unknown packet type %d !\n", pkt.type);
			ret = -EINVAL;
			break;
		}
		if (ret)
			break;
	} while (idx < ib->length_dw);

	return ret;
}