/*
 * Copyright 2010 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include "drmP.h"
#include "radeon.h"
#include "evergreend.h"
#include "evergreen_reg_safe.h"
#include "cayman_reg_safe.h"

static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p,
					  struct radeon_cs_reloc **cs_reloc);

struct evergreen_cs_track {
	u32			group_size;
	u32			nbanks;
	u32			npipes;
	u32			row_size;
	/* value we track */
	u32			nsamples;
	u32			cb_color_base_last[12];
	struct radeon_bo	*cb_color_bo[12];
	u32			cb_color_bo_offset[12];
	struct radeon_bo	*cb_color_fmask_bo[8];
	struct radeon_bo	*cb_color_cmask_bo[8];
	u32			cb_color_info[12];
	u32			cb_color_view[12];
	u32			cb_color_pitch_idx[12];
	u32			cb_color_slice_idx[12];
	u32			cb_color_dim_idx[12];
	u32			cb_color_dim[12];
	u32			cb_color_pitch[12];
	u32			cb_color_slice[12];
	u32			cb_color_cmask_slice[8];
	u32			cb_color_fmask_slice[8];
	u32			cb_target_mask;
	u32			cb_shader_mask;
	u32			vgt_strmout_config;
	u32			vgt_strmout_buffer_config;
	u32			db_depth_control;
	u32			db_depth_view;
	u32			db_depth_size;
	u32			db_depth_size_idx;
	u32			db_z_info;
	u32			db_z_idx;
	u32			db_z_read_offset;
	u32			db_z_write_offset;
	struct radeon_bo	*db_z_read_bo;
	struct radeon_bo	*db_z_write_bo;
	u32			db_s_info;
	u32			db_s_idx;
	u32			db_s_read_offset;
	u32			db_s_write_offset;
	struct radeon_bo	*db_s_read_bo;
	struct radeon_bo	*db_s_write_bo;
};

static u32 evergreen_cs_get_array_mode(u32 tiling_flags)
{
	if (tiling_flags & RADEON_TILING_MACRO)
		return ARRAY_2D_TILED_THIN1;
	else if (tiling_flags & RADEON_TILING_MICRO)
		return ARRAY_1D_TILED_THIN1;
	else
		return ARRAY_LINEAR_GENERAL;
}

static u32 evergreen_cs_get_num_banks(u32 nbanks)
{
	switch (nbanks) {
	case 2:
		return ADDR_SURF_2_BANK;
	case 4:
		return ADDR_SURF_4_BANK;
	case 8:
	default:
		return ADDR_SURF_8_BANK;
	case 16:
		return ADDR_SURF_16_BANK;
	}
}

static u32 evergreen_cs_get_tile_split(u32 row_size)
{
	switch (row_size) {
	case 1:
	default:
		return ADDR_SURF_TILE_SPLIT_1KB;
	case 2:
		return ADDR_SURF_TILE_SPLIT_2KB;
	case 4:
		return ADDR_SURF_TILE_SPLIT_4KB;
	}
}
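
/*
 * track->row_size is decoded from tile_config in evergreen_cs_parse()
 * below as 1, 2 or 4, i.e. the tile split size in KB, matching the
 * ADDR_SURF_TILE_SPLIT_{1,2,4}KB values above.
 */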

static void evergreen_cs_track_init(struct evergreen_cs_track *track)
{
	int i;

	for (i = 0; i < 8; i++) {
		track->cb_color_fmask_bo[i] = NULL;
		track->cb_color_cmask_bo[i] = NULL;
		track->cb_color_cmask_slice[i] = 0;
		track->cb_color_fmask_slice[i] = 0;
	}

	for (i = 0; i < 12; i++) {
		track->cb_color_base_last[i] = 0;
		track->cb_color_bo[i] = NULL;
		track->cb_color_bo_offset[i] = 0xFFFFFFFF;
		track->cb_color_info[i] = 0;
		track->cb_color_view[i] = 0;
		track->cb_color_pitch_idx[i] = 0;
		track->cb_color_slice_idx[i] = 0;
		track->cb_color_dim_idx[i] = 0;
		track->cb_color_dim[i] = 0;
		track->cb_color_pitch[i] = 0;
		track->cb_color_slice[i] = 0;
	}
	track->cb_target_mask = 0xFFFFFFFF;
	track->cb_shader_mask = 0xFFFFFFFF;

	track->db_depth_view = 0xFFFFC000;
	track->db_depth_size = 0xFFFFFFFF;
	track->db_depth_size_idx = 0;
	track->db_depth_control = 0xFFFFFFFF;
	track->db_z_info = 0xFFFFFFFF;
	track->db_z_idx = 0xFFFFFFFF;
	track->db_z_read_offset = 0xFFFFFFFF;
	track->db_z_write_offset = 0xFFFFFFFF;
	track->db_z_read_bo = NULL;
	track->db_z_write_bo = NULL;
	track->db_s_info = 0xFFFFFFFF;
	track->db_s_idx = 0xFFFFFFFF;
	track->db_s_read_offset = 0xFFFFFFFF;
	track->db_s_write_offset = 0xFFFFFFFF;
	track->db_s_read_bo = NULL;
	track->db_s_write_bo = NULL;
}

static int evergreen_cs_track_check(struct radeon_cs_parser *p)
{
	struct evergreen_cs_track *track = p->track;

	/* we don't support stream out buffers yet */
	if (track->vgt_strmout_config || track->vgt_strmout_buffer_config) {
		dev_warn(p->dev, "this kernel doesn't support SMX output buffers\n");
		return -EINVAL;
	}
	/* XXX fill in */
	return 0;
}

/**
 * evergreen_cs_packet_parse() - parse cp packet and point ib index to next packet
 * @p:		parser structure holding parsing context.
 * @pkt:	where to store packet information
 * @idx:	index of the packet header in the ib
 *
 * Assumes that chunk_ib_index is properly set. Returns -EINVAL
 * if the packet is bigger than the remaining ib size, or if the packet
 * is unknown.
 **/
int evergreen_cs_packet_parse(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt,
			      unsigned idx)
{
	struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
	uint32_t header;

	if (idx >= ib_chunk->length_dw) {
		DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
			  idx, ib_chunk->length_dw);
		return -EINVAL;
	}
	header = radeon_get_ib_value(p, idx);
	pkt->idx = idx;
	pkt->type = CP_PACKET_GET_TYPE(header);
	pkt->count = CP_PACKET_GET_COUNT(header);
	pkt->one_reg_wr = 0;
	switch (pkt->type) {
	case PACKET_TYPE0:
		pkt->reg = CP_PACKET0_GET_REG(header);
		break;
	case PACKET_TYPE3:
		pkt->opcode = CP_PACKET3_GET_OPCODE(header);
		break;
	case PACKET_TYPE2:
		pkt->count = -1;
		break;
	default:
		DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
		return -EINVAL;
	}
	if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
		DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
			  pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
		return -EINVAL;
	}
	return 0;
}
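
/*
 * Worked example, assuming the usual r600-family header layout (type in
 * bits 31:30, count in bits 29:16, opcode in bits 15:8): a header of
 * 0xC0026900 decodes to type 3, count 2, opcode 0x69 (SET_CONTEXT_REG),
 * i.e. one header dword followed by count + 1 payload dwords, so the
 * next packet starts count + 2 dwords later.
 */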

/**
 * evergreen_cs_packet_next_reloc() - parse next packet which should be a reloc packet3
 * @p:		parser structure holding parsing context.
 * @cs_reloc:	where to store the relocation information
 *
 * Check that the next packet is a relocation packet3 and return the
 * relocation entry it refers to.
 **/
static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p,
					  struct radeon_cs_reloc **cs_reloc)
{
	struct radeon_cs_chunk *relocs_chunk;
	struct radeon_cs_packet p3reloc;
	unsigned idx;
	int r;

	if (p->chunk_relocs_idx == -1) {
		DRM_ERROR("No relocation chunk !\n");
		return -EINVAL;
	}
	*cs_reloc = NULL;
	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
	r = evergreen_cs_packet_parse(p, &p3reloc, p->idx);
	if (r)
		return r;
	p->idx += p3reloc.count + 2;
	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
			  p3reloc.idx);
		return -EINVAL;
	}
	idx = radeon_get_ib_value(p, p3reloc.idx + 1);
	if (idx >= relocs_chunk->length_dw) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
			  idx, relocs_chunk->length_dw);
		return -EINVAL;
	}
	/* FIXME: we assume reloc size is 4 dwords */
	*cs_reloc = p->relocs_ptr[(idx / 4)];
	return 0;
}

/**
 * evergreen_cs_packet_parse_vline() - parse userspace VLINE packet
 * @p:	parser structure holding parsing context.
 *
 * Userspace sends a special sequence for VLINE waits:
 * PACKET0 - VLINE_START_END + value
 * PACKET3 - WAIT_REG_MEM poll vline status reg
 * RELOC (P3) - crtc_id in reloc.
 *
 * This function parses this and relocates the VLINE START END
 * and WAIT_REG_MEM packets to the correct crtc.
 * It also detects a switched-off crtc and nops out the
 * wait in that case.
 */
static int evergreen_cs_packet_parse_vline(struct radeon_cs_parser *p)
{
	struct drm_mode_object *obj;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	struct radeon_cs_packet p3reloc, wait_reg_mem;
	int crtc_id;
	int r;
	uint32_t header, h_idx, reg, wait_reg_mem_info;
	volatile uint32_t *ib;

	ib = p->ib->ptr;

	/* parse the WAIT_REG_MEM */
	r = evergreen_cs_packet_parse(p, &wait_reg_mem, p->idx);
	if (r)
		return r;

	/* check it's a WAIT_REG_MEM */
	if (wait_reg_mem.type != PACKET_TYPE3 ||
	    wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) {
		DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n");
		return -EINVAL;
	}

	wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1);
	/* bit 4 is reg (0) or mem (1) */
	if (wait_reg_mem_info & 0x10) {
		DRM_ERROR("vline WAIT_REG_MEM waiting on MEM rather than REG\n");
		return -EINVAL;
	}
	/* waiting for value to be equal */
	if ((wait_reg_mem_info & 0x7) != 0x3) {
		DRM_ERROR("vline WAIT_REG_MEM function not equal\n");
		return -EINVAL;
	}
	if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != EVERGREEN_VLINE_STATUS) {
		DRM_ERROR("vline WAIT_REG_MEM bad reg\n");
		return -EINVAL;
	}
	if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != EVERGREEN_VLINE_STAT) {
		DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n");
		return -EINVAL;
	}

	/* jump over the NOP */
	r = evergreen_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2);
	if (r)
		return r;

	h_idx = p->idx - 2;
	p->idx += wait_reg_mem.count + 2;
	p->idx += p3reloc.count + 2;

	header = radeon_get_ib_value(p, h_idx);
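	/*
	 * The crtc_id sits in the NOP reloc payload: after the PACKET0
	 * header + 1 value (h_idx, h_idx + 1), the WAIT_REG_MEM header +
	 * 6 dwords (h_idx + 2 .. h_idx + 8) and the NOP header; hence
	 * h_idx + 2 + 7 + 1.
	 */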
	crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
	reg = CP_PACKET0_GET_REG(header);
	obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
	if (!obj) {
		DRM_ERROR("cannot find crtc %d\n", crtc_id);
		return -EINVAL;
	}
	crtc = obj_to_crtc(obj);
	radeon_crtc = to_radeon_crtc(crtc);
	crtc_id = radeon_crtc->crtc_id;

	if (!crtc->enabled) {
		/* if the CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */
		ib[h_idx + 2] = PACKET2(0);
		ib[h_idx + 3] = PACKET2(0);
		ib[h_idx + 4] = PACKET2(0);
		ib[h_idx + 5] = PACKET2(0);
		ib[h_idx + 6] = PACKET2(0);
		ib[h_idx + 7] = PACKET2(0);
		ib[h_idx + 8] = PACKET2(0);
	} else {
		switch (reg) {
		case EVERGREEN_VLINE_START_END:
			header &= ~R600_CP_PACKET0_REG_MASK;
			header |= (EVERGREEN_VLINE_START_END + radeon_crtc->crtc_offset) >> 2;
			ib[h_idx] = header;
			ib[h_idx + 4] = (EVERGREEN_VLINE_STATUS + radeon_crtc->crtc_offset) >> 2;
			break;
		default:
			DRM_ERROR("unknown crtc reloc\n");
			return -EINVAL;
		}
	}
	return 0;
}

static int evergreen_packet0_check(struct radeon_cs_parser *p,
				   struct radeon_cs_packet *pkt,
				   unsigned idx, unsigned reg)
{
	int r;

	switch (reg) {
	case EVERGREEN_VLINE_START_END:
		r = evergreen_cs_packet_parse_vline(p);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			return r;
		}
		break;
	default:
		printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
		       reg, idx);
		return -EINVAL;
	}
	return 0;
}

static int evergreen_cs_parse_packet0(struct radeon_cs_parser *p,
				      struct radeon_cs_packet *pkt)
{
	unsigned reg, i;
	unsigned idx;
	int r;

	idx = pkt->idx + 1;
	reg = pkt->reg;
	for (i = 0; i <= pkt->count; i++, idx++, reg += 4) {
		r = evergreen_packet0_check(p, pkt, idx, reg);
		if (r)
			return r;
	}
	return 0;
}

/**
 * evergreen_cs_check_reg() - check if register is authorized or not
 * @p:	parser structure holding parsing context
 * @reg: register we are testing
 * @idx: index into the cs buffer
 *
 * This function will test against evergreen_reg_safe_bm and return 0
 * if the register is safe. If the register is not flagged as safe this
 * function will test it against a list of registers needing special
 * handling.
 */
static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
{
	struct evergreen_cs_track *track = (struct evergreen_cs_track *)p->track;
	struct radeon_cs_reloc *reloc;
	u32 last_reg;
	u32 m, i, tmp, *ib;
	int r;

	if (p->rdev->family >= CHIP_CAYMAN)
		last_reg = ARRAY_SIZE(cayman_reg_safe_bm);
	else
		last_reg = ARRAY_SIZE(evergreen_reg_safe_bm);
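	/*
	 * Each bitmap word covers 32 dword registers, i.e. 128 bytes of
	 * register space: reg >> 7 selects the word, (reg >> 2) & 31 the
	 * bit. A clear bit means the register is safe to write as-is; a
	 * set bit sends it to the switch below for special handling.
	 */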
	i = (reg >> 7);
	if (i >= last_reg) {
		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
		return -EINVAL;
	}
	m = 1 << ((reg >> 2) & 31);
	if (p->rdev->family >= CHIP_CAYMAN) {
		if (!(cayman_reg_safe_bm[i] & m))
			return 0;
	} else {
		if (!(evergreen_reg_safe_bm[i] & m))
			return 0;
	}
	ib = p->ib->ptr;
	switch (reg) {
	/* force the following regs to 0 in an attempt to disable the out
	 * buffer; we will need to understand how it works better before
	 * we can perform a proper security check on it (Jerome)
	 */
	case SQ_ESGS_RING_SIZE:
	case SQ_GSVS_RING_SIZE:
	case SQ_ESTMP_RING_SIZE:
	case SQ_GSTMP_RING_SIZE:
	case SQ_HSTMP_RING_SIZE:
	case SQ_LSTMP_RING_SIZE:
	case SQ_PSTMP_RING_SIZE:
	case SQ_VSTMP_RING_SIZE:
	case SQ_ESGS_RING_ITEMSIZE:
	case SQ_ESTMP_RING_ITEMSIZE:
	case SQ_GSTMP_RING_ITEMSIZE:
	case SQ_GSVS_RING_ITEMSIZE:
	case SQ_GS_VERT_ITEMSIZE:
	case SQ_GS_VERT_ITEMSIZE_1:
	case SQ_GS_VERT_ITEMSIZE_2:
	case SQ_GS_VERT_ITEMSIZE_3:
	case SQ_GSVS_RING_OFFSET_1:
	case SQ_GSVS_RING_OFFSET_2:
	case SQ_GSVS_RING_OFFSET_3:
	case SQ_HSTMP_RING_ITEMSIZE:
	case SQ_LSTMP_RING_ITEMSIZE:
	case SQ_PSTMP_RING_ITEMSIZE:
	case SQ_VSTMP_RING_ITEMSIZE:
	case VGT_TF_RING_SIZE:
		/* get value to populate the IB, don't remove */
		/*tmp = radeon_get_ib_value(p, idx);
		  ib[idx] = 0;*/
		break;
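	/*
	 * These ring bases take a 256-byte-aligned address; the relocated
	 * GPU offset is shifted right by 8 below to match.
	 */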
	case SQ_ESGS_RING_BASE:
	case SQ_GSVS_RING_BASE:
	case SQ_ESTMP_RING_BASE:
	case SQ_GSTMP_RING_BASE:
	case SQ_HSTMP_RING_BASE:
	case SQ_LSTMP_RING_BASE:
	case SQ_PSTMP_RING_BASE:
	case SQ_VSTMP_RING_BASE:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		break;
	case DB_DEPTH_CONTROL:
		track->db_depth_control = radeon_get_ib_value(p, idx);
		break;
	case CAYMAN_DB_EQAA:
		if (p->rdev->family < CHIP_CAYMAN) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		break;
	case CAYMAN_DB_DEPTH_INFO:
		if (p->rdev->family < CHIP_CAYMAN) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		break;
	case DB_Z_INFO:
		track->db_z_info = radeon_get_ib_value(p, idx);
		if (!p->keep_tiling_flags) {
			r = evergreen_cs_packet_next_reloc(p, &reloc);
			if (r) {
				dev_warn(p->dev, "bad SET_CONTEXT_REG "
					 "0x%04X\n", reg);
				return -EINVAL;
			}
			ib[idx] &= ~Z_ARRAY_MODE(0xf);
			track->db_z_info &= ~Z_ARRAY_MODE(0xf);
			ib[idx] |= Z_ARRAY_MODE(evergreen_cs_get_array_mode(reloc->lobj.tiling_flags));
			track->db_z_info |= Z_ARRAY_MODE(evergreen_cs_get_array_mode(reloc->lobj.tiling_flags));
			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
				ib[idx] |= DB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
				ib[idx] |= DB_TILE_SPLIT(evergreen_cs_get_tile_split(track->row_size));
			}
		}
		break;
	case DB_STENCIL_INFO:
		track->db_s_info = radeon_get_ib_value(p, idx);
		break;
	case DB_DEPTH_VIEW:
		track->db_depth_view = radeon_get_ib_value(p, idx);
		break;
	case DB_DEPTH_SIZE:
		track->db_depth_size = radeon_get_ib_value(p, idx);
		track->db_depth_size_idx = idx;
		break;
	case DB_Z_READ_BASE:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		track->db_z_read_offset = radeon_get_ib_value(p, idx);
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->db_z_read_bo = reloc->robj;
		break;
	case DB_Z_WRITE_BASE:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		track->db_z_write_offset = radeon_get_ib_value(p, idx);
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->db_z_write_bo = reloc->robj;
		break;
	case DB_STENCIL_READ_BASE:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		track->db_s_read_offset = radeon_get_ib_value(p, idx);
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->db_s_read_bo = reloc->robj;
		break;
	case DB_STENCIL_WRITE_BASE:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		track->db_s_write_offset = radeon_get_ib_value(p, idx);
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->db_s_write_bo = reloc->robj;
		break;
	case VGT_STRMOUT_CONFIG:
		track->vgt_strmout_config = radeon_get_ib_value(p, idx);
		break;
	case VGT_STRMOUT_BUFFER_CONFIG:
		track->vgt_strmout_buffer_config = radeon_get_ib_value(p, idx);
		break;
	case CB_TARGET_MASK:
		track->cb_target_mask = radeon_get_ib_value(p, idx);
		break;
	case CB_SHADER_MASK:
		track->cb_shader_mask = radeon_get_ib_value(p, idx);
		break;
	case PA_SC_AA_CONFIG:
		if (p->rdev->family >= CHIP_CAYMAN) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		tmp = radeon_get_ib_value(p, idx) & MSAA_NUM_SAMPLES_MASK;
		track->nsamples = 1 << tmp;
		break;
	case CAYMAN_PA_SC_AA_CONFIG:
		if (p->rdev->family < CHIP_CAYMAN) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		tmp = radeon_get_ib_value(p, idx) & CAYMAN_MSAA_NUM_SAMPLES_MASK;
		track->nsamples = 1 << tmp;
		break;
	case CB_COLOR0_VIEW:
	case CB_COLOR1_VIEW:
	case CB_COLOR2_VIEW:
	case CB_COLOR3_VIEW:
	case CB_COLOR4_VIEW:
	case CB_COLOR5_VIEW:
	case CB_COLOR6_VIEW:
	case CB_COLOR7_VIEW:
		tmp = (reg - CB_COLOR0_VIEW) / 0x3c;
		track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
		break;
	case CB_COLOR8_VIEW:
	case CB_COLOR9_VIEW:
	case CB_COLOR10_VIEW:
	case CB_COLOR11_VIEW:
		tmp = ((reg - CB_COLOR8_VIEW) / 0x1c) + 8;
		track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
		break;
	case CB_COLOR0_INFO:
	case CB_COLOR1_INFO:
	case CB_COLOR2_INFO:
	case CB_COLOR3_INFO:
	case CB_COLOR4_INFO:
	case CB_COLOR5_INFO:
	case CB_COLOR6_INFO:
	case CB_COLOR7_INFO:
		tmp = (reg - CB_COLOR0_INFO) / 0x3c;
		track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
		if (!p->keep_tiling_flags) {
			r = evergreen_cs_packet_next_reloc(p, &reloc);
			if (r) {
				dev_warn(p->dev, "bad SET_CONTEXT_REG "
					 "0x%04X\n", reg);
				return -EINVAL;
			}
			ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_array_mode(reloc->lobj.tiling_flags));
			track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_array_mode(reloc->lobj.tiling_flags));
		}
		break;
	case CB_COLOR8_INFO:
	case CB_COLOR9_INFO:
	case CB_COLOR10_INFO:
	case CB_COLOR11_INFO:
		tmp = ((reg - CB_COLOR8_INFO) / 0x1c) + 8;
		track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
		if (!p->keep_tiling_flags) {
			r = evergreen_cs_packet_next_reloc(p, &reloc);
			if (r) {
				dev_warn(p->dev, "bad SET_CONTEXT_REG "
					 "0x%04X\n", reg);
				return -EINVAL;
			}
			ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_array_mode(reloc->lobj.tiling_flags));
			track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_array_mode(reloc->lobj.tiling_flags));
		}
		break;
	case CB_COLOR0_PITCH:
	case CB_COLOR1_PITCH:
	case CB_COLOR2_PITCH:
	case CB_COLOR3_PITCH:
	case CB_COLOR4_PITCH:
	case CB_COLOR5_PITCH:
	case CB_COLOR6_PITCH:
	case CB_COLOR7_PITCH:
		tmp = (reg - CB_COLOR0_PITCH) / 0x3c;
		track->cb_color_pitch[tmp] = radeon_get_ib_value(p, idx);
		track->cb_color_pitch_idx[tmp] = idx;
		break;
	case CB_COLOR8_PITCH:
	case CB_COLOR9_PITCH:
	case CB_COLOR10_PITCH:
	case CB_COLOR11_PITCH:
		tmp = ((reg - CB_COLOR8_PITCH) / 0x1c) + 8;
		track->cb_color_pitch[tmp] = radeon_get_ib_value(p, idx);
		track->cb_color_pitch_idx[tmp] = idx;
		break;
	case CB_COLOR0_SLICE:
	case CB_COLOR1_SLICE:
	case CB_COLOR2_SLICE:
	case CB_COLOR3_SLICE:
	case CB_COLOR4_SLICE:
	case CB_COLOR5_SLICE:
	case CB_COLOR6_SLICE:
	case CB_COLOR7_SLICE:
		tmp = (reg - CB_COLOR0_SLICE) / 0x3c;
		track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx);
		track->cb_color_slice_idx[tmp] = idx;
		break;
	case CB_COLOR8_SLICE:
	case CB_COLOR9_SLICE:
	case CB_COLOR10_SLICE:
	case CB_COLOR11_SLICE:
		tmp = ((reg - CB_COLOR8_SLICE) / 0x1c) + 8;
		track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx);
		track->cb_color_slice_idx[tmp] = idx;
		break;
	case CB_COLOR0_ATTRIB:
	case CB_COLOR1_ATTRIB:
	case CB_COLOR2_ATTRIB:
	case CB_COLOR3_ATTRIB:
	case CB_COLOR4_ATTRIB:
	case CB_COLOR5_ATTRIB:
	case CB_COLOR6_ATTRIB:
	case CB_COLOR7_ATTRIB:
	case CB_COLOR8_ATTRIB:
	case CB_COLOR9_ATTRIB:
	case CB_COLOR10_ATTRIB:
	case CB_COLOR11_ATTRIB:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
			ib[idx] |= CB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
			ib[idx] |= CB_TILE_SPLIT(evergreen_cs_get_tile_split(track->row_size));
		}
		break;
	case CB_COLOR0_DIM:
	case CB_COLOR1_DIM:
	case CB_COLOR2_DIM:
	case CB_COLOR3_DIM:
	case CB_COLOR4_DIM:
	case CB_COLOR5_DIM:
	case CB_COLOR6_DIM:
	case CB_COLOR7_DIM:
		tmp = (reg - CB_COLOR0_DIM) / 0x3c;
		track->cb_color_dim[tmp] = radeon_get_ib_value(p, idx);
		track->cb_color_dim_idx[tmp] = idx;
		break;
	case CB_COLOR8_DIM:
	case CB_COLOR9_DIM:
	case CB_COLOR10_DIM:
	case CB_COLOR11_DIM:
		tmp = ((reg - CB_COLOR8_DIM) / 0x1c) + 8;
		track->cb_color_dim[tmp] = radeon_get_ib_value(p, idx);
		track->cb_color_dim_idx[tmp] = idx;
		break;
	case CB_COLOR0_FMASK:
	case CB_COLOR1_FMASK:
	case CB_COLOR2_FMASK:
	case CB_COLOR3_FMASK:
	case CB_COLOR4_FMASK:
	case CB_COLOR5_FMASK:
	case CB_COLOR6_FMASK:
	case CB_COLOR7_FMASK:
		tmp = (reg - CB_COLOR0_FMASK) / 0x3c;
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->cb_color_fmask_bo[tmp] = reloc->robj;
		break;
	case CB_COLOR0_CMASK:
	case CB_COLOR1_CMASK:
	case CB_COLOR2_CMASK:
	case CB_COLOR3_CMASK:
	case CB_COLOR4_CMASK:
	case CB_COLOR5_CMASK:
	case CB_COLOR6_CMASK:
	case CB_COLOR7_CMASK:
		tmp = (reg - CB_COLOR0_CMASK) / 0x3c;
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->cb_color_cmask_bo[tmp] = reloc->robj;
		break;
	case CB_COLOR0_FMASK_SLICE:
	case CB_COLOR1_FMASK_SLICE:
	case CB_COLOR2_FMASK_SLICE:
	case CB_COLOR3_FMASK_SLICE:
	case CB_COLOR4_FMASK_SLICE:
	case CB_COLOR5_FMASK_SLICE:
	case CB_COLOR6_FMASK_SLICE:
	case CB_COLOR7_FMASK_SLICE:
		tmp = (reg - CB_COLOR0_FMASK_SLICE) / 0x3c;
		track->cb_color_fmask_slice[tmp] = radeon_get_ib_value(p, idx);
		break;
	case CB_COLOR0_CMASK_SLICE:
	case CB_COLOR1_CMASK_SLICE:
	case CB_COLOR2_CMASK_SLICE:
	case CB_COLOR3_CMASK_SLICE:
	case CB_COLOR4_CMASK_SLICE:
	case CB_COLOR5_CMASK_SLICE:
	case CB_COLOR6_CMASK_SLICE:
	case CB_COLOR7_CMASK_SLICE:
		tmp = (reg - CB_COLOR0_CMASK_SLICE) / 0x3c;
		track->cb_color_cmask_slice[tmp] = radeon_get_ib_value(p, idx);
		break;
	case CB_COLOR0_BASE:
	case CB_COLOR1_BASE:
	case CB_COLOR2_BASE:
	case CB_COLOR3_BASE:
	case CB_COLOR4_BASE:
	case CB_COLOR5_BASE:
	case CB_COLOR6_BASE:
	case CB_COLOR7_BASE:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		tmp = (reg - CB_COLOR0_BASE) / 0x3c;
		track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx);
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->cb_color_base_last[tmp] = ib[idx];
		track->cb_color_bo[tmp] = reloc->robj;
		break;
	case CB_COLOR8_BASE:
	case CB_COLOR9_BASE:
	case CB_COLOR10_BASE:
	case CB_COLOR11_BASE:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		tmp = ((reg - CB_COLOR8_BASE) / 0x1c) + 8;
		track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx);
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->cb_color_base_last[tmp] = ib[idx];
		track->cb_color_bo[tmp] = reloc->robj;
		break;
	case CB_IMMED0_BASE:
	case CB_IMMED1_BASE:
	case CB_IMMED2_BASE:
	case CB_IMMED3_BASE:
	case CB_IMMED4_BASE:
	case CB_IMMED5_BASE:
	case CB_IMMED6_BASE:
	case CB_IMMED7_BASE:
	case CB_IMMED8_BASE:
	case CB_IMMED9_BASE:
	case CB_IMMED10_BASE:
	case CB_IMMED11_BASE:
	case DB_HTILE_DATA_BASE:
	case SQ_PGM_START_FS:
	case SQ_PGM_START_ES:
	case SQ_PGM_START_VS:
	case SQ_PGM_START_GS:
	case SQ_PGM_START_PS:
	case SQ_PGM_START_HS:
	case SQ_PGM_START_LS:
	case SQ_CONST_MEM_BASE:
	case SQ_ALU_CONST_CACHE_GS_0:
	case SQ_ALU_CONST_CACHE_GS_1:
	case SQ_ALU_CONST_CACHE_GS_2:
	case SQ_ALU_CONST_CACHE_GS_3:
	case SQ_ALU_CONST_CACHE_GS_4:
	case SQ_ALU_CONST_CACHE_GS_5:
	case SQ_ALU_CONST_CACHE_GS_6:
	case SQ_ALU_CONST_CACHE_GS_7:
	case SQ_ALU_CONST_CACHE_GS_8:
	case SQ_ALU_CONST_CACHE_GS_9:
	case SQ_ALU_CONST_CACHE_GS_10:
	case SQ_ALU_CONST_CACHE_GS_11:
	case SQ_ALU_CONST_CACHE_GS_12:
	case SQ_ALU_CONST_CACHE_GS_13:
	case SQ_ALU_CONST_CACHE_GS_14:
	case SQ_ALU_CONST_CACHE_GS_15:
	case SQ_ALU_CONST_CACHE_PS_0:
	case SQ_ALU_CONST_CACHE_PS_1:
	case SQ_ALU_CONST_CACHE_PS_2:
	case SQ_ALU_CONST_CACHE_PS_3:
	case SQ_ALU_CONST_CACHE_PS_4:
	case SQ_ALU_CONST_CACHE_PS_5:
	case SQ_ALU_CONST_CACHE_PS_6:
	case SQ_ALU_CONST_CACHE_PS_7:
	case SQ_ALU_CONST_CACHE_PS_8:
	case SQ_ALU_CONST_CACHE_PS_9:
	case SQ_ALU_CONST_CACHE_PS_10:
	case SQ_ALU_CONST_CACHE_PS_11:
	case SQ_ALU_CONST_CACHE_PS_12:
	case SQ_ALU_CONST_CACHE_PS_13:
	case SQ_ALU_CONST_CACHE_PS_14:
	case SQ_ALU_CONST_CACHE_PS_15:
	case SQ_ALU_CONST_CACHE_VS_0:
	case SQ_ALU_CONST_CACHE_VS_1:
	case SQ_ALU_CONST_CACHE_VS_2:
	case SQ_ALU_CONST_CACHE_VS_3:
	case SQ_ALU_CONST_CACHE_VS_4:
	case SQ_ALU_CONST_CACHE_VS_5:
	case SQ_ALU_CONST_CACHE_VS_6:
	case SQ_ALU_CONST_CACHE_VS_7:
	case SQ_ALU_CONST_CACHE_VS_8:
	case SQ_ALU_CONST_CACHE_VS_9:
	case SQ_ALU_CONST_CACHE_VS_10:
	case SQ_ALU_CONST_CACHE_VS_11:
	case SQ_ALU_CONST_CACHE_VS_12:
	case SQ_ALU_CONST_CACHE_VS_13:
	case SQ_ALU_CONST_CACHE_VS_14:
	case SQ_ALU_CONST_CACHE_VS_15:
	case SQ_ALU_CONST_CACHE_HS_0:
	case SQ_ALU_CONST_CACHE_HS_1:
	case SQ_ALU_CONST_CACHE_HS_2:
	case SQ_ALU_CONST_CACHE_HS_3:
	case SQ_ALU_CONST_CACHE_HS_4:
	case SQ_ALU_CONST_CACHE_HS_5:
	case SQ_ALU_CONST_CACHE_HS_6:
	case SQ_ALU_CONST_CACHE_HS_7:
	case SQ_ALU_CONST_CACHE_HS_8:
	case SQ_ALU_CONST_CACHE_HS_9:
	case SQ_ALU_CONST_CACHE_HS_10:
	case SQ_ALU_CONST_CACHE_HS_11:
	case SQ_ALU_CONST_CACHE_HS_12:
	case SQ_ALU_CONST_CACHE_HS_13:
	case SQ_ALU_CONST_CACHE_HS_14:
	case SQ_ALU_CONST_CACHE_HS_15:
	case SQ_ALU_CONST_CACHE_LS_0:
	case SQ_ALU_CONST_CACHE_LS_1:
	case SQ_ALU_CONST_CACHE_LS_2:
	case SQ_ALU_CONST_CACHE_LS_3:
	case SQ_ALU_CONST_CACHE_LS_4:
	case SQ_ALU_CONST_CACHE_LS_5:
	case SQ_ALU_CONST_CACHE_LS_6:
	case SQ_ALU_CONST_CACHE_LS_7:
	case SQ_ALU_CONST_CACHE_LS_8:
	case SQ_ALU_CONST_CACHE_LS_9:
	case SQ_ALU_CONST_CACHE_LS_10:
	case SQ_ALU_CONST_CACHE_LS_11:
	case SQ_ALU_CONST_CACHE_LS_12:
	case SQ_ALU_CONST_CACHE_LS_13:
	case SQ_ALU_CONST_CACHE_LS_14:
	case SQ_ALU_CONST_CACHE_LS_15:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		break;
	case SX_MEMORY_EXPORT_BASE:
		if (p->rdev->family >= CHIP_CAYMAN) {
			dev_warn(p->dev, "bad SET_CONFIG_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONFIG_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		break;
	case CAYMAN_SX_SCATTER_EXPORT_BASE:
		if (p->rdev->family < CHIP_CAYMAN) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		break;
	default:
		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
		return -EINVAL;
	}
	return 0;
}

/**
 * evergreen_check_texture_resource() - check if the texture resource is valid
 * @p:		parser structure holding parsing context
 * @idx:	index into the cs buffer
 * @texture:	texture's bo structure
 * @mipmap:	mipmap's bo structure
 *
 * This function will check that the resource has valid fields and that
 * the texture and mipmap bo objects are big enough to cover this resource.
 */
static int evergreen_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
					    struct radeon_bo *texture,
					    struct radeon_bo *mipmap)
{
	/* XXX fill in */
	return 0;
}

static int evergreen_packet3_check(struct radeon_cs_parser *p,
				   struct radeon_cs_packet *pkt)
{
	struct radeon_cs_reloc *reloc;
	struct evergreen_cs_track *track;
	volatile u32 *ib;
	unsigned idx;
	unsigned i;
	unsigned start_reg, end_reg, reg;
	int r;
	u32 idx_value;

	track = (struct evergreen_cs_track *)p->track;
	ib = p->ib->ptr;
	idx = pkt->idx + 1;
	idx_value = radeon_get_ib_value(p, idx);
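	/* idx points at the first dword after the PACKET3 header;
	 * idx_value caches that dword.
	 */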
	switch (pkt->opcode) {
	case PACKET3_SET_PREDICATION:
	{
		int pred_op;
		int tmp;

		if (pkt->count != 1) {
			DRM_ERROR("bad SET PREDICATION\n");
			return -EINVAL;
		}
		tmp = radeon_get_ib_value(p, idx + 1);
		pred_op = (tmp >> 16) & 0x7;
		/* for the clear predicate operation */
		if (pred_op == 0)
			return 0;
		if (pred_op > 2) {
			DRM_ERROR("bad SET PREDICATION operation %d\n", pred_op);
			return -EINVAL;
		}
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad SET PREDICATION\n");
			return -EINVAL;
		}
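		/* the predicate source address is split across two dwords:
		 * low 32 bits, then the upper 8 bits (40-bit GPU address)
		 */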
		ib[idx + 0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
		ib[idx + 1] = tmp + (upper_32_bits(reloc->lobj.gpu_offset) & 0xff);
	}
	break;
	case PACKET3_CONTEXT_CONTROL:
		if (pkt->count != 1) {
			DRM_ERROR("bad CONTEXT_CONTROL\n");
			return -EINVAL;
		}
		break;
	case PACKET3_INDEX_TYPE:
	case PACKET3_NUM_INSTANCES:
	case PACKET3_CLEAR_STATE:
		if (pkt->count) {
			DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES/CLEAR_STATE\n");
			return -EINVAL;
		}
		break;
	case CAYMAN_PACKET3_DEALLOC_STATE:
		if (p->rdev->family < CHIP_CAYMAN) {
			DRM_ERROR("bad PACKET3_DEALLOC_STATE\n");
			return -EINVAL;
		}
		if (pkt->count) {
			DRM_ERROR("bad PACKET3_DEALLOC_STATE\n");
			return -EINVAL;
		}
		break;
	case PACKET3_INDEX_BASE:
		if (pkt->count != 1) {
			DRM_ERROR("bad INDEX_BASE\n");
			return -EINVAL;
		}
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad INDEX_BASE\n");
			return -EINVAL;
		}
		ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
		ib[idx+1] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX:
		if (pkt->count != 3) {
			DRM_ERROR("bad DRAW_INDEX\n");
			return -EINVAL;
		}
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad DRAW_INDEX\n");
			return -EINVAL;
		}
		ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
		ib[idx+1] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX_2:
		if (pkt->count != 4) {
			DRM_ERROR("bad DRAW_INDEX_2\n");
			return -EINVAL;
		}
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad DRAW_INDEX_2\n");
			return -EINVAL;
		}
		ib[idx+1] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
		ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX_AUTO:
		if (pkt->count != 1) {
			DRM_ERROR("bad DRAW_INDEX_AUTO\n");
			return -EINVAL;
		}
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX_MULTI_AUTO:
		if (pkt->count != 2) {
			DRM_ERROR("bad DRAW_INDEX_MULTI_AUTO\n");
			return -EINVAL;
		}
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX_IMMD:
		if (pkt->count < 2) {
			DRM_ERROR("bad DRAW_INDEX_IMMD\n");
			return -EINVAL;
		}
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX_OFFSET:
		if (pkt->count != 2) {
			DRM_ERROR("bad DRAW_INDEX_OFFSET\n");
			return -EINVAL;
		}
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX_OFFSET_2:
		if (pkt->count != 3) {
			DRM_ERROR("bad DRAW_INDEX_OFFSET_2\n");
			return -EINVAL;
		}
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	case PACKET3_DISPATCH_DIRECT:
		if (pkt->count != 3) {
			DRM_ERROR("bad DISPATCH_DIRECT\n");
			return -EINVAL;
		}
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
			return r;
		}
		break;
	case PACKET3_DISPATCH_INDIRECT:
		if (pkt->count != 1) {
			DRM_ERROR("bad DISPATCH_INDIRECT\n");
			return -EINVAL;
		}
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad DISPATCH_INDIRECT\n");
			return -EINVAL;
		}
		ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	case PACKET3_WAIT_REG_MEM:
		if (pkt->count != 5) {
			DRM_ERROR("bad WAIT_REG_MEM\n");
			return -EINVAL;
		}
		/* bit 4 is reg (0) or mem (1) */
		if (idx_value & 0x10) {
			r = evergreen_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad WAIT_REG_MEM\n");
				return -EINVAL;
			}
			ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
			ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		}
		break;
	case PACKET3_SURFACE_SYNC:
		if (pkt->count != 3) {
			DRM_ERROR("bad SURFACE_SYNC\n");
			return -EINVAL;
		}
		/* 0xffffffff/0x0 is flush all cache flag */
		if (radeon_get_ib_value(p, idx + 1) != 0xffffffff ||
		    radeon_get_ib_value(p, idx + 2) != 0) {
			r = evergreen_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad SURFACE_SYNC\n");
				return -EINVAL;
			}
			ib[idx+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		}
		break;
	case PACKET3_EVENT_WRITE:
		if (pkt->count != 2 && pkt->count != 0) {
			DRM_ERROR("bad EVENT_WRITE\n");
			return -EINVAL;
		}
		if (pkt->count) {
			r = evergreen_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad EVENT_WRITE\n");
				return -EINVAL;
			}
			ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
			ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		}
		break;
	case PACKET3_EVENT_WRITE_EOP:
		if (pkt->count != 4) {
			DRM_ERROR("bad EVENT_WRITE_EOP\n");
			return -EINVAL;
		}
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad EVENT_WRITE_EOP\n");
			return -EINVAL;
		}
		ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
		ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		break;
	case PACKET3_EVENT_WRITE_EOS:
		if (pkt->count != 3) {
			DRM_ERROR("bad EVENT_WRITE_EOS\n");
			return -EINVAL;
		}
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad EVENT_WRITE_EOS\n");
			return -EINVAL;
		}
		ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
		ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		break;
	case PACKET3_SET_CONFIG_REG:
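		/* payload dword 0 (idx_value) is the register offset in
		 * dwords from the start of the config-reg range
		 */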
		start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CONFIG_REG_START) ||
		    (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
		    (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
			DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
			return -EINVAL;
		}
		for (i = 0; i < pkt->count; i++) {
			reg = start_reg + (4 * i);
			r = evergreen_cs_check_reg(p, reg, idx+1+i);
			if (r)
				return r;
		}
		break;
	case PACKET3_SET_CONTEXT_REG:
		start_reg = (idx_value << 2) + PACKET3_SET_CONTEXT_REG_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CONTEXT_REG_START) ||
		    (start_reg >= PACKET3_SET_CONTEXT_REG_END) ||
		    (end_reg >= PACKET3_SET_CONTEXT_REG_END)) {
			DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n");
			return -EINVAL;
		}
		for (i = 0; i < pkt->count; i++) {
			reg = start_reg + (4 * i);
			r = evergreen_cs_check_reg(p, reg, idx+1+i);
			if (r)
				return r;
		}
		break;
	case PACKET3_SET_RESOURCE:
		if (pkt->count % 8) {
			DRM_ERROR("bad SET_RESOURCE\n");
			return -EINVAL;
		}
		start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_RESOURCE_START) ||
		    (start_reg >= PACKET3_SET_RESOURCE_END) ||
		    (end_reg >= PACKET3_SET_RESOURCE_END)) {
			DRM_ERROR("bad SET_RESOURCE\n");
			return -EINVAL;
		}
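		/* each resource descriptor is 8 dwords; dword 7 carries the
		 * SQ constant type that distinguishes textures from vertex
		 * buffers
		 */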
		for (i = 0; i < (pkt->count / 8); i++) {
			struct radeon_bo *texture, *mipmap;
			u32 size, offset;

			switch (G__SQ_CONSTANT_TYPE(radeon_get_ib_value(p, idx+1+(i*8)+7))) {
			case SQ_TEX_VTX_VALID_TEXTURE:
				/* tex base */
				r = evergreen_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE (tex)\n");
					return -EINVAL;
				}
				ib[idx+1+(i*8)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
				if (!p->keep_tiling_flags) {
					ib[idx+1+(i*8)+1] |=
						TEX_ARRAY_MODE(evergreen_cs_get_array_mode(reloc->lobj.tiling_flags));
					if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
						ib[idx+1+(i*8)+6] |=
							TEX_TILE_SPLIT(evergreen_cs_get_tile_split(track->row_size));
						ib[idx+1+(i*8)+7] |=
							TEX_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
					}
				}
				texture = reloc->robj;
				/* tex mip base */
				r = evergreen_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE (tex)\n");
					return -EINVAL;
				}
				ib[idx+1+(i*8)+3] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
				mipmap = reloc->robj;
				r = evergreen_check_texture_resource(p, idx+1+(i*8),
								     texture, mipmap);
				if (r)
					return r;
				break;
			case SQ_TEX_VTX_VALID_BUFFER:
				/* vtx base */
				r = evergreen_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE (vtx)\n");
					return -EINVAL;
				}
				offset = radeon_get_ib_value(p, idx+1+(i*8)+0);
				size = radeon_get_ib_value(p, idx+1+(i*8)+1);
				if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) {
					/* force size to size of the buffer */
					dev_warn(p->dev, "vbo resource seems too big for the bo\n");
					ib[idx+1+(i*8)+1] = radeon_bo_size(reloc->robj);
				}
				ib[idx+1+(i*8)+0] += (u32)((reloc->lobj.gpu_offset) & 0xffffffff);
				ib[idx+1+(i*8)+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
				break;
			case SQ_TEX_VTX_INVALID_TEXTURE:
			case SQ_TEX_VTX_INVALID_BUFFER:
			default:
				DRM_ERROR("bad SET_RESOURCE\n");
				return -EINVAL;
			}
		}
		break;
	case PACKET3_SET_ALU_CONST:
		/* XXX fix me ALU const buffers only */
		break;
	case PACKET3_SET_BOOL_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_BOOL_CONST_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_BOOL_CONST_START) ||
		    (start_reg >= PACKET3_SET_BOOL_CONST_END) ||
		    (end_reg >= PACKET3_SET_BOOL_CONST_END)) {
			DRM_ERROR("bad SET_BOOL_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_LOOP_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_LOOP_CONST_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_LOOP_CONST_START) ||
		    (start_reg >= PACKET3_SET_LOOP_CONST_END) ||
		    (end_reg >= PACKET3_SET_LOOP_CONST_END)) {
			DRM_ERROR("bad SET_LOOP_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_CTL_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_CTL_CONST_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CTL_CONST_START) ||
		    (start_reg >= PACKET3_SET_CTL_CONST_END) ||
		    (end_reg >= PACKET3_SET_CTL_CONST_END)) {
			DRM_ERROR("bad SET_CTL_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_SAMPLER:
		if (pkt->count % 3) {
			DRM_ERROR("bad SET_SAMPLER\n");
			return -EINVAL;
		}
		start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_SAMPLER_START) ||
		    (start_reg >= PACKET3_SET_SAMPLER_END) ||
		    (end_reg >= PACKET3_SET_SAMPLER_END)) {
			DRM_ERROR("bad SET_SAMPLER\n");
			return -EINVAL;
		}
		break;
	case PACKET3_NOP:
		break;
	default:
		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
		return -EINVAL;
	}
	return 0;
}

int evergreen_cs_parse(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet pkt;
	struct evergreen_cs_track *track;
	u32 tmp;
	int r;

	if (p->track == NULL) {
		/* initialize tracker, we are in kms */
		track = kzalloc(sizeof(*track), GFP_KERNEL);
		if (track == NULL)
			return -ENOMEM;
		evergreen_cs_track_init(track);
		if (p->rdev->family >= CHIP_CAYMAN)
			tmp = p->rdev->config.cayman.tile_config;
		else
			tmp = p->rdev->config.evergreen.tile_config;
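		/* tile_config bitfields as decoded below: [3:0] pipes,
		 * [7:4] banks, [11:8] group size, [15:12] row size
		 */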
		switch (tmp & 0xf) {
		case 0:
			track->npipes = 1;
			break;
		case 1:
		default:
			track->npipes = 2;
			break;
		case 2:
			track->npipes = 4;
			break;
		case 3:
			track->npipes = 8;
			break;
		}
		switch ((tmp & 0xf0) >> 4) {
		case 0:
			track->nbanks = 4;
			break;
		case 1:
		default:
			track->nbanks = 8;
			break;
		case 2:
			track->nbanks = 16;
			break;
		}
		switch ((tmp & 0xf00) >> 8) {
		case 0:
			track->group_size = 256;
			break;
		case 1:
		default:
			track->group_size = 512;
			break;
		}
		switch ((tmp & 0xf000) >> 12) {
		case 0:
			track->row_size = 1;
			break;
		case 1:
		default:
			track->row_size = 2;
			break;
		case 2:
			track->row_size = 4;
			break;
		}
		p->track = track;
	}
	do {
		r = evergreen_cs_packet_parse(p, &pkt, p->idx);
		if (r) {
			kfree(p->track);
			p->track = NULL;
			return r;
		}
		p->idx += pkt.count + 2;
		switch (pkt.type) {
		case PACKET_TYPE0:
			r = evergreen_cs_parse_packet0(p, &pkt);
			break;
		case PACKET_TYPE2:
			break;
		case PACKET_TYPE3:
			r = evergreen_packet3_check(p, &pkt);
			break;
		default:
			DRM_ERROR("Unknown packet type %d !\n", pkt.type);
			kfree(p->track);
			p->track = NULL;
			return -EINVAL;
		}
		if (r) {
			kfree(p->track);
			p->track = NULL;
			return r;
		}
	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
#if 0
	for (r = 0; r < p->ib->length_dw; r++) {
		printk(KERN_INFO "%05d 0x%08X\n", r, p->ib->ptr[r]);
		mdelay(1);
	}
#endif
	kfree(p->track);
	p->track = NULL;
	return 0;
}