  1. /*
  2. * Copyright 2008 Advanced Micro Devices, Inc.
  3. * Copyright 2008 Red Hat Inc.
  4. * Copyright 2009 Jerome Glisse.
  5. *
  6. * Permission is hereby granted, free of charge, to any person obtaining a
  7. * copy of this software and associated documentation files (the "Software"),
  8. * to deal in the Software without restriction, including without limitation
  9. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  10. * and/or sell copies of the Software, and to permit persons to whom the
  11. * Software is furnished to do so, subject to the following conditions:
  12. *
  13. * The above copyright notice and this permission notice shall be included in
  14. * all copies or substantial portions of the Software.
  15. *
  16. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  19. * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  20. * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  21. * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  22. * OTHER DEALINGS IN THE SOFTWARE.
  23. *
  24. * Authors: Dave Airlie
  25. * Alex Deucher
  26. * Jerome Glisse
  27. */
  28. #include <linux/seq_file.h>
  29. #include "drmP.h"
  30. #include "drm.h"
  31. #include "radeon_drm.h"
  32. #include "radeon_microcode.h"
  33. #include "radeon_reg.h"
  34. #include "radeon.h"
/* This file gathers functions specific to:
 * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
 *
 * Some of these functions might be used by newer ASICs.
 */
  40. void r100_hdp_reset(struct radeon_device *rdev);
  41. void r100_gpu_init(struct radeon_device *rdev);
  42. int r100_gui_wait_for_idle(struct radeon_device *rdev);
  43. int r100_mc_wait_for_idle(struct radeon_device *rdev);
  44. void r100_gpu_wait_for_vsync(struct radeon_device *rdev);
  45. void r100_gpu_wait_for_vsync2(struct radeon_device *rdev);
  46. int r100_debugfs_mc_info_init(struct radeon_device *rdev);
  47. /*
  48. * PCI GART
  49. */
/*
 * Flush the PCI GART TLB.  Intentionally empty on r100-class hardware:
 * there is no explicit flush register exposed here.
 */
void r100_pci_gart_tlb_flush(struct radeon_device *rdev)
{
	/* TODO: can we do somethings here ? */
	/* It seems hw only cache one entry so we should discard this
	 * entry otherwise if first GPU GART read hit this entry it
	 * could end up in wrong address. */
}
/*
 * Enable the on-chip PCI GART: allocate the page table in system RAM
 * (one 32-bit entry per GPU page), program the aperture range and the
 * table base, then turn on translation.
 *
 * Returns 0 on success or a negative error code from the gart helpers.
 */
int r100_pci_gart_enable(struct radeon_device *rdev)
{
	uint32_t tmp;
	int r;

	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r) {
		return r;
	}
	if (rdev->gart.table.ram.ptr == NULL) {
		/* 4 bytes per entry, one entry per GPU page */
		rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
		r = radeon_gart_table_ram_alloc(rdev);
		if (r) {
			return r;
		}
	}
	/* discard memory request outside of configured range */
	tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
	WREG32(RADEON_AIC_CNTL, tmp);
	/* set address range for PCI address translate */
	WREG32(RADEON_AIC_LO_ADDR, rdev->mc.gtt_location);
	tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
	WREG32(RADEON_AIC_HI_ADDR, tmp);
	/* Enable bus mastering */
	tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
	WREG32(RADEON_BUS_CNTL, tmp);
	/* set PCI GART page-table base address */
	WREG32(RADEON_AIC_PT_BASE, rdev->gart.table_addr);
	/* enable translation only after the table base is programmed */
	tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN;
	WREG32(RADEON_AIC_CNTL, tmp);
	r100_pci_gart_tlb_flush(rdev);
	rdev->gart.ready = true;
	return 0;
}
/*
 * Disable PCI GART translation and clear the aperture range, while
 * keeping out-of-range accesses discarded.
 */
void r100_pci_gart_disable(struct radeon_device *rdev)
{
	uint32_t tmp;

	/* discard memory request outside of configured range */
	tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
	/* single write both sets the discard bit and clears TRANSLATE_EN */
	WREG32(RADEON_AIC_CNTL, tmp & ~RADEON_PCIGART_TRANSLATE_EN);
	WREG32(RADEON_AIC_LO_ADDR, 0);
	WREG32(RADEON_AIC_HI_ADDR, 0);
}
  100. int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
  101. {
  102. if (i < 0 || i > rdev->gart.num_gpu_pages) {
  103. return -EINVAL;
  104. }
  105. rdev->gart.table.ram.ptr[i] = cpu_to_le32((uint32_t)addr);
  106. return 0;
  107. }
  108. int r100_gart_enable(struct radeon_device *rdev)
  109. {
  110. if (rdev->flags & RADEON_IS_AGP) {
  111. r100_pci_gart_disable(rdev);
  112. return 0;
  113. }
  114. return r100_pci_gart_enable(rdev);
  115. }
  116. /*
  117. * MC
  118. */
/*
 * Stop all memory-controller clients (overlay scaler and both CRTCs)
 * before reprogramming the MC, waiting for vsync on each CRTC so the
 * disable lands between frames.  Display output is blanked via
 * CRTC_DISPLAY_DIS; the final udelay lets in-flight requests drain.
 */
void r100_mc_disable_clients(struct radeon_device *rdev)
{
	uint32_t ov0_scale_cntl, crtc_ext_cntl, crtc_gen_cntl, crtc2_gen_cntl;

	/* FIXME: is this function correct for rs100,rs200,rs300 ? */
	if (r100_gui_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait GUI idle while "
		       "programming pipes. Bad things might happen.\n");
	}
	/* stop display and memory access */
	ov0_scale_cntl = RREG32(RADEON_OV0_SCALE_CNTL);
	WREG32(RADEON_OV0_SCALE_CNTL, ov0_scale_cntl & ~RADEON_SCALER_ENABLE);
	crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
	WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl | RADEON_CRTC_DISPLAY_DIS);
	crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL);
	/* reprogram CRTC1 only after the next vertical blank */
	r100_gpu_wait_for_vsync(rdev);
	WREG32(RADEON_CRTC_GEN_CNTL,
	       (crtc_gen_cntl & ~(RADEON_CRTC_CUR_EN | RADEON_CRTC_ICON_EN)) |
	       RADEON_CRTC_DISP_REQ_EN_B | RADEON_CRTC_EXT_DISP_EN);
	/* second CRTC only exists on dual-CRTC parts */
	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
		crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
		r100_gpu_wait_for_vsync2(rdev);
		WREG32(RADEON_CRTC2_GEN_CNTL,
		       (crtc2_gen_cntl &
		        ~(RADEON_CRTC2_CUR_EN | RADEON_CRTC2_ICON_EN)) |
		       RADEON_CRTC2_DISP_REQ_EN_B);
	}
	/* let outstanding memory requests drain */
	udelay(500);
}
/*
 * Program the memory-controller address map: VRAM window, AGP window
 * (or a disabled placeholder), and finally pulse an HDP soft reset so
 * the host-data-path picks up the new layout.
 */
void r100_mc_setup(struct radeon_device *rdev)
{
	uint32_t tmp;
	int r;

	r = r100_debugfs_mc_info_init(rdev);
	if (r) {
		DRM_ERROR("Failed to register debugfs file for R100 MC !\n");
	}
	/* Write VRAM size in case we are limiting it */
	WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size);
	/* FB window: TOP/START fields are in 64KB units (>> 16) */
	tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1;
	tmp = REG_SET(RADEON_MC_FB_TOP, tmp >> 16);
	tmp |= REG_SET(RADEON_MC_FB_START, rdev->mc.vram_location >> 16);
	WREG32(RADEON_MC_FB_LOCATION, tmp);
	/* Enable bus mastering */
	tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
	WREG32(RADEON_BUS_CNTL, tmp);
	if (rdev->flags & RADEON_IS_AGP) {
		tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
		tmp = REG_SET(RADEON_MC_AGP_TOP, tmp >> 16);
		tmp |= REG_SET(RADEON_MC_AGP_START, rdev->mc.gtt_location >> 16);
		WREG32(RADEON_MC_AGP_LOCATION, tmp);
		WREG32(RADEON_AGP_BASE, rdev->mc.agp_base);
	} else {
		/* park the AGP window out of the way when unused */
		WREG32(RADEON_MC_AGP_LOCATION, 0x0FFFFFFF);
		WREG32(RADEON_AGP_BASE, 0);
	}
	tmp = RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL;
	tmp |= (7 << 28);
	/* pulse HDP soft reset + read-buffer invalidate, then release;
	 * the dummy reads flush the posted writes */
	WREG32(RADEON_HOST_PATH_CNTL, tmp | RADEON_HDP_SOFT_RESET | RADEON_HDP_READ_BUFFER_INVALIDATE);
	(void)RREG32(RADEON_HOST_PATH_CNTL);
	WREG32(RADEON_HOST_PATH_CNTL, tmp);
	(void)RREG32(RADEON_HOST_PATH_CNTL);
}
/*
 * Initialize the memory controller: disable GART, decide where VRAM
 * and GTT live in the GPU address space (AGP base when available,
 * otherwise left for radeon_mc_setup() to place), quiesce MC clients
 * and program the new layout.  Returns 0 or a negative error code.
 */
int r100_mc_init(struct radeon_device *rdev)
{
	int r;

	if (r100_debugfs_rbbm_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for RBBM !\n");
	}
	r100_gpu_init(rdev);
	/* Disable gart which also disable out of gart access */
	r100_pci_gart_disable(rdev);
	/* Setup GPU memory space; 0xFFFFFFFF = "let radeon_mc_setup pick" */
	rdev->mc.vram_location = 0xFFFFFFFFUL;
	rdev->mc.gtt_location = 0xFFFFFFFFUL;
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r) {
			/* fall back to PCI GART with the module-param size */
			printk(KERN_WARNING "[drm] Disabling AGP\n");
			rdev->flags &= ~RADEON_IS_AGP;
			rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
		} else {
			rdev->mc.gtt_location = rdev->mc.agp_base;
		}
	}
	r = radeon_mc_setup(rdev);
	if (r) {
		return r;
	}
	/* MC must be idle before reprogramming the address map */
	r100_mc_disable_clients(rdev);
	if (r100_mc_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait MC idle while "
		       "programming pipes. Bad things might happen.\n");
	}
	r100_mc_setup(rdev);
	return 0;
}
/*
 * Tear down the MC-related GART state: disable translation first so
 * the hardware stops referencing the table, then free it.
 */
void r100_mc_fini(struct radeon_device *rdev)
{
	r100_pci_gart_disable(rdev);
	radeon_gart_table_ram_free(rdev);
	radeon_gart_fini(rdev);
}
  221. /*
  222. * Fence emission
  223. */
/*
 * Emit a fence on the CP ring: wait for the 2D/3D engines to go idle
 * and clean, write the fence sequence number to the driver's scratch
 * register, then fire the software interrupt so waiters wake up.
 */
void r100_fence_ring_emit(struct radeon_device *rdev,
			  struct radeon_fence *fence)
{
	/* Who ever call radeon_fence_emit should call ring_lock and ask
	 * for enough space (today caller are ib schedule and buffer move) */
	/* Wait until IDLE & CLEAN */
	/* 0x1720 = RADEON_WAIT_UNTIL; bits 16|17 = 2D/3D idleclean
	 * (NOTE(review): raw offset, presumably matches RADEON_WAIT_UNTIL
	 * in radeon_reg.h — confirm) */
	radeon_ring_write(rdev, PACKET0(0x1720, 0));
	radeon_ring_write(rdev, (1 << 16) | (1 << 17));
	/* Emit fence sequence & fire IRQ */
	radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0));
	radeon_ring_write(rdev, fence->seq);
	radeon_ring_write(rdev, PACKET0(RADEON_GEN_INT_STATUS, 0));
	radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
}
  238. /*
  239. * Writeback
  240. */
/*
 * Set up the scratch/writeback buffer: a 4KB GTT object, pinned and
 * kernel-mapped, whose GPU address is handed to the CP scratch
 * registers.  Idempotent: the object is only created once.
 *
 * Returns 0 on success or a negative error code.
 * NOTE(review): on pin/kmap failure the object is not released here —
 * presumably r100_wb_fini() cleans up; confirm callers do that.
 */
int r100_wb_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->wb.wb_obj == NULL) {
		r = radeon_object_create(rdev, NULL, 4096,
					 true,
					 RADEON_GEM_DOMAIN_GTT,
					 false, &rdev->wb.wb_obj);
		if (r) {
			DRM_ERROR("radeon: failed to create WB buffer (%d).\n", r);
			return r;
		}
		r = radeon_object_pin(rdev->wb.wb_obj,
				      RADEON_GEM_DOMAIN_GTT,
				      &rdev->wb.gpu_addr);
		if (r) {
			DRM_ERROR("radeon: failed to pin WB buffer (%d).\n", r);
			return r;
		}
		r = radeon_object_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
		if (r) {
			DRM_ERROR("radeon: failed to map WB buffer (%d).\n", r);
			return r;
		}
	}
	/* raw CP scratch registers: addr, umsk, and mask enable
	 * (NOTE(review): 0x774/0x70C/0x770 offsets taken as-is — confirm
	 * against the register headers) */
	WREG32(0x774, rdev->wb.gpu_addr);
	WREG32(0x70C, rdev->wb.gpu_addr + 1024);
	WREG32(0x770, 0xff);
	return 0;
}
/*
 * Release the writeback buffer: unmap, unpin and drop the last
 * reference, then clear the cached pointers.  Safe to call when the
 * buffer was never created.
 */
void r100_wb_fini(struct radeon_device *rdev)
{
	if (rdev->wb.wb_obj) {
		radeon_object_kunmap(rdev->wb.wb_obj);
		radeon_object_unpin(rdev->wb.wb_obj);
		radeon_object_unref(&rdev->wb.wb_obj);
		rdev->wb.wb = NULL;
		rdev->wb.wb_obj = NULL;
	}
}
/*
 * Copy num_pages pages from src_offset to dst_offset with the 2D
 * blitter (BITBLT_MULTI packets, at most 8191 lines each), flush the
 * 2D destination cache, wait for idle, and optionally emit @fence.
 *
 * Offsets are GPU addresses; both must share PAGE_SIZE alignment since
 * the packet only carries bits >> 10.  Returns 0 or a negative errno.
 */
int r100_copy_blit(struct radeon_device *rdev,
		   uint64_t src_offset,
		   uint64_t dst_offset,
		   unsigned num_pages,
		   struct radeon_fence *fence)
{
	uint32_t cur_pages;
	uint32_t stride_bytes = PAGE_SIZE;	/* one page per blit line */
	uint32_t pitch;
	uint32_t stride_pixels;
	unsigned ndw;
	int num_loops;
	int r = 0;

	/* radeon limited to 16k stride */
	stride_bytes &= 0x3fff;
	/* radeon pitch is /64 */
	pitch = stride_bytes / 64;
	stride_pixels = stride_bytes / 4;	/* ARGB8888 = 4 bytes/pixel */
	/* each BITBLT_MULTI packet moves up to 8191 pages (lines) */
	num_loops = DIV_ROUND_UP(num_pages, 8191);

	/* Ask for enough room for blit + flush + fence */
	ndw = 64 + (10 * num_loops);
	r = radeon_ring_lock(rdev, ndw);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw);
		return -EINVAL;
	}
	while (num_pages > 0) {
		cur_pages = num_pages;
		if (cur_pages > 8191) {
			cur_pages = 8191;
		}
		num_pages -= cur_pages;
		/* pages are in Y direction - height
		   page width in X direction - width */
		radeon_ring_write(rdev, PACKET3(PACKET3_BITBLT_MULTI, 8));
		radeon_ring_write(rdev,
				  RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
				  RADEON_GMC_DST_PITCH_OFFSET_CNTL |
				  RADEON_GMC_SRC_CLIPPING |
				  RADEON_GMC_DST_CLIPPING |
				  RADEON_GMC_BRUSH_NONE |
				  (RADEON_COLOR_FORMAT_ARGB8888 << 8) |
				  RADEON_GMC_SRC_DATATYPE_COLOR |
				  RADEON_ROP3_S |
				  RADEON_DP_SRC_SOURCE_MEMORY |
				  RADEON_GMC_CLR_CMP_CNTL_DIS |
				  RADEON_GMC_WR_MSK_DIS);
		/* pitch/offset pairs: pitch in bits 22+, offset >> 10 */
		radeon_ring_write(rdev, (pitch << 22) | (src_offset >> 10));
		radeon_ring_write(rdev, (pitch << 22) | (dst_offset >> 10));
		/* src/dst scissors: 0..0x1fff in both axes */
		radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));
		radeon_ring_write(rdev, 0);
		radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));
		radeon_ring_write(rdev, num_pages);
		radeon_ring_write(rdev, num_pages);
		radeon_ring_write(rdev, cur_pages | (stride_pixels << 16));
	}
	/* flush 2D dst cache and wait for engines/host to go idle */
	radeon_ring_write(rdev, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(rdev, RADEON_RB2D_DC_FLUSH_ALL);
	radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(rdev,
			  RADEON_WAIT_2D_IDLECLEAN |
			  RADEON_WAIT_HOST_IDLECLEAN |
			  RADEON_WAIT_DMA_GUI_IDLE);
	if (fence) {
		r = radeon_fence_emit(rdev, fence);
	}
	radeon_ring_unlock_commit(rdev);
	return r;
}
  350. /*
  351. * CP
  352. */
/*
 * Emit the initial ring state: program ISYNC_CNTL so 2D and 3D
 * operations are serialized against each other and against the GUI.
 * Silently returns if the ring cannot be locked.
 */
void r100_ring_start(struct radeon_device *rdev)
{
	int r;

	/* 2 dwords: one PACKET0 header plus one payload */
	r = radeon_ring_lock(rdev, 2);
	if (r) {
		return;
	}
	radeon_ring_write(rdev, PACKET0(RADEON_ISYNC_CNTL, 0));
	radeon_ring_write(rdev,
			  RADEON_ISYNC_ANY2D_IDLE3D |
			  RADEON_ISYNC_ANY3D_IDLE2D |
			  RADEON_ISYNC_WAIT_IDLEGUI |
			  RADEON_ISYNC_CPSCRATCH_IDLEGUI);
	radeon_ring_unlock_commit(rdev);
}
/*
 * Upload the 256-entry CP microcode image appropriate for the ASIC
 * family into the CP ME RAM.  The address register auto-increments,
 * so only the start address is written; each entry is a high/low
 * dword pair.  Unknown families load nothing.
 */
static void r100_cp_load_microcode(struct radeon_device *rdev)
{
	int i;

	if (r100_gui_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait GUI idle while "
		       "programming pipes. Bad things might happen.\n");
	}
	/* start at ME RAM offset 0; DATAH/DATAL writes auto-advance */
	WREG32(RADEON_CP_ME_RAM_ADDR, 0);
	if ((rdev->family == CHIP_R100) || (rdev->family == CHIP_RV100) ||
	    (rdev->family == CHIP_RV200) || (rdev->family == CHIP_RS100) ||
	    (rdev->family == CHIP_RS200)) {
		DRM_INFO("Loading R100 Microcode\n");
		for (i = 0; i < 256; i++) {
			WREG32(RADEON_CP_ME_RAM_DATAH, R100_cp_microcode[i][1]);
			WREG32(RADEON_CP_ME_RAM_DATAL, R100_cp_microcode[i][0]);
		}
	} else if ((rdev->family == CHIP_R200) ||
		   (rdev->family == CHIP_RV250) ||
		   (rdev->family == CHIP_RV280) ||
		   (rdev->family == CHIP_RS300)) {
		DRM_INFO("Loading R200 Microcode\n");
		for (i = 0; i < 256; i++) {
			WREG32(RADEON_CP_ME_RAM_DATAH, R200_cp_microcode[i][1]);
			WREG32(RADEON_CP_ME_RAM_DATAL, R200_cp_microcode[i][0]);
		}
	} else if ((rdev->family == CHIP_R300) ||
		   (rdev->family == CHIP_R350) ||
		   (rdev->family == CHIP_RV350) ||
		   (rdev->family == CHIP_RV380) ||
		   (rdev->family == CHIP_RS400) ||
		   (rdev->family == CHIP_RS480)) {
		DRM_INFO("Loading R300 Microcode\n");
		for (i = 0; i < 256; i++) {
			WREG32(RADEON_CP_ME_RAM_DATAH, R300_cp_microcode[i][1]);
			WREG32(RADEON_CP_ME_RAM_DATAL, R300_cp_microcode[i][0]);
		}
	} else if ((rdev->family == CHIP_R420) ||
		   (rdev->family == CHIP_R423) ||
		   (rdev->family == CHIP_RV410)) {
		DRM_INFO("Loading R400 Microcode\n");
		for (i = 0; i < 256; i++) {
			WREG32(RADEON_CP_ME_RAM_DATAH, R420_cp_microcode[i][1]);
			WREG32(RADEON_CP_ME_RAM_DATAL, R420_cp_microcode[i][0]);
		}
	} else if ((rdev->family == CHIP_RS690) ||
		   (rdev->family == CHIP_RS740)) {
		DRM_INFO("Loading RS690/RS740 Microcode\n");
		for (i = 0; i < 256; i++) {
			WREG32(RADEON_CP_ME_RAM_DATAH, RS690_cp_microcode[i][1]);
			WREG32(RADEON_CP_ME_RAM_DATAL, RS690_cp_microcode[i][0]);
		}
	} else if (rdev->family == CHIP_RS600) {
		DRM_INFO("Loading RS600 Microcode\n");
		for (i = 0; i < 256; i++) {
			WREG32(RADEON_CP_ME_RAM_DATAH, RS600_cp_microcode[i][1]);
			WREG32(RADEON_CP_ME_RAM_DATAL, RS600_cp_microcode[i][0]);
		}
	} else if ((rdev->family == CHIP_RV515) ||
		   (rdev->family == CHIP_R520) ||
		   (rdev->family == CHIP_RV530) ||
		   (rdev->family == CHIP_R580) ||
		   (rdev->family == CHIP_RV560) ||
		   (rdev->family == CHIP_RV570)) {
		DRM_INFO("Loading R500 Microcode\n");
		for (i = 0; i < 256; i++) {
			WREG32(RADEON_CP_ME_RAM_DATAH, R520_cp_microcode[i][1]);
			WREG32(RADEON_CP_ME_RAM_DATAL, R520_cp_microcode[i][0]);
		}
	}
}
/*
 * Bring up the command processor: soft-reset it if busy, load the
 * microcode, allocate and program the ring buffer, configure the
 * fetch/cache parameters, and enable bus-master mode.  Finishes by
 * emitting the initial ring state and running a ring test.
 *
 * @ring_size: requested ring size in bytes (rounded to a power of two)
 * Returns 0 on success or a negative error code.
 */
int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
{
	unsigned rb_bufsz;
	unsigned rb_blksz;
	unsigned max_fetch;
	unsigned pre_write_timer;
	unsigned pre_write_limit;
	unsigned indirect2_start;
	unsigned indirect1_start;
	uint32_t tmp;
	int r;

	if (r100_debugfs_cp_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for CP !\n");
	}
	/* Reset CP if the CSQ busy bit (31) is set */
	tmp = RREG32(RADEON_CP_CSQ_STAT);
	if ((tmp & (1 << 31))) {
		DRM_INFO("radeon: cp busy (0x%08X) resetting\n", tmp);
		WREG32(RADEON_CP_CSQ_MODE, 0);
		WREG32(RADEON_CP_CSQ_CNTL, 0);
		WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_CP);
		tmp = RREG32(RADEON_RBBM_SOFT_RESET);
		mdelay(2);
		WREG32(RADEON_RBBM_SOFT_RESET, 0);
		tmp = RREG32(RADEON_RBBM_SOFT_RESET);
		mdelay(2);
		tmp = RREG32(RADEON_CP_CSQ_STAT);
		if ((tmp & (1 << 31))) {
			DRM_INFO("radeon: cp reset failed (0x%08X)\n", tmp);
		}
	} else {
		DRM_INFO("radeon: cp idle (0x%08X)\n", tmp);
	}
	/* Align ring size to a power of two (RB_BUFSZ is a log2 field) */
	rb_bufsz = drm_order(ring_size / 8);
	ring_size = (1 << (rb_bufsz + 1)) * 4;
	r100_cp_load_microcode(rdev);
	r = radeon_ring_init(rdev, ring_size);
	if (r) {
		return r;
	}
	/* Each time the cp read 1024 bytes (16 dword/quadword) update
	 * the rptr copy in system ram */
	rb_blksz = 9;
	/* cp will read 128bytes at a time (4 dwords) */
	max_fetch = 1;
	rdev->cp.align_mask = 16 - 1;
	/* Write to CP_RB_WPTR will be delayed for pre_write_timer clocks */
	pre_write_timer = 64;
	/* Force CP_RB_WPTR write if written more than one time before the
	 * delay expire
	 */
	pre_write_limit = 0;
	/* Setup the cp cache like this (cache size is 96 dwords) :
	 * RING 0 to 15
	 * INDIRECT1 16 to 79
	 * INDIRECT2 80 to 95
	 * So ring cache size is 16dwords (> (2 * max_fetch = 2 * 4dwords))
	 * indirect1 cache size is 64dwords (> (2 * max_fetch = 2 * 4dwords))
	 * indirect2 cache size is 16dwords (> (2 * max_fetch = 2 * 4dwords))
	 * Idea being that most of the gpu cmd will be through indirect1 buffer
	 * so it gets the bigger cache.
	 */
	indirect2_start = 80;
	indirect1_start = 16;
	/* cp setup */
	WREG32(0x718, pre_write_timer | (pre_write_limit << 28));
	WREG32(RADEON_CP_RB_CNTL,
	       REG_SET(RADEON_RB_BUFSZ, rb_bufsz) |
	       REG_SET(RADEON_RB_BLKSZ, rb_blksz) |
	       REG_SET(RADEON_MAX_FETCH, max_fetch) |
	       RADEON_RB_NO_UPDATE);
	/* Set ring address */
	DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)rdev->cp.gpu_addr);
	WREG32(RADEON_CP_RB_BASE, rdev->cp.gpu_addr);
	/* Force read & write ptr to 0 (RPTR_WR_ENA makes RPTR writable) */
	tmp = RREG32(RADEON_CP_RB_CNTL);
	WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
	WREG32(RADEON_CP_RB_RPTR_WR, 0);
	WREG32(RADEON_CP_RB_WPTR, 0);
	WREG32(RADEON_CP_RB_CNTL, tmp);
	udelay(10);
	rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
	rdev->cp.wptr = RREG32(RADEON_CP_RB_WPTR);
	/* Set cp mode to bus mastering & enable cp*/
	WREG32(RADEON_CP_CSQ_MODE,
	       REG_SET(RADEON_INDIRECT2_START, indirect2_start) |
	       REG_SET(RADEON_INDIRECT1_START, indirect1_start));
	WREG32(0x718, 0);
	WREG32(0x744, 0x00004D4D);
	WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM);
	radeon_ring_start(rdev);
	r = radeon_ring_test(rdev);
	if (r) {
		DRM_ERROR("radeon: cp isn't working (%d).\n", r);
		return r;
	}
	rdev->cp.ready = true;
	return 0;
}
/*
 * Final CP teardown: mark the ring unusable, stop command fetching,
 * and free the ring buffer.
 */
void r100_cp_fini(struct radeon_device *rdev)
{
	/* Disable ring */
	rdev->cp.ready = false;
	WREG32(RADEON_CP_CSQ_CNTL, 0);
	radeon_ring_fini(rdev);
	DRM_INFO("radeon: cp finalized\n");
}
/*
 * Stop the CP without freeing the ring (e.g. around suspend/reset):
 * clear mode and control registers and wait for the GUI to idle.
 */
void r100_cp_disable(struct radeon_device *rdev)
{
	/* Disable ring */
	rdev->cp.ready = false;
	WREG32(RADEON_CP_CSQ_MODE, 0);
	WREG32(RADEON_CP_CSQ_CNTL, 0);
	if (r100_gui_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait GUI idle while "
		       "programming pipes. Bad things might happen.\n");
	}
}
/*
 * Soft-reset the CP via RBBM and poll RBBM_STATUS until the CP busy
 * bit (16) clears.  If the CP was running before the reset, it is
 * re-initialized with its previous ring size.
 *
 * Returns 0 (or r100_cp_init's result) on success, -1 on timeout.
 */
int r100_cp_reset(struct radeon_device *rdev)
{
	uint32_t tmp;
	bool reinit_cp;
	int i;

	/* remember whether to restart the CP afterwards */
	reinit_cp = rdev->cp.ready;
	rdev->cp.ready = false;
	WREG32(RADEON_CP_CSQ_MODE, 0);
	WREG32(RADEON_CP_CSQ_CNTL, 0);
	WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_CP);
	(void)RREG32(RADEON_RBBM_SOFT_RESET);	/* flush posted write */
	udelay(200);
	WREG32(RADEON_RBBM_SOFT_RESET, 0);
	/* Wait to prevent race in RBBM_STATUS */
	mdelay(1);
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(RADEON_RBBM_STATUS);
		/* bit 16 = CP busy */
		if (!(tmp & (1 << 16))) {
			DRM_INFO("CP reset succeed (RBBM_STATUS=0x%08X)\n",
				 tmp);
			if (reinit_cp) {
				return r100_cp_init(rdev, rdev->cp.ring_size);
			}
			return 0;
		}
		DRM_UDELAY(1);
	}
	tmp = RREG32(RADEON_RBBM_STATUS);
	DRM_ERROR("Failed to reset CP (RBBM_STATUS=0x%08X)!\n", tmp);
	return -1;
}
  588. /*
  589. * CS functions
  590. */
/*
 * Validate a PACKET0 against the @auth register bitmap: each register
 * (4 bytes apart) maps to one bit in auth[] (32 regs per word, word
 * index = reg >> 7).  Authorized registers are passed to @check for
 * per-register validation; for one_reg_wr packets the loop stops at
 * the first unauthorized register.
 *
 * @auth:  bitmap of registers userspace may write
 * @n:     number of words in @auth
 *         NOTE(review): the range guards use '> n' — if @n is the
 *         element count, index n would be one past the end; confirm
 *         callers' sizing convention.
 * Returns 0 if all touched registers pass, or @check's error.
 */
int r100_cs_parse_packet0(struct radeon_cs_parser *p,
			  struct radeon_cs_packet *pkt,
			  unsigned *auth, unsigned n,
			  radeon_packet0_check_t check)
{
	unsigned reg;
	unsigned i, j, m;
	unsigned idx;
	int r;

	idx = pkt->idx + 1;
	reg = pkt->reg;
	/* reject packets whose register range exceeds the bitmap */
	if (pkt->one_reg_wr) {
		if ((reg >> 7) > n) {
			return -EINVAL;
		}
	} else {
		if (((reg + (pkt->count << 2)) >> 7) > n) {
			return -EINVAL;
		}
	}
	for (i = 0; i <= pkt->count; i++, idx++) {
		j = (reg >> 7);
		m = 1 << ((reg >> 2) & 31);
		if (auth[j] & m) {
			r = check(p, pkt, idx, reg);
			if (r) {
				return r;
			}
		}
		if (pkt->one_reg_wr) {
			/* one_reg_wr writes the same register repeatedly;
			 * an unauthorized register ends validation */
			if (!(auth[j] & m)) {
				break;
			}
		} else {
			reg += 4;
		}
	}
	return 0;
}
  630. int r100_cs_parse_packet3(struct radeon_cs_parser *p,
  631. struct radeon_cs_packet *pkt,
  632. unsigned *auth, unsigned n,
  633. radeon_packet3_check_t check)
  634. {
  635. unsigned i, m;
  636. if ((pkt->opcode >> 5) > n) {
  637. return -EINVAL;
  638. }
  639. i = pkt->opcode >> 5;
  640. m = 1 << (pkt->opcode & 31);
  641. if (auth[i] & m) {
  642. return check(p, pkt);
  643. }
  644. return 0;
  645. }
  646. void r100_cs_dump_packet(struct radeon_cs_parser *p,
  647. struct radeon_cs_packet *pkt)
  648. {
  649. struct radeon_cs_chunk *ib_chunk;
  650. volatile uint32_t *ib;
  651. unsigned i;
  652. unsigned idx;
  653. ib = p->ib->ptr;
  654. ib_chunk = &p->chunks[p->chunk_ib_idx];
  655. idx = pkt->idx;
  656. for (i = 0; i <= (pkt->count + 1); i++, idx++) {
  657. DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
  658. }
  659. }
  660. /**
  661. * r100_cs_packet_parse() - parse cp packet and point ib index to next packet
  662. * @parser: parser structure holding parsing context.
  663. * @pkt: where to store packet informations
  664. *
  665. * Assume that chunk_ib_index is properly set. Will return -EINVAL
  666. * if packet is bigger than remaining ib size. or if packets is unknown.
  667. **/
  668. int r100_cs_packet_parse(struct radeon_cs_parser *p,
  669. struct radeon_cs_packet *pkt,
  670. unsigned idx)
  671. {
  672. struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
  673. uint32_t header = ib_chunk->kdata[idx];
  674. if (idx >= ib_chunk->length_dw) {
  675. DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
  676. idx, ib_chunk->length_dw);
  677. return -EINVAL;
  678. }
  679. pkt->idx = idx;
  680. pkt->type = CP_PACKET_GET_TYPE(header);
  681. pkt->count = CP_PACKET_GET_COUNT(header);
  682. switch (pkt->type) {
  683. case PACKET_TYPE0:
  684. pkt->reg = CP_PACKET0_GET_REG(header);
  685. pkt->one_reg_wr = CP_PACKET0_GET_ONE_REG_WR(header);
  686. break;
  687. case PACKET_TYPE3:
  688. pkt->opcode = CP_PACKET3_GET_OPCODE(header);
  689. break;
  690. case PACKET_TYPE2:
  691. pkt->count = -1;
  692. break;
  693. default:
  694. DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
  695. return -EINVAL;
  696. }
  697. if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
  698. DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
  699. pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
  700. return -EINVAL;
  701. }
  702. return 0;
  703. }
  704. /**
  705. * r100_cs_packet_next_reloc() - parse next packet which should be reloc packet3
  706. * @parser: parser structure holding parsing context.
  707. * @data: pointer to relocation data
  708. * @offset_start: starting offset
  709. * @offset_mask: offset mask (to align start offset on)
  710. * @reloc: reloc informations
  711. *
  712. * Check next packet is relocation packet3, do bo validation and compute
  713. * GPU offset using the provided start.
  714. **/
int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
			      struct radeon_cs_reloc **cs_reloc)
{
	struct radeon_cs_chunk *ib_chunk;
	struct radeon_cs_chunk *relocs_chunk;
	struct radeon_cs_packet p3reloc;
	unsigned idx;
	int r;

	/* a relocation chunk must accompany any CS that needs relocs */
	if (p->chunk_relocs_idx == -1) {
		DRM_ERROR("No relocation chunk !\n");
		return -EINVAL;
	}
	*cs_reloc = NULL;
	ib_chunk = &p->chunks[p->chunk_ib_idx];
	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
	/* parse the packet at the parser's current position */
	r = r100_cs_packet_parse(p, &p3reloc, p->idx);
	if (r) {
		return r;
	}
	/* advance past this packet (header + payload) */
	p->idx += p3reloc.count + 2;
	/* relocations are carried in PACKET3 NOP packets */
	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
			  p3reloc.idx);
		r100_cs_dump_packet(p, &p3reloc);
		return -EINVAL;
	}
	/* first payload dword is the dword offset into the relocs chunk */
	idx = ib_chunk->kdata[p3reloc.idx + 1];
	if (idx >= relocs_chunk->length_dw) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
			  idx, relocs_chunk->length_dw);
		r100_cs_dump_packet(p, &p3reloc);
		return -EINVAL;
	}
	/* FIXME: we assume reloc size is 4 dwords */
	*cs_reloc = p->relocs_ptr[(idx / 4)];
	return 0;
}
/* Check and relocate a PACKET0 (register write) command.
 *
 * Walks every register the packet writes; registers known to carry GPU
 * addresses (blit pitch/offset, depth/color buffer offsets, texture
 * offsets) have their value patched with the relocated buffer-object
 * offset.  All other registers are currently passed through unchecked
 * (see the FIXME below).  Returns 0 on success or a negative errno.
 */
static int r100_packet0_check(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt)
{
	struct radeon_cs_chunk *ib_chunk;
	struct radeon_cs_reloc *reloc;
	volatile uint32_t *ib;
	uint32_t tmp;
	unsigned reg;
	unsigned i;
	unsigned idx;
	bool onereg;
	int r;

	ib = p->ib->ptr;
	ib_chunk = &p->chunks[p->chunk_ib_idx];
	idx = pkt->idx + 1;	/* first data dword after the packet header */
	reg = pkt->reg;
	onereg = false;
	/* ONE_REG_WR: every data dword targets the same register instead of
	 * consecutive ones; only the first iteration is meaningful then. */
	if (CP_PACKET0_GET_ONE_REG_WR(ib_chunk->kdata[pkt->idx])) {
		onereg = true;
	}
	for (i = 0; i <= pkt->count; i++, idx++, reg += 4) {
		switch (reg) {
		/* FIXME: only allow PACKET3 blit? easier to check for out of
		 * range access */
		case RADEON_DST_PITCH_OFFSET:
		case RADEON_SRC_PITCH_OFFSET:
			r = r100_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
					  idx, reg);
				r100_cs_dump_packet(p, pkt);
				return r;
			}
			/* Low 22 bits hold the offset (in 1KB units, hence
			 * the >> 10); upper 10 bits hold the pitch and are
			 * carried over unchanged. */
			tmp = ib_chunk->kdata[idx] & 0x003fffff;
			tmp += (((u32)reloc->lobj.gpu_offset) >> 10);
			ib[idx] = (ib_chunk->kdata[idx] & 0xffc00000) | tmp;
			break;
		case RADEON_RB3D_DEPTHOFFSET:
		case RADEON_RB3D_COLOROFFSET:
		case R300_RB3D_COLOROFFSET0:
		case R300_ZB_DEPTHOFFSET:
		case R200_PP_TXOFFSET_0:
		case R200_PP_TXOFFSET_1:
		case R200_PP_TXOFFSET_2:
		case R200_PP_TXOFFSET_3:
		case R200_PP_TXOFFSET_4:
		case R200_PP_TXOFFSET_5:
		case RADEON_PP_TXOFFSET_0:
		case RADEON_PP_TXOFFSET_1:
		case RADEON_PP_TXOFFSET_2:
		case R300_TX_OFFSET_0:
		case R300_TX_OFFSET_0+4:
		case R300_TX_OFFSET_0+8:
		case R300_TX_OFFSET_0+12:
		case R300_TX_OFFSET_0+16:
		case R300_TX_OFFSET_0+20:
		case R300_TX_OFFSET_0+24:
		case R300_TX_OFFSET_0+28:
		case R300_TX_OFFSET_0+32:
		case R300_TX_OFFSET_0+36:
		case R300_TX_OFFSET_0+40:
		case R300_TX_OFFSET_0+44:
		case R300_TX_OFFSET_0+48:
		case R300_TX_OFFSET_0+52:
		case R300_TX_OFFSET_0+56:
		case R300_TX_OFFSET_0+60:
			/* Plain byte-address registers: add the relocated
			 * buffer-object base to the userspace offset. */
			r = r100_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
					  idx, reg);
				r100_cs_dump_packet(p, pkt);
				return r;
			}
			ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
			break;
		default:
			/* FIXME: we don't want to allow anyothers packet */
			break;
		}
		if (onereg) {
			/* FIXME: forbid onereg write to register on relocate */
			break;
		}
	}
	return 0;
}
  838. static int r100_packet3_check(struct radeon_cs_parser *p,
  839. struct radeon_cs_packet *pkt)
  840. {
  841. struct radeon_cs_chunk *ib_chunk;
  842. struct radeon_cs_reloc *reloc;
  843. unsigned idx;
  844. unsigned i, c;
  845. volatile uint32_t *ib;
  846. int r;
  847. ib = p->ib->ptr;
  848. ib_chunk = &p->chunks[p->chunk_ib_idx];
  849. idx = pkt->idx + 1;
  850. switch (pkt->opcode) {
  851. case PACKET3_3D_LOAD_VBPNTR:
  852. c = ib_chunk->kdata[idx++];
  853. for (i = 0; i < (c - 1); i += 2, idx += 3) {
  854. r = r100_cs_packet_next_reloc(p, &reloc);
  855. if (r) {
  856. DRM_ERROR("No reloc for packet3 %d\n",
  857. pkt->opcode);
  858. r100_cs_dump_packet(p, pkt);
  859. return r;
  860. }
  861. ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
  862. r = r100_cs_packet_next_reloc(p, &reloc);
  863. if (r) {
  864. DRM_ERROR("No reloc for packet3 %d\n",
  865. pkt->opcode);
  866. r100_cs_dump_packet(p, pkt);
  867. return r;
  868. }
  869. ib[idx+2] = ib_chunk->kdata[idx+2] + ((u32)reloc->lobj.gpu_offset);
  870. }
  871. if (c & 1) {
  872. r = r100_cs_packet_next_reloc(p, &reloc);
  873. if (r) {
  874. DRM_ERROR("No reloc for packet3 %d\n",
  875. pkt->opcode);
  876. r100_cs_dump_packet(p, pkt);
  877. return r;
  878. }
  879. ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
  880. }
  881. break;
  882. case PACKET3_INDX_BUFFER:
  883. r = r100_cs_packet_next_reloc(p, &reloc);
  884. if (r) {
  885. DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
  886. r100_cs_dump_packet(p, pkt);
  887. return r;
  888. }
  889. ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
  890. break;
  891. case 0x23:
  892. /* FIXME: cleanup */
  893. /* 3D_RNDR_GEN_INDX_PRIM on r100/r200 */
  894. r = r100_cs_packet_next_reloc(p, &reloc);
  895. if (r) {
  896. DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
  897. r100_cs_dump_packet(p, pkt);
  898. return r;
  899. }
  900. ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
  901. break;
  902. case PACKET3_3D_DRAW_IMMD:
  903. /* triggers drawing using in-packet vertex data */
  904. case PACKET3_3D_DRAW_IMMD_2:
  905. /* triggers drawing using in-packet vertex data */
  906. case PACKET3_3D_DRAW_VBUF_2:
  907. /* triggers drawing of vertex buffers setup elsewhere */
  908. case PACKET3_3D_DRAW_INDX_2:
  909. /* triggers drawing using indices to vertex buffer */
  910. case PACKET3_3D_DRAW_VBUF:
  911. /* triggers drawing of vertex buffers setup elsewhere */
  912. case PACKET3_3D_DRAW_INDX:
  913. /* triggers drawing using indices to vertex buffer */
  914. case PACKET3_NOP:
  915. break;
  916. default:
  917. DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
  918. return -EINVAL;
  919. }
  920. return 0;
  921. }
  922. int r100_cs_parse(struct radeon_cs_parser *p)
  923. {
  924. struct radeon_cs_packet pkt;
  925. int r;
  926. do {
  927. r = r100_cs_packet_parse(p, &pkt, p->idx);
  928. if (r) {
  929. return r;
  930. }
  931. p->idx += pkt.count + 2;
  932. switch (pkt.type) {
  933. case PACKET_TYPE0:
  934. r = r100_packet0_check(p, &pkt);
  935. break;
  936. case PACKET_TYPE2:
  937. break;
  938. case PACKET_TYPE3:
  939. r = r100_packet3_check(p, &pkt);
  940. break;
  941. default:
  942. DRM_ERROR("Unknown packet type %d !\n",
  943. pkt.type);
  944. return -EINVAL;
  945. }
  946. if (r) {
  947. return r;
  948. }
  949. } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
  950. return 0;
  951. }
  952. /*
  953. * Global GPU functions
  954. */
  955. void r100_errata(struct radeon_device *rdev)
  956. {
  957. rdev->pll_errata = 0;
  958. if (rdev->family == CHIP_RV200 || rdev->family == CHIP_RS200) {
  959. rdev->pll_errata |= CHIP_ERRATA_PLL_DUMMYREADS;
  960. }
  961. if (rdev->family == CHIP_RV100 ||
  962. rdev->family == CHIP_RS100 ||
  963. rdev->family == CHIP_RS200) {
  964. rdev->pll_errata |= CHIP_ERRATA_PLL_DELAY;
  965. }
  966. }
  967. /* Wait for vertical sync on primary CRTC */
  968. void r100_gpu_wait_for_vsync(struct radeon_device *rdev)
  969. {
  970. uint32_t crtc_gen_cntl, tmp;
  971. int i;
  972. crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL);
  973. if ((crtc_gen_cntl & RADEON_CRTC_DISP_REQ_EN_B) ||
  974. !(crtc_gen_cntl & RADEON_CRTC_EN)) {
  975. return;
  976. }
  977. /* Clear the CRTC_VBLANK_SAVE bit */
  978. WREG32(RADEON_CRTC_STATUS, RADEON_CRTC_VBLANK_SAVE_CLEAR);
  979. for (i = 0; i < rdev->usec_timeout; i++) {
  980. tmp = RREG32(RADEON_CRTC_STATUS);
  981. if (tmp & RADEON_CRTC_VBLANK_SAVE) {
  982. return;
  983. }
  984. DRM_UDELAY(1);
  985. }
  986. }
  987. /* Wait for vertical sync on secondary CRTC */
  988. void r100_gpu_wait_for_vsync2(struct radeon_device *rdev)
  989. {
  990. uint32_t crtc2_gen_cntl, tmp;
  991. int i;
  992. crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
  993. if ((crtc2_gen_cntl & RADEON_CRTC2_DISP_REQ_EN_B) ||
  994. !(crtc2_gen_cntl & RADEON_CRTC2_EN))
  995. return;
  996. /* Clear the CRTC_VBLANK_SAVE bit */
  997. WREG32(RADEON_CRTC2_STATUS, RADEON_CRTC2_VBLANK_SAVE_CLEAR);
  998. for (i = 0; i < rdev->usec_timeout; i++) {
  999. tmp = RREG32(RADEON_CRTC2_STATUS);
  1000. if (tmp & RADEON_CRTC2_VBLANK_SAVE) {
  1001. return;
  1002. }
  1003. DRM_UDELAY(1);
  1004. }
  1005. }
  1006. int r100_rbbm_fifo_wait_for_entry(struct radeon_device *rdev, unsigned n)
  1007. {
  1008. unsigned i;
  1009. uint32_t tmp;
  1010. for (i = 0; i < rdev->usec_timeout; i++) {
  1011. tmp = RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_FIFOCNT_MASK;
  1012. if (tmp >= n) {
  1013. return 0;
  1014. }
  1015. DRM_UDELAY(1);
  1016. }
  1017. return -1;
  1018. }
  1019. int r100_gui_wait_for_idle(struct radeon_device *rdev)
  1020. {
  1021. unsigned i;
  1022. uint32_t tmp;
  1023. if (r100_rbbm_fifo_wait_for_entry(rdev, 64)) {
  1024. printk(KERN_WARNING "radeon: wait for empty RBBM fifo failed !"
  1025. " Bad things might happen.\n");
  1026. }
  1027. for (i = 0; i < rdev->usec_timeout; i++) {
  1028. tmp = RREG32(RADEON_RBBM_STATUS);
  1029. if (!(tmp & (1 << 31))) {
  1030. return 0;
  1031. }
  1032. DRM_UDELAY(1);
  1033. }
  1034. return -1;
  1035. }
  1036. int r100_mc_wait_for_idle(struct radeon_device *rdev)
  1037. {
  1038. unsigned i;
  1039. uint32_t tmp;
  1040. for (i = 0; i < rdev->usec_timeout; i++) {
  1041. /* read MC_STATUS */
  1042. tmp = RREG32(0x0150);
  1043. if (tmp & (1 << 2)) {
  1044. return 0;
  1045. }
  1046. DRM_UDELAY(1);
  1047. }
  1048. return -1;
  1049. }
/* Engine initialization hook; for r100 this currently only resets the
 * host data path block. */
void r100_gpu_init(struct radeon_device *rdev)
{
	/* TODO: anythings to do here ? pipes ? */
	r100_hdp_reset(rdev);
}
/* Soft-reset the Host Data Path (HDP) block.  The register write order
 * here is part of the reset sequence and must be preserved. */
void r100_hdp_reset(struct radeon_device *rdev)
{
	uint32_t tmp;

	/* Preserve only the aperture-control bit of HOST_PATH_CNTL. */
	tmp = RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL;
	tmp |= (7 << 28);
	WREG32(RADEON_HOST_PATH_CNTL, tmp | RADEON_HDP_SOFT_RESET | RADEON_HDP_READ_BUFFER_INVALIDATE);
	(void)RREG32(RADEON_HOST_PATH_CNTL);	/* read back to post the write */
	udelay(200);
	/* NOTE(review): clears RBBM soft reset here — confirm this write
	 * belongs in the HDP sequence rather than a leftover. */
	WREG32(RADEON_RBBM_SOFT_RESET, 0);
	WREG32(RADEON_HOST_PATH_CNTL, tmp);	/* deassert HDP soft reset */
	(void)RREG32(RADEON_HOST_PATH_CNTL);	/* flush */
}
/* Soft-reset the 2D render backend (E2 engine), then poll RBBM_STATUS
 * until it reports idle.  Returns 0 on success, -1 on timeout. */
int r100_rb2d_reset(struct radeon_device *rdev)
{
	uint32_t tmp;
	int i;

	WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_E2);
	(void)RREG32(RADEON_RBBM_SOFT_RESET);	/* read back to post the write */
	udelay(200);
	WREG32(RADEON_RBBM_SOFT_RESET, 0);	/* release the reset */
	/* Wait to prevent race in RBBM_STATUS */
	mdelay(1);
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(RADEON_RBBM_STATUS);
		/* bit 26: 2D engine busy — NOTE(review): magic bit, confirm
		 * against the RBBM_STATUS register spec. */
		if (!(tmp & (1 << 26))) {
			DRM_INFO("RB2D reset succeed (RBBM_STATUS=0x%08X)\n",
				 tmp);
			return 0;
		}
		DRM_UDELAY(1);
	}
	tmp = RREG32(RADEON_RBBM_STATUS);
	DRM_ERROR("Failed to reset RB2D (RBBM_STATUS=0x%08X)!\n", tmp);
	return -1;
}
  1090. int r100_gpu_reset(struct radeon_device *rdev)
  1091. {
  1092. uint32_t status;
  1093. /* reset order likely matter */
  1094. status = RREG32(RADEON_RBBM_STATUS);
  1095. /* reset HDP */
  1096. r100_hdp_reset(rdev);
  1097. /* reset rb2d */
  1098. if (status & ((1 << 17) | (1 << 18) | (1 << 27))) {
  1099. r100_rb2d_reset(rdev);
  1100. }
  1101. /* TODO: reset 3D engine */
  1102. /* reset CP */
  1103. status = RREG32(RADEON_RBBM_STATUS);
  1104. if (status & (1 << 16)) {
  1105. r100_cp_reset(rdev);
  1106. }
  1107. /* Check if GPU is idle */
  1108. status = RREG32(RADEON_RBBM_STATUS);
  1109. if (status & (1 << 31)) {
  1110. DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
  1111. return -1;
  1112. }
  1113. DRM_INFO("GPU reset succeed (RBBM_STATUS=0x%08X)\n", status);
  1114. return 0;
  1115. }
  1116. /*
  1117. * VRAM info
  1118. */
  1119. static void r100_vram_get_type(struct radeon_device *rdev)
  1120. {
  1121. uint32_t tmp;
  1122. rdev->mc.vram_is_ddr = false;
  1123. if (rdev->flags & RADEON_IS_IGP)
  1124. rdev->mc.vram_is_ddr = true;
  1125. else if (RREG32(RADEON_MEM_SDRAM_MODE_REG) & RADEON_MEM_CFG_TYPE_DDR)
  1126. rdev->mc.vram_is_ddr = true;
  1127. if ((rdev->family == CHIP_RV100) ||
  1128. (rdev->family == CHIP_RS100) ||
  1129. (rdev->family == CHIP_RS200)) {
  1130. tmp = RREG32(RADEON_MEM_CNTL);
  1131. if (tmp & RV100_HALF_MODE) {
  1132. rdev->mc.vram_width = 32;
  1133. } else {
  1134. rdev->mc.vram_width = 64;
  1135. }
  1136. if (rdev->flags & RADEON_SINGLE_CRTC) {
  1137. rdev->mc.vram_width /= 4;
  1138. rdev->mc.vram_is_ddr = true;
  1139. }
  1140. } else if (rdev->family <= CHIP_RV280) {
  1141. tmp = RREG32(RADEON_MEM_CNTL);
  1142. if (tmp & RADEON_MEM_NUM_CHANNELS_MASK) {
  1143. rdev->mc.vram_width = 128;
  1144. } else {
  1145. rdev->mc.vram_width = 64;
  1146. }
  1147. } else {
  1148. /* newer IGPs */
  1149. rdev->mc.vram_width = 128;
  1150. }
  1151. }
  1152. void r100_vram_info(struct radeon_device *rdev)
  1153. {
  1154. r100_vram_get_type(rdev);
  1155. if (rdev->flags & RADEON_IS_IGP) {
  1156. uint32_t tom;
  1157. /* read NB_TOM to get the amount of ram stolen for the GPU */
  1158. tom = RREG32(RADEON_NB_TOM);
  1159. rdev->mc.vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16);
  1160. WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size);
  1161. } else {
  1162. rdev->mc.vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
  1163. /* Some production boards of m6 will report 0
  1164. * if it's 8 MB
  1165. */
  1166. if (rdev->mc.vram_size == 0) {
  1167. rdev->mc.vram_size = 8192 * 1024;
  1168. WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size);
  1169. }
  1170. }
  1171. rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
  1172. rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
  1173. if (rdev->mc.aper_size > rdev->mc.vram_size) {
  1174. /* Why does some hw doesn't have CONFIG_MEMSIZE properly
  1175. * setup ? */
  1176. rdev->mc.vram_size = rdev->mc.aper_size;
  1177. WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size);
  1178. }
  1179. }
  1180. /*
  1181. * Indirect registers accessor
  1182. */
  1183. void r100_pll_errata_after_index(struct radeon_device *rdev)
  1184. {
  1185. if (!(rdev->pll_errata & CHIP_ERRATA_PLL_DUMMYREADS)) {
  1186. return;
  1187. }
  1188. (void)RREG32(RADEON_CLOCK_CNTL_DATA);
  1189. (void)RREG32(RADEON_CRTC_GEN_CNTL);
  1190. }
/* Apply post CLOCK_CNTL_DATA access PLL errata workarounds. */
static void r100_pll_errata_after_data(struct radeon_device *rdev)
{
	/* This workarounds is necessary on RV100, RS100 and RS200 chips
	 * or the chip could hang on a subsequent access
	 */
	if (rdev->pll_errata & CHIP_ERRATA_PLL_DELAY) {
		udelay(5000);
	}
	/* This function is required to workaround a hardware bug in some (all?)
	 * revisions of the R300. This workaround should be called after every
	 * CLOCK_CNTL_INDEX register access. If not, register reads afterward
	 * may not be correct.
	 */
	if (rdev->pll_errata & CHIP_ERRATA_R300_CG) {
		uint32_t save, tmp;
		save = RREG32(RADEON_CLOCK_CNTL_INDEX);
		/* Clear the index and write-enable bits, dummy-read the data
		 * register, then restore the original index. */
		tmp = save & ~(0x3f | RADEON_PLL_WR_EN);
		WREG32(RADEON_CLOCK_CNTL_INDEX, tmp);
		tmp = RREG32(RADEON_CLOCK_CNTL_DATA);	/* result intentionally discarded */
		WREG32(RADEON_CLOCK_CNTL_INDEX, save);
	}
}
  1213. uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg)
  1214. {
  1215. uint32_t data;
  1216. WREG8(RADEON_CLOCK_CNTL_INDEX, reg & 0x3f);
  1217. r100_pll_errata_after_index(rdev);
  1218. data = RREG32(RADEON_CLOCK_CNTL_DATA);
  1219. r100_pll_errata_after_data(rdev);
  1220. return data;
  1221. }
  1222. void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
  1223. {
  1224. WREG8(RADEON_CLOCK_CNTL_INDEX, ((reg & 0x3f) | RADEON_PLL_WR_EN));
  1225. r100_pll_errata_after_index(rdev);
  1226. WREG32(RADEON_CLOCK_CNTL_DATA, v);
  1227. r100_pll_errata_after_data(rdev);
  1228. }
  1229. uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg)
  1230. {
  1231. if (reg < 0x10000)
  1232. return readl(((void __iomem *)rdev->rmmio) + reg);
  1233. else {
  1234. writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
  1235. return readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
  1236. }
  1237. }
  1238. void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
  1239. {
  1240. if (reg < 0x10000)
  1241. writel(v, ((void __iomem *)rdev->rmmio) + reg);
  1242. else {
  1243. writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
  1244. writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
  1245. }
  1246. }
  1247. /*
  1248. * Debugfs info
  1249. */
  1250. #if defined(CONFIG_DEBUG_FS)
  1251. static int r100_debugfs_rbbm_info(struct seq_file *m, void *data)
  1252. {
  1253. struct drm_info_node *node = (struct drm_info_node *) m->private;
  1254. struct drm_device *dev = node->minor->dev;
  1255. struct radeon_device *rdev = dev->dev_private;
  1256. uint32_t reg, value;
  1257. unsigned i;
  1258. seq_printf(m, "RBBM_STATUS 0x%08x\n", RREG32(RADEON_RBBM_STATUS));
  1259. seq_printf(m, "RBBM_CMDFIFO_STAT 0x%08x\n", RREG32(0xE7C));
  1260. seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
  1261. for (i = 0; i < 64; i++) {
  1262. WREG32(RADEON_RBBM_CMDFIFO_ADDR, i | 0x100);
  1263. reg = (RREG32(RADEON_RBBM_CMDFIFO_DATA) - 1) >> 2;
  1264. WREG32(RADEON_RBBM_CMDFIFO_ADDR, i);
  1265. value = RREG32(RADEON_RBBM_CMDFIFO_DATA);
  1266. seq_printf(m, "[0x%03X] 0x%04X=0x%08X\n", i, reg, value);
  1267. }
  1268. return 0;
  1269. }
/* Debugfs: dump CP ring pointers and the ring contents between the
 * read and write pointers. */
static int r100_debugfs_cp_ring_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	uint32_t rdp, wdp;
	unsigned count, i, j;

	radeon_ring_free_size(rdev);
	rdp = RREG32(RADEON_CP_RB_RPTR);
	wdp = RREG32(RADEON_CP_RB_WPTR);
	/* NOTE(review): this computes (rdp - wdp) mod ring_size, which
	 * looks like the FREE space rather than the used dwords the label
	 * below claims — confirm intended semantics. */
	count = (rdp + rdev->cp.ring_size - wdp) & rdev->cp.ptr_mask;
	seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
	seq_printf(m, "CP_RB_WPTR 0x%08x\n", wdp);
	seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp);
	seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
	seq_printf(m, "%u dwords in ring\n", count);
	/* NOTE(review): <= prints count + 1 entries — confirm the extra
	 * entry is intentional. */
	for (j = 0; j <= count; j++) {
		i = (rdp + j) & rdev->cp.ptr_mask;
		seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
	}
	return 0;
}
  1292. static int r100_debugfs_cp_csq_fifo(struct seq_file *m, void *data)
  1293. {
  1294. struct drm_info_node *node = (struct drm_info_node *) m->private;
  1295. struct drm_device *dev = node->minor->dev;
  1296. struct radeon_device *rdev = dev->dev_private;
  1297. uint32_t csq_stat, csq2_stat, tmp;
  1298. unsigned r_rptr, r_wptr, ib1_rptr, ib1_wptr, ib2_rptr, ib2_wptr;
  1299. unsigned i;
  1300. seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
  1301. seq_printf(m, "CP_CSQ_MODE 0x%08x\n", RREG32(RADEON_CP_CSQ_MODE));
  1302. csq_stat = RREG32(RADEON_CP_CSQ_STAT);
  1303. csq2_stat = RREG32(RADEON_CP_CSQ2_STAT);
  1304. r_rptr = (csq_stat >> 0) & 0x3ff;
  1305. r_wptr = (csq_stat >> 10) & 0x3ff;
  1306. ib1_rptr = (csq_stat >> 20) & 0x3ff;
  1307. ib1_wptr = (csq2_stat >> 0) & 0x3ff;
  1308. ib2_rptr = (csq2_stat >> 10) & 0x3ff;
  1309. ib2_wptr = (csq2_stat >> 20) & 0x3ff;
  1310. seq_printf(m, "CP_CSQ_STAT 0x%08x\n", csq_stat);
  1311. seq_printf(m, "CP_CSQ2_STAT 0x%08x\n", csq2_stat);
  1312. seq_printf(m, "Ring rptr %u\n", r_rptr);
  1313. seq_printf(m, "Ring wptr %u\n", r_wptr);
  1314. seq_printf(m, "Indirect1 rptr %u\n", ib1_rptr);
  1315. seq_printf(m, "Indirect1 wptr %u\n", ib1_wptr);
  1316. seq_printf(m, "Indirect2 rptr %u\n", ib2_rptr);
  1317. seq_printf(m, "Indirect2 wptr %u\n", ib2_wptr);
  1318. /* FIXME: 0, 128, 640 depends on fifo setup see cp_init_kms
  1319. * 128 = indirect1_start * 8 & 640 = indirect2_start * 8 */
  1320. seq_printf(m, "Ring fifo:\n");
  1321. for (i = 0; i < 256; i++) {
  1322. WREG32(RADEON_CP_CSQ_ADDR, i << 2);
  1323. tmp = RREG32(RADEON_CP_CSQ_DATA);
  1324. seq_printf(m, "rfifo[%04d]=0x%08X\n", i, tmp);
  1325. }
  1326. seq_printf(m, "Indirect1 fifo:\n");
  1327. for (i = 256; i <= 512; i++) {
  1328. WREG32(RADEON_CP_CSQ_ADDR, i << 2);
  1329. tmp = RREG32(RADEON_CP_CSQ_DATA);
  1330. seq_printf(m, "ib1fifo[%04d]=0x%08X\n", i, tmp);
  1331. }
  1332. seq_printf(m, "Indirect2 fifo:\n");
  1333. for (i = 640; i < ib1_wptr; i++) {
  1334. WREG32(RADEON_CP_CSQ_ADDR, i << 2);
  1335. tmp = RREG32(RADEON_CP_CSQ_DATA);
  1336. seq_printf(m, "ib2fifo[%04d]=0x%08X\n", i, tmp);
  1337. }
  1338. return 0;
  1339. }
  1340. static int r100_debugfs_mc_info(struct seq_file *m, void *data)
  1341. {
  1342. struct drm_info_node *node = (struct drm_info_node *) m->private;
  1343. struct drm_device *dev = node->minor->dev;
  1344. struct radeon_device *rdev = dev->dev_private;
  1345. uint32_t tmp;
  1346. tmp = RREG32(RADEON_CONFIG_MEMSIZE);
  1347. seq_printf(m, "CONFIG_MEMSIZE 0x%08x\n", tmp);
  1348. tmp = RREG32(RADEON_MC_FB_LOCATION);
  1349. seq_printf(m, "MC_FB_LOCATION 0x%08x\n", tmp);
  1350. tmp = RREG32(RADEON_BUS_CNTL);
  1351. seq_printf(m, "BUS_CNTL 0x%08x\n", tmp);
  1352. tmp = RREG32(RADEON_MC_AGP_LOCATION);
  1353. seq_printf(m, "MC_AGP_LOCATION 0x%08x\n", tmp);
  1354. tmp = RREG32(RADEON_AGP_BASE);
  1355. seq_printf(m, "AGP_BASE 0x%08x\n", tmp);
  1356. tmp = RREG32(RADEON_HOST_PATH_CNTL);
  1357. seq_printf(m, "HOST_PATH_CNTL 0x%08x\n", tmp);
  1358. tmp = RREG32(0x01D0);
  1359. seq_printf(m, "AIC_CTRL 0x%08x\n", tmp);
  1360. tmp = RREG32(RADEON_AIC_LO_ADDR);
  1361. seq_printf(m, "AIC_LO_ADDR 0x%08x\n", tmp);
  1362. tmp = RREG32(RADEON_AIC_HI_ADDR);
  1363. seq_printf(m, "AIC_HI_ADDR 0x%08x\n", tmp);
  1364. tmp = RREG32(0x01E4);
  1365. seq_printf(m, "AIC_TLB_ADDR 0x%08x\n", tmp);
  1366. return 0;
  1367. }
/* Debugfs file tables registered by the r100_debugfs_*_init() helpers
 * below via radeon_debugfs_add_files(). */
static struct drm_info_list r100_debugfs_rbbm_list[] = {
	{"r100_rbbm_info", r100_debugfs_rbbm_info, 0, NULL},
};
static struct drm_info_list r100_debugfs_cp_list[] = {
	{"r100_cp_ring_info", r100_debugfs_cp_ring_info, 0, NULL},
	{"r100_cp_csq_fifo", r100_debugfs_cp_csq_fifo, 0, NULL},
};
static struct drm_info_list r100_debugfs_mc_info_list[] = {
	{"r100_mc_info", r100_debugfs_mc_info, 0, NULL},
};
#endif
  1379. int r100_debugfs_rbbm_init(struct radeon_device *rdev)
  1380. {
  1381. #if defined(CONFIG_DEBUG_FS)
  1382. return radeon_debugfs_add_files(rdev, r100_debugfs_rbbm_list, 1);
  1383. #else
  1384. return 0;
  1385. #endif
  1386. }
  1387. int r100_debugfs_cp_init(struct radeon_device *rdev)
  1388. {
  1389. #if defined(CONFIG_DEBUG_FS)
  1390. return radeon_debugfs_add_files(rdev, r100_debugfs_cp_list, 2);
  1391. #else
  1392. return 0;
  1393. #endif
  1394. }
  1395. int r100_debugfs_mc_info_init(struct radeon_device *rdev)
  1396. {
  1397. #if defined(CONFIG_DEBUG_FS)
  1398. return radeon_debugfs_add_files(rdev, r100_debugfs_mc_info_list, 1);
  1399. #else
  1400. return 0;
  1401. #endif
  1402. }