i830_dma.c 40 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492149314941495149614971498149915001501150215031504150515061507150815091510151115121513151415151516151715181519152015211522152315241525152615271528152915301531153215331534153515361537153815391540154115421543154415451546154715481549155015511552155315541555155615571558155915601561156215631564156515661567156815691570157115721573157415751576157715781579158015811582158315841585158615871588
  1. /* i830_dma.c -- DMA support for the I830 -*- linux-c -*-
  2. * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
  3. *
  4. * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
  5. * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
  6. * All Rights Reserved.
  7. *
  8. * Permission is hereby granted, free of charge, to any person obtaining a
  9. * copy of this software and associated documentation files (the "Software"),
  10. * to deal in the Software without restriction, including without limitation
  11. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  12. * and/or sell copies of the Software, and to permit persons to whom the
  13. * Software is furnished to do so, subject to the following conditions:
  14. *
  15. * The above copyright notice and this permission notice (including the next
  16. * paragraph) shall be included in all copies or substantial portions of the
  17. * Software.
  18. *
  19. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  20. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  21. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  22. * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
  23. * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  24. * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  25. * DEALINGS IN THE SOFTWARE.
  26. *
  27. * Authors: Rickard E. (Rik) Faith <faith@valinux.com>
  28. * Jeff Hartmann <jhartmann@valinux.com>
  29. * Keith Whitwell <keith@tungstengraphics.com>
  30. * Abraham vd Merwe <abraham@2d3d.co.za>
  31. *
  32. */
  33. #include "drmP.h"
  34. #include "drm.h"
  35. #include "i830_drm.h"
  36. #include "i830_drv.h"
  37. #include <linux/interrupt.h> /* For task queue support */
  38. #include <linux/pagemap.h> /* For FASTCALL on unlock_page() */
  39. #include <linux/delay.h>
  40. #include <asm/uaccess.h>
  41. #define I830_BUF_FREE 2
  42. #define I830_BUF_CLIENT 1
  43. #define I830_BUF_HARDWARE 0
  44. #define I830_BUF_UNMAPPED 0
  45. #define I830_BUF_MAPPED 1
  46. #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,4,2)
  47. #define down_write down
  48. #define up_write up
  49. #endif
  50. static drm_buf_t *i830_freelist_get(drm_device_t *dev)
  51. {
  52. drm_device_dma_t *dma = dev->dma;
  53. int i;
  54. int used;
  55. /* Linear search might not be the best solution */
  56. for (i = 0; i < dma->buf_count; i++) {
  57. drm_buf_t *buf = dma->buflist[ i ];
  58. drm_i830_buf_priv_t *buf_priv = buf->dev_private;
  59. /* In use is already a pointer */
  60. used = cmpxchg(buf_priv->in_use, I830_BUF_FREE,
  61. I830_BUF_CLIENT);
  62. if(used == I830_BUF_FREE) {
  63. return buf;
  64. }
  65. }
  66. return NULL;
  67. }
  68. /* This should only be called if the buffer is not sent to the hardware
  69. * yet, the hardware updates in use for us once its on the ring buffer.
  70. */
  71. static int i830_freelist_put(drm_device_t *dev, drm_buf_t *buf)
  72. {
  73. drm_i830_buf_priv_t *buf_priv = buf->dev_private;
  74. int used;
  75. /* In use is already a pointer */
  76. used = cmpxchg(buf_priv->in_use, I830_BUF_CLIENT, I830_BUF_FREE);
  77. if(used != I830_BUF_CLIENT) {
  78. DRM_ERROR("Freeing buffer thats not in use : %d\n", buf->idx);
  79. return -EINVAL;
  80. }
  81. return 0;
  82. }
/* File operations swapped onto the client's file by i830_map_buffer()
 * for the duration of its do_mmap() call: identical to the standard DRM
 * fops except that .mmap is redirected to i830_mmap_buffers so the DMA
 * buffer itself gets mapped. */
static struct file_operations i830_buffer_fops = {
	.open	 = drm_open,
	.flush	 = drm_flush,
	.release = drm_release,
	.ioctl	 = drm_ioctl,
	.mmap	 = i830_mmap_buffers,
	.fasync	 = drm_fasync,
};
/* mmap handler active only while i830_map_buffer() has installed
 * i830_buffer_fops: maps the buffer the caller stashed in
 * dev_priv->mmap_buffer into the client's address space.
 *
 * Returns 0 on success, -EAGAIN if io_remap_pfn_range() fails.
 */
int i830_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev;
	drm_i830_private_t *dev_priv;
	drm_buf_t *buf;
	drm_i830_buf_priv_t *buf_priv;

	/* BKL serializes the pointer fetches and the mapped-flag update. */
	lock_kernel();
	dev = priv->head->dev;
	dev_priv = dev->dev_private;
	/* NOTE(review): mmap_buffer is assumed non-NULL -- only
	 * i830_map_buffer() installs these fops; confirm no other path
	 * can invoke this handler with it unset. */
	buf = dev_priv->mmap_buffer;
	buf_priv = buf->dev_private;

	/* Device memory: never touched by the VM, never copied on fork. */
	vma->vm_flags |= (VM_IO | VM_DONTCOPY);
	vma->vm_file = filp;

	buf_priv->currently_mapped = I830_BUF_MAPPED;
	unlock_kernel();

	if (io_remap_pfn_range(vma, vma->vm_start,
			       VM_OFFSET(vma) >> PAGE_SHIFT,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot))
		return -EAGAIN;
	return 0;
}
  113. static int i830_map_buffer(drm_buf_t *buf, struct file *filp)
  114. {
  115. drm_file_t *priv = filp->private_data;
  116. drm_device_t *dev = priv->head->dev;
  117. drm_i830_buf_priv_t *buf_priv = buf->dev_private;
  118. drm_i830_private_t *dev_priv = dev->dev_private;
  119. struct file_operations *old_fops;
  120. unsigned long virtual;
  121. int retcode = 0;
  122. if(buf_priv->currently_mapped == I830_BUF_MAPPED) return -EINVAL;
  123. down_write( &current->mm->mmap_sem );
  124. old_fops = filp->f_op;
  125. filp->f_op = &i830_buffer_fops;
  126. dev_priv->mmap_buffer = buf;
  127. virtual = do_mmap(filp, 0, buf->total, PROT_READ|PROT_WRITE,
  128. MAP_SHARED, buf->bus_address);
  129. dev_priv->mmap_buffer = NULL;
  130. filp->f_op = old_fops;
  131. if (IS_ERR((void *)virtual)) { /* ugh */
  132. /* Real error */
  133. DRM_ERROR("mmap error\n");
  134. retcode = virtual;
  135. buf_priv->virtual = NULL;
  136. } else {
  137. buf_priv->virtual = (void __user *)virtual;
  138. }
  139. up_write( &current->mm->mmap_sem );
  140. return retcode;
  141. }
  142. static int i830_unmap_buffer(drm_buf_t *buf)
  143. {
  144. drm_i830_buf_priv_t *buf_priv = buf->dev_private;
  145. int retcode = 0;
  146. if(buf_priv->currently_mapped != I830_BUF_MAPPED)
  147. return -EINVAL;
  148. down_write(&current->mm->mmap_sem);
  149. retcode = do_munmap(current->mm,
  150. (unsigned long)buf_priv->virtual,
  151. (size_t) buf->total);
  152. up_write(&current->mm->mmap_sem);
  153. buf_priv->currently_mapped = I830_BUF_UNMAPPED;
  154. buf_priv->virtual = NULL;
  155. return retcode;
  156. }
  157. static int i830_dma_get_buffer(drm_device_t *dev, drm_i830_dma_t *d,
  158. struct file *filp)
  159. {
  160. drm_buf_t *buf;
  161. drm_i830_buf_priv_t *buf_priv;
  162. int retcode = 0;
  163. buf = i830_freelist_get(dev);
  164. if (!buf) {
  165. retcode = -ENOMEM;
  166. DRM_DEBUG("retcode=%d\n", retcode);
  167. return retcode;
  168. }
  169. retcode = i830_map_buffer(buf, filp);
  170. if(retcode) {
  171. i830_freelist_put(dev, buf);
  172. DRM_ERROR("mapbuf failed, retcode %d\n", retcode);
  173. return retcode;
  174. }
  175. buf->filp = filp;
  176. buf_priv = buf->dev_private;
  177. d->granted = 1;
  178. d->request_idx = buf->idx;
  179. d->request_size = buf->total;
  180. d->virtual = buf_priv->virtual;
  181. return retcode;
  182. }
/* Tear down everything i830_dma_initialize() set up: disable the IRQ,
 * release the ring mapping and hardware status page, free dev_private,
 * then drop each buffer's kernel mapping.  A no-op once dev->dev_private
 * is NULL, so the init error paths may call it at any stage.
 */
static int i830_dma_cleanup(drm_device_t *dev)
{
	/* NOTE(review): dev->dma is dereferenced below without a NULL
	 * check -- presumably the DRM core guarantees DMA setup before
	 * any path reaches here; confirm against the core. */
	drm_device_dma_t *dma = dev->dma;

	/* Make sure interrupts are disabled here because the uninstall ioctl
	 * may not have been called from userspace and after dev_private
	 * is freed, it's too late.
	 */
	if (dev->irq_enabled)
		drm_irq_uninstall(dev);

	if (dev->dev_private) {
		int i;
		drm_i830_private_t *dev_priv =
			(drm_i830_private_t *) dev->dev_private;

		if (dev_priv->ring.virtual_start) {
			drm_ioremapfree((void *) dev_priv->ring.virtual_start,
					dev_priv->ring.Size, dev);
		}
		if (dev_priv->hw_status_page) {
			pci_free_consistent(dev->pdev, PAGE_SIZE,
					    dev_priv->hw_status_page,
					    dev_priv->dma_status_page);
			/* Need to rewrite hardware status page */
			I830_WRITE(0x02080, 0x1ffff000);
		}
		/* dev_priv must not be touched after this free; the buffer
		 * loop below only uses per-buffer private data. */
		drm_free(dev->dev_private, sizeof(drm_i830_private_t),
			 DRM_MEM_DRIVER);
		dev->dev_private = NULL;

		for (i = 0; i < dma->buf_count; i++) {
			drm_buf_t *buf = dma->buflist[i];
			drm_i830_buf_priv_t *buf_priv = buf->dev_private;

			if (buf_priv->kernel_virtual && buf->total)
				drm_ioremapfree(buf_priv->kernel_virtual,
						buf->total, dev);
		}
	}
	return 0;
}
/* Spin until at least n bytes of space are free on the low-priority
 * ring.  The 3-second timeout is re-armed whenever the hardware head
 * pointer moves, so only a genuinely hung engine trips the lockup path.
 *
 * Returns the number of polling iterations performed (also returned on
 * lockup, after logging the errors).
 */
int i830_wait_ring(drm_device_t *dev, int n, const char *caller)
{
	drm_i830_private_t *dev_priv = dev->dev_private;
	drm_i830_ring_buffer_t *ring = &(dev_priv->ring);
	int iters = 0;
	unsigned long end;
	unsigned int last_head = I830_READ(LP_RING + RING_HEAD) & HEAD_ADDR;

	end = jiffies + (HZ*3);
	while (ring->space < n) {
		ring->head = I830_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
		/* Free space is head minus tail, less 8 guard bytes so the
		 * tail never catches the head; wrap negative results. */
		ring->space = ring->head - (ring->tail+8);
		if (ring->space < 0)
			ring->space += ring->Size;

		if (ring->head != last_head) {
			/* Head advanced: engine is alive, restart timeout. */
			end = jiffies + (HZ*3);
			last_head = ring->head;
		}

		iters++;
		if (time_before(end, jiffies)) {
			DRM_ERROR("space: %d wanted %d\n", ring->space, n);
			DRM_ERROR("lockup\n");
			goto out_wait_ring;
		}
		udelay(1);
		/* Record the stall for the diagnostic performance boxes. */
		dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT;
	}

out_wait_ring:
	return iters;
}
/* Resynchronize the software ring state with the hardware head/tail
 * registers (e.g. after another context may have used the ring), and
 * flag an empty ring in the performance-box statistics. */
static void i830_kernel_lost_context(drm_device_t *dev)
{
	drm_i830_private_t *dev_priv = dev->dev_private;
	drm_i830_ring_buffer_t *ring = &(dev_priv->ring);

	ring->head = I830_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
	ring->tail = I830_READ(LP_RING + RING_TAIL) & TAIL_ADDR;
	/* Same free-space formula as i830_wait_ring: 8 guard bytes. */
	ring->space = ring->head - (ring->tail+8);
	if (ring->space < 0)
		ring->space += ring->Size;

	if (ring->head == ring->tail)
		dev_priv->sarea_priv->perf_boxes |= I830_BOX_RING_EMPTY;
}
  257. static int i830_freelist_init(drm_device_t *dev, drm_i830_private_t *dev_priv)
  258. {
  259. drm_device_dma_t *dma = dev->dma;
  260. int my_idx = 36;
  261. u32 *hw_status = (u32 *)(dev_priv->hw_status_page + my_idx);
  262. int i;
  263. if(dma->buf_count > 1019) {
  264. /* Not enough space in the status page for the freelist */
  265. return -EINVAL;
  266. }
  267. for (i = 0; i < dma->buf_count; i++) {
  268. drm_buf_t *buf = dma->buflist[ i ];
  269. drm_i830_buf_priv_t *buf_priv = buf->dev_private;
  270. buf_priv->in_use = hw_status++;
  271. buf_priv->my_use_idx = my_idx;
  272. my_idx += 4;
  273. *buf_priv->in_use = I830_BUF_FREE;
  274. buf_priv->kernel_virtual = drm_ioremap(buf->bus_address,
  275. buf->total, dev);
  276. }
  277. return 0;
  278. }
/* One-time DMA setup for the I830_INIT_DMA ioctl: locate the SAREA,
 * MMIO and DMA-buffer maps, ioremap the ring, allocate and install the
 * hardware status page, and build the buffer freelist.  On every error
 * path dev->dev_private is set *before* calling i830_dma_cleanup() so
 * the cleanup can release whatever was already acquired.
 *
 * Returns 0 on success, -EINVAL when a required map is missing,
 * -ENOMEM on allocation/ioremap/freelist failure.
 */
static int i830_dma_initialize(drm_device_t *dev,
			       drm_i830_private_t *dev_priv,
			       drm_i830_init_t *init)
{
	struct list_head *list;

	memset(dev_priv, 0, sizeof(drm_i830_private_t));

	/* The SAREA is the SHM map flagged as containing the hardware lock. */
	list_for_each(list, &dev->maplist->head) {
		drm_map_list_t *r_list = list_entry(list, drm_map_list_t, head);
		if (r_list->map &&
		    r_list->map->type == _DRM_SHM &&
		    r_list->map->flags & _DRM_CONTAINS_LOCK) {
			dev_priv->sarea_map = r_list->map;
			break;
		}
	}
	if (!dev_priv->sarea_map) {
		dev->dev_private = (void *)dev_priv;
		i830_dma_cleanup(dev);
		DRM_ERROR("can not find sarea!\n");
		return -EINVAL;
	}
	dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset);
	if (!dev_priv->mmio_map) {
		dev->dev_private = (void *)dev_priv;
		i830_dma_cleanup(dev);
		DRM_ERROR("can not find mmio map!\n");
		return -EINVAL;
	}
	dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
	if (!dev->agp_buffer_map) {
		dev->dev_private = (void *)dev_priv;
		i830_dma_cleanup(dev);
		DRM_ERROR("can not find dma buffer map!\n");
		return -EINVAL;
	}

	/* Driver-private SAREA region lives at a client-chosen offset
	 * inside the SAREA mapping. */
	dev_priv->sarea_priv = (drm_i830_sarea_t *)
		((u8 *)dev_priv->sarea_map->handle +
		 init->sarea_priv_offset);

	dev_priv->ring.Start = init->ring_start;
	dev_priv->ring.End = init->ring_end;
	dev_priv->ring.Size = init->ring_size;

	dev_priv->ring.virtual_start = drm_ioremap(dev->agp->base +
						   init->ring_start,
						   init->ring_size, dev);

	if (dev_priv->ring.virtual_start == NULL) {
		dev->dev_private = (void *) dev_priv;
		i830_dma_cleanup(dev);
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return -ENOMEM;
	}

	/* NOTE(review): assumes ring_size is a power of two -- this mask
	 * is only correct in that case; confirm the ioctl validates it. */
	dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;

	dev_priv->w = init->w;
	dev_priv->h = init->h;
	dev_priv->pitch = init->pitch;
	dev_priv->back_offset = init->back_offset;
	dev_priv->depth_offset = init->depth_offset;
	dev_priv->front_offset = init->front_offset;

	/* di1 values: buffer offset with the pitch/format bits or'ed in;
	 * used later to validate client-supplied dest-buffer addresses. */
	dev_priv->front_di1 = init->front_offset | init->pitch_bits;
	dev_priv->back_di1 = init->back_offset | init->pitch_bits;
	dev_priv->zi1 = init->depth_offset | init->pitch_bits;

	DRM_DEBUG("front_di1 %x\n", dev_priv->front_di1);
	DRM_DEBUG("back_offset %x\n", dev_priv->back_offset);
	DRM_DEBUG("back_di1 %x\n", dev_priv->back_di1);
	DRM_DEBUG("pitch_bits %x\n", init->pitch_bits);

	dev_priv->cpp = init->cpp;
	/* We are using separate values as placeholders for mechanisms for
	 * private backbuffer/depthbuffer usage.
	 */
	dev_priv->back_pitch = init->back_pitch;
	dev_priv->depth_pitch = init->depth_pitch;
	dev_priv->do_boxes = 0;
	dev_priv->use_mi_batchbuffer_start = 0;

	/* Program Hardware Status Page */
	dev_priv->hw_status_page =
		pci_alloc_consistent(dev->pdev, PAGE_SIZE,
				     &dev_priv->dma_status_page);
	if (!dev_priv->hw_status_page) {
		dev->dev_private = (void *)dev_priv;
		i830_dma_cleanup(dev);
		DRM_ERROR("Can not allocate hardware status page\n");
		return -ENOMEM;
	}
	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
	DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);

	/* Tell the chip where the status page lives (register 0x02080;
	 * i830_dma_cleanup() resets this same register on teardown). */
	I830_WRITE(0x02080, dev_priv->dma_status_page);
	DRM_DEBUG("Enabled hardware status page\n");

	/* Now we need to init our freelist */
	if (i830_freelist_init(dev, dev_priv) != 0) {
		dev->dev_private = (void *)dev_priv;
		i830_dma_cleanup(dev);
		DRM_ERROR("Not enough space in the status page for"
			  " the freelist\n");
		return -ENOMEM;
	}
	dev->dev_private = (void *)dev_priv;

	return 0;
}
  377. static int i830_dma_init(struct inode *inode, struct file *filp,
  378. unsigned int cmd, unsigned long arg)
  379. {
  380. drm_file_t *priv = filp->private_data;
  381. drm_device_t *dev = priv->head->dev;
  382. drm_i830_private_t *dev_priv;
  383. drm_i830_init_t init;
  384. int retcode = 0;
  385. if (copy_from_user(&init, (void * __user) arg, sizeof(init)))
  386. return -EFAULT;
  387. switch(init.func) {
  388. case I830_INIT_DMA:
  389. dev_priv = drm_alloc(sizeof(drm_i830_private_t),
  390. DRM_MEM_DRIVER);
  391. if(dev_priv == NULL) return -ENOMEM;
  392. retcode = i830_dma_initialize(dev, dev_priv, &init);
  393. break;
  394. case I830_CLEANUP_DMA:
  395. retcode = i830_dma_cleanup(dev);
  396. break;
  397. default:
  398. retcode = -EINVAL;
  399. break;
  400. }
  401. return retcode;
  402. }
  403. #define GFX_OP_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16))
  404. #define ST1_ENABLE (1<<16)
  405. #define ST1_MASK (0xffff)
  406. /* Most efficient way to verify state for the i830 is as it is
  407. * emitted. Non-conformant state is silently dropped.
  408. */
/* Copy the client-supplied context register block onto the ring,
 * emitting only dwords that look like short-format 3D state commands
 * (CMD_3D with an opcode field below 0x1d); anything else is dropped
 * with an error.  The const-blend-color and map-coord-setbind commands
 * are re-emitted with a trusted header.  Pads with a nop dword so the
 * total emitted count stays even (qword aligned).
 */
static void i830EmitContextVerified( drm_device_t *dev,
				     unsigned int *code )
{
	drm_i830_private_t *dev_priv = dev->dev_private;
	int i, j = 0;		/* j counts dwords actually emitted */
	unsigned int tmp;
	RING_LOCALS;

	BEGIN_LP_RING( I830_CTX_SETUP_SIZE + 4 );

	for ( i = 0 ; i < I830_CTXREG_BLENDCOLR0 ; i++ ) {
		tmp = code[i];
		/* Accept only CMD_3D packets with opcode < 0x1d. */
		if ((tmp & (7<<29)) == CMD_3D &&
		    (tmp & (0x1f<<24)) < (0x1d<<24)) {
			OUT_RING( tmp );
			j++;
		} else {
			DRM_ERROR("Skipping %d\n", i);
		}
	}

	OUT_RING( STATE3D_CONST_BLEND_COLOR_CMD );
	OUT_RING( code[I830_CTXREG_BLENDCOLR] );
	j += 2;

	for ( i = I830_CTXREG_VF ; i < I830_CTXREG_MCSB0 ; i++ ) {
		tmp = code[i];
		if ((tmp & (7<<29)) == CMD_3D &&
		    (tmp & (0x1f<<24)) < (0x1d<<24)) {
			OUT_RING( tmp );
			j++;
		} else {
			DRM_ERROR("Skipping %d\n", i);
		}
	}

	OUT_RING( STATE3D_MAP_COORD_SETBIND_CMD );
	OUT_RING( code[I830_CTXREG_MCSB1] );
	j += 2;

	/* Keep the emitted dword count even. */
	if (j & 1)
		OUT_RING( 0 );

	ADVANCE_LP_RING();
}
/* Emit a texture-setup block after checking only its header dword: it
 * must be either a GFX_OP_MAP_INFO command or a
 * STATE3D_LOAD_STATE_IMMEDIATE_2 header (any LOAD_TEXTURE_MAP0 selector
 * bits allowed).  Otherwise the whole packet is rejected with a printk.
 */
static void i830EmitTexVerified( drm_device_t *dev, unsigned int *code )
{
	drm_i830_private_t *dev_priv = dev->dev_private;
	int i, j = 0;		/* j counts pass-through dwords emitted */
	unsigned int tmp;
	RING_LOCALS;

	if (code[I830_TEXREG_MI0] == GFX_OP_MAP_INFO ||
	    (code[I830_TEXREG_MI0] & ~(0xf*LOAD_TEXTURE_MAP0)) ==
	    (STATE3D_LOAD_STATE_IMMEDIATE_2|4)) {

		BEGIN_LP_RING( I830_TEX_SETUP_SIZE );

		OUT_RING( code[I830_TEXREG_MI0] );	/* TM0LI */
		OUT_RING( code[I830_TEXREG_MI1] );	/* TM0S0 */
		OUT_RING( code[I830_TEXREG_MI2] );	/* TM0S1 */
		OUT_RING( code[I830_TEXREG_MI3] );	/* TM0S2 */
		OUT_RING( code[I830_TEXREG_MI4] );	/* TM0S3 */
		OUT_RING( code[I830_TEXREG_MI5] );	/* TM0S4 */

		/* Remaining dwords are emitted without verification. */
		for ( i = 6 ; i < I830_TEX_SETUP_SIZE ; i++ ) {
			tmp = code[i];
			OUT_RING( tmp );
			j++;
		}

		/* Keep the emitted dword count even. */
		if (j & 1)
			OUT_RING( 0 );

		ADVANCE_LP_RING();
	}
	else
		printk("rejected packet %x\n", code[0]);
}
/* Emit a client-supplied texture-blend state block of `num` dwords.
 * NOTE(review): despite the name, no per-dword verification happens
 * here -- confirm whether that is intentional.  Zero-length blocks are
 * a no-op; odd-length blocks get a trailing nop for qword alignment.
 */
static void i830EmitTexBlendVerified( drm_device_t *dev,
				      unsigned int *code,
				      unsigned int num)
{
	drm_i830_private_t *dev_priv = dev->dev_private;
	int i, j = 0;		/* j counts dwords emitted */
	unsigned int tmp;
	RING_LOCALS;

	if (!num)
		return;

	BEGIN_LP_RING( num + 1 );

	for ( i = 0 ; i < num ; i++ ) {
		tmp = code[i];
		OUT_RING( tmp );
		j++;
	}

	if (j & 1)
		OUT_RING( 0 );

	ADVANCE_LP_RING();
}
/* Load a 256-entry texture palette.  Currently DISABLED: the early
 * "return" below makes this a no-op and everything after it is dead
 * code.  Note the dead code also lacks the ADVANCE_LP_RING that would
 * actually submit the commands (see the KW comment at the bottom), so
 * it must be fixed before re-enabling.
 */
static void i830EmitTexPalette( drm_device_t *dev,
				unsigned int *palette,
				int number,
				int is_shared )
{
	drm_i830_private_t *dev_priv = dev->dev_private;
	int i;
	RING_LOCALS;

	return;		/* palette uploads intentionally disabled */

	BEGIN_LP_RING( 258 );

	if (is_shared == 1) {
		/* Shared palette: load into both texture-map palettes. */
		OUT_RING(CMD_OP_MAP_PALETTE_LOAD |
			 MAP_PALETTE_NUM(0) |
			 MAP_PALETTE_BOTH);
	} else {
		OUT_RING(CMD_OP_MAP_PALETTE_LOAD | MAP_PALETTE_NUM(number));
	}
	for (i = 0; i < 256; i++) {
		OUT_RING(palette[i]);
	}
	OUT_RING(0);
	/* KW: WHERE IS THE ADVANCE_LP_RING? This is effectively a noop!
	 */
}
  519. /* Need to do some additional checking when setting the dest buffer.
  520. */
/* Emit destination-buffer state.  The color buffer address is accepted
 * only if it matches the front or back buffer di1 values recorded at
 * init time; otherwise the destbuffer commands are skipped with an
 * error (the invariant/scissor state below is still emitted).  The
 * scissor-enable dword must equal GFX_OP_SCISSOR_ENABLE apart from its
 * two low flag bits, or scissoring is replaced with a nop.
 */
static void i830EmitDestVerified( drm_device_t *dev,
				  unsigned int *code )
{
	drm_i830_private_t *dev_priv = dev->dev_private;
	unsigned int tmp;
	RING_LOCALS;

	BEGIN_LP_RING( I830_DEST_SETUP_SIZE + 10 );

	tmp = code[I830_DESTREG_CBUFADDR];
	if (tmp == dev_priv->front_di1 || tmp == dev_priv->back_di1) {
		/* Align the write pointer with two nops if needed before
		 * the destbuffer-info packets. */
		if (((int)outring) & 8) {
			OUT_RING(0);
			OUT_RING(0);
		}

		OUT_RING( CMD_OP_DESTBUFFER_INFO );
		OUT_RING( BUF_3D_ID_COLOR_BACK |
			  BUF_3D_PITCH(dev_priv->back_pitch * dev_priv->cpp) |
			  BUF_3D_USE_FENCE);
		OUT_RING( tmp );
		OUT_RING( 0 );

		OUT_RING( CMD_OP_DESTBUFFER_INFO );
		OUT_RING( BUF_3D_ID_DEPTH | BUF_3D_USE_FENCE |
			  BUF_3D_PITCH(dev_priv->depth_pitch * dev_priv->cpp));
		OUT_RING( dev_priv->zi1 );
		OUT_RING( 0 );
	} else {
		DRM_ERROR("bad di1 %x (allow %x or %x)\n",
			  tmp, dev_priv->front_di1, dev_priv->back_di1);
	}

	/* invarient:
	 */
	OUT_RING( GFX_OP_DESTBUFFER_VARS );
	OUT_RING( code[I830_DESTREG_DV1] );

	OUT_RING( GFX_OP_DRAWRECT_INFO );
	OUT_RING( code[I830_DESTREG_DR1] );
	OUT_RING( code[I830_DESTREG_DR2] );
	OUT_RING( code[I830_DESTREG_DR3] );
	OUT_RING( code[I830_DESTREG_DR4] );

	/* Need to verify this */
	tmp = code[I830_DESTREG_SENABLE];
	if ((tmp & ~0x3) == GFX_OP_SCISSOR_ENABLE) {
		OUT_RING( tmp );
	} else {
		DRM_ERROR("bad scissor enable\n");
		OUT_RING( 0 );
	}

	OUT_RING( GFX_OP_SCISSOR_RECT );
	OUT_RING( code[I830_DESTREG_SR1] );
	OUT_RING( code[I830_DESTREG_SR2] );
	OUT_RING( 0 );

	ADVANCE_LP_RING();
}
/* Emit the polygon stipple pattern.  Only the pattern dword (code[1])
 * comes from the client; the command header is supplied here, so no
 * further verification is required. */
static void i830EmitStippleVerified( drm_device_t *dev,
				     unsigned int *code )
{
	drm_i830_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;

	BEGIN_LP_RING( 2 );
	OUT_RING( GFX_OP_STIPPLE );
	OUT_RING( code[1] );
	ADVANCE_LP_RING();
}
  582. static void i830EmitState( drm_device_t *dev )
  583. {
  584. drm_i830_private_t *dev_priv = dev->dev_private;
  585. drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv;
  586. unsigned int dirty = sarea_priv->dirty;
  587. DRM_DEBUG("%s %x\n", __FUNCTION__, dirty);
  588. if (dirty & I830_UPLOAD_BUFFERS) {
  589. i830EmitDestVerified( dev, sarea_priv->BufferState );
  590. sarea_priv->dirty &= ~I830_UPLOAD_BUFFERS;
  591. }
  592. if (dirty & I830_UPLOAD_CTX) {
  593. i830EmitContextVerified( dev, sarea_priv->ContextState );
  594. sarea_priv->dirty &= ~I830_UPLOAD_CTX;
  595. }
  596. if (dirty & I830_UPLOAD_TEX0) {
  597. i830EmitTexVerified( dev, sarea_priv->TexState[0] );
  598. sarea_priv->dirty &= ~I830_UPLOAD_TEX0;
  599. }
  600. if (dirty & I830_UPLOAD_TEX1) {
  601. i830EmitTexVerified( dev, sarea_priv->TexState[1] );
  602. sarea_priv->dirty &= ~I830_UPLOAD_TEX1;
  603. }
  604. if (dirty & I830_UPLOAD_TEXBLEND0) {
  605. i830EmitTexBlendVerified( dev, sarea_priv->TexBlendState[0],
  606. sarea_priv->TexBlendStateWordsUsed[0]);
  607. sarea_priv->dirty &= ~I830_UPLOAD_TEXBLEND0;
  608. }
  609. if (dirty & I830_UPLOAD_TEXBLEND1) {
  610. i830EmitTexBlendVerified( dev, sarea_priv->TexBlendState[1],
  611. sarea_priv->TexBlendStateWordsUsed[1]);
  612. sarea_priv->dirty &= ~I830_UPLOAD_TEXBLEND1;
  613. }
  614. if (dirty & I830_UPLOAD_TEX_PALETTE_SHARED) {
  615. i830EmitTexPalette(dev, sarea_priv->Palette[0], 0, 1);
  616. } else {
  617. if (dirty & I830_UPLOAD_TEX_PALETTE_N(0)) {
  618. i830EmitTexPalette(dev, sarea_priv->Palette[0], 0, 0);
  619. sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(0);
  620. }
  621. if (dirty & I830_UPLOAD_TEX_PALETTE_N(1)) {
  622. i830EmitTexPalette(dev, sarea_priv->Palette[1], 1, 0);
  623. sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(1);
  624. }
  625. /* 1.3:
  626. */
  627. #if 0
  628. if (dirty & I830_UPLOAD_TEX_PALETTE_N(2)) {
  629. i830EmitTexPalette(dev, sarea_priv->Palette2[0], 0, 0);
  630. sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(2);
  631. }
  632. if (dirty & I830_UPLOAD_TEX_PALETTE_N(3)) {
  633. i830EmitTexPalette(dev, sarea_priv->Palette2[1], 1, 0);
  634. sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(2);
  635. }
  636. #endif
  637. }
  638. /* 1.3:
  639. */
  640. if (dirty & I830_UPLOAD_STIPPLE) {
  641. i830EmitStippleVerified( dev,
  642. sarea_priv->StippleState);
  643. sarea_priv->dirty &= ~I830_UPLOAD_STIPPLE;
  644. }
  645. if (dirty & I830_UPLOAD_TEX2) {
  646. i830EmitTexVerified( dev, sarea_priv->TexState2 );
  647. sarea_priv->dirty &= ~I830_UPLOAD_TEX2;
  648. }
  649. if (dirty & I830_UPLOAD_TEX3) {
  650. i830EmitTexVerified( dev, sarea_priv->TexState3 );
  651. sarea_priv->dirty &= ~I830_UPLOAD_TEX3;
  652. }
  653. if (dirty & I830_UPLOAD_TEXBLEND2) {
  654. i830EmitTexBlendVerified(
  655. dev,
  656. sarea_priv->TexBlendState2,
  657. sarea_priv->TexBlendStateWordsUsed2);
  658. sarea_priv->dirty &= ~I830_UPLOAD_TEXBLEND2;
  659. }
  660. if (dirty & I830_UPLOAD_TEXBLEND3) {
  661. i830EmitTexBlendVerified(
  662. dev,
  663. sarea_priv->TexBlendState3,
  664. sarea_priv->TexBlendStateWordsUsed3);
  665. sarea_priv->dirty &= ~I830_UPLOAD_TEXBLEND3;
  666. }
  667. }
  668. /* ================================================================
  669. * Performance monitoring functions
  670. */
/* Blit a solid w x h rectangle of color (r,g,b) at (x,y), offset by the
 * first cliprect, into whichever buffer is currently being rendered to
 * (back buffer normally; front buffer when page-flipped).  Used only
 * for the diagnostic performance boxes.
 */
static void i830_fill_box( drm_device_t *dev,
			   int x, int y, int w, int h,
			   int r, int g, int b )
{
	drm_i830_private_t *dev_priv = dev->dev_private;
	u32 color;
	unsigned int BR13, CMD;
	RING_LOCALS;

	/* BR13: ROP 0xF0 (pattern copy) plus destination pitch in bytes. */
	BR13 = (0xF0 << 16) | (dev_priv->pitch * dev_priv->cpp) | (1<<24);
	CMD = XY_COLOR_BLT_CMD;
	x += dev_priv->sarea_priv->boxes[0].x1;
	y += dev_priv->sarea_priv->boxes[0].y1;

	if (dev_priv->cpp == 4) {
		/* 32bpp: 32-bit blit, write both alpha and RGB channels. */
		BR13 |= (1<<25);
		CMD |= (XY_COLOR_BLT_WRITE_ALPHA | XY_COLOR_BLT_WRITE_RGB);
		color = (((0xff) << 24) | (r << 16) | (g << 8) | b);
	} else {
		/* 16bpp: pack the color as RGB565. */
		color = (((r & 0xf8) << 8) |
			 ((g & 0xfc) << 3) |
			 ((b & 0xf8) >> 3));
	}

	BEGIN_LP_RING( 6 );

	OUT_RING( CMD );
	OUT_RING( BR13 );
	OUT_RING( (y << 16) | x );
	OUT_RING( ((y+h) << 16) | (x+w) );

	/* Draw into the buffer the user currently sees as "back". */
	if ( dev_priv->current_page == 1 ) {
		OUT_RING( dev_priv->front_offset );
	} else {
		OUT_RING( dev_priv->back_offset );
	}

	OUT_RING( color );
	ADVANCE_LP_RING();
}
/* Draw coloured diagnostic boxes into the currently visible buffer to
 * show which performance-related events occurred since the last swap,
 * then clear the event mask and the DMA-usage accumulator.
 */
static void i830_cp_performance_boxes( drm_device_t *dev )
{
	drm_i830_private_t *dev_priv = dev->dev_private;

	/* Purple box for page flipping
	 */
	if ( dev_priv->sarea_priv->perf_boxes & I830_BOX_FLIP )
		i830_fill_box( dev, 4, 4, 8, 8, 255, 0, 255 );

	/* Red box if we have to wait for idle at any point
	 */
	if ( dev_priv->sarea_priv->perf_boxes & I830_BOX_WAIT )
		i830_fill_box( dev, 16, 4, 8, 8, 255, 0, 0 );

	/* Blue box: lost context?
	 */
	if ( dev_priv->sarea_priv->perf_boxes & I830_BOX_LOST_CONTEXT )
		i830_fill_box( dev, 28, 4, 8, 8, 0, 0, 255 );

	/* Yellow box for texture swaps
	 */
	if ( dev_priv->sarea_priv->perf_boxes & I830_BOX_TEXTURE_LOAD )
		i830_fill_box( dev, 40, 4, 8, 8, 255, 255, 0 );

	/* Green box if hardware never idles (as far as we can tell)
	 */
	if ( !(dev_priv->sarea_priv->perf_boxes & I830_BOX_RING_EMPTY) )
		i830_fill_box( dev, 64, 4, 8, 8, 0, 255, 0 );

	/* Draw bars indicating number of buffers allocated
	 * (not a great measure, easily confused)
	 */
	if (dev_priv->dma_used) {
		/* One pixel of bar width per 10 KB used, clamped to 1..100. */
		int bar = dev_priv->dma_used / 10240;
		if (bar > 100) bar = 100;
		if (bar < 1) bar = 1;
		i830_fill_box( dev, 4, 16, bar, 4, 196, 128, 128 );
		dev_priv->dma_used = 0;
	}

	dev_priv->sarea_priv->perf_boxes = 0;
}
/* Clear the requested buffers within every sarea cliprect using 2D
 * colour-fill blits.
 *
 * flags           - mask of I830_FRONT / I830_BACK / I830_DEPTH
 * clear_color     - fill value for the colour buffers
 * clear_zval      - fill value for the depth buffer
 * clear_depthmask - per-channel write mask honoured on depth clears
 */
static void i830_dma_dispatch_clear( drm_device_t *dev, int flags,
				     unsigned int clear_color,
				     unsigned int clear_zval,
				     unsigned int clear_depthmask)
{
	drm_i830_private_t *dev_priv = dev->dev_private;
	drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv;
	int nbox = sarea_priv->nbox;
	drm_clip_rect_t *pbox = sarea_priv->boxes;
	int pitch = dev_priv->pitch;
	int cpp = dev_priv->cpp;
	int i;
	unsigned int BR13, CMD, D_CMD;
	RING_LOCALS;

	/* When page-flipped, "front" and "back" have swapped roles. */
	if ( dev_priv->current_page == 1 ) {
		unsigned int tmp = flags;

		flags &= ~(I830_FRONT | I830_BACK);
		if ( tmp & I830_FRONT ) flags |= I830_BACK;
		if ( tmp & I830_BACK ) flags |= I830_FRONT;
	}

	i830_kernel_lost_context(dev);

	/* Build the blit command and BR13 control dword (raster op 0xF0,
	 * pitch in bytes; bits 24/25 encode colour depth) per pixel size.
	 */
	switch(cpp) {
	case 2:
		BR13 = (0xF0 << 16) | (pitch * cpp) | (1<<24);
		D_CMD = CMD = XY_COLOR_BLT_CMD;
		break;
	case 4:
		BR13 = (0xF0 << 16) | (pitch * cpp) | (1<<24) | (1<<25);
		CMD = (XY_COLOR_BLT_CMD | XY_COLOR_BLT_WRITE_ALPHA |
		       XY_COLOR_BLT_WRITE_RGB);
		D_CMD = XY_COLOR_BLT_CMD;
		/* Enable only the channels the depth mask allows. */
		if(clear_depthmask & 0x00ffffff)
			D_CMD |= XY_COLOR_BLT_WRITE_RGB;
		if(clear_depthmask & 0xff000000)
			D_CMD |= XY_COLOR_BLT_WRITE_ALPHA;
		break;
	default:
		BR13 = (0xF0 << 16) | (pitch * cpp) | (1<<24);
		D_CMD = CMD = XY_COLOR_BLT_CMD;
		break;
	}

	if (nbox > I830_NR_SAREA_CLIPRECTS)
		nbox = I830_NR_SAREA_CLIPRECTS;

	for (i = 0 ; i < nbox ; i++, pbox++) {
		/* Skip malformed or out-of-bounds cliprects; the sarea is
		 * shared with user space and cannot be trusted.
		 */
		if (pbox->x1 > pbox->x2 ||
		    pbox->y1 > pbox->y2 ||
		    pbox->x2 > dev_priv->w ||
		    pbox->y2 > dev_priv->h)
			continue;

		if ( flags & I830_FRONT ) {
			DRM_DEBUG("clear front\n");
			BEGIN_LP_RING( 6 );
			OUT_RING( CMD );
			OUT_RING( BR13 );
			OUT_RING( (pbox->y1 << 16) | pbox->x1 );
			OUT_RING( (pbox->y2 << 16) | pbox->x2 );
			OUT_RING( dev_priv->front_offset );
			OUT_RING( clear_color );
			ADVANCE_LP_RING();
		}

		if ( flags & I830_BACK ) {
			DRM_DEBUG("clear back\n");
			BEGIN_LP_RING( 6 );
			OUT_RING( CMD );
			OUT_RING( BR13 );
			OUT_RING( (pbox->y1 << 16) | pbox->x1 );
			OUT_RING( (pbox->y2 << 16) | pbox->x2 );
			OUT_RING( dev_priv->back_offset );
			OUT_RING( clear_color );
			ADVANCE_LP_RING();
		}

		if ( flags & I830_DEPTH ) {
			DRM_DEBUG("clear depth\n");
			BEGIN_LP_RING( 6 );
			OUT_RING( D_CMD );
			OUT_RING( BR13 );
			OUT_RING( (pbox->y1 << 16) | pbox->x1 );
			OUT_RING( (pbox->y2 << 16) | pbox->x2 );
			OUT_RING( dev_priv->depth_offset );
			OUT_RING( clear_zval );
			ADVANCE_LP_RING();
		}
	}
}
/* Copy each cliprect from the back buffer to the front buffer (roles
 * reversed while page-flipped) using 2D source-copy blits.
 */
static void i830_dma_dispatch_swap( drm_device_t *dev )
{
	drm_i830_private_t *dev_priv = dev->dev_private;
	drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv;
	int nbox = sarea_priv->nbox;
	drm_clip_rect_t *pbox = sarea_priv->boxes;
	int pitch = dev_priv->pitch;
	int cpp = dev_priv->cpp;
	int i;
	unsigned int CMD, BR13;
	RING_LOCALS;

	DRM_DEBUG("swapbuffers\n");

	i830_kernel_lost_context(dev);

	/* Overlay the diagnostic boxes before the swap makes them visible. */
	if (dev_priv->do_boxes)
		i830_cp_performance_boxes( dev );

	/* BR13: destination pitch in bytes plus the 0xCC (source copy)
	 * raster op; bits 24/25 encode the colour depth.
	 */
	switch(cpp) {
	case 2:
		BR13 = (pitch * cpp) | (0xCC << 16) | (1<<24);
		CMD = XY_SRC_COPY_BLT_CMD;
		break;
	case 4:
		BR13 = (pitch * cpp) | (0xCC << 16) | (1<<24) | (1<<25);
		CMD = (XY_SRC_COPY_BLT_CMD | XY_SRC_COPY_BLT_WRITE_ALPHA |
		       XY_SRC_COPY_BLT_WRITE_RGB);
		break;
	default:
		BR13 = (pitch * cpp) | (0xCC << 16) | (1<<24);
		CMD = XY_SRC_COPY_BLT_CMD;
		break;
	}

	if (nbox > I830_NR_SAREA_CLIPRECTS)
		nbox = I830_NR_SAREA_CLIPRECTS;

	for (i = 0 ; i < nbox; i++, pbox++)
	{
		/* Reject malformed or out-of-bounds cliprects from the
		 * (user-writable) sarea.
		 */
		if (pbox->x1 > pbox->x2 ||
		    pbox->y1 > pbox->y2 ||
		    pbox->x2 > dev_priv->w ||
		    pbox->y2 > dev_priv->h)
			continue;

		DRM_DEBUG("dispatch swap %d,%d-%d,%d!\n",
			  pbox->x1, pbox->y1,
			  pbox->x2, pbox->y2);

		BEGIN_LP_RING( 8 );
		OUT_RING( CMD );
		OUT_RING( BR13 );
		OUT_RING( (pbox->y1 << 16) | pbox->x1 );
		OUT_RING( (pbox->y2 << 16) | pbox->x2 );

		/* Destination: whichever buffer is currently displayed. */
		if (dev_priv->current_page == 0)
			OUT_RING( dev_priv->front_offset );
		else
			OUT_RING( dev_priv->back_offset );

		OUT_RING( (pbox->y1 << 16) | pbox->x1 );
		OUT_RING( BR13 & 0xffff );	/* source pitch (low word) */

		/* Source: the other buffer. */
		if (dev_priv->current_page == 0)
			OUT_RING( dev_priv->back_offset );
		else
			OUT_RING( dev_priv->front_offset );

		ADVANCE_LP_RING();
	}
}
/* Queue an asynchronous page flip to the other buffer, wait for the
 * display plane to pick it up, and update the current-page bookkeeping
 * both privately and in the shared sarea.
 */
static void i830_dma_dispatch_flip( drm_device_t *dev )
{
	drm_i830_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;

	DRM_DEBUG( "%s: page=%d pfCurrentPage=%d\n",
		   __FUNCTION__,
		   dev_priv->current_page,
		   dev_priv->sarea_priv->pf_current_page);

	i830_kernel_lost_context(dev);

	if (dev_priv->do_boxes) {
		dev_priv->sarea_priv->perf_boxes |= I830_BOX_FLIP;
		i830_cp_performance_boxes( dev );
	}

	/* Flush rendering and the map cache before changing the scanout. */
	BEGIN_LP_RING( 2 );
	OUT_RING( INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE );
	OUT_RING( 0 );
	ADVANCE_LP_RING();

	/* Point the display at the other buffer and toggle current_page.
	 * NOTE(review): 6 dwords are reserved but only 4 are emitted --
	 * confirm against the original ring-macro semantics.
	 */
	BEGIN_LP_RING( 6 );
	OUT_RING( CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP );
	OUT_RING( 0 );
	if ( dev_priv->current_page == 0 ) {
		OUT_RING( dev_priv->back_offset );
		dev_priv->current_page = 1;
	} else {
		OUT_RING( dev_priv->front_offset );
		dev_priv->current_page = 0;
	}
	OUT_RING(0);
	ADVANCE_LP_RING();

	/* Stall until the flip has actually happened on plane A. */
	BEGIN_LP_RING( 2 );
	OUT_RING( MI_WAIT_FOR_EVENT |
		  MI_WAIT_FOR_PLANE_A_FLIP );
	OUT_RING( 0 );
	ADVANCE_LP_RING();

	/* Tell user space which page is now being scanned out. */
	dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
}
/* Dispatch a client-submitted vertex buffer to the hardware, once per
 * cliprect (or once when there are none), then optionally fence the
 * buffer so it returns to the free pool when consumed.
 *
 * buf     - DMA buffer holding vertex data at buf->bus_address
 * discard - nonzero when the client is done with the buffer; ownership
 *           passes to the hardware
 * used    - number of bytes of vertex data in the buffer
 */
static void i830_dma_dispatch_vertex(drm_device_t *dev,
				     drm_buf_t *buf,
				     int discard,
				     int used)
{
	drm_i830_private_t *dev_priv = dev->dev_private;
	drm_i830_buf_priv_t *buf_priv = buf->dev_private;
	drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_clip_rect_t *box = sarea_priv->boxes;
	int nbox = sarea_priv->nbox;
	unsigned long address = (unsigned long)buf->bus_address;
	unsigned long start = address - dev->agp->base;	/* AGP offset */
	int i = 0, u;
	RING_LOCALS;

	i830_kernel_lost_context(dev);

	if (nbox > I830_NR_SAREA_CLIPRECTS)
		nbox = I830_NR_SAREA_CLIPRECTS;

	if (discard) {
		/* Take the buffer from the client; it should currently be
		 * CLIENT-owned, so anything else is logged.
		 */
		u = cmpxchg(buf_priv->in_use, I830_BUF_CLIENT,
			    I830_BUF_HARDWARE);
		if(u != I830_BUF_CLIENT) {
			DRM_DEBUG("xxxx 2\n");
		}
	}

	/* NOTE(review): lengths above 4092 bytes are silently treated as
	 * empty -- presumably the vertex buffer size limit; confirm.
	 */
	if (used > 4*1023)
		used = 0;

	if (sarea_priv->dirty)
		i830EmitState( dev );

	DRM_DEBUG("dispatch vertex addr 0x%lx, used 0x%x nbox %d\n",
		  address, used, nbox);

	dev_priv->counter++;
	DRM_DEBUG( "dispatch counter : %ld\n", dev_priv->counter);
	DRM_DEBUG( "i830_dma_dispatch\n");
	DRM_DEBUG( "start : %lx\n", start);
	DRM_DEBUG( "used : %d\n", used);
	DRM_DEBUG( "start + used - 4 : %ld\n", start + used - 4);

	if (buf_priv->currently_mapped == I830_BUF_MAPPED) {
		u32 *vp = buf_priv->kernel_virtual;

		/* Patch the primitive header into the first dword; the
		 * length field is in dwords, minus 2 for the header.
		 */
		vp[0] = (GFX_OP_PRIMITIVE |
			 sarea_priv->vertex_prim |
			 ((used/4)-2));

		if (dev_priv->use_mi_batchbuffer_start) {
			vp[used/4] = MI_BATCH_BUFFER_END;
			used += 4;
		}

		/* Pad with a zero dword so the batch length is a multiple
		 * of 8 bytes.
		 */
		if (used & 4) {
			vp[used/4] = 0;
			used += 4;
		}

		i830_unmap_buffer(buf);
	}

	if (used) {
		do {
			/* Program the drawing rectangle for this cliprect. */
			if (i < nbox) {
				BEGIN_LP_RING(6);
				OUT_RING( GFX_OP_DRAWRECT_INFO );
				OUT_RING( sarea_priv->BufferState[I830_DESTREG_DR1] );
				OUT_RING( box[i].x1 | (box[i].y1<<16) );
				OUT_RING( box[i].x2 | (box[i].y2<<16) );
				OUT_RING( sarea_priv->BufferState[I830_DESTREG_DR4] );
				OUT_RING( 0 );
				ADVANCE_LP_RING();
			}

			/* Kick the batch off, either with the batchbuffer
			 * start command or with an explicit start/end pair.
			 */
			if (dev_priv->use_mi_batchbuffer_start) {
				BEGIN_LP_RING(2);
				OUT_RING( MI_BATCH_BUFFER_START | (2<<6) );
				OUT_RING( start | MI_BATCH_NON_SECURE );
				ADVANCE_LP_RING();
			}
			else {
				BEGIN_LP_RING(4);
				OUT_RING( MI_BATCH_BUFFER );
				OUT_RING( start | MI_BATCH_NON_SECURE );
				OUT_RING( start + used - 4 );
				OUT_RING( 0 );
				ADVANCE_LP_RING();
			}
		} while (++i < nbox);
	}

	if (discard) {
		/* Fence the buffer: store the bumped counter into the status
		 * page and mark the buffer FREE once the hardware gets here.
		 */
		dev_priv->counter++;

		(void) cmpxchg(buf_priv->in_use, I830_BUF_CLIENT,
			       I830_BUF_HARDWARE);

		BEGIN_LP_RING(8);
		OUT_RING( CMD_STORE_DWORD_IDX );
		OUT_RING( 20 );
		OUT_RING( dev_priv->counter );
		OUT_RING( CMD_STORE_DWORD_IDX );
		OUT_RING( buf_priv->my_use_idx );
		OUT_RING( I830_BUF_FREE );
		OUT_RING( CMD_REPORT_HEAD );
		OUT_RING( 0 );
		ADVANCE_LP_RING();
	}
}
/* Flush caches and block until the hardware ring has fully drained,
 * i.e. the engine is idle.
 */
static void i830_dma_quiescent(drm_device_t *dev)
{
	drm_i830_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;

	i830_kernel_lost_context(dev);

	BEGIN_LP_RING(4);
	OUT_RING( INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE );
	OUT_RING( CMD_REPORT_HEAD );
	OUT_RING( 0 );
	OUT_RING( 0 );
	ADVANCE_LP_RING();

	/* Wait until all but 8 bytes of the ring are free. */
	i830_wait_ring( dev, dev_priv->ring.Size - 8, __FUNCTION__ );
}
/* Drain the ring, then reclaim every buffer the hardware has finished
 * with (HARDWARE -> FREE).  Always returns 0.
 */
static int i830_flush_queue(drm_device_t *dev)
{
	drm_i830_private_t *dev_priv = dev->dev_private;
	drm_device_dma_t *dma = dev->dma;
	int i, ret = 0;
	RING_LOCALS;

	i830_kernel_lost_context(dev);

	BEGIN_LP_RING(2);
	OUT_RING( CMD_REPORT_HEAD );
	OUT_RING( 0 );
	ADVANCE_LP_RING();

	/* Wait until all but 8 bytes of the ring are free. */
	i830_wait_ring( dev, dev_priv->ring.Size - 8, __FUNCTION__ );

	for (i = 0; i < dma->buf_count; i++) {
		drm_buf_t *buf = dma->buflist[ i ];
		drm_i830_buf_priv_t *buf_priv = buf->dev_private;

		/* The engine is idle now, so HARDWARE-owned buffers are done. */
		int used = cmpxchg(buf_priv->in_use, I830_BUF_HARDWARE,
				   I830_BUF_FREE);

		if (used == I830_BUF_HARDWARE)
			DRM_DEBUG("reclaimed from HARDWARE\n");
		if (used == I830_BUF_CLIENT)
			DRM_DEBUG("still on client\n");
	}

	return ret;
}
/* Release every DMA buffer still owned by the given file, unmapping
 * any that remain mapped.  Must be called with the lock held.
 */
void i830_reclaim_buffers(drm_device_t *dev, struct file *filp)
{
	drm_device_dma_t *dma = dev->dma;
	int i;

	if (!dma) return;
	if (!dev->dev_private) return;
	if (!dma->buflist) return;

	/* Let the hardware finish so HARDWARE-owned buffers become FREE. */
	i830_flush_queue(dev);

	for (i = 0; i < dma->buf_count; i++) {
		drm_buf_t *buf = dma->buflist[ i ];
		drm_i830_buf_priv_t *buf_priv = buf->dev_private;

		if (buf->filp == filp && buf_priv) {
			int used = cmpxchg(buf_priv->in_use, I830_BUF_CLIENT,
					   I830_BUF_FREE);

			if (used == I830_BUF_CLIENT)
				DRM_DEBUG("reclaimed from client\n");
			if(buf_priv->currently_mapped == I830_BUF_MAPPED)
				buf_priv->currently_mapped = I830_BUF_UNMAPPED;
		}
	}
}
  1074. static int i830_flush_ioctl(struct inode *inode, struct file *filp,
  1075. unsigned int cmd, unsigned long arg)
  1076. {
  1077. drm_file_t *priv = filp->private_data;
  1078. drm_device_t *dev = priv->head->dev;
  1079. LOCK_TEST_WITH_RETURN(dev, filp);
  1080. i830_flush_queue(dev);
  1081. return 0;
  1082. }
  1083. static int i830_dma_vertex(struct inode *inode, struct file *filp,
  1084. unsigned int cmd, unsigned long arg)
  1085. {
  1086. drm_file_t *priv = filp->private_data;
  1087. drm_device_t *dev = priv->head->dev;
  1088. drm_device_dma_t *dma = dev->dma;
  1089. drm_i830_private_t *dev_priv = (drm_i830_private_t *)dev->dev_private;
  1090. u32 *hw_status = dev_priv->hw_status_page;
  1091. drm_i830_sarea_t *sarea_priv = (drm_i830_sarea_t *)
  1092. dev_priv->sarea_priv;
  1093. drm_i830_vertex_t vertex;
  1094. if (copy_from_user(&vertex, (drm_i830_vertex_t __user *)arg, sizeof(vertex)))
  1095. return -EFAULT;
  1096. LOCK_TEST_WITH_RETURN(dev, filp);
  1097. DRM_DEBUG("i830 dma vertex, idx %d used %d discard %d\n",
  1098. vertex.idx, vertex.used, vertex.discard);
  1099. if(vertex.idx < 0 || vertex.idx > dma->buf_count) return -EINVAL;
  1100. i830_dma_dispatch_vertex( dev,
  1101. dma->buflist[ vertex.idx ],
  1102. vertex.discard, vertex.used );
  1103. sarea_priv->last_enqueue = dev_priv->counter-1;
  1104. sarea_priv->last_dispatch = (int) hw_status[5];
  1105. return 0;
  1106. }
  1107. static int i830_clear_bufs(struct inode *inode, struct file *filp,
  1108. unsigned int cmd, unsigned long arg)
  1109. {
  1110. drm_file_t *priv = filp->private_data;
  1111. drm_device_t *dev = priv->head->dev;
  1112. drm_i830_clear_t clear;
  1113. if (copy_from_user(&clear, (drm_i830_clear_t __user *)arg, sizeof(clear)))
  1114. return -EFAULT;
  1115. LOCK_TEST_WITH_RETURN(dev, filp);
  1116. /* GH: Someone's doing nasty things... */
  1117. if (!dev->dev_private) {
  1118. return -EINVAL;
  1119. }
  1120. i830_dma_dispatch_clear( dev, clear.flags,
  1121. clear.clear_color,
  1122. clear.clear_depth,
  1123. clear.clear_depthmask);
  1124. return 0;
  1125. }
  1126. static int i830_swap_bufs(struct inode *inode, struct file *filp,
  1127. unsigned int cmd, unsigned long arg)
  1128. {
  1129. drm_file_t *priv = filp->private_data;
  1130. drm_device_t *dev = priv->head->dev;
  1131. DRM_DEBUG("i830_swap_bufs\n");
  1132. LOCK_TEST_WITH_RETURN(dev, filp);
  1133. i830_dma_dispatch_swap( dev );
  1134. return 0;
  1135. }
  1136. /* Not sure why this isn't set all the time:
  1137. */
  1138. static void i830_do_init_pageflip( drm_device_t *dev )
  1139. {
  1140. drm_i830_private_t *dev_priv = dev->dev_private;
  1141. DRM_DEBUG("%s\n", __FUNCTION__);
  1142. dev_priv->page_flipping = 1;
  1143. dev_priv->current_page = 0;
  1144. dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
  1145. }
/* Disable page flipping; if the back buffer is currently displayed,
 * flip back to the front buffer first.  Always returns 0.
 */
static int i830_do_cleanup_pageflip( drm_device_t *dev )
{
	drm_i830_private_t *dev_priv = dev->dev_private;

	DRM_DEBUG("%s\n", __FUNCTION__);

	if (dev_priv->current_page != 0)
		i830_dma_dispatch_flip( dev );

	dev_priv->page_flipping = 0;
	return 0;
}
  1155. static int i830_flip_bufs(struct inode *inode, struct file *filp,
  1156. unsigned int cmd, unsigned long arg)
  1157. {
  1158. drm_file_t *priv = filp->private_data;
  1159. drm_device_t *dev = priv->head->dev;
  1160. drm_i830_private_t *dev_priv = dev->dev_private;
  1161. DRM_DEBUG("%s\n", __FUNCTION__);
  1162. LOCK_TEST_WITH_RETURN(dev, filp);
  1163. if (!dev_priv->page_flipping)
  1164. i830_do_init_pageflip( dev );
  1165. i830_dma_dispatch_flip( dev );
  1166. return 0;
  1167. }
  1168. static int i830_getage(struct inode *inode, struct file *filp, unsigned int cmd,
  1169. unsigned long arg)
  1170. {
  1171. drm_file_t *priv = filp->private_data;
  1172. drm_device_t *dev = priv->head->dev;
  1173. drm_i830_private_t *dev_priv = (drm_i830_private_t *)dev->dev_private;
  1174. u32 *hw_status = dev_priv->hw_status_page;
  1175. drm_i830_sarea_t *sarea_priv = (drm_i830_sarea_t *)
  1176. dev_priv->sarea_priv;
  1177. sarea_priv->last_dispatch = (int) hw_status[5];
  1178. return 0;
  1179. }
  1180. static int i830_getbuf(struct inode *inode, struct file *filp, unsigned int cmd,
  1181. unsigned long arg)
  1182. {
  1183. drm_file_t *priv = filp->private_data;
  1184. drm_device_t *dev = priv->head->dev;
  1185. int retcode = 0;
  1186. drm_i830_dma_t d;
  1187. drm_i830_private_t *dev_priv = (drm_i830_private_t *)dev->dev_private;
  1188. u32 *hw_status = dev_priv->hw_status_page;
  1189. drm_i830_sarea_t *sarea_priv = (drm_i830_sarea_t *)
  1190. dev_priv->sarea_priv;
  1191. DRM_DEBUG("getbuf\n");
  1192. if (copy_from_user(&d, (drm_i830_dma_t __user *)arg, sizeof(d)))
  1193. return -EFAULT;
  1194. LOCK_TEST_WITH_RETURN(dev, filp);
  1195. d.granted = 0;
  1196. retcode = i830_dma_get_buffer(dev, &d, filp);
  1197. DRM_DEBUG("i830_dma: %d returning %d, granted = %d\n",
  1198. current->pid, retcode, d.granted);
  1199. if (copy_to_user((drm_dma_t __user *)arg, &d, sizeof(d)))
  1200. return -EFAULT;
  1201. sarea_priv->last_dispatch = (int) hw_status[5];
  1202. return retcode;
  1203. }
/* DRM_I830_COPY ioctl: retained for ABI compatibility only.
 * Never copy - 2.4.x doesn't need it
 */
static int i830_copybuf(struct inode *inode,
			struct file *filp, unsigned int cmd, unsigned long arg)
{
	return 0;
}
/* DRM_I830_DOCOPY ioctl: no-op, retained for ABI compatibility. */
static int i830_docopy(struct inode *inode, struct file *filp, unsigned int cmd,
		       unsigned long arg)
{
	return 0;
}
  1215. static int i830_getparam( struct inode *inode, struct file *filp,
  1216. unsigned int cmd, unsigned long arg )
  1217. {
  1218. drm_file_t *priv = filp->private_data;
  1219. drm_device_t *dev = priv->head->dev;
  1220. drm_i830_private_t *dev_priv = dev->dev_private;
  1221. drm_i830_getparam_t param;
  1222. int value;
  1223. if ( !dev_priv ) {
  1224. DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
  1225. return -EINVAL;
  1226. }
  1227. if (copy_from_user(&param, (drm_i830_getparam_t __user *)arg, sizeof(param) ))
  1228. return -EFAULT;
  1229. switch( param.param ) {
  1230. case I830_PARAM_IRQ_ACTIVE:
  1231. value = dev->irq_enabled;
  1232. break;
  1233. default:
  1234. return -EINVAL;
  1235. }
  1236. if ( copy_to_user( param.value, &value, sizeof(int) ) ) {
  1237. DRM_ERROR( "copy_to_user\n" );
  1238. return -EFAULT;
  1239. }
  1240. return 0;
  1241. }
  1242. static int i830_setparam( struct inode *inode, struct file *filp,
  1243. unsigned int cmd, unsigned long arg )
  1244. {
  1245. drm_file_t *priv = filp->private_data;
  1246. drm_device_t *dev = priv->head->dev;
  1247. drm_i830_private_t *dev_priv = dev->dev_private;
  1248. drm_i830_setparam_t param;
  1249. if ( !dev_priv ) {
  1250. DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
  1251. return -EINVAL;
  1252. }
  1253. if (copy_from_user(&param, (drm_i830_setparam_t __user *)arg, sizeof(param) ))
  1254. return -EFAULT;
  1255. switch( param.param ) {
  1256. case I830_SETPARAM_USE_MI_BATCHBUFFER_START:
  1257. dev_priv->use_mi_batchbuffer_start = param.value;
  1258. break;
  1259. default:
  1260. return -EINVAL;
  1261. }
  1262. return 0;
  1263. }
/* DRM driver hook: tear down DMA state before device takedown. */
void i830_driver_pretakedown(drm_device_t *dev)
{
	i830_dma_cleanup( dev );
}
  1268. void i830_driver_prerelease(drm_device_t *dev, DRMFILE filp)
  1269. {
  1270. if (dev->dev_private) {
  1271. drm_i830_private_t *dev_priv = dev->dev_private;
  1272. if (dev_priv->page_flipping) {
  1273. i830_do_cleanup_pageflip(dev);
  1274. }
  1275. }
  1276. }
/* DRM driver hook: reclaim this file's DMA buffers on release. */
void i830_driver_release(drm_device_t *dev, struct file *filp)
{
	i830_reclaim_buffers(dev, filp);
}
/* DRM driver hook: wait for the hardware to go idle.  Always 0. */
int i830_driver_dma_quiescent(drm_device_t *dev)
{
	i830_dma_quiescent( dev );
	return 0;
}
/* Ioctl dispatch table, indexed by ioctl number.
 * NOTE(review): per-entry flags appear to be { handler, auth_needed,
 * root_only } -- confirm against the drm_ioctl_desc_t definition.
 */
drm_ioctl_desc_t i830_ioctls[] = {
	[DRM_IOCTL_NR(DRM_I830_INIT)] = { i830_dma_init, 1, 1 },
	[DRM_IOCTL_NR(DRM_I830_VERTEX)] = { i830_dma_vertex, 1, 0 },
	[DRM_IOCTL_NR(DRM_I830_CLEAR)] = { i830_clear_bufs, 1, 0 },
	[DRM_IOCTL_NR(DRM_I830_FLUSH)] = { i830_flush_ioctl, 1, 0 },
	[DRM_IOCTL_NR(DRM_I830_GETAGE)] = { i830_getage, 1, 0 },
	[DRM_IOCTL_NR(DRM_I830_GETBUF)] = { i830_getbuf, 1, 0 },
	[DRM_IOCTL_NR(DRM_I830_SWAP)] = { i830_swap_bufs, 1, 0 },
	[DRM_IOCTL_NR(DRM_I830_COPY)] = { i830_copybuf, 1, 0 },
	[DRM_IOCTL_NR(DRM_I830_DOCOPY)] = { i830_docopy, 1, 0 },
	[DRM_IOCTL_NR(DRM_I830_FLIP)] = { i830_flip_bufs, 1, 0 },
	[DRM_IOCTL_NR(DRM_I830_IRQ_EMIT)] = { i830_irq_emit, 1, 0 },
	[DRM_IOCTL_NR(DRM_I830_IRQ_WAIT)] = { i830_irq_wait, 1, 0 },
	[DRM_IOCTL_NR(DRM_I830_GETPARAM)] = { i830_getparam, 1, 0 },
	[DRM_IOCTL_NR(DRM_I830_SETPARAM)] = { i830_setparam, 1, 0 }
};

/* Number of entries in the ioctl table above. */
int i830_max_ioctl = DRM_ARRAY_SIZE(i830_ioctls);