i830_dma.c 39 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502150315041505150615071508150915101511151215131514151515161517151815191520152115221523152415251526152715281529153015311532153315341535153615371538153915401541154215431544154515461547154815491550155115521553155415551556155715581559
  1. /* i830_dma.c -- DMA support for the I830 -*- linux-c -*-
  2. * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
  3. *
  4. * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
  5. * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
  6. * All Rights Reserved.
  7. *
  8. * Permission is hereby granted, free of charge, to any person obtaining a
  9. * copy of this software and associated documentation files (the "Software"),
  10. * to deal in the Software without restriction, including without limitation
  11. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  12. * and/or sell copies of the Software, and to permit persons to whom the
  13. * Software is furnished to do so, subject to the following conditions:
  14. *
  15. * The above copyright notice and this permission notice (including the next
  16. * paragraph) shall be included in all copies or substantial portions of the
  17. * Software.
  18. *
  19. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  20. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  21. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  22. * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
  23. * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  24. * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  25. * DEALINGS IN THE SOFTWARE.
  26. *
  27. * Authors: Rickard E. (Rik) Faith <faith@valinux.com>
  28. * Jeff Hartmann <jhartmann@valinux.com>
  29. * Keith Whitwell <keith@tungstengraphics.com>
  30. * Abraham vd Merwe <abraham@2d3d.co.za>
  31. *
  32. */
  33. #include "drmP.h"
  34. #include "drm.h"
  35. #include "i830_drm.h"
  36. #include "i830_drv.h"
  37. #include <linux/interrupt.h> /* For task queue support */
  38. #include <linux/smp_lock.h>
  39. #include <linux/pagemap.h>
  40. #include <linux/delay.h>
  41. #include <linux/slab.h>
  42. #include <asm/uaccess.h>
  43. #define I830_BUF_FREE 2
  44. #define I830_BUF_CLIENT 1
  45. #define I830_BUF_HARDWARE 0
  46. #define I830_BUF_UNMAPPED 0
  47. #define I830_BUF_MAPPED 1
/*
 * Grab the first DMA buffer whose in_use word (which lives in the
 * hardware status page) still reads I830_BUF_FREE, atomically marking
 * it I830_BUF_CLIENT.  Returns NULL when every buffer is busy.
 */
static struct drm_buf *i830_freelist_get(struct drm_device * dev)
{
	struct drm_device_dma *dma = dev->dma;
	int i;
	int used;

	/* Linear search might not be the best solution */
	for (i = 0; i < dma->buf_count; i++) {
		struct drm_buf *buf = dma->buflist[i];
		drm_i830_buf_priv_t *buf_priv = buf->dev_private;

		/* In use is already a pointer */
		used = cmpxchg(buf_priv->in_use, I830_BUF_FREE,
			       I830_BUF_CLIENT);
		if (used == I830_BUF_FREE)
			return buf;
	}
	return NULL;
}
  65. /* This should only be called if the buffer is not sent to the hardware
  66. * yet, the hardware updates in use for us once its on the ring buffer.
  67. */
  68. static int i830_freelist_put(struct drm_device *dev, struct drm_buf *buf)
  69. {
  70. drm_i830_buf_priv_t *buf_priv = buf->dev_private;
  71. int used;
  72. /* In use is already a pointer */
  73. used = cmpxchg(buf_priv->in_use, I830_BUF_CLIENT, I830_BUF_FREE);
  74. if (used != I830_BUF_CLIENT) {
  75. DRM_ERROR("Freeing buffer thats not in use : %d\n", buf->idx);
  76. return -EINVAL;
  77. }
  78. return 0;
  79. }
/*
 * VMA mmap handler reached only while i830_map_buffer() has temporarily
 * swapped the file's f_op to i830_buffer_fops.  Maps the buffer the
 * caller stashed in dev_priv->mmap_buffer into the user's VMA.
 */
static int i830_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev;
	drm_i830_private_t *dev_priv;
	struct drm_buf *buf;
	drm_i830_buf_priv_t *buf_priv;

	lock_kernel();
	dev = priv->minor->dev;
	dev_priv = dev->dev_private;
	/* NOTE(review): assumes mmap_buffer was set by i830_map_buffer()
	 * before we get here; a direct user mmap would find it NULL. */
	buf = dev_priv->mmap_buffer;
	buf_priv = buf->dev_private;

	vma->vm_flags |= (VM_IO | VM_DONTCOPY);
	vma->vm_file = filp;

	buf_priv->currently_mapped = I830_BUF_MAPPED;
	unlock_kernel();

	/* vm_pgoff carries the buffer's bus address (see i830_map_buffer). */
	if (io_remap_pfn_range(vma, vma->vm_start,
			       vma->vm_pgoff,
			       vma->vm_end - vma->vm_start, vma->vm_page_prot))
		return -EAGAIN;
	return 0;
}
/* File operations installed on the DRM fd only for the duration of the
 * do_mmap() call in i830_map_buffer(), so that .mmap lands in
 * i830_mmap_buffers() instead of the regular DRM mmap path. */
static const struct file_operations i830_buffer_fops = {
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.mmap = i830_mmap_buffers,
	.fasync = drm_fasync,
};
/*
 * Map @buf into the calling process' address space.
 *
 * Temporarily replaces the file's f_op with i830_buffer_fops and
 * publishes the buffer via dev_priv->mmap_buffer so that
 * i830_mmap_buffers() can find it; both are restored before returning.
 * Returns 0 on success or the negative errno from do_mmap().
 */
static int i830_map_buffer(struct drm_buf *buf, struct drm_file *file_priv)
{
	struct drm_device *dev = file_priv->minor->dev;
	drm_i830_buf_priv_t *buf_priv = buf->dev_private;
	drm_i830_private_t *dev_priv = dev->dev_private;
	const struct file_operations *old_fops;
	unsigned long virtual;
	int retcode = 0;

	if (buf_priv->currently_mapped == I830_BUF_MAPPED)
		return -EINVAL;

	/* mmap_sem held for write also serializes the f_op swap below. */
	down_write(&current->mm->mmap_sem);
	old_fops = file_priv->filp->f_op;
	file_priv->filp->f_op = &i830_buffer_fops;
	dev_priv->mmap_buffer = buf;
	/* The bus address rides in as the mmap offset and comes back to
	 * i830_mmap_buffers() in vma->vm_pgoff. */
	virtual = do_mmap(file_priv->filp, 0, buf->total, PROT_READ | PROT_WRITE,
			  MAP_SHARED, buf->bus_address);
	dev_priv->mmap_buffer = NULL;
	file_priv->filp->f_op = old_fops;
	if (IS_ERR((void *)virtual)) {	/* ugh */
		/* Real error */
		DRM_ERROR("mmap error\n");
		retcode = PTR_ERR((void *)virtual);
		buf_priv->virtual = NULL;
	} else {
		buf_priv->virtual = (void __user *)virtual;
	}
	up_write(&current->mm->mmap_sem);

	return retcode;
}
/*
 * Undo i830_map_buffer(): unmap the buffer from the current process and
 * mark it unmapped.  Returns -EINVAL if the buffer was not mapped,
 * otherwise the do_munmap() result.
 */
static int i830_unmap_buffer(struct drm_buf *buf)
{
	drm_i830_buf_priv_t *buf_priv = buf->dev_private;
	int retcode = 0;

	if (buf_priv->currently_mapped != I830_BUF_MAPPED)
		return -EINVAL;

	down_write(&current->mm->mmap_sem);
	retcode = do_munmap(current->mm,
			    (unsigned long)buf_priv->virtual,
			    (size_t) buf->total);
	up_write(&current->mm->mmap_sem);

	/* State is cleared even if do_munmap() failed, matching the
	 * driver's existing best-effort behavior. */
	buf_priv->currently_mapped = I830_BUF_UNMAPPED;
	buf_priv->virtual = NULL;

	return retcode;
}
/*
 * Claim a free DMA buffer, map it into the caller's address space, and
 * fill in the drm_i830_dma_t reply (index, size, user virtual address).
 * Returns -ENOMEM when no buffer is free, or the mapping error code.
 */
static int i830_dma_get_buffer(struct drm_device *dev, drm_i830_dma_t *d,
			       struct drm_file *file_priv)
{
	struct drm_buf *buf;
	drm_i830_buf_priv_t *buf_priv;
	int retcode = 0;

	buf = i830_freelist_get(dev);
	if (!buf) {
		retcode = -ENOMEM;
		DRM_DEBUG("retcode=%d\n", retcode);
		return retcode;
	}

	retcode = i830_map_buffer(buf, file_priv);
	if (retcode) {
		/* Mapping failed: hand the buffer back to the freelist. */
		i830_freelist_put(dev, buf);
		DRM_ERROR("mapbuf failed, retcode %d\n", retcode);
		return retcode;
	}
	buf->file_priv = file_priv;
	buf_priv = buf->dev_private;
	d->granted = 1;
	d->request_idx = buf->idx;
	d->request_size = buf->total;
	d->virtual = buf_priv->virtual;

	return retcode;
}
/*
 * Tear down everything i830_dma_initialize() set up: the ring mapping,
 * the hardware status page, the per-buffer kernel mappings and the
 * private structure itself.  Safe on a partially initialized device;
 * each teardown step checks its own pointer first.  Always returns 0.
 */
static int i830_dma_cleanup(struct drm_device *dev)
{
	struct drm_device_dma *dma = dev->dma;

	/* Make sure interrupts are disabled here because the uninstall ioctl
	 * may not have been called from userspace and after dev_private
	 * is freed, it's too late.
	 */
	if (dev->irq_enabled)
		drm_irq_uninstall(dev);

	if (dev->dev_private) {
		int i;
		drm_i830_private_t *dev_priv =
		    (drm_i830_private_t *) dev->dev_private;

		if (dev_priv->ring.virtual_start)
			drm_core_ioremapfree(&dev_priv->ring.map, dev);
		if (dev_priv->hw_status_page) {
			pci_free_consistent(dev->pdev, PAGE_SIZE,
					    dev_priv->hw_status_page,
					    dev_priv->dma_status_page);
			/* Need to rewrite hardware status page */
			I830_WRITE(0x02080, 0x1ffff000);
		}

		kfree(dev->dev_private);
		dev->dev_private = NULL;

		/* Release the kernel-side ioremap of each DMA buffer. */
		for (i = 0; i < dma->buf_count; i++) {
			struct drm_buf *buf = dma->buflist[i];
			drm_i830_buf_priv_t *buf_priv = buf->dev_private;
			if (buf_priv->kernel_virtual && buf->total)
				drm_core_ioremapfree(&buf_priv->map, dev);
		}
	}
	return 0;
}
/*
 * Busy-wait until at least @n bytes of space are free in the ring.
 * The 3-second timeout restarts whenever the hardware head register
 * moves; if it expires, the (presumably wedged) wait is abandoned with
 * an error logged.  Returns the number of polling iterations performed.
 */
int i830_wait_ring(struct drm_device *dev, int n, const char *caller)
{
	drm_i830_private_t *dev_priv = dev->dev_private;
	drm_i830_ring_buffer_t *ring = &(dev_priv->ring);
	int iters = 0;
	unsigned long end;
	unsigned int last_head = I830_READ(LP_RING + RING_HEAD) & HEAD_ADDR;

	end = jiffies + (HZ * 3);
	while (ring->space < n) {
		ring->head = I830_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
		ring->space = ring->head - (ring->tail + 8);
		if (ring->space < 0)
			ring->space += ring->Size;

		/* Head moved: the GPU is making progress, reset the timeout. */
		if (ring->head != last_head) {
			end = jiffies + (HZ * 3);
			last_head = ring->head;
		}

		iters++;
		if (time_before(end, jiffies)) {
			DRM_ERROR("space: %d wanted %d\n", ring->space, n);
			DRM_ERROR("lockup\n");
			goto out_wait_ring;
		}
		udelay(1);
		/* Record that we had to stall, for the performance boxes. */
		dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT;
	}

out_wait_ring:
	return iters;
}
/*
 * Resynchronize the software copy of the ring state (head/tail/space)
 * with the hardware registers; called before emitting commands in case
 * another context has touched the ring behind our back.
 */
static void i830_kernel_lost_context(struct drm_device *dev)
{
	drm_i830_private_t *dev_priv = dev->dev_private;
	drm_i830_ring_buffer_t *ring = &(dev_priv->ring);

	ring->head = I830_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
	ring->tail = I830_READ(LP_RING + RING_TAIL) & TAIL_ADDR;
	ring->space = ring->head - (ring->tail + 8);
	if (ring->space < 0)
		ring->space += ring->Size;

	/* Head caught up with tail: ring drained, note it for perf boxes. */
	if (ring->head == ring->tail)
		dev_priv->sarea_priv->perf_boxes |= I830_BOX_RING_EMPTY;
}
  253. static int i830_freelist_init(struct drm_device *dev, drm_i830_private_t *dev_priv)
  254. {
  255. struct drm_device_dma *dma = dev->dma;
  256. int my_idx = 36;
  257. u32 *hw_status = (u32 *) (dev_priv->hw_status_page + my_idx);
  258. int i;
  259. if (dma->buf_count > 1019) {
  260. /* Not enough space in the status page for the freelist */
  261. return -EINVAL;
  262. }
  263. for (i = 0; i < dma->buf_count; i++) {
  264. struct drm_buf *buf = dma->buflist[i];
  265. drm_i830_buf_priv_t *buf_priv = buf->dev_private;
  266. buf_priv->in_use = hw_status++;
  267. buf_priv->my_use_idx = my_idx;
  268. my_idx += 4;
  269. *buf_priv->in_use = I830_BUF_FREE;
  270. buf_priv->map.offset = buf->bus_address;
  271. buf_priv->map.size = buf->total;
  272. buf_priv->map.type = _DRM_AGP;
  273. buf_priv->map.flags = 0;
  274. buf_priv->map.mtrr = 0;
  275. drm_core_ioremap(&buf_priv->map, dev);
  276. buf_priv->kernel_virtual = buf_priv->map.handle;
  277. }
  278. return 0;
  279. }
/*
 * One-time DMA initialization from the I830_INIT_DMA ioctl: locate the
 * SAREA, MMIO and DMA-buffer maps, ioremap the ring buffer, allocate
 * the hardware status page, and build the buffer freelist.  On any
 * failure the partially built state is torn down via i830_dma_cleanup()
 * and a negative errno is returned.
 */
static int i830_dma_initialize(struct drm_device *dev,
			       drm_i830_private_t *dev_priv,
			       drm_i830_init_t *init)
{
	struct drm_map_list *r_list;

	memset(dev_priv, 0, sizeof(drm_i830_private_t));

	/* The SAREA is the shared-memory map flagged as holding the lock. */
	list_for_each_entry(r_list, &dev->maplist, head) {
		if (r_list->map &&
		    r_list->map->type == _DRM_SHM &&
		    r_list->map->flags & _DRM_CONTAINS_LOCK) {
			dev_priv->sarea_map = r_list->map;
			break;
		}
	}

	if (!dev_priv->sarea_map) {
		dev->dev_private = (void *)dev_priv;
		i830_dma_cleanup(dev);
		DRM_ERROR("can not find sarea!\n");
		return -EINVAL;
	}
	dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset);
	if (!dev_priv->mmio_map) {
		dev->dev_private = (void *)dev_priv;
		i830_dma_cleanup(dev);
		DRM_ERROR("can not find mmio map!\n");
		return -EINVAL;
	}
	dev->agp_buffer_token = init->buffers_offset;
	dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
	if (!dev->agp_buffer_map) {
		dev->dev_private = (void *)dev_priv;
		i830_dma_cleanup(dev);
		DRM_ERROR("can not find dma buffer map!\n");
		return -EINVAL;
	}

	dev_priv->sarea_priv = (drm_i830_sarea_t *)
	    ((u8 *) dev_priv->sarea_map->handle + init->sarea_priv_offset);

	dev_priv->ring.Start = init->ring_start;
	dev_priv->ring.End = init->ring_end;
	dev_priv->ring.Size = init->ring_size;

	dev_priv->ring.map.offset = dev->agp->base + init->ring_start;
	dev_priv->ring.map.size = init->ring_size;
	dev_priv->ring.map.type = _DRM_AGP;
	dev_priv->ring.map.flags = 0;
	dev_priv->ring.map.mtrr = 0;

	drm_core_ioremap(&dev_priv->ring.map, dev);

	if (dev_priv->ring.map.handle == NULL) {
		dev->dev_private = (void *)dev_priv;
		i830_dma_cleanup(dev);
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return -ENOMEM;
	}

	dev_priv->ring.virtual_start = dev_priv->ring.map.handle;

	/* NOTE(review): assumes ring_size is a power of two — the ioctl
	 * does not validate it, and the mask is wrong otherwise. */
	dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;

	dev_priv->w = init->w;
	dev_priv->h = init->h;
	dev_priv->pitch = init->pitch;
	dev_priv->back_offset = init->back_offset;
	dev_priv->depth_offset = init->depth_offset;
	dev_priv->front_offset = init->front_offset;

	/* di1 words combine the buffer offset with the pitch/format bits. */
	dev_priv->front_di1 = init->front_offset | init->pitch_bits;
	dev_priv->back_di1 = init->back_offset | init->pitch_bits;
	dev_priv->zi1 = init->depth_offset | init->pitch_bits;

	DRM_DEBUG("front_di1 %x\n", dev_priv->front_di1);
	DRM_DEBUG("back_offset %x\n", dev_priv->back_offset);
	DRM_DEBUG("back_di1 %x\n", dev_priv->back_di1);
	DRM_DEBUG("pitch_bits %x\n", init->pitch_bits);

	dev_priv->cpp = init->cpp;
	/* We are using separate values as placeholders for mechanisms for
	 * private backbuffer/depthbuffer usage.
	 */

	dev_priv->back_pitch = init->back_pitch;
	dev_priv->depth_pitch = init->depth_pitch;
	dev_priv->do_boxes = 0;
	dev_priv->use_mi_batchbuffer_start = 0;

	/* Program Hardware Status Page */
	dev_priv->hw_status_page =
	    pci_alloc_consistent(dev->pdev, PAGE_SIZE,
				 &dev_priv->dma_status_page);
	if (!dev_priv->hw_status_page) {
		dev->dev_private = (void *)dev_priv;
		i830_dma_cleanup(dev);
		DRM_ERROR("Can not allocate hardware status page\n");
		return -ENOMEM;
	}
	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
	DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);

	/* 0x02080 is the hardware status page address register. */
	I830_WRITE(0x02080, dev_priv->dma_status_page);
	DRM_DEBUG("Enabled hardware status page\n");

	/* Now we need to init our freelist */
	if (i830_freelist_init(dev, dev_priv) != 0) {
		dev->dev_private = (void *)dev_priv;
		i830_dma_cleanup(dev);
		DRM_ERROR("Not enough space in the status page for"
			  " the freelist\n");
		return -ENOMEM;
	}
	dev->dev_private = (void *)dev_priv;

	return 0;
}
  381. static int i830_dma_init(struct drm_device *dev, void *data,
  382. struct drm_file *file_priv)
  383. {
  384. drm_i830_private_t *dev_priv;
  385. drm_i830_init_t *init = data;
  386. int retcode = 0;
  387. switch (init->func) {
  388. case I830_INIT_DMA:
  389. dev_priv = kmalloc(sizeof(drm_i830_private_t), GFP_KERNEL);
  390. if (dev_priv == NULL)
  391. return -ENOMEM;
  392. retcode = i830_dma_initialize(dev, dev_priv, init);
  393. break;
  394. case I830_CLEANUP_DMA:
  395. retcode = i830_dma_cleanup(dev);
  396. break;
  397. default:
  398. retcode = -EINVAL;
  399. break;
  400. }
  401. return retcode;
  402. }
  403. #define GFX_OP_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16))
  404. #define ST1_ENABLE (1<<16)
  405. #define ST1_MASK (0xffff)
/* Most efficient way to verify state for the i830 is as it is
 * emitted. Non-conformant state is silently dropped.
 */
static void i830EmitContextVerified(struct drm_device *dev, unsigned int *code)
{
	drm_i830_private_t *dev_priv = dev->dev_private;
	int i, j = 0;
	unsigned int tmp;
	RING_LOCALS;

	BEGIN_LP_RING(I830_CTX_SETUP_SIZE + 4);

	/* Only 3D-pipeline packets with an opcode group below 0x1d are
	 * allowed through from the (untrusted) SAREA state block. */
	for (i = 0; i < I830_CTXREG_BLENDCOLR0; i++) {
		tmp = code[i];
		if ((tmp & (7 << 29)) == CMD_3D &&
		    (tmp & (0x1f << 24)) < (0x1d << 24)) {
			OUT_RING(tmp);
			j++;
		} else {
			DRM_ERROR("Skipping %d\n", i);
		}
	}

	/* The blend-color packet header is supplied by the kernel, only
	 * the payload dword comes from userspace. */
	OUT_RING(STATE3D_CONST_BLEND_COLOR_CMD);
	OUT_RING(code[I830_CTXREG_BLENDCOLR]);
	j += 2;

	for (i = I830_CTXREG_VF; i < I830_CTXREG_MCSB0; i++) {
		tmp = code[i];
		if ((tmp & (7 << 29)) == CMD_3D &&
		    (tmp & (0x1f << 24)) < (0x1d << 24)) {
			OUT_RING(tmp);
			j++;
		} else {
			DRM_ERROR("Skipping %d\n", i);
		}
	}

	OUT_RING(STATE3D_MAP_COORD_SETBIND_CMD);
	OUT_RING(code[I830_CTXREG_MCSB1]);
	j += 2;

	/* Pad with a nop so an even number of dwords goes out. */
	if (j & 1)
		OUT_RING(0);

	ADVANCE_LP_RING();
}
  446. static void i830EmitTexVerified(struct drm_device *dev, unsigned int *code)
  447. {
  448. drm_i830_private_t *dev_priv = dev->dev_private;
  449. int i, j = 0;
  450. unsigned int tmp;
  451. RING_LOCALS;
  452. if (code[I830_TEXREG_MI0] == GFX_OP_MAP_INFO ||
  453. (code[I830_TEXREG_MI0] & ~(0xf * LOAD_TEXTURE_MAP0)) ==
  454. (STATE3D_LOAD_STATE_IMMEDIATE_2 | 4)) {
  455. BEGIN_LP_RING(I830_TEX_SETUP_SIZE);
  456. OUT_RING(code[I830_TEXREG_MI0]); /* TM0LI */
  457. OUT_RING(code[I830_TEXREG_MI1]); /* TM0S0 */
  458. OUT_RING(code[I830_TEXREG_MI2]); /* TM0S1 */
  459. OUT_RING(code[I830_TEXREG_MI3]); /* TM0S2 */
  460. OUT_RING(code[I830_TEXREG_MI4]); /* TM0S3 */
  461. OUT_RING(code[I830_TEXREG_MI5]); /* TM0S4 */
  462. for (i = 6; i < I830_TEX_SETUP_SIZE; i++) {
  463. tmp = code[i];
  464. OUT_RING(tmp);
  465. j++;
  466. }
  467. if (j & 1)
  468. OUT_RING(0);
  469. ADVANCE_LP_RING();
  470. } else
  471. printk("rejected packet %x\n", code[0]);
  472. }
  473. static void i830EmitTexBlendVerified(struct drm_device *dev,
  474. unsigned int *code, unsigned int num)
  475. {
  476. drm_i830_private_t *dev_priv = dev->dev_private;
  477. int i, j = 0;
  478. unsigned int tmp;
  479. RING_LOCALS;
  480. if (!num)
  481. return;
  482. BEGIN_LP_RING(num + 1);
  483. for (i = 0; i < num; i++) {
  484. tmp = code[i];
  485. OUT_RING(tmp);
  486. j++;
  487. }
  488. if (j & 1)
  489. OUT_RING(0);
  490. ADVANCE_LP_RING();
  491. }
/*
 * Load a 256-entry texture palette.  NOTE: the unconditional return
 * below makes this function an intentional no-op — everything after it
 * is dead code.  Even if re-enabled, it never calls ADVANCE_LP_RING()
 * (see the KW comment at the bottom), so nothing would reach the
 * hardware.  Left exactly as-is to preserve behavior.
 */
static void i830EmitTexPalette(struct drm_device *dev,
			       unsigned int *palette, int number, int is_shared)
{
	drm_i830_private_t *dev_priv = dev->dev_private;
	int i;
	RING_LOCALS;

	return;		/* deliberately disabled */

	BEGIN_LP_RING(258);

	if (is_shared == 1) {
		OUT_RING(CMD_OP_MAP_PALETTE_LOAD |
			 MAP_PALETTE_NUM(0) | MAP_PALETTE_BOTH);
	} else {
		OUT_RING(CMD_OP_MAP_PALETTE_LOAD | MAP_PALETTE_NUM(number));
	}
	for (i = 0; i < 256; i++)
		OUT_RING(palette[i]);
	OUT_RING(0);
	/* KW: WHERE IS THE ADVANCE_LP_RING? This is effectively a noop!
	 */
}
/* Need to do some additional checking when setting the dest buffer.
 */
static void i830EmitDestVerified(struct drm_device *dev, unsigned int *code)
{
	drm_i830_private_t *dev_priv = dev->dev_private;
	unsigned int tmp;
	RING_LOCALS;

	BEGIN_LP_RING(I830_DEST_SETUP_SIZE + 10);

	/* The colour-buffer address may only name the front or back
	 * buffer the kernel knows about; anything else is refused. */
	tmp = code[I830_DESTREG_CBUFADDR];
	if (tmp == dev_priv->front_di1 || tmp == dev_priv->back_di1) {
		/* Pad with two nops when the ring pointer is not
		 * quadword aligned for the DESTBUFFER_INFO packets. */
		if (((int)outring) & 8) {
			OUT_RING(0);
			OUT_RING(0);
		}

		OUT_RING(CMD_OP_DESTBUFFER_INFO);
		OUT_RING(BUF_3D_ID_COLOR_BACK |
			 BUF_3D_PITCH(dev_priv->back_pitch * dev_priv->cpp) |
			 BUF_3D_USE_FENCE);
		OUT_RING(tmp);
		OUT_RING(0);

		OUT_RING(CMD_OP_DESTBUFFER_INFO);
		OUT_RING(BUF_3D_ID_DEPTH | BUF_3D_USE_FENCE |
			 BUF_3D_PITCH(dev_priv->depth_pitch * dev_priv->cpp));
		OUT_RING(dev_priv->zi1);
		OUT_RING(0);
	} else {
		DRM_ERROR("bad di1 %x (allow %x or %x)\n",
			  tmp, dev_priv->front_di1, dev_priv->back_di1);
	}

	/* invarient:
	 */
	OUT_RING(GFX_OP_DESTBUFFER_VARS);
	OUT_RING(code[I830_DESTREG_DV1]);

	OUT_RING(GFX_OP_DRAWRECT_INFO);
	OUT_RING(code[I830_DESTREG_DR1]);
	OUT_RING(code[I830_DESTREG_DR2]);
	OUT_RING(code[I830_DESTREG_DR3]);
	OUT_RING(code[I830_DESTREG_DR4]);

	/* Need to verify this */
	tmp = code[I830_DESTREG_SENABLE];
	if ((tmp & ~0x3) == GFX_OP_SCISSOR_ENABLE) {
		OUT_RING(tmp);
	} else {
		DRM_ERROR("bad scissor enable\n");
		OUT_RING(0);
	}

	OUT_RING(GFX_OP_SCISSOR_RECT);
	OUT_RING(code[I830_DESTREG_SR1]);
	OUT_RING(code[I830_DESTREG_SR2]);
	OUT_RING(0);

	ADVANCE_LP_RING();
}
/* Emit a 2-dword stipple-pattern packet; the command word is fixed by
 * the kernel, only the pattern dword comes from userspace. */
static void i830EmitStippleVerified(struct drm_device *dev, unsigned int *code)
{
	drm_i830_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;

	BEGIN_LP_RING(2);
	OUT_RING(GFX_OP_STIPPLE);
	OUT_RING(code[1]);
	ADVANCE_LP_RING();
}
/*
 * Flush every dirty state block recorded in the SAREA to the ring,
 * clearing each dirty bit as it is handled.  Each Emit helper verifies
 * the userspace-supplied dwords before they reach the hardware.
 */
static void i830EmitState(struct drm_device *dev)
{
	drm_i830_private_t *dev_priv = dev->dev_private;
	drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv;
	unsigned int dirty = sarea_priv->dirty;

	DRM_DEBUG("%s %x\n", __func__, dirty);

	if (dirty & I830_UPLOAD_BUFFERS) {
		i830EmitDestVerified(dev, sarea_priv->BufferState);
		sarea_priv->dirty &= ~I830_UPLOAD_BUFFERS;
	}

	if (dirty & I830_UPLOAD_CTX) {
		i830EmitContextVerified(dev, sarea_priv->ContextState);
		sarea_priv->dirty &= ~I830_UPLOAD_CTX;
	}

	if (dirty & I830_UPLOAD_TEX0) {
		i830EmitTexVerified(dev, sarea_priv->TexState[0]);
		sarea_priv->dirty &= ~I830_UPLOAD_TEX0;
	}

	if (dirty & I830_UPLOAD_TEX1) {
		i830EmitTexVerified(dev, sarea_priv->TexState[1]);
		sarea_priv->dirty &= ~I830_UPLOAD_TEX1;
	}

	if (dirty & I830_UPLOAD_TEXBLEND0) {
		i830EmitTexBlendVerified(dev, sarea_priv->TexBlendState[0],
					 sarea_priv->TexBlendStateWordsUsed[0]);
		sarea_priv->dirty &= ~I830_UPLOAD_TEXBLEND0;
	}

	if (dirty & I830_UPLOAD_TEXBLEND1) {
		i830EmitTexBlendVerified(dev, sarea_priv->TexBlendState[1],
					 sarea_priv->TexBlendStateWordsUsed[1]);
		sarea_priv->dirty &= ~I830_UPLOAD_TEXBLEND1;
	}

	if (dirty & I830_UPLOAD_TEX_PALETTE_SHARED) {
		/* Note: the shared-palette dirty bit is never cleared here
		 * (and i830EmitTexPalette is currently a no-op anyway). */
		i830EmitTexPalette(dev, sarea_priv->Palette[0], 0, 1);
	} else {
		if (dirty & I830_UPLOAD_TEX_PALETTE_N(0)) {
			i830EmitTexPalette(dev, sarea_priv->Palette[0], 0, 0);
			sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(0);
		}
		if (dirty & I830_UPLOAD_TEX_PALETTE_N(1)) {
			i830EmitTexPalette(dev, sarea_priv->Palette[1], 1, 0);
			sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(1);
		}

		/* 1.3:
		 */
#if 0
		if (dirty & I830_UPLOAD_TEX_PALETTE_N(2)) {
			i830EmitTexPalette(dev, sarea_priv->Palette2[0], 0, 0);
			sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(2);
		}
		if (dirty & I830_UPLOAD_TEX_PALETTE_N(3)) {
			i830EmitTexPalette(dev, sarea_priv->Palette2[1], 1, 0);
			/* BUG(in disabled code): clears N(2) where N(3)
			 * was tested. */
			sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(2);
		}
#endif
	}

	/* 1.3:
	 */
	if (dirty & I830_UPLOAD_STIPPLE) {
		i830EmitStippleVerified(dev, sarea_priv->StippleState);
		sarea_priv->dirty &= ~I830_UPLOAD_STIPPLE;
	}

	if (dirty & I830_UPLOAD_TEX2) {
		i830EmitTexVerified(dev, sarea_priv->TexState2);
		sarea_priv->dirty &= ~I830_UPLOAD_TEX2;
	}

	if (dirty & I830_UPLOAD_TEX3) {
		i830EmitTexVerified(dev, sarea_priv->TexState3);
		sarea_priv->dirty &= ~I830_UPLOAD_TEX3;
	}

	if (dirty & I830_UPLOAD_TEXBLEND2) {
		i830EmitTexBlendVerified(dev,
					 sarea_priv->TexBlendState2,
					 sarea_priv->TexBlendStateWordsUsed2);
		sarea_priv->dirty &= ~I830_UPLOAD_TEXBLEND2;
	}

	if (dirty & I830_UPLOAD_TEXBLEND3) {
		i830EmitTexBlendVerified(dev,
					 sarea_priv->TexBlendState3,
					 sarea_priv->TexBlendStateWordsUsed3);
		sarea_priv->dirty &= ~I830_UPLOAD_TEXBLEND3;
	}
}
  656. /* ================================================================
  657. * Performance monitoring functions
  658. */
/*
 * Blit a solid w*h rectangle at (x, y) — offset by cliprect 0 — in the
 * given 8-bit-per-channel RGB colour, onto whichever buffer is
 * currently being displayed.  Used only for the performance boxes.
 */
static void i830_fill_box(struct drm_device *dev,
			  int x, int y, int w, int h, int r, int g, int b)
{
	drm_i830_private_t *dev_priv = dev->dev_private;
	u32 color;
	unsigned int BR13, CMD;
	RING_LOCALS;

	BR13 = (0xF0 << 16) | (dev_priv->pitch * dev_priv->cpp) | (1 << 24);
	CMD = XY_COLOR_BLT_CMD;
	x += dev_priv->sarea_priv->boxes[0].x1;
	y += dev_priv->sarea_priv->boxes[0].y1;

	if (dev_priv->cpp == 4) {
		/* 32bpp: write all ARGB channels, alpha forced opaque. */
		BR13 |= (1 << 25);
		CMD |= (XY_COLOR_BLT_WRITE_ALPHA | XY_COLOR_BLT_WRITE_RGB);
		color = (((0xff) << 24) | (r << 16) | (g << 8) | b);
	} else {
		/* 16bpp: pack the colour into RGB565. */
		color = (((r & 0xf8) << 8) |
			 ((g & 0xfc) << 3) | ((b & 0xf8) >> 3));
	}

	BEGIN_LP_RING(6);
	OUT_RING(CMD);
	OUT_RING(BR13);
	OUT_RING((y << 16) | x);
	OUT_RING(((y + h) << 16) | (x + w));

	/* Target the buffer currently being scanned out. */
	if (dev_priv->current_page == 1)
		OUT_RING(dev_priv->front_offset);
	else
		OUT_RING(dev_priv->back_offset);

	OUT_RING(color);
	ADVANCE_LP_RING();
}
/*
 * Draw the diagnostic "performance boxes" visualizing what happened in
 * the last frame (page flip, ring wait, lost context, texture load,
 * hardware never idle, DMA usage bar), then reset the counters.
 */
static void i830_cp_performance_boxes(struct drm_device *dev)
{
	drm_i830_private_t *dev_priv = dev->dev_private;

	/* Purple box for page flipping
	 */
	if (dev_priv->sarea_priv->perf_boxes & I830_BOX_FLIP)
		i830_fill_box(dev, 4, 4, 8, 8, 255, 0, 255);

	/* Red box if we have to wait for idle at any point
	 */
	if (dev_priv->sarea_priv->perf_boxes & I830_BOX_WAIT)
		i830_fill_box(dev, 16, 4, 8, 8, 255, 0, 0);

	/* Blue box: lost context?
	 */
	if (dev_priv->sarea_priv->perf_boxes & I830_BOX_LOST_CONTEXT)
		i830_fill_box(dev, 28, 4, 8, 8, 0, 0, 255);

	/* Yellow box for texture swaps
	 */
	if (dev_priv->sarea_priv->perf_boxes & I830_BOX_TEXTURE_LOAD)
		i830_fill_box(dev, 40, 4, 8, 8, 255, 255, 0);

	/* Green box if hardware never idles (as far as we can tell)
	 */
	if (!(dev_priv->sarea_priv->perf_boxes & I830_BOX_RING_EMPTY))
		i830_fill_box(dev, 64, 4, 8, 8, 0, 255, 0);

	/* Draw bars indicating number of buffers allocated
	 * (not a great measure, easily confused)
	 */
	if (dev_priv->dma_used) {
		int bar = dev_priv->dma_used / 10240;
		if (bar > 100)
			bar = 100;
		if (bar < 1)
			bar = 1;
		i830_fill_box(dev, 4, 16, bar, 4, 196, 128, 128);
		dev_priv->dma_used = 0;
	}

	dev_priv->sarea_priv->perf_boxes = 0;
}
/*
 * Emit 2D colour-fill blits that clear the requested buffers (front,
 * back and/or depth) within each valid cliprect from the sarea.
 *
 * @flags:           mask of I830_FRONT / I830_BACK / I830_DEPTH
 * @clear_color:     fill value for the colour buffers
 * @clear_zval:      fill value for the depth buffer
 * @clear_depthmask: per-channel write mask for the depth clear
 *                   (only honoured at 32bpp)
 */
static void i830_dma_dispatch_clear(struct drm_device *dev, int flags,
				    unsigned int clear_color,
				    unsigned int clear_zval,
				    unsigned int clear_depthmask)
{
	drm_i830_private_t *dev_priv = dev->dev_private;
	drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv;
	int nbox = sarea_priv->nbox;
	struct drm_clip_rect *pbox = sarea_priv->boxes;
	int pitch = dev_priv->pitch;
	int cpp = dev_priv->cpp;
	int i;
	unsigned int BR13, CMD, D_CMD;
	RING_LOCALS;

	/*
	 * While page 1 is being scanned out "front" and "back" are
	 * swapped, so exchange the two flags accordingly.
	 */
	if (dev_priv->current_page == 1) {
		unsigned int tmp = flags;

		flags &= ~(I830_FRONT | I830_BACK);
		if (tmp & I830_FRONT)
			flags |= I830_BACK;
		if (tmp & I830_BACK)
			flags |= I830_FRONT;
	}

	i830_kernel_lost_context(dev);

	/* Build the blit command and BR13 (raster op in bits 16-23,
	 * pitch in the low bits) for the current colour depth. */
	switch (cpp) {
	case 2:
		BR13 = (0xF0 << 16) | (pitch * cpp) | (1 << 24);
		D_CMD = CMD = XY_COLOR_BLT_CMD;
		break;
	case 4:
		BR13 = (0xF0 << 16) | (pitch * cpp) | (1 << 24) | (1 << 25);
		CMD = (XY_COLOR_BLT_CMD | XY_COLOR_BLT_WRITE_ALPHA |
		       XY_COLOR_BLT_WRITE_RGB);
		/* The depth clear honours the caller's channel mask */
		D_CMD = XY_COLOR_BLT_CMD;
		if (clear_depthmask & 0x00ffffff)
			D_CMD |= XY_COLOR_BLT_WRITE_RGB;
		if (clear_depthmask & 0xff000000)
			D_CMD |= XY_COLOR_BLT_WRITE_ALPHA;
		break;
	default:
		BR13 = (0xF0 << 16) | (pitch * cpp) | (1 << 24);
		D_CMD = CMD = XY_COLOR_BLT_CMD;
		break;
	}

	if (nbox > I830_NR_SAREA_CLIPRECTS)
		nbox = I830_NR_SAREA_CLIPRECTS;

	for (i = 0; i < nbox; i++, pbox++) {
		/* Skip degenerate or out-of-bounds cliprects */
		if (pbox->x1 > pbox->x2 ||
		    pbox->y1 > pbox->y2 ||
		    pbox->x2 > dev_priv->w || pbox->y2 > dev_priv->h)
			continue;

		if (flags & I830_FRONT) {
			DRM_DEBUG("clear front\n");
			BEGIN_LP_RING(6);
			OUT_RING(CMD);
			OUT_RING(BR13);
			OUT_RING((pbox->y1 << 16) | pbox->x1);
			OUT_RING((pbox->y2 << 16) | pbox->x2);
			OUT_RING(dev_priv->front_offset);
			OUT_RING(clear_color);
			ADVANCE_LP_RING();
		}

		if (flags & I830_BACK) {
			DRM_DEBUG("clear back\n");
			BEGIN_LP_RING(6);
			OUT_RING(CMD);
			OUT_RING(BR13);
			OUT_RING((pbox->y1 << 16) | pbox->x1);
			OUT_RING((pbox->y2 << 16) | pbox->x2);
			OUT_RING(dev_priv->back_offset);
			OUT_RING(clear_color);
			ADVANCE_LP_RING();
		}

		if (flags & I830_DEPTH) {
			DRM_DEBUG("clear depth\n");
			BEGIN_LP_RING(6);
			OUT_RING(D_CMD);
			OUT_RING(BR13);
			OUT_RING((pbox->y1 << 16) | pbox->x1);
			OUT_RING((pbox->y2 << 16) | pbox->x2);
			OUT_RING(dev_priv->depth_offset);
			OUT_RING(clear_zval);
			ADVANCE_LP_RING();
		}
	}
}
/*
 * "Swap" by copy: blit the just-rendered buffer onto whichever buffer
 * is currently being displayed, clipped to each valid cliprect.
 */
static void i830_dma_dispatch_swap(struct drm_device *dev)
{
	drm_i830_private_t *dev_priv = dev->dev_private;
	drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv;
	int nbox = sarea_priv->nbox;
	struct drm_clip_rect *pbox = sarea_priv->boxes;
	int pitch = dev_priv->pitch;
	int cpp = dev_priv->cpp;
	int i;
	unsigned int CMD, BR13;
	RING_LOCALS;

	DRM_DEBUG("swapbuffers\n");

	i830_kernel_lost_context(dev);

	/* Draw the diagnostic boxes first so the copy carries them over */
	if (dev_priv->do_boxes)
		i830_cp_performance_boxes(dev);

	/* Source-copy blit command and BR13 for the colour depth
	 * (0xCC in bits 16-23 is the source-copy raster op). */
	switch (cpp) {
	case 2:
		BR13 = (pitch * cpp) | (0xCC << 16) | (1 << 24);
		CMD = XY_SRC_COPY_BLT_CMD;
		break;
	case 4:
		BR13 = (pitch * cpp) | (0xCC << 16) | (1 << 24) | (1 << 25);
		CMD = (XY_SRC_COPY_BLT_CMD | XY_SRC_COPY_BLT_WRITE_ALPHA |
		       XY_SRC_COPY_BLT_WRITE_RGB);
		break;
	default:
		BR13 = (pitch * cpp) | (0xCC << 16) | (1 << 24);
		CMD = XY_SRC_COPY_BLT_CMD;
		break;
	}

	if (nbox > I830_NR_SAREA_CLIPRECTS)
		nbox = I830_NR_SAREA_CLIPRECTS;

	for (i = 0; i < nbox; i++, pbox++) {
		/* Skip degenerate or out-of-bounds cliprects */
		if (pbox->x1 > pbox->x2 ||
		    pbox->y1 > pbox->y2 ||
		    pbox->x2 > dev_priv->w || pbox->y2 > dev_priv->h)
			continue;

		DRM_DEBUG("dispatch swap %d,%d-%d,%d!\n",
			  pbox->x1, pbox->y1, pbox->x2, pbox->y2);

		BEGIN_LP_RING(8);
		OUT_RING(CMD);
		OUT_RING(BR13);
		OUT_RING((pbox->y1 << 16) | pbox->x1);
		OUT_RING((pbox->y2 << 16) | pbox->x2);

		/* Destination: the buffer currently being displayed */
		if (dev_priv->current_page == 0)
			OUT_RING(dev_priv->front_offset);
		else
			OUT_RING(dev_priv->back_offset);

		OUT_RING((pbox->y1 << 16) | pbox->x1);
		OUT_RING(BR13 & 0xffff);	/* source pitch only */

		/* Source: the other (just rendered) buffer */
		if (dev_priv->current_page == 0)
			OUT_RING(dev_priv->back_offset);
		else
			OUT_RING(dev_priv->front_offset);

		ADVANCE_LP_RING();
	}
}
/*
 * Queue an asynchronous page flip: flush the map cache, point scanout
 * at the other buffer, then stall the ring until the flip completes.
 * Toggles dev_priv->current_page and mirrors it into the sarea.
 */
static void i830_dma_dispatch_flip(struct drm_device *dev)
{
	drm_i830_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;

	DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
		  __func__,
		  dev_priv->current_page,
		  dev_priv->sarea_priv->pf_current_page);

	i830_kernel_lost_context(dev);

	if (dev_priv->do_boxes) {
		dev_priv->sarea_priv->perf_boxes |= I830_BOX_FLIP;
		i830_cp_performance_boxes(dev);
	}

	/* Flush caches before changing the display base address */
	BEGIN_LP_RING(2);
	OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE);
	OUT_RING(0);
	ADVANCE_LP_RING();

	/* Program the new scanout base and toggle the tracked page.
	 * NOTE(review): 6 dwords are reserved but only 4 emitted; the
	 * extra reservation is harmless but looks unintentional. */
	BEGIN_LP_RING(6);
	OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
	OUT_RING(0);
	if (dev_priv->current_page == 0) {
		OUT_RING(dev_priv->back_offset);
		dev_priv->current_page = 1;
	} else {
		OUT_RING(dev_priv->front_offset);
		dev_priv->current_page = 0;
	}
	OUT_RING(0);
	ADVANCE_LP_RING();

	/* Block further ring execution until the flip has happened */
	BEGIN_LP_RING(2);
	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
	OUT_RING(0);
	ADVANCE_LP_RING();

	/* Tell userspace which page is now being scanned out */
	dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
}
/*
 * Dispatch a user vertex buffer: optionally take ownership from the
 * client, emit pending state, then execute the buffer as a batch once
 * per cliprect (or once if there are no cliprects).
 *
 * @buf:     DMA buffer holding the vertex data
 * @discard: nonzero if the client is done with the buffer and it should
 *           be returned to the free pool once the hardware finishes
 * @used:    number of bytes of the buffer actually filled
 */
static void i830_dma_dispatch_vertex(struct drm_device *dev,
				     struct drm_buf *buf, int discard, int used)
{
	drm_i830_private_t *dev_priv = dev->dev_private;
	drm_i830_buf_priv_t *buf_priv = buf->dev_private;
	drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv;
	struct drm_clip_rect *box = sarea_priv->boxes;
	int nbox = sarea_priv->nbox;
	unsigned long address = (unsigned long)buf->bus_address;
	unsigned long start = address - dev->agp->base;	/* offset in aperture */
	int i = 0, u;
	RING_LOCALS;

	i830_kernel_lost_context(dev);

	if (nbox > I830_NR_SAREA_CLIPRECTS)
		nbox = I830_NR_SAREA_CLIPRECTS;

	/* Move the buffer from CLIENT to HARDWARE ownership */
	if (discard) {
		u = cmpxchg(buf_priv->in_use, I830_BUF_CLIENT,
			    I830_BUF_HARDWARE);
		if (u != I830_BUF_CLIENT)
			DRM_DEBUG("xxxx 2\n");
	}

	/* NOTE(review): buffers over 4092 bytes are silently executed
	 * with used == 0 — presumably an upper bound tied to the buffer
	 * size; confirm against the allocation path. */
	if (used > 4 * 1023)
		used = 0;

	if (sarea_priv->dirty)
		i830EmitState(dev);

	DRM_DEBUG("dispatch vertex addr 0x%lx, used 0x%x nbox %d\n",
		  address, used, nbox);

	dev_priv->counter++;
	DRM_DEBUG("dispatch counter : %ld\n", dev_priv->counter);
	DRM_DEBUG("i830_dma_dispatch\n");
	DRM_DEBUG("start : %lx\n", start);
	DRM_DEBUG("used : %d\n", used);
	DRM_DEBUG("start + used - 4 : %ld\n", start + used - 4);

	if (buf_priv->currently_mapped == I830_BUF_MAPPED) {
		/* Patch a primitive header into the first dword; the
		 * inline vertex length is in dwords minus two. */
		u32 *vp = buf_priv->kernel_virtual;

		vp[0] = (GFX_OP_PRIMITIVE |
			 sarea_priv->vertex_prim | ((used / 4) - 2));

		/* Terminate the batch when using MI_BATCH_BUFFER_START */
		if (dev_priv->use_mi_batchbuffer_start) {
			vp[used / 4] = MI_BATCH_BUFFER_END;
			used += 4;
		}

		/* Pad with a zero dword if the batch length is an odd
		 * number of dwords (keeps it qword aligned). */
		if (used & 4) {
			vp[used / 4] = 0;
			used += 4;
		}

		i830_unmap_buffer(buf);
	}
	if (used) {
		do {
			/* Program the drawing rectangle for this cliprect */
			if (i < nbox) {
				BEGIN_LP_RING(6);
				OUT_RING(GFX_OP_DRAWRECT_INFO);
				OUT_RING(sarea_priv->
					 BufferState[I830_DESTREG_DR1]);
				OUT_RING(box[i].x1 | (box[i].y1 << 16));
				OUT_RING(box[i].x2 | (box[i].y2 << 16));
				OUT_RING(sarea_priv->
					 BufferState[I830_DESTREG_DR4]);
				OUT_RING(0);
				ADVANCE_LP_RING();
			}

			/* Execute the buffer, either via batch-start or
			 * with explicit start/end addresses. */
			if (dev_priv->use_mi_batchbuffer_start) {
				BEGIN_LP_RING(2);
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
				OUT_RING(start | MI_BATCH_NON_SECURE);
				ADVANCE_LP_RING();
			} else {
				BEGIN_LP_RING(4);
				OUT_RING(MI_BATCH_BUFFER);
				OUT_RING(start | MI_BATCH_NON_SECURE);
				OUT_RING(start + used - 4);
				OUT_RING(0);
				ADVANCE_LP_RING();
			}
		} while (++i < nbox);	/* executes once even if nbox == 0 */
	}

	if (discard) {
		/* Age the buffer: store the new counter in the status
		 * page, mark the buffer FREE in its use-flag slot and
		 * request a head pointer report. */
		dev_priv->counter++;
		(void)cmpxchg(buf_priv->in_use, I830_BUF_CLIENT,
			      I830_BUF_HARDWARE);

		BEGIN_LP_RING(8);
		OUT_RING(CMD_STORE_DWORD_IDX);
		OUT_RING(20);
		OUT_RING(dev_priv->counter);
		OUT_RING(CMD_STORE_DWORD_IDX);
		OUT_RING(buf_priv->my_use_idx);
		OUT_RING(I830_BUF_FREE);
		OUT_RING(CMD_REPORT_HEAD);
		OUT_RING(0);
		ADVANCE_LP_RING();
	}
}
/*
 * Wait for the hardware to go idle: flush caches, request a head
 * report, then spin until the ring has fully drained.
 */
static void i830_dma_quiescent(struct drm_device *dev)
{
	drm_i830_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;

	i830_kernel_lost_context(dev);

	BEGIN_LP_RING(4);
	OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE);
	OUT_RING(CMD_REPORT_HEAD);
	OUT_RING(0);
	OUT_RING(0);
	ADVANCE_LP_RING();

	/* Ring.Size - 8 free bytes means the ring is empty */
	i830_wait_ring(dev, dev_priv->ring.Size - 8, __func__);
}
/*
 * Drain the ring, then sweep every DMA buffer: buffers the hardware is
 * done with (HARDWARE state) are returned to the free pool.  Buffers
 * still owned by a client are left alone.  Always returns 0.
 */
static int i830_flush_queue(struct drm_device *dev)
{
	drm_i830_private_t *dev_priv = dev->dev_private;
	struct drm_device_dma *dma = dev->dma;
	int i, ret = 0;
	RING_LOCALS;

	i830_kernel_lost_context(dev);

	BEGIN_LP_RING(2);
	OUT_RING(CMD_REPORT_HEAD);
	OUT_RING(0);
	ADVANCE_LP_RING();

	/* Wait for the ring to drain completely */
	i830_wait_ring(dev, dev_priv->ring.Size - 8, __func__);

	for (i = 0; i < dma->buf_count; i++) {
		struct drm_buf *buf = dma->buflist[i];
		drm_i830_buf_priv_t *buf_priv = buf->dev_private;

		/* Atomic HARDWARE -> FREE transition; no-op otherwise */
		int used = cmpxchg(buf_priv->in_use, I830_BUF_HARDWARE,
				   I830_BUF_FREE);

		if (used == I830_BUF_HARDWARE)
			DRM_DEBUG("reclaimed from HARDWARE\n");
		if (used == I830_BUF_CLIENT)
			DRM_DEBUG("still on client\n");
	}

	return ret;
}
  1033. /* Must be called with the lock held */
  1034. static void i830_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv)
  1035. {
  1036. struct drm_device_dma *dma = dev->dma;
  1037. int i;
  1038. if (!dma)
  1039. return;
  1040. if (!dev->dev_private)
  1041. return;
  1042. if (!dma->buflist)
  1043. return;
  1044. i830_flush_queue(dev);
  1045. for (i = 0; i < dma->buf_count; i++) {
  1046. struct drm_buf *buf = dma->buflist[i];
  1047. drm_i830_buf_priv_t *buf_priv = buf->dev_private;
  1048. if (buf->file_priv == file_priv && buf_priv) {
  1049. int used = cmpxchg(buf_priv->in_use, I830_BUF_CLIENT,
  1050. I830_BUF_FREE);
  1051. if (used == I830_BUF_CLIENT)
  1052. DRM_DEBUG("reclaimed from client\n");
  1053. if (buf_priv->currently_mapped == I830_BUF_MAPPED)
  1054. buf_priv->currently_mapped = I830_BUF_UNMAPPED;
  1055. }
  1056. }
  1057. }
/* I830_FLUSH ioctl: drain the ring and reclaim finished buffers. */
static int i830_flush_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	LOCK_TEST_WITH_RETURN(dev, file_priv);

	i830_flush_queue(dev);
	return 0;
}
  1065. static int i830_dma_vertex(struct drm_device *dev, void *data,
  1066. struct drm_file *file_priv)
  1067. {
  1068. struct drm_device_dma *dma = dev->dma;
  1069. drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
  1070. u32 *hw_status = dev_priv->hw_status_page;
  1071. drm_i830_sarea_t *sarea_priv = (drm_i830_sarea_t *)
  1072. dev_priv->sarea_priv;
  1073. drm_i830_vertex_t *vertex = data;
  1074. LOCK_TEST_WITH_RETURN(dev, file_priv);
  1075. DRM_DEBUG("i830 dma vertex, idx %d used %d discard %d\n",
  1076. vertex->idx, vertex->used, vertex->discard);
  1077. if (vertex->idx < 0 || vertex->idx > dma->buf_count)
  1078. return -EINVAL;
  1079. i830_dma_dispatch_vertex(dev,
  1080. dma->buflist[vertex->idx],
  1081. vertex->discard, vertex->used);
  1082. sarea_priv->last_enqueue = dev_priv->counter - 1;
  1083. sarea_priv->last_dispatch = (int)hw_status[5];
  1084. return 0;
  1085. }
/* I830_CLEAR ioctl: clear colour/depth buffers within the cliprects. */
static int i830_clear_bufs(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	drm_i830_clear_t *clear = data;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	/* GH: Someone's doing nasty things... */
	if (!dev->dev_private)
		return -EINVAL;

	i830_dma_dispatch_clear(dev, clear->flags,
				clear->clear_color,
				clear->clear_depth, clear->clear_depthmask);
	return 0;
}
/* I830_SWAP ioctl: copy the rendered buffer to the displayed one. */
static int i830_swap_bufs(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	DRM_DEBUG("i830_swap_bufs\n");

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	i830_dma_dispatch_swap(dev);
	return 0;
}
  1107. /* Not sure why this isn't set all the time:
  1108. */
  1109. static void i830_do_init_pageflip(struct drm_device *dev)
  1110. {
  1111. drm_i830_private_t *dev_priv = dev->dev_private;
  1112. DRM_DEBUG("%s\n", __func__);
  1113. dev_priv->page_flipping = 1;
  1114. dev_priv->current_page = 0;
  1115. dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
  1116. }
  1117. static int i830_do_cleanup_pageflip(struct drm_device *dev)
  1118. {
  1119. drm_i830_private_t *dev_priv = dev->dev_private;
  1120. DRM_DEBUG("%s\n", __func__);
  1121. if (dev_priv->current_page != 0)
  1122. i830_dma_dispatch_flip(dev);
  1123. dev_priv->page_flipping = 0;
  1124. return 0;
  1125. }
/* I830_FLIP ioctl: lazily enable page flipping, then queue a flip. */
static int i830_flip_bufs(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	drm_i830_private_t *dev_priv = dev->dev_private;

	DRM_DEBUG("%s\n", __func__);

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	/* First flip request turns page flipping on */
	if (!dev_priv->page_flipping)
		i830_do_init_pageflip(dev);

	i830_dma_dispatch_flip(dev);
	return 0;
}
  1137. static int i830_getage(struct drm_device *dev, void *data,
  1138. struct drm_file *file_priv)
  1139. {
  1140. drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
  1141. u32 *hw_status = dev_priv->hw_status_page;
  1142. drm_i830_sarea_t *sarea_priv = (drm_i830_sarea_t *)
  1143. dev_priv->sarea_priv;
  1144. sarea_priv->last_dispatch = (int)hw_status[5];
  1145. return 0;
  1146. }
  1147. static int i830_getbuf(struct drm_device *dev, void *data,
  1148. struct drm_file *file_priv)
  1149. {
  1150. int retcode = 0;
  1151. drm_i830_dma_t *d = data;
  1152. drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
  1153. u32 *hw_status = dev_priv->hw_status_page;
  1154. drm_i830_sarea_t *sarea_priv = (drm_i830_sarea_t *)
  1155. dev_priv->sarea_priv;
  1156. DRM_DEBUG("getbuf\n");
  1157. LOCK_TEST_WITH_RETURN(dev, file_priv);
  1158. d->granted = 0;
  1159. retcode = i830_dma_get_buffer(dev, d, file_priv);
  1160. DRM_DEBUG("i830_dma: %d returning %d, granted = %d\n",
  1161. task_pid_nr(current), retcode, d->granted);
  1162. sarea_priv->last_dispatch = (int)hw_status[5];
  1163. return retcode;
  1164. }
  1165. static int i830_copybuf(struct drm_device *dev, void *data,
  1166. struct drm_file *file_priv)
  1167. {
  1168. /* Never copy - 2.4.x doesn't need it */
  1169. return 0;
  1170. }
  1171. static int i830_docopy(struct drm_device *dev, void *data,
  1172. struct drm_file *file_priv)
  1173. {
  1174. return 0;
  1175. }
  1176. static int i830_getparam(struct drm_device *dev, void *data,
  1177. struct drm_file *file_priv)
  1178. {
  1179. drm_i830_private_t *dev_priv = dev->dev_private;
  1180. drm_i830_getparam_t *param = data;
  1181. int value;
  1182. if (!dev_priv) {
  1183. DRM_ERROR("%s called with no initialization\n", __func__);
  1184. return -EINVAL;
  1185. }
  1186. switch (param->param) {
  1187. case I830_PARAM_IRQ_ACTIVE:
  1188. value = dev->irq_enabled;
  1189. break;
  1190. default:
  1191. return -EINVAL;
  1192. }
  1193. if (copy_to_user(param->value, &value, sizeof(int))) {
  1194. DRM_ERROR("copy_to_user\n");
  1195. return -EFAULT;
  1196. }
  1197. return 0;
  1198. }
  1199. static int i830_setparam(struct drm_device *dev, void *data,
  1200. struct drm_file *file_priv)
  1201. {
  1202. drm_i830_private_t *dev_priv = dev->dev_private;
  1203. drm_i830_setparam_t *param = data;
  1204. if (!dev_priv) {
  1205. DRM_ERROR("%s called with no initialization\n", __func__);
  1206. return -EINVAL;
  1207. }
  1208. switch (param->param) {
  1209. case I830_SETPARAM_USE_MI_BATCHBUFFER_START:
  1210. dev_priv->use_mi_batchbuffer_start = param->value;
  1211. break;
  1212. default:
  1213. return -EINVAL;
  1214. }
  1215. return 0;
  1216. }
/*
 * Driver load hook: register the four extra statistics counters the
 * i830 driver exposes beyond the DRM core defaults.  Always returns 0.
 */
int i830_driver_load(struct drm_device *dev, unsigned long flags)
{
	/* i830 has 4 more counters */
	dev->counters += 4;
	dev->types[6] = _DRM_STAT_IRQ;
	dev->types[7] = _DRM_STAT_PRIMARY;
	dev->types[8] = _DRM_STAT_SECONDARY;
	dev->types[9] = _DRM_STAT_DMA;
	return 0;
}
/* Last-close hook: tear down all DMA and ring state. */
void i830_driver_lastclose(struct drm_device *dev)
{
	i830_dma_cleanup(dev);
}
  1231. void i830_driver_preclose(struct drm_device *dev, struct drm_file *file_priv)
  1232. {
  1233. if (dev->dev_private) {
  1234. drm_i830_private_t *dev_priv = dev->dev_private;
  1235. if (dev_priv->page_flipping)
  1236. i830_do_cleanup_pageflip(dev);
  1237. }
  1238. }
/* Reclaim hook (called with the lock held): free this file's buffers. */
void i830_driver_reclaim_buffers_locked(struct drm_device *dev, struct drm_file *file_priv)
{
	i830_reclaim_buffers(dev, file_priv);
}
/* DMA-quiescent hook: block until the hardware is idle; always 0. */
int i830_driver_dma_quiescent(struct drm_device *dev)
{
	i830_dma_quiescent(dev);
	return 0;
}
  1248. /*
  1249. * call the drm_ioctl under the big kernel lock because
  1250. * to lock against the i830_mmap_buffers function.
  1251. */
  1252. long i830_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
  1253. {
  1254. int ret;
  1255. lock_kernel();
  1256. ret = drm_ioctl(file, cmd, arg);
  1257. unlock_kernel();
  1258. return ret;
  1259. }
/* Ioctl dispatch table.  All entries require DRM authentication; INIT
 * is additionally restricted to the master/root.  Every entry is
 * DRM_UNLOCKED because serialization is done in i830_ioctl() via the
 * big kernel lock. */
struct drm_ioctl_desc i830_ioctls[] = {
	DRM_IOCTL_DEF_DRV(I830_INIT, i830_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I830_VERTEX, i830_dma_vertex, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I830_CLEAR, i830_clear_bufs, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I830_FLUSH, i830_flush_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I830_GETAGE, i830_getage, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I830_GETBUF, i830_getbuf, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I830_SWAP, i830_swap_bufs, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I830_COPY, i830_copybuf, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I830_DOCOPY, i830_docopy, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I830_FLIP, i830_flip_bufs, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I830_IRQ_EMIT, i830_irq_emit, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I830_IRQ_WAIT, i830_irq_wait, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I830_GETPARAM, i830_getparam, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I830_SETPARAM, i830_setparam, DRM_AUTH|DRM_UNLOCKED),
};
/* Number of entries in i830_ioctls[]. */
int i830_max_ioctl = DRM_ARRAY_SIZE(i830_ioctls);
  1277. /**
  1278. * Determine if the device really is AGP or not.
  1279. *
  1280. * All Intel graphics chipsets are treated as AGP, even if they are really
  1281. * PCI-e.
  1282. *
  1283. * \param dev The device to be tested.
  1284. *
  1285. * \returns
 * A value of 1 is always returned to indicate every i8xx is AGP.
  1287. */
int i830_driver_device_is_agp(struct drm_device *dev)
{
	/* Every i8xx part is treated as AGP, even PCI-e variants. */
	return 1;
}