/* gamma_dma.c -- DMA support for GMX 2000 -*- linux-c -*-
 * Created: Fri Mar 19 14:30:16 1999 by faith@precisioninsight.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *
 */

#include "gamma.h"
#include "drmP.h"
#include "drm.h"
#include "gamma_drm.h"
#include "gamma_drv.h"

#include <linux/interrupt.h>	/* For task queue support */
#include <linux/delay.h>
static inline void gamma_dma_dispatch(drm_device_t *dev, unsigned long address,
				      unsigned long length)
{
	drm_gamma_private_t *dev_priv =
		(drm_gamma_private_t *)dev->dev_private;
	mb();
	while (GAMMA_READ(GAMMA_INFIFOSPACE) < 2)
		cpu_relax();

	GAMMA_WRITE(GAMMA_DMAADDRESS, address);

	while (GAMMA_READ(GAMMA_GCOMMANDSTATUS) != 4)
		cpu_relax();

	GAMMA_WRITE(GAMMA_DMACOUNT, length / 4);
}
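
/* Quiescence helpers: spin until GAMMA_DMACOUNT drains to zero, then issue a
 * Sync through the FilterMode/Sync registers and poll the output FIFO until
 * GAMMA_SYNC_TAG is read back, which presumably indicates the pipeline is
 * idle.  The dual variant broadcasts to both MX rasterizers and reads each
 * output FIFO in turn.
 */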
void gamma_dma_quiescent_single(drm_device_t *dev)
{
	drm_gamma_private_t *dev_priv =
		(drm_gamma_private_t *)dev->dev_private;
	while (GAMMA_READ(GAMMA_DMACOUNT))
		cpu_relax();

	while (GAMMA_READ(GAMMA_INFIFOSPACE) < 2)
		cpu_relax();

	GAMMA_WRITE(GAMMA_FILTERMODE, 1 << 10);
	GAMMA_WRITE(GAMMA_SYNC, 0);

	do {
		while (!GAMMA_READ(GAMMA_OUTFIFOWORDS))
			cpu_relax();
	} while (GAMMA_READ(GAMMA_OUTPUTFIFO) != GAMMA_SYNC_TAG);
}

void gamma_dma_quiescent_dual(drm_device_t *dev)
{
	drm_gamma_private_t *dev_priv =
		(drm_gamma_private_t *)dev->dev_private;
	while (GAMMA_READ(GAMMA_DMACOUNT))
		cpu_relax();

	while (GAMMA_READ(GAMMA_INFIFOSPACE) < 3)
		cpu_relax();

	GAMMA_WRITE(GAMMA_BROADCASTMASK, 3);
	GAMMA_WRITE(GAMMA_FILTERMODE, 1 << 10);
	GAMMA_WRITE(GAMMA_SYNC, 0);

	/* Read from first MX */
	do {
		while (!GAMMA_READ(GAMMA_OUTFIFOWORDS))
			cpu_relax();
	} while (GAMMA_READ(GAMMA_OUTPUTFIFO) != GAMMA_SYNC_TAG);

	/* Read from second MX */
	do {
		while (!GAMMA_READ(GAMMA_OUTFIFOWORDS + 0x10000))
			cpu_relax();
	} while (GAMMA_READ(GAMMA_OUTPUTFIFO + 0x10000) != GAMMA_SYNC_TAG);
}
void gamma_dma_ready(drm_device_t *dev)
{
	drm_gamma_private_t *dev_priv =
		(drm_gamma_private_t *)dev->dev_private;
	while (GAMMA_READ(GAMMA_DMACOUNT))
		cpu_relax();
}

static inline int gamma_dma_is_ready(drm_device_t *dev)
{
	drm_gamma_private_t *dev_priv =
		(drm_gamma_private_t *)dev->dev_private;
	return (!GAMMA_READ(GAMMA_DMACOUNT));
}
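
/* Interrupt handler: bumps the IRQ statistics counter, re-arms the delay
 * timer and writes the command/interrupt flag registers (presumably
 * acknowledging the interrupt), and, if the DMA engine is idle, frees the
 * buffer that just completed and schedules the work queue so
 * gamma_dma_schedule() can dispatch the next one.
 */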
irqreturn_t gamma_driver_irq_handler( DRM_IRQ_ARGS )
{
	drm_device_t *dev = (drm_device_t *)arg;
	drm_device_dma_t *dma = dev->dma;
	drm_gamma_private_t *dev_priv =
		(drm_gamma_private_t *)dev->dev_private;

	/* FIXME: should check whether we're actually interested in the interrupt? */
	atomic_inc(&dev->counts[6]); /* _DRM_STAT_IRQ */

	while (GAMMA_READ(GAMMA_INFIFOSPACE) < 3)
		cpu_relax();

	GAMMA_WRITE(GAMMA_GDELAYTIMER, 0xc350/2); /* 0x05S */
	GAMMA_WRITE(GAMMA_GCOMMANDINTFLAGS, 8);
	GAMMA_WRITE(GAMMA_GINTFLAGS, 0x2001);

	if (gamma_dma_is_ready(dev)) {
		/* Free previous buffer */
		if (test_and_set_bit(0, &dev->dma_flag))
			return IRQ_HANDLED;
		if (dma->this_buffer) {
			gamma_free_buffer(dev, dma->this_buffer);
			dma->this_buffer = NULL;
		}
		clear_bit(0, &dev->dma_flag);

		/* Dispatch new buffer */
		schedule_work(&dev->work);
	}
	return IRQ_HANDLED;
}
/* Only called by gamma_dma_schedule. */
static int gamma_do_dma(drm_device_t *dev, int locked)
{
	unsigned long address;
	unsigned long length;
	drm_buf_t *buf;
	int retcode = 0;
	drm_device_dma_t *dma = dev->dma;

	if (test_and_set_bit(0, &dev->dma_flag)) return -EBUSY;

	if (!dma->next_buffer) {
		DRM_ERROR("No next_buffer\n");
		clear_bit(0, &dev->dma_flag);
		return -EINVAL;
	}

	buf = dma->next_buffer;
	/* WE NOW ARE ON LOGICAL PAGES!! - using page table setup in dma_init */
	/* So we pass the buffer index value into the physical page offset */
	address = buf->idx << 12;
	length = buf->used;

	DRM_DEBUG("context %d, buffer %d (%ld bytes)\n",
		  buf->context, buf->idx, length);

	if (buf->list == DRM_LIST_RECLAIM) {
		gamma_clear_next_buffer(dev);
		gamma_free_buffer(dev, buf);
		clear_bit(0, &dev->dma_flag);
		return -EINVAL;
	}

	if (!length) {
		DRM_ERROR("0 length buffer\n");
		gamma_clear_next_buffer(dev);
		gamma_free_buffer(dev, buf);
		clear_bit(0, &dev->dma_flag);
		return 0;
	}

	if (!gamma_dma_is_ready(dev)) {
		clear_bit(0, &dev->dma_flag);
		return -EBUSY;
	}

	if (buf->while_locked) {
		if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
			DRM_ERROR("Dispatching buffer %d from pid %d"
				  " \"while locked\", but no lock held\n",
				  buf->idx, current->pid);
		}
	} else {
		if (!locked && !gamma_lock_take(&dev->lock.hw_lock->lock,
						DRM_KERNEL_CONTEXT)) {
			clear_bit(0, &dev->dma_flag);
			return -EBUSY;
		}
	}

	if (dev->last_context != buf->context
	    && !(dev->queuelist[buf->context]->flags
		 & _DRM_CONTEXT_PRESERVED)) {
		/* PRE: dev->last_context != buf->context */
		if (DRM(context_switch)(dev, dev->last_context,
					buf->context)) {
			DRM(clear_next_buffer)(dev);
			DRM(free_buffer)(dev, buf);
		}
		retcode = -EBUSY;
		goto cleanup;

		/* POST: we will wait for the context
		   switch and will dispatch on a later call
		   when dev->last_context == buf->context.
		   NOTE WE HOLD THE LOCK THROUGHOUT THIS
		   TIME! */
	}

	gamma_clear_next_buffer(dev);
	buf->pending = 1;
	buf->waiting = 0;
	buf->list = DRM_LIST_PEND;

	/* WE NOW ARE ON LOGICAL PAGES!!! - overriding address */
	address = buf->idx << 12;

	gamma_dma_dispatch(dev, address, length);
	gamma_free_buffer(dev, dma->this_buffer);
	dma->this_buffer = buf;

	atomic_inc(&dev->counts[7]); /* _DRM_STAT_DMA */
	atomic_add(length, &dev->counts[8]); /* _DRM_STAT_PRIMARY */

	if (!buf->while_locked && !dev->context_flag && !locked) {
		if (gamma_lock_free(dev, &dev->lock.hw_lock->lock,
				    DRM_KERNEL_CONTEXT)) {
			DRM_ERROR("\n");
		}
	}
cleanup:
	clear_bit(0, &dev->dma_flag);
	return retcode;
}
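
/* Bottom-half entry points: both the timer callback and the immediate work
 * handler simply funnel into gamma_dma_schedule() with locked == 0.
 */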
static void gamma_dma_timer_bh(unsigned long dev)
{
	gamma_dma_schedule((drm_device_t *)dev, 0);
}

void gamma_irq_immediate_bh(void *dev)
{
	gamma_dma_schedule(dev, 0);
}
int gamma_dma_schedule(drm_device_t *dev, int locked)
{
	int next;
	drm_queue_t *q;
	drm_buf_t *buf;
	int retcode = 0;
	int processed = 0;
	int missed;
	int expire = 20;
	drm_device_dma_t *dma = dev->dma;

	if (test_and_set_bit(0, &dev->interrupt_flag)) {
		/* Not reentrant */
		atomic_inc(&dev->counts[10]); /* _DRM_STAT_MISSED */
		return -EBUSY;
	}
	missed = atomic_read(&dev->counts[10]);

again:
	if (dev->context_flag) {
		clear_bit(0, &dev->interrupt_flag);
		return -EBUSY;
	}
	if (dma->next_buffer) {
		/* Unsent buffer that was previously
		   selected, but that couldn't be sent
		   because the lock could not be obtained
		   or the DMA engine wasn't ready.  Try
		   again. */
		if (!(retcode = gamma_do_dma(dev, locked))) ++processed;
	} else {
		do {
			next = gamma_select_queue(dev, gamma_dma_timer_bh);
			if (next >= 0) {
				q = dev->queuelist[next];
				buf = gamma_waitlist_get(&q->waitlist);
				dma->next_buffer = buf;
				dma->next_queue = q;
				if (buf && buf->list == DRM_LIST_RECLAIM) {
					gamma_clear_next_buffer(dev);
					gamma_free_buffer(dev, buf);
				}
			}
		} while (next >= 0 && !dma->next_buffer);
		if (dma->next_buffer) {
			if (!(retcode = gamma_do_dma(dev, locked))) {
				++processed;
			}
		}
	}

	if (--expire) {
		if (missed != atomic_read(&dev->counts[10])) {
			if (gamma_dma_is_ready(dev)) goto again;
		}
		if (processed && gamma_dma_is_ready(dev)) {
			processed = 0;
			goto again;
		}
	}

	clear_bit(0, &dev->interrupt_flag);
	return retcode;
}
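
/* Priority path for the DMA ioctl (_DRM_DMA_PRIORITY): the caller's buffers
 * are dispatched synchronously, in order, from this function rather than
 * being queued for gamma_dma_schedule().  Interrupt handling is held off via
 * dev->interrupt_flag, and the hardware lock is taken unless the request was
 * flagged _DRM_DMA_WHILE_LOCKED.
 */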
static int gamma_dma_priority(struct file *filp,
			      drm_device_t *dev, drm_dma_t *d)
{
	unsigned long address;
	unsigned long length;
	int must_free = 0;
	int retcode = 0;
	int i;
	int idx;
	drm_buf_t *buf;
	drm_buf_t *last_buf = NULL;
	drm_device_dma_t *dma = dev->dma;
	int *send_indices = NULL;
	int *send_sizes = NULL;

	DECLARE_WAITQUEUE(entry, current);

	/* Turn off interrupt handling */
	while (test_and_set_bit(0, &dev->interrupt_flag)) {
		schedule();
		if (signal_pending(current)) return -EINTR;
	}
	if (!(d->flags & _DRM_DMA_WHILE_LOCKED)) {
		while (!gamma_lock_take(&dev->lock.hw_lock->lock,
					DRM_KERNEL_CONTEXT)) {
			schedule();
			if (signal_pending(current)) {
				clear_bit(0, &dev->interrupt_flag);
				return -EINTR;
			}
		}
		++must_free;
	}
	send_indices = DRM(alloc)(d->send_count * sizeof(*send_indices),
				  DRM_MEM_DRIVER);
	if (send_indices == NULL) {
		/* go through cleanup so the lock and interrupt_flag taken
		   above are released */
		retcode = -ENOMEM;
		goto cleanup;
	}
	if (copy_from_user(send_indices, d->send_indices,
			   d->send_count * sizeof(*send_indices))) {
		retcode = -EFAULT;
		goto cleanup;
	}

	send_sizes = DRM(alloc)(d->send_count * sizeof(*send_sizes),
				DRM_MEM_DRIVER);
	if (send_sizes == NULL) {
		/* cleanup also frees send_indices */
		retcode = -ENOMEM;
		goto cleanup;
	}
	if (copy_from_user(send_sizes, d->send_sizes,
			   d->send_count * sizeof(*send_sizes))) {
		retcode = -EFAULT;
		goto cleanup;
	}
	for (i = 0; i < d->send_count; i++) {
		idx = send_indices[i];
		if (idx < 0 || idx >= dma->buf_count) {
			DRM_ERROR("Index %d (of %d max)\n",
				  send_indices[i], dma->buf_count - 1);
			continue;
		}
		buf = dma->buflist[ idx ];
		if (buf->filp != filp) {
			DRM_ERROR("Process %d using buffer not owned\n",
				  current->pid);
			retcode = -EINVAL;
			goto cleanup;
		}
		if (buf->list != DRM_LIST_NONE) {
			DRM_ERROR("Process %d using buffer on list %d\n",
				  current->pid, buf->list);
			retcode = -EINVAL;
			goto cleanup;
		}
		/* This isn't a race condition on
		   buf->list, since our concern is the
		   buffer reclaim during the time the
		   process closes the /dev/drm? handle, so
		   it can't also be doing DMA. */
		buf->list         = DRM_LIST_PRIO;
		buf->used         = send_sizes[i];
		buf->context      = d->context;
		buf->while_locked = d->flags & _DRM_DMA_WHILE_LOCKED;
		address           = (unsigned long)buf->address;
		length            = buf->used;
		if (!length) {
			DRM_ERROR("0 length buffer\n");
		}
		if (buf->pending) {
			DRM_ERROR("Sending pending buffer:"
				  " buffer %d, offset %d\n",
				  send_indices[i], i);
			retcode = -EINVAL;
			goto cleanup;
		}
		if (buf->waiting) {
			DRM_ERROR("Sending waiting buffer:"
				  " buffer %d, offset %d\n",
				  send_indices[i], i);
			retcode = -EINVAL;
			goto cleanup;
		}
		buf->pending = 1;

		if (dev->last_context != buf->context
		    && !(dev->queuelist[buf->context]->flags
			 & _DRM_CONTEXT_PRESERVED)) {
			add_wait_queue(&dev->context_wait, &entry);
			current->state = TASK_INTERRUPTIBLE;
			/* PRE: dev->last_context != buf->context */
			DRM(context_switch)(dev, dev->last_context,
					    buf->context);
			/* POST: we will wait for the context
			   switch and will dispatch on a later call
			   when dev->last_context == buf->context.
			   NOTE WE HOLD THE LOCK THROUGHOUT THIS
			   TIME! */
			schedule();
			current->state = TASK_RUNNING;
			remove_wait_queue(&dev->context_wait, &entry);
			if (signal_pending(current)) {
				retcode = -EINTR;
				goto cleanup;
			}
			if (dev->last_context != buf->context) {
				DRM_ERROR("Context mismatch: %d %d\n",
					  dev->last_context,
					  buf->context);
			}
		}

		gamma_dma_dispatch(dev, address, length);
		atomic_inc(&dev->counts[9]); /* _DRM_STAT_SPECIAL */
		atomic_add(length, &dev->counts[8]); /* _DRM_STAT_PRIMARY */

		if (last_buf) {
			gamma_free_buffer(dev, last_buf);
		}
		last_buf = buf;
	}

cleanup:
	if (last_buf) {
		gamma_dma_ready(dev);
		gamma_free_buffer(dev, last_buf);
	}
	if (send_indices)
		DRM(free)(send_indices, d->send_count * sizeof(*send_indices),
			  DRM_MEM_DRIVER);
	if (send_sizes)
		DRM(free)(send_sizes, d->send_count * sizeof(*send_sizes),
			  DRM_MEM_DRIVER);

	if (must_free && !dev->context_flag) {
		if (gamma_lock_free(dev, &dev->lock.hw_lock->lock,
				    DRM_KERNEL_CONTEXT)) {
			DRM_ERROR("\n");
		}
	}
	clear_bit(0, &dev->interrupt_flag);
	return retcode;
}
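
/* Normal (non-priority) send path: enqueue the buffers, kick the scheduler,
 * and, if _DRM_DMA_BLOCK was requested, sleep on the last buffer's wait
 * queue until it is neither waiting nor pending.
 */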
static int gamma_dma_send_buffers(struct file *filp,
				  drm_device_t *dev, drm_dma_t *d)
{
	DECLARE_WAITQUEUE(entry, current);
	drm_buf_t *last_buf = NULL;
	int retcode = 0;
	drm_device_dma_t *dma = dev->dma;
	int send_index;

	if (get_user(send_index, &d->send_indices[d->send_count-1]))
		return -EFAULT;

	if (d->flags & _DRM_DMA_BLOCK) {
		last_buf = dma->buflist[send_index];
		add_wait_queue(&last_buf->dma_wait, &entry);
	}

	if ((retcode = gamma_dma_enqueue(filp, d))) {
		if (d->flags & _DRM_DMA_BLOCK)
			remove_wait_queue(&last_buf->dma_wait, &entry);
		return retcode;
	}

	gamma_dma_schedule(dev, 0);

	if (d->flags & _DRM_DMA_BLOCK) {
		DRM_DEBUG("%d waiting\n", current->pid);
		for (;;) {
			current->state = TASK_INTERRUPTIBLE;
			if (!last_buf->waiting && !last_buf->pending)
				break; /* finished */
			schedule();
			if (signal_pending(current)) {
				retcode = -EINTR; /* Can't restart */
				break;
			}
		}
		current->state = TASK_RUNNING;
		DRM_DEBUG("%d running\n", current->pid);
		remove_wait_queue(&last_buf->dma_wait, &entry);
		if (!retcode
		    || (last_buf->list == DRM_LIST_PEND && !last_buf->pending)) {
			if (!waitqueue_active(&last_buf->dma_wait)) {
				gamma_free_buffer(dev, last_buf);
			}
		}
		if (retcode) {
			DRM_ERROR("ctx%d w%d p%d c%ld i%d l%d pid:%d\n",
				  d->context,
				  last_buf->waiting,
				  last_buf->pending,
				  (long)DRM_WAITCOUNT(dev, d->context),
				  last_buf->idx,
				  last_buf->list,
				  current->pid);
		}
	}
	return retcode;
}
int gamma_dma(struct inode *inode, struct file *filp, unsigned int cmd,
	      unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_device_dma_t *dma = dev->dma;
	int retcode = 0;
	drm_dma_t __user *argp = (void __user *)arg;
	drm_dma_t d;

	if (copy_from_user(&d, argp, sizeof(d)))
		return -EFAULT;

	if (d.send_count < 0 || d.send_count > dma->buf_count) {
		DRM_ERROR("Process %d trying to send %d buffers (of %d max)\n",
			  current->pid, d.send_count, dma->buf_count);
		return -EINVAL;
	}

	if (d.request_count < 0 || d.request_count > dma->buf_count) {
		DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
			  current->pid, d.request_count, dma->buf_count);
		return -EINVAL;
	}

	if (d.send_count) {
		if (d.flags & _DRM_DMA_PRIORITY)
			retcode = gamma_dma_priority(filp, dev, &d);
		else
			retcode = gamma_dma_send_buffers(filp, dev, &d);
	}

	d.granted_count = 0;

	if (!retcode && d.request_count) {
		retcode = gamma_dma_get_buffers(filp, &d);
	}

	DRM_DEBUG("%d returning, granted = %d\n",
		  current->pid, d.granted_count);
	if (copy_to_user(argp, &d, sizeof(d)))
		return -EFAULT;

	return retcode;
}
/* =============================================================
 * DMA initialization, cleanup
 */
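
/* gamma_do_init_dma() builds a simple page table in the buffer at index
 * GLINT_DRI_BUF_COUNT: one entry per DMA buffer, each with 0x07 set in the
 * low bits (presumably valid/flag bits), and then points the chip at it via
 * GAMMA_PAGETABLEADDR/GAMMA_PAGETABLELENGTH.  This is the "logical pages"
 * scheme referred to in gamma_do_dma(), where buf->idx << 12 is used as the
 * DMA address.
 */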
static int gamma_do_init_dma( drm_device_t *dev, drm_gamma_init_t *init )
{
	drm_gamma_private_t *dev_priv;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_t *buf;
	int i;
	struct list_head *list;
	unsigned long *pgt;

	DRM_DEBUG( "%s\n", __FUNCTION__ );

	dev_priv = DRM(alloc)( sizeof(drm_gamma_private_t),
			       DRM_MEM_DRIVER );
	if ( !dev_priv )
		return -ENOMEM;

	dev->dev_private = (void *)dev_priv;

	memset( dev_priv, 0, sizeof(drm_gamma_private_t) );

	dev_priv->num_rast = init->num_rast;

	list_for_each(list, &dev->maplist->head) {
		drm_map_list_t *r_list = list_entry(list, drm_map_list_t, head);
		if( r_list->map &&
		    r_list->map->type == _DRM_SHM &&
		    r_list->map->flags & _DRM_CONTAINS_LOCK ) {
			dev_priv->sarea = r_list->map;
			break;
		}
	}

	dev_priv->mmio0 = drm_core_findmap(dev, init->mmio0);
	dev_priv->mmio1 = drm_core_findmap(dev, init->mmio1);
	dev_priv->mmio2 = drm_core_findmap(dev, init->mmio2);
	dev_priv->mmio3 = drm_core_findmap(dev, init->mmio3);

	dev_priv->sarea_priv = (drm_gamma_sarea_t *)
		((u8 *)dev_priv->sarea->handle +
		 init->sarea_priv_offset);

	if (init->pcimode) {
		buf = dma->buflist[GLINT_DRI_BUF_COUNT];
		pgt = buf->address;

		for (i = 0; i < GLINT_DRI_BUF_COUNT; i++) {
			buf = dma->buflist[i];
			*pgt = virt_to_phys((void*)buf->address) | 0x07;
			pgt++;
		}

		buf = dma->buflist[GLINT_DRI_BUF_COUNT];
	} else {
		dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
		drm_core_ioremap( dev->agp_buffer_map, dev);

		buf = dma->buflist[GLINT_DRI_BUF_COUNT];
		pgt = buf->address;

		for (i = 0; i < GLINT_DRI_BUF_COUNT; i++) {
			buf = dma->buflist[i];
			*pgt = (unsigned long)buf->address + 0x07;
			pgt++;
		}

		buf = dma->buflist[GLINT_DRI_BUF_COUNT];

		while (GAMMA_READ(GAMMA_INFIFOSPACE) < 1)
			;
		GAMMA_WRITE( GAMMA_GDMACONTROL, 0xe);
	}
	while (GAMMA_READ(GAMMA_INFIFOSPACE) < 2)
		;
	GAMMA_WRITE( GAMMA_PAGETABLEADDR, virt_to_phys((void*)buf->address) );
	GAMMA_WRITE( GAMMA_PAGETABLELENGTH, 2 );

	return 0;
}
int gamma_do_cleanup_dma( drm_device_t *dev )
{
	DRM_DEBUG( "%s\n", __FUNCTION__ );

	/* Make sure interrupts are disabled here because the uninstall ioctl
	 * may not have been called from userspace and after dev_private
	 * is freed, it's too late.
	 */
	if (drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
		if ( dev->irq_enabled )
			DRM(irq_uninstall)(dev);

	if ( dev->dev_private ) {
		if ( dev->agp_buffer_map != NULL )
			drm_core_ioremapfree( dev->agp_buffer_map, dev );

		DRM(free)( dev->dev_private, sizeof(drm_gamma_private_t),
			   DRM_MEM_DRIVER );
		dev->dev_private = NULL;
	}

	return 0;
}
int gamma_dma_init( struct inode *inode, struct file *filp,
		    unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_gamma_init_t init;

	LOCK_TEST_WITH_RETURN( dev, filp );

	if ( copy_from_user( &init, (drm_gamma_init_t __user *)arg, sizeof(init) ) )
		return -EFAULT;

	switch ( init.func ) {
	case GAMMA_INIT_DMA:
		return gamma_do_init_dma( dev, &init );
	case GAMMA_CLEANUP_DMA:
		return gamma_do_cleanup_dma( dev );
	}

	return -EINVAL;
}
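
/* Screen-to-screen copy support.  The command stream that would perform the
 * copy is still stubbed out below (#if 0) and nothing is ever dispatched, so
 * the copy ioctl (gamma_dma_copy) is effectively a no-op.
 */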
static int gamma_do_copy_dma( drm_device_t *dev, drm_gamma_copy_t *copy )
{
	drm_device_dma_t *dma = dev->dma;
	unsigned int *screenbuf;

	DRM_DEBUG( "%s\n", __FUNCTION__ );

	/* We've DRM_RESTRICTED this DMA buffer */

	screenbuf = dma->buflist[ GLINT_DRI_BUF_COUNT + 1 ]->address;

#if 0
	*buffer++ = 0x180;	/* Tag (FilterMode) */
	*buffer++ = 0x200;	/* Allow FBColor through */
	*buffer++ = 0x53B;	/* Tag */
	*buffer++ = copy->Pitch;
	*buffer++ = 0x53A;	/* Tag */
	*buffer++ = copy->SrcAddress;
	*buffer++ = 0x539;	/* Tag */
	*buffer++ = copy->WidthHeight; /* Initiates transfer */
	*buffer++ = 0x53C;	/* Tag - DMAOutputAddress */
	*buffer++ = virt_to_phys((void*)screenbuf);
	*buffer++ = 0x53D;	/* Tag - DMAOutputCount */
	*buffer++ = copy->Count; /* Reads HostOutFifo BLOCKS until .. */

	/* Data now sitting in dma->buflist[ GLINT_DRI_BUF_COUNT + 1 ] */
	/* Now put it back to the screen */

	*buffer++ = 0x180;	/* Tag (FilterMode) */
	*buffer++ = 0x400;	/* Allow Sync through */
	*buffer++ = 0x538;	/* Tag - DMARectangleReadTarget */
	*buffer++ = 0x155;	/* FBSourceData | count */
	*buffer++ = 0x537;	/* Tag */
	*buffer++ = copy->Pitch;
	*buffer++ = 0x536;	/* Tag */
	*buffer++ = copy->DstAddress;
	*buffer++ = 0x535;	/* Tag */
	*buffer++ = copy->WidthHeight; /* Initiates transfer */
	*buffer++ = 0x530;	/* Tag - DMAAddr */
	*buffer++ = virt_to_phys((void*)screenbuf);
	*buffer++ = 0x531;
	*buffer++ = copy->Count; /* initiates DMA transfer of color data */
#endif

	/* need to dispatch it now */

	return 0;
}
int gamma_dma_copy( struct inode *inode, struct file *filp,
		    unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_gamma_copy_t copy;

	if ( copy_from_user( &copy, (drm_gamma_copy_t __user *)arg, sizeof(copy) ) )
		return -EFAULT;

	return gamma_do_copy_dma( dev, &copy );
}
/* =============================================================
 * Per Context SAREA Support
 */
int gamma_getsareactx(struct inode *inode, struct file *filp,
		      unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_ctx_priv_map_t __user *argp = (void __user *)arg;
	drm_ctx_priv_map_t request;
	drm_map_t *map;

	if (copy_from_user(&request, argp, sizeof(request)))
		return -EFAULT;

	down(&dev->struct_sem);
	if ((int)request.ctx_id >= dev->max_context) {
		up(&dev->struct_sem);
		return -EINVAL;
	}

	map = dev->context_sareas[request.ctx_id];
	up(&dev->struct_sem);

	request.handle = map->handle;
	if (copy_to_user(argp, &request, sizeof(request)))
		return -EFAULT;
	return 0;
}
int gamma_setsareactx(struct inode *inode, struct file *filp,
		      unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_ctx_priv_map_t request;
	drm_map_t *map = NULL;
	drm_map_list_t *r_list;
	struct list_head *list;

	if (copy_from_user(&request,
			   (drm_ctx_priv_map_t __user *)arg,
			   sizeof(request)))
		return -EFAULT;

	down(&dev->struct_sem);
	r_list = NULL;
	list_for_each(list, &dev->maplist->head) {
		r_list = list_entry(list, drm_map_list_t, head);
		if (r_list->map &&
		    r_list->map->handle == request.handle) break;
	}
	if (list == &(dev->maplist->head)) {
		up(&dev->struct_sem);
		return -EINVAL;
	}
	map = r_list->map;
	up(&dev->struct_sem);

	if (!map)
		return -EINVAL;

	down(&dev->struct_sem);
	if ((int)request.ctx_id >= dev->max_context) {
		up(&dev->struct_sem);
		return -EINVAL;
	}
	dev->context_sareas[request.ctx_id] = map;
	up(&dev->struct_sem);
	return 0;
}
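
/* IRQ (un)install hooks: preinstall sets the command mode and clears
 * GDMACONTROL, postinstall enables the interrupt sources and the delay
 * timer, and uninstall writes them all back to zero.
 */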
void gamma_driver_irq_preinstall( drm_device_t *dev )
{
	drm_gamma_private_t *dev_priv =
		(drm_gamma_private_t *)dev->dev_private;

	while (GAMMA_READ(GAMMA_INFIFOSPACE) < 2)
		cpu_relax();

	GAMMA_WRITE( GAMMA_GCOMMANDMODE, 0x00000004 );
	GAMMA_WRITE( GAMMA_GDMACONTROL, 0x00000000 );
}

void gamma_driver_irq_postinstall( drm_device_t *dev )
{
	drm_gamma_private_t *dev_priv =
		(drm_gamma_private_t *)dev->dev_private;

	while (GAMMA_READ(GAMMA_INFIFOSPACE) < 3)
		cpu_relax();

	GAMMA_WRITE( GAMMA_GINTENABLE, 0x00002001 );
	GAMMA_WRITE( GAMMA_COMMANDINTENABLE, 0x00000008 );
	GAMMA_WRITE( GAMMA_GDELAYTIMER, 0x00039090 );
}

void gamma_driver_irq_uninstall( drm_device_t *dev )
{
	drm_gamma_private_t *dev_priv =
		(drm_gamma_private_t *)dev->dev_private;
	if (!dev_priv)
		return;

	while (GAMMA_READ(GAMMA_INFIFOSPACE) < 3)
		cpu_relax();

	GAMMA_WRITE( GAMMA_GDELAYTIMER, 0x00000000 );
	GAMMA_WRITE( GAMMA_COMMANDINTENABLE, 0x00000000 );
	GAMMA_WRITE( GAMMA_GINTENABLE, 0x00000000 );
}
extern drm_ioctl_desc_t DRM(ioctls)[];

static int gamma_driver_preinit(drm_device_t *dev)
{
	/* reset the finish ioctl */
	DRM(ioctls)[DRM_IOCTL_NR(DRM_IOCTL_FINISH)].func = DRM(finish);
	return 0;
}

static void gamma_driver_pretakedown(drm_device_t *dev)
{
	gamma_do_cleanup_dma(dev);
}

static void gamma_driver_dma_ready(drm_device_t *dev)
{
	gamma_dma_ready(dev);
}

static int gamma_driver_dma_quiescent(drm_device_t *dev)
{
	drm_gamma_private_t *dev_priv =
		(drm_gamma_private_t *)dev->dev_private;
	if (dev_priv->num_rast == 2)
		gamma_dma_quiescent_dual(dev);
	else
		gamma_dma_quiescent_single(dev);
	return 0;
}

void gamma_driver_register_fns(drm_device_t *dev)
{
	dev->driver_features = DRIVER_USE_AGP | DRIVER_USE_MTRR |
			       DRIVER_PCI_DMA | DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ;
	DRM(fops).read = gamma_fops_read;
	DRM(fops).poll = gamma_fops_poll;
	dev->driver.preinit = gamma_driver_preinit;
	dev->driver.pretakedown = gamma_driver_pretakedown;
	dev->driver.dma_ready = gamma_driver_dma_ready;
	dev->driver.dma_quiescent = gamma_driver_dma_quiescent;
	dev->driver.dma_flush_block_and_flush = gamma_flush_block_and_flush;
	dev->driver.dma_flush_unblock = gamma_flush_unblock;
}