gamma_old_dma.h

/* drm_dma.c -- DMA IOCTL and function support -*- linux-c -*-
 * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *    Gareth Hughes <gareth@valinux.com>
 */

/* Gamma-specific code pulled from drm_dma.h:
 */
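/* Clear the device's next-buffer pointer.  If the queue that was about to
 * be serviced has drained, wake any process sleeping on its flush queue.
 */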
void DRM(clear_next_buffer)(drm_device_t *dev)
{
	drm_device_dma_t *dma = dev->dma;

	dma->next_buffer = NULL;
	if (dma->next_queue && !DRM_BUFCOUNT(&dma->next_queue->waitlist)) {
		wake_up_interruptible(&dma->next_queue->flush_queue);
	}
	dma->next_queue = NULL;
}
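/* Pick the next context queue to service.  "While locked" DMA on the
 * kernel context takes priority; otherwise the current context keeps the
 * hardware for up to DRM_TIME_SLICE jiffies before a round-robin scan
 * chooses a new candidate.  Returns a context number, or -1 when there is
 * nothing to schedule or a context-switch timer was armed instead.
 */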
int DRM(select_queue)(drm_device_t *dev, void (*wrapper)(unsigned long))
{
	int i;
	int candidate = -1;
	int j	      = jiffies;

	if (!dev) {
		DRM_ERROR("No device\n");
		return -1;
	}
	if (!dev->queuelist || !dev->queuelist[DRM_KERNEL_CONTEXT]) {
		/* This only happens between the time the
		   interrupt is initialized and the time
		   the queues are initialized. */
		return -1;
	}

	/* Doing "while locked" DMA? */
	if (DRM_WAITCOUNT(dev, DRM_KERNEL_CONTEXT)) {
		return DRM_KERNEL_CONTEXT;
	}

	/* If there are buffers on the last_context
	   queue, and we have not been executing
	   this context very long, continue to
	   execute this context. */
	if (dev->last_switch <= j
	    && dev->last_switch + DRM_TIME_SLICE > j
	    && DRM_WAITCOUNT(dev, dev->last_context)) {
		return dev->last_context;
	}

	/* Otherwise, find a candidate */
	for (i = dev->last_checked + 1; i < dev->queue_count; i++) {
		if (DRM_WAITCOUNT(dev, i)) {
			candidate = dev->last_checked = i;
			break;
		}
	}

	if (candidate < 0) {
		for (i = 0; i < dev->queue_count; i++) {
			if (DRM_WAITCOUNT(dev, i)) {
				candidate = dev->last_checked = i;
				break;
			}
		}
	}

	if (wrapper
	    && candidate >= 0
	    && candidate != dev->last_context
	    && dev->last_switch <= j
	    && dev->last_switch + DRM_TIME_SLICE > j) {
		if (dev->timer.expires != dev->last_switch + DRM_TIME_SLICE) {
			del_timer(&dev->timer);
			dev->timer.function = wrapper;
			dev->timer.data	    = (unsigned long)dev;
			dev->timer.expires  = dev->last_switch + DRM_TIME_SLICE;
			add_timer(&dev->timer);
		}
		return -1;
	}

	return candidate;
}
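/* Queue a set of buffers for DMA on a context.  Buffer indices come from
 * user space via d->send_indices; each buffer is validated (bounds,
 * ownership, not already waiting or pending) before being placed on the
 * context's waitlist.  Sleeps interruptibly while writes to the queue are
 * blocked.
 */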
int DRM(dma_enqueue)(struct file *filp, drm_dma_t *d)
{
	drm_file_t	 *priv = filp->private_data;
	drm_device_t	 *dev  = priv->dev;
	int		  i;
	drm_queue_t	 *q;
	drm_buf_t	 *buf;
	int		  idx;
	int		  while_locked = 0;
	drm_device_dma_t *dma = dev->dma;
	int		 *ind;
	int		  err;
	DECLARE_WAITQUEUE(entry, current);

	DRM_DEBUG("%d\n", d->send_count);

	if (d->flags & _DRM_DMA_WHILE_LOCKED) {
		int context = dev->lock.hw_lock->lock;

		if (!_DRM_LOCK_IS_HELD(context)) {
			DRM_ERROR("No lock held during \"while locked\""
				  " request\n");
			return -EINVAL;
		}
		if (d->context != _DRM_LOCKING_CONTEXT(context)
		    && _DRM_LOCKING_CONTEXT(context) != DRM_KERNEL_CONTEXT) {
			DRM_ERROR("Lock held by %d while %d makes"
				  " \"while locked\" request\n",
				  _DRM_LOCKING_CONTEXT(context),
				  d->context);
			return -EINVAL;
		}
		q = dev->queuelist[DRM_KERNEL_CONTEXT];
		while_locked = 1;
	} else {
		q = dev->queuelist[d->context];
	}

	atomic_inc(&q->use_count);
	if (atomic_read(&q->block_write)) {
		add_wait_queue(&q->write_queue, &entry);
		atomic_inc(&q->block_count);
		for (;;) {
			current->state = TASK_INTERRUPTIBLE;
			if (!atomic_read(&q->block_write)) break;
			schedule();
			if (signal_pending(current)) {
				atomic_dec(&q->use_count);
				remove_wait_queue(&q->write_queue, &entry);
				return -EINTR;
			}
		}
		atomic_dec(&q->block_count);
		current->state = TASK_RUNNING;
		remove_wait_queue(&q->write_queue, &entry);
	}

	ind = DRM(alloc)(d->send_count * sizeof(int), DRM_MEM_DRIVER);
	if (!ind) {
		atomic_dec(&q->use_count); /* drop the reference taken above */
		return -ENOMEM;
	}

	if (copy_from_user(ind, d->send_indices, d->send_count * sizeof(int))) {
		err = -EFAULT;
		goto out;
	}

	err = -EINVAL;
	for (i = 0; i < d->send_count; i++) {
		idx = ind[i];
		if (idx < 0 || idx >= dma->buf_count) {
			DRM_ERROR("Index %d (of %d max)\n",
				  ind[i], dma->buf_count - 1);
			goto out;
		}
		buf = dma->buflist[ idx ];
		if (buf->filp != filp) {
			DRM_ERROR("Process %d using buffer not owned\n",
				  current->pid);
			goto out;
		}
		if (buf->list != DRM_LIST_NONE) {
			DRM_ERROR("Process %d using buffer %d on list %d\n",
				  current->pid, buf->idx, buf->list);
			goto out;
		}
		buf->used	  = ind[i];
		buf->while_locked = while_locked;
		buf->context	  = d->context;
		if (!buf->used) {
			DRM_ERROR("Queueing 0 length buffer\n");
		}
		if (buf->pending) {
			DRM_ERROR("Queueing pending buffer:"
				  " buffer %d, offset %d\n",
				  ind[i], i);
			goto out;
		}
		if (buf->waiting) {
			DRM_ERROR("Queueing waiting buffer:"
				  " buffer %d, offset %d\n",
				  ind[i], i);
			goto out;
		}
		buf->waiting = 1;
		if (atomic_read(&q->use_count) == 1
		    || atomic_read(&q->finalization)) {
			DRM(free_buffer)(dev, buf);
		} else {
			DRM(waitlist_put)(&q->waitlist, buf);
			atomic_inc(&q->total_queued);
		}
	}
	DRM(free)(ind, d->send_count * sizeof(int), DRM_MEM_DRIVER);
	atomic_dec(&q->use_count);
	return 0;

out:
	DRM(free)(ind, d->send_count * sizeof(int), DRM_MEM_DRIVER);
	atomic_dec(&q->use_count);
	return err;
}
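/* Hand out up to d->request_count free buffers of one size order, copying
 * each buffer's index and total size back to user space and bumping
 * d->granted_count.  Stops early when the freelist runs dry.
 */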
static int DRM(dma_get_buffers_of_order)(struct file *filp, drm_dma_t *d,
					 int order)
{
	drm_file_t	 *priv = filp->private_data;
	drm_device_t	 *dev  = priv->dev;
	int		  i;
	drm_buf_t	 *buf;
	drm_device_dma_t *dma = dev->dma;

	for (i = d->granted_count; i < d->request_count; i++) {
		buf = DRM(freelist_get)(&dma->bufs[order].freelist,
					d->flags & _DRM_DMA_WAIT);
		if (!buf) break;
		if (buf->pending || buf->waiting) {
			DRM_ERROR("Free buffer %d in use: filp %p (w%d, p%d)\n",
				  buf->idx,
				  buf->filp,
				  buf->waiting,
				  buf->pending);
		}
		buf->filp = filp;
		if (copy_to_user(&d->request_indices[i],
				 &buf->idx,
				 sizeof(buf->idx)))
			return -EFAULT;
		if (copy_to_user(&d->request_sizes[i],
				 &buf->total,
				 sizeof(buf->total)))
			return -EFAULT;

		++d->granted_count;
	}
	return 0;
}
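/* Satisfy a buffer request: try the order matching the requested size
 * first, then fall back to smaller orders (_DRM_DMA_SMALLER_OK) and/or
 * larger orders (_DRM_DMA_LARGER_OK) until the request is filled or the
 * order range is exhausted.
 */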
int DRM(dma_get_buffers)(struct file *filp, drm_dma_t *dma)
{
	int order;
	int retcode = 0;
	int tmp_order;

	order = DRM(order)(dma->request_size);

	dma->granted_count = 0;
	retcode = DRM(dma_get_buffers_of_order)(filp, dma, order);

	if (dma->granted_count < dma->request_count
	    && (dma->flags & _DRM_DMA_SMALLER_OK)) {
		for (tmp_order = order - 1;
		     !retcode
			     && dma->granted_count < dma->request_count
			     && tmp_order >= DRM_MIN_ORDER;
		     --tmp_order) {
			retcode = DRM(dma_get_buffers_of_order)(filp, dma,
								tmp_order);
		}
	}

	if (dma->granted_count < dma->request_count
	    && (dma->flags & _DRM_DMA_LARGER_OK)) {
		for (tmp_order = order + 1;
		     !retcode
			     && dma->granted_count < dma->request_count
			     && tmp_order <= DRM_MAX_ORDER;
		     ++tmp_order) {
			retcode = DRM(dma_get_buffers_of_order)(filp, dma,
								tmp_order);
		}
	}
	return retcode;	/* report any copy or allocation failure to the caller */
}