gamma_context.h

/* drm_context.h -- IOCTLs for generic contexts -*- linux-c -*-
 * Created: Fri Nov 24 18:31:37 2000 by gareth@valinux.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *    Gareth Hughes <gareth@valinux.com>
 *
 * ChangeLog:
 *  2001-11-16	Torsten Duwe <duwe@caldera.de>
 *		added context constructor/destructor hooks,
 *		needed by SiS driver's memory management.
 */

/* ================================================================
 * Old-style context support -- only used by gamma.
 */

/* The drm_read and drm_write_string code (especially that which manages
   the circular buffer), is based on Alessandro Rubini's LINUX DEVICE
   DRIVERS (Cambridge: O'Reilly, 1998), pages 111-113. */

ssize_t gamma_fops_read(struct file *filp, char __user *buf, size_t count,
			loff_t *off)
{
	drm_file_t   *priv = filp->private_data;
	drm_device_t *dev  = priv->dev;
	int	      left;
	int	      avail;
	int	      send;
	int	      cur;

	DRM_DEBUG("%p, %p\n", dev->buf_rp, dev->buf_wp);

	while (dev->buf_rp == dev->buf_wp) {
		DRM_DEBUG("  sleeping\n");
		if (filp->f_flags & O_NONBLOCK) {
			return -EAGAIN;
		}
		interruptible_sleep_on(&dev->buf_readers);
		if (signal_pending(current)) {
			DRM_DEBUG("  interrupted\n");
			return -ERESTARTSYS;
		}
		DRM_DEBUG("  awake\n");
	}

	left  = (dev->buf_rp + DRM_BSZ - dev->buf_wp) % DRM_BSZ;
	avail = DRM_BSZ - left;
	send  = DRM_MIN(avail, count);

	while (send) {
		if (dev->buf_wp > dev->buf_rp) {
			cur = DRM_MIN(send, dev->buf_wp - dev->buf_rp);
		} else {
			cur = DRM_MIN(send, dev->buf_end - dev->buf_rp);
		}
		if (copy_to_user(buf, dev->buf_rp, cur))
			return -EFAULT;
		buf += cur;	/* advance the user pointer along with the
				 * read pointer so a wrapped read fills the
				 * caller's buffer contiguously */
		dev->buf_rp += cur;
		if (dev->buf_rp == dev->buf_end) dev->buf_rp = dev->buf;
		send -= cur;
	}

	wake_up_interruptible(&dev->buf_writers);
	return DRM_MIN(avail, count);
}

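/* Worked example of the ring-buffer arithmetic used above (the value 16
 * stands in for DRM_BSZ purely for illustration; the real size is defined
 * elsewhere in the DRM headers):
 *
 *   buf = 0, buf_end = 16, buf_rp = 14, buf_wp = 4
 *   left  = (14 + 16 - 4) % 16 = 10   bytes still free for the writer
 *   avail = 16 - 10            =  6   bytes ready for the reader
 *
 * Because buf_wp < buf_rp the pending data wraps: the first chunk is
 * buf_end - buf_rp = 2 bytes, buf_rp then resets to buf, and the remaining
 * 4 bytes are copied from the start of the buffer.
 */
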
/* In an incredibly convoluted setup, the kernel module actually calls
 * back into the X server to perform context switches on behalf of the
 * 3d clients.
 */
int DRM(write_string)(drm_device_t *dev, const char *s)
{
	int left   = (dev->buf_rp + DRM_BSZ - dev->buf_wp) % DRM_BSZ;
	int send   = strlen(s);
	int count;

	DRM_DEBUG("%d left, %d to send (%p, %p)\n",
		  left, send, dev->buf_rp, dev->buf_wp);

	if (left == 1 || dev->buf_wp != dev->buf_rp) {
		DRM_ERROR("Buffer not empty (%d left, wp = %p, rp = %p)\n",
			  left,
			  dev->buf_wp,
			  dev->buf_rp);
	}
	while (send) {
		if (dev->buf_wp >= dev->buf_rp) {
			count = DRM_MIN(send, dev->buf_end - dev->buf_wp);
			if (count == left) --count; /* Leave a hole */
		} else {
			count = DRM_MIN(send, dev->buf_rp - dev->buf_wp - 1);
		}
		strncpy(dev->buf_wp, s, count);
		s += count;	/* advance the source string along with the
				 * write pointer when the message wraps */
		dev->buf_wp += count;
		if (dev->buf_wp == dev->buf_end) dev->buf_wp = dev->buf;
		send -= count;
	}

	if (dev->buf_async) kill_fasync(&dev->buf_async, SIGIO, POLL_IN);

	DRM_DEBUG("waking\n");
	wake_up_interruptible(&dev->buf_readers);
	return 0;
}

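/* Note on the "hole" left above: both branches of the copy loop stop one
 * byte short of the read pointer, so the buffer is never completely filled.
 * That keeps the empty and full states distinguishable -- buf_wp == buf_rp
 * always means "empty", which is exactly what gamma_fops_read() and
 * gamma_fops_poll() test for.
 */
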
unsigned int gamma_fops_poll(struct file *filp, struct poll_table_struct *wait)
{
	drm_file_t   *priv = filp->private_data;
	drm_device_t *dev  = priv->dev;

	poll_wait(filp, &dev->buf_readers, wait);
	if (dev->buf_wp != dev->buf_rp) return POLLIN | POLLRDNORM;
	return 0;
}

int DRM(context_switch)(drm_device_t *dev, int old, int new)
{
	char	     buf[64];
	drm_queue_t *q;

	if (test_and_set_bit(0, &dev->context_flag)) {
		DRM_ERROR("Reentering -- FIXME\n");
		return -EBUSY;
	}

	DRM_DEBUG("Context switch from %d to %d\n", old, new);

	if (new >= dev->queue_count) {
		clear_bit(0, &dev->context_flag);
		return -EINVAL;
	}

	if (new == dev->last_context) {
		clear_bit(0, &dev->context_flag);
		return 0;
	}

	q = dev->queuelist[new];
	atomic_inc(&q->use_count);
	if (atomic_read(&q->use_count) == 1) {
		atomic_dec(&q->use_count);
		clear_bit(0, &dev->context_flag);
		return -EINVAL;
	}

	/* This causes the X server to wake up & do a bunch of hardware
	 * interaction to actually effect the context switch.
	 */
	sprintf(buf, "C %d %d\n", old, new);
	DRM(write_string)(dev, buf);

	atomic_dec(&q->use_count);

	return 0;
}

int DRM(context_switch_complete)(drm_device_t *dev, int new)
{
	drm_device_dma_t *dma = dev->dma;

	dev->last_context = new;  /* PRE/POST: This is the _only_ writer. */
	dev->last_switch  = jiffies;

	if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
		DRM_ERROR("Lock isn't held after context switch\n");
	}

	if (!dma || !(dma->next_buffer && dma->next_buffer->while_locked)) {
		if (DRM(lock_free)(dev, &dev->lock.hw_lock->lock,
				   DRM_KERNEL_CONTEXT)) {
			DRM_ERROR("Cannot free lock\n");
		}
	}

	clear_bit(0, &dev->context_flag);
	wake_up_interruptible(&dev->context_wait);

	return 0;
}

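/* The two functions above are the kernel half of the context-switch
 * handshake.  The sketch below is a rough illustration of what the
 * user-space (X server) half might look like: it drains the "C <old> <new>"
 * command written by DRM(write_string)(), does the hardware work, and then
 * reports completion through DRM_IOCTL_NEW_CTX, which lands in
 * DRM(context_switch_complete)() via DRM(newctx)() below.  The ioctl name
 * and drm_ctx_t come from the standard drm.h; everything else (drm_fd, the
 * helper itself) is hypothetical and is not taken from the real X server.
 */
#if 0	/* illustrative user-space sketch -- not compiled with this file */
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include "drm.h"

static void service_context_switch(int drm_fd)
{
	char	  cmd[64];
	drm_ctx_t ctx;
	int	  old, new;
	ssize_t	  n;

	n = read(drm_fd, cmd, sizeof(cmd) - 1);	/* blocks in gamma_fops_read() */
	if (n <= 0)
		return;
	cmd[n] = '\0';
	if (sscanf(cmd, "C %d %d", &old, &new) != 2)
		return;

	/* ... program the hardware for context 'new' ... */

	ctx.handle = new;
	ioctl(drm_fd, DRM_IOCTL_NEW_CTX, &ctx);	/* completes the switch */
}
#endif
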
static int DRM(init_queue)(drm_device_t *dev, drm_queue_t *q, drm_ctx_t *ctx)
{
	DRM_DEBUG("\n");

	if (atomic_read(&q->use_count) != 1
	    || atomic_read(&q->finalization)
	    || atomic_read(&q->block_count)) {
		DRM_ERROR("New queue is already in use: u%d f%d b%d\n",
			  atomic_read(&q->use_count),
			  atomic_read(&q->finalization),
			  atomic_read(&q->block_count));
	}

	atomic_set(&q->finalization,  0);
	atomic_set(&q->block_count,   0);
	atomic_set(&q->block_read,    0);
	atomic_set(&q->block_write,   0);
	atomic_set(&q->total_queued,  0);
	atomic_set(&q->total_flushed, 0);
	atomic_set(&q->total_locks,   0);

	init_waitqueue_head(&q->write_queue);
	init_waitqueue_head(&q->read_queue);
	init_waitqueue_head(&q->flush_queue);

	q->flags = ctx->flags;

	DRM(waitlist_create)(&q->waitlist, dev->dma->buf_count);

	return 0;
}

/* drm_alloc_queue:
 * PRE:  1) dev->queuelist[0..dev->queue_count] is allocated and will not
 *          disappear (so all deallocation must be done after IOCTLs are off)
 *       2) dev->queue_count < dev->queue_slots
 *       3) dev->queuelist[i].use_count == 0 and
 *          dev->queuelist[i].finalization == 0 if i not in use
 * POST: 1) dev->queuelist[i].use_count == 1
 *       2) dev->queue_count < dev->queue_slots
 */
static int DRM(alloc_queue)(drm_device_t *dev)
{
	int	     i;
	drm_queue_t *queue;
	int	     oldslots;
	int	     newslots;

	/* Check for a free queue */
	for (i = 0; i < dev->queue_count; i++) {
		atomic_inc(&dev->queuelist[i]->use_count);
		if (atomic_read(&dev->queuelist[i]->use_count) == 1
		    && !atomic_read(&dev->queuelist[i]->finalization)) {
			DRM_DEBUG("%d (free)\n", i);
			return i;
		}
		atomic_dec(&dev->queuelist[i]->use_count);
	}

	/* Allocate a new queue */
	down(&dev->struct_sem);

	queue = DRM(alloc)(sizeof(*queue), DRM_MEM_QUEUES);
	memset(queue, 0, sizeof(*queue));
	atomic_set(&queue->use_count, 1);

	++dev->queue_count;
	if (dev->queue_count >= dev->queue_slots) {
		oldslots = dev->queue_slots * sizeof(*dev->queuelist);
		if (!dev->queue_slots) dev->queue_slots = 1;
		dev->queue_slots *= 2;
		newslots = dev->queue_slots * sizeof(*dev->queuelist);

		dev->queuelist = DRM(realloc)(dev->queuelist,
					      oldslots,
					      newslots,
					      DRM_MEM_QUEUES);
		if (!dev->queuelist) {
			up(&dev->struct_sem);
			DRM_DEBUG("out of memory\n");
			return -ENOMEM;
		}
	}
	dev->queuelist[dev->queue_count-1] = queue;

	up(&dev->struct_sem);
	DRM_DEBUG("%d (new)\n", dev->queue_count - 1);
	return dev->queue_count - 1;
}

int DRM(resctx)(struct inode *inode, struct file *filp,
		unsigned int cmd, unsigned long arg)
{
	drm_ctx_res_t __user *argp = (void __user *)arg;
	drm_ctx_res_t	res;
	drm_ctx_t	ctx;
	int		i;

	DRM_DEBUG("%d\n", DRM_RESERVED_CONTEXTS);
	if (copy_from_user(&res, argp, sizeof(res)))
		return -EFAULT;
	if (res.count >= DRM_RESERVED_CONTEXTS) {
		memset(&ctx, 0, sizeof(ctx));
		for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
			ctx.handle = i;
			/* Export each reserved handle as a full drm_ctx_t
			 * entry in the caller's array. */
			if (copy_to_user(&res.contexts[i],
					 &ctx,
					 sizeof(ctx)))
				return -EFAULT;
		}
	}
	res.count = DRM_RESERVED_CONTEXTS;

	if (copy_to_user(argp, &res, sizeof(res)))
		return -EFAULT;
	return 0;
}

int DRM(addctx)(struct inode *inode, struct file *filp,
		unsigned int cmd, unsigned long arg)
{
	drm_file_t	*priv = filp->private_data;
	drm_device_t	*dev  = priv->dev;
	drm_ctx_t	ctx;
	drm_ctx_t	__user *argp = (void __user *)arg;

	if (copy_from_user(&ctx, argp, sizeof(ctx)))
		return -EFAULT;

	if ((ctx.handle = DRM(alloc_queue)(dev)) == DRM_KERNEL_CONTEXT) {
		/* Init kernel's context and get a new one. */
		DRM(init_queue)(dev, dev->queuelist[ctx.handle], &ctx);
		ctx.handle = DRM(alloc_queue)(dev);
	}
	DRM(init_queue)(dev, dev->queuelist[ctx.handle], &ctx);
	DRM_DEBUG("%d\n", ctx.handle);
	if (copy_to_user(argp, &ctx, sizeof(ctx)))
		return -EFAULT;
	return 0;
}

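/* For orientation, a rough sketch of how user space drives DRM(addctx)() and
 * DRM(rmctx)() below.  DRM_IOCTL_ADD_CTX and DRM_IOCTL_RM_CTX are the
 * standard request codes from drm.h; the file descriptor handling and the
 * helper itself are hypothetical.
 */
#if 0	/* illustrative user-space sketch -- not compiled with this file */
#include <string.h>
#include <sys/ioctl.h>
#include "drm.h"

static int use_context(int drm_fd)
{
	drm_ctx_t ctx;

	memset(&ctx, 0, sizeof(ctx));
	if (ioctl(drm_fd, DRM_IOCTL_ADD_CTX, &ctx))	/* DRM(addctx) */
		return -1;

	/* ctx.handle now names a queue set up by DRM(alloc_queue)() and
	 * DRM(init_queue)() */
	/* ... submit rendering against ctx.handle ... */

	return ioctl(drm_fd, DRM_IOCTL_RM_CTX, &ctx);	/* DRM(rmctx) */
}
#endif
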
int DRM(modctx)(struct inode *inode, struct file *filp,
		unsigned int cmd, unsigned long arg)
{
	drm_file_t	*priv = filp->private_data;
	drm_device_t	*dev  = priv->dev;
	drm_ctx_t	ctx;
	drm_queue_t	*q;

	if (copy_from_user(&ctx, (drm_ctx_t __user *)arg, sizeof(ctx)))
		return -EFAULT;

	DRM_DEBUG("%d\n", ctx.handle);

	if (ctx.handle < 0 || ctx.handle >= dev->queue_count) return -EINVAL;
	q = dev->queuelist[ctx.handle];

	atomic_inc(&q->use_count);
	if (atomic_read(&q->use_count) == 1) {
		/* No longer in use */
		atomic_dec(&q->use_count);
		return -EINVAL;
	}

	if (DRM_BUFCOUNT(&q->waitlist)) {
		atomic_dec(&q->use_count);
		return -EBUSY;
	}

	q->flags = ctx.flags;

	atomic_dec(&q->use_count);
	return 0;
}

int DRM(getctx)(struct inode *inode, struct file *filp,
		unsigned int cmd, unsigned long arg)
{
	drm_file_t	*priv = filp->private_data;
	drm_device_t	*dev  = priv->dev;
	drm_ctx_t	__user *argp = (void __user *)arg;
	drm_ctx_t	ctx;
	drm_queue_t	*q;

	if (copy_from_user(&ctx, argp, sizeof(ctx)))
		return -EFAULT;

	DRM_DEBUG("%d\n", ctx.handle);

	if (ctx.handle >= dev->queue_count) return -EINVAL;
	q = dev->queuelist[ctx.handle];

	atomic_inc(&q->use_count);
	if (atomic_read(&q->use_count) == 1) {
		/* No longer in use */
		atomic_dec(&q->use_count);
		return -EINVAL;
	}

	ctx.flags = q->flags;
	atomic_dec(&q->use_count);

	if (copy_to_user(argp, &ctx, sizeof(ctx)))
		return -EFAULT;

	return 0;
}

int DRM(switchctx)(struct inode *inode, struct file *filp,
		   unsigned int cmd, unsigned long arg)
{
	drm_file_t	*priv = filp->private_data;
	drm_device_t	*dev  = priv->dev;
	drm_ctx_t	ctx;

	if (copy_from_user(&ctx, (drm_ctx_t __user *)arg, sizeof(ctx)))
		return -EFAULT;
	DRM_DEBUG("%d\n", ctx.handle);
	return DRM(context_switch)(dev, dev->last_context, ctx.handle);
}

int DRM(newctx)(struct inode *inode, struct file *filp,
		unsigned int cmd, unsigned long arg)
{
	drm_file_t	*priv = filp->private_data;
	drm_device_t	*dev  = priv->dev;
	drm_ctx_t	ctx;

	if (copy_from_user(&ctx, (drm_ctx_t __user *)arg, sizeof(ctx)))
		return -EFAULT;
	DRM_DEBUG("%d\n", ctx.handle);
	DRM(context_switch_complete)(dev, ctx.handle);

	return 0;
}

int DRM(rmctx)(struct inode *inode, struct file *filp,
	       unsigned int cmd, unsigned long arg)
{
	drm_file_t	*priv = filp->private_data;
	drm_device_t	*dev  = priv->dev;
	drm_ctx_t	ctx;
	drm_queue_t	*q;
	drm_buf_t	*buf;

	if (copy_from_user(&ctx, (drm_ctx_t __user *)arg, sizeof(ctx)))
		return -EFAULT;

	DRM_DEBUG("%d\n", ctx.handle);

	if (ctx.handle >= dev->queue_count) return -EINVAL;
	q = dev->queuelist[ctx.handle];

	atomic_inc(&q->use_count);
	if (atomic_read(&q->use_count) == 1) {
		/* No longer in use */
		atomic_dec(&q->use_count);
		return -EINVAL;
	}

	atomic_inc(&q->finalization); /* Mark queue in finalization state */
	atomic_sub(2, &q->use_count); /* Mark queue as unused (pending
					 finalization) */

	while (test_and_set_bit(0, &dev->interrupt_flag)) {
		schedule();
		if (signal_pending(current)) {
			clear_bit(0, &dev->interrupt_flag);
			return -EINTR;
		}
	}

	/* Remove queued buffers */
	while ((buf = DRM(waitlist_get)(&q->waitlist))) {
		DRM(free_buffer)(dev, buf);
	}

	clear_bit(0, &dev->interrupt_flag);

	/* Wakeup blocked processes */
	wake_up_interruptible(&q->read_queue);
	wake_up_interruptible(&q->write_queue);
	wake_up_interruptible(&q->flush_queue);

	/* Finalization over.  Queue is made available when both use_count
	   and finalization become 0, which won't happen until all the
	   waiting processes stop waiting. */
	atomic_dec(&q->finalization);
	return 0;
}