/* drm_irq.c */
/**
 * \file drm_irq.c
 * IRQ support
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "drmP.h"

#include <linux/interrupt.h>	/* For task queue support */
  36. /**
  37. * Get interrupt from bus id.
  38. *
  39. * \param inode device inode.
  40. * \param file_priv DRM file private.
  41. * \param cmd command.
  42. * \param arg user argument, pointing to a drm_irq_busid structure.
  43. * \return zero on success or a negative number on failure.
  44. *
  45. * Finds the PCI device with the specified bus id and gets its IRQ number.
  46. * This IOCTL is deprecated, and will now return EINVAL for any busid not equal
  47. * to that of the device that this DRM instance attached to.
  48. */
  49. int drm_irq_by_busid(struct drm_device *dev, void *data,
  50. struct drm_file *file_priv)
  51. {
  52. struct drm_irq_busid *p = data;
  53. if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
  54. return -EINVAL;
  55. if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
  56. (p->busnum & 0xff) != dev->pdev->bus->number ||
  57. p->devnum != PCI_SLOT(dev->pdev->devfn) || p->funcnum != PCI_FUNC(dev->pdev->devfn))
  58. return -EINVAL;
  59. p->irq = dev->irq;
  60. DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
  61. p->irq);
  62. return 0;
  63. }
  64. static void vblank_disable_fn(unsigned long arg)
  65. {
  66. struct drm_device *dev = (struct drm_device *)arg;
  67. unsigned long irqflags;
  68. int i;
  69. for (i = 0; i < dev->num_crtcs; i++) {
  70. spin_lock_irqsave(&dev->vbl_lock, irqflags);
  71. if (atomic_read(&dev->vblank_refcount[i]) == 0 &&
  72. dev->vblank_enabled[i]) {
  73. dev->driver->disable_vblank(dev, i);
  74. dev->vblank_enabled[i] = 0;
  75. }
  76. spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
  77. }
  78. }
  79. static void drm_vblank_cleanup(struct drm_device *dev)
  80. {
  81. /* Bail if the driver didn't call drm_vblank_init() */
  82. if (dev->num_crtcs == 0)
  83. return;
  84. del_timer(&dev->vblank_disable_timer);
  85. vblank_disable_fn((unsigned long)dev);
  86. drm_free(dev->vbl_queue, sizeof(*dev->vbl_queue) * dev->num_crtcs,
  87. DRM_MEM_DRIVER);
  88. drm_free(dev->vbl_sigs, sizeof(*dev->vbl_sigs) * dev->num_crtcs,
  89. DRM_MEM_DRIVER);
  90. drm_free(dev->_vblank_count, sizeof(*dev->_vblank_count) *
  91. dev->num_crtcs, DRM_MEM_DRIVER);
  92. drm_free(dev->vblank_refcount, sizeof(*dev->vblank_refcount) *
  93. dev->num_crtcs, DRM_MEM_DRIVER);
  94. drm_free(dev->vblank_enabled, sizeof(*dev->vblank_enabled) *
  95. dev->num_crtcs, DRM_MEM_DRIVER);
  96. drm_free(dev->last_vblank, sizeof(*dev->last_vblank) * dev->num_crtcs,
  97. DRM_MEM_DRIVER);
  98. drm_free(dev->vblank_premodeset, sizeof(*dev->vblank_premodeset) *
  99. dev->num_crtcs, DRM_MEM_DRIVER);
  100. drm_free(dev->vblank_offset, sizeof(*dev->vblank_offset) * dev->num_crtcs,
  101. DRM_MEM_DRIVER);
  102. dev->num_crtcs = 0;
  103. }
/**
 * drm_vblank_init - initialize per-CRTC vblank state for a device
 * @dev: DRM device
 * @num_crtcs: number of CRTCs on this device
 *
 * Allocates the per-CRTC wait queues, pending-signal lists, counters and
 * bookkeeping arrays used by the vblank code, and sets up (but does not arm)
 * the timer that later disables unused vblank interrupts.
 *
 * Returns 0 on success or -ENOMEM on allocation failure; partial allocations
 * are released via drm_vblank_cleanup() on the error path.
 */
int drm_vblank_init(struct drm_device *dev, int num_crtcs)
{
	int i, ret = -ENOMEM;

	/* Timer is armed by drm_vblank_put(); it fires 5 s after the last
	 * reference is dropped and turns vblank interrupts back off. */
	setup_timer(&dev->vblank_disable_timer, vblank_disable_fn,
		    (unsigned long)dev);
	spin_lock_init(&dev->vbl_lock);
	atomic_set(&dev->vbl_signal_pending, 0);
	/* Set before the allocations so drm_vblank_cleanup() knows the array
	 * sizes if we have to bail out below. */
	dev->num_crtcs = num_crtcs;

	dev->vbl_queue = drm_alloc(sizeof(wait_queue_head_t) * num_crtcs,
				   DRM_MEM_DRIVER);
	if (!dev->vbl_queue)
		goto err;

	dev->vbl_sigs = drm_alloc(sizeof(struct list_head) * num_crtcs,
				  DRM_MEM_DRIVER);
	if (!dev->vbl_sigs)
		goto err;

	dev->_vblank_count = drm_alloc(sizeof(atomic_t) * num_crtcs,
				       DRM_MEM_DRIVER);
	if (!dev->_vblank_count)
		goto err;

	dev->vblank_refcount = drm_alloc(sizeof(atomic_t) * num_crtcs,
					 DRM_MEM_DRIVER);
	if (!dev->vblank_refcount)
		goto err;

	/* drm_calloc zeroes, so these start disabled / at zero. */
	dev->vblank_enabled = drm_calloc(num_crtcs, sizeof(int),
					 DRM_MEM_DRIVER);
	if (!dev->vblank_enabled)
		goto err;

	dev->last_vblank = drm_calloc(num_crtcs, sizeof(u32), DRM_MEM_DRIVER);
	if (!dev->last_vblank)
		goto err;

	dev->vblank_premodeset = drm_calloc(num_crtcs, sizeof(u32),
					    DRM_MEM_DRIVER);
	if (!dev->vblank_premodeset)
		goto err;

	dev->vblank_offset = drm_calloc(num_crtcs, sizeof(u32), DRM_MEM_DRIVER);
	if (!dev->vblank_offset)
		goto err;

	/* Zero per-crtc vblank stuff */
	for (i = 0; i < num_crtcs; i++) {
		init_waitqueue_head(&dev->vbl_queue[i]);
		INIT_LIST_HEAD(&dev->vbl_sigs[i]);
		atomic_set(&dev->_vblank_count[i], 0);
		atomic_set(&dev->vblank_refcount[i], 0);
	}

	return 0;

err:
	drm_vblank_cleanup(dev);
	return ret;
}
EXPORT_SYMBOL(drm_vblank_init);
  155. /**
  156. * Install IRQ handler.
  157. *
  158. * \param dev DRM device.
  159. * \param irq IRQ number.
  160. *
  161. * Initializes the IRQ related data, and setups drm_device::vbl_queue. Installs the handler, calling the driver
  162. * \c drm_driver_irq_preinstall() and \c drm_driver_irq_postinstall() functions
  163. * before and after the installation.
  164. */
  165. static int drm_irq_install(struct drm_device * dev)
  166. {
  167. int ret;
  168. unsigned long sh_flags = 0;
  169. if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
  170. return -EINVAL;
  171. if (dev->irq == 0)
  172. return -EINVAL;
  173. mutex_lock(&dev->struct_mutex);
  174. /* Driver must have been initialized */
  175. if (!dev->dev_private) {
  176. mutex_unlock(&dev->struct_mutex);
  177. return -EINVAL;
  178. }
  179. if (dev->irq_enabled) {
  180. mutex_unlock(&dev->struct_mutex);
  181. return -EBUSY;
  182. }
  183. dev->irq_enabled = 1;
  184. mutex_unlock(&dev->struct_mutex);
  185. DRM_DEBUG("irq=%d\n", dev->irq);
  186. /* Before installing handler */
  187. dev->driver->irq_preinstall(dev);
  188. /* Install handler */
  189. if (drm_core_check_feature(dev, DRIVER_IRQ_SHARED))
  190. sh_flags = IRQF_SHARED;
  191. ret = request_irq(dev->irq, dev->driver->irq_handler,
  192. sh_flags, dev->devname, dev);
  193. if (ret < 0) {
  194. mutex_lock(&dev->struct_mutex);
  195. dev->irq_enabled = 0;
  196. mutex_unlock(&dev->struct_mutex);
  197. return ret;
  198. }
  199. /* After installing handler */
  200. ret = dev->driver->irq_postinstall(dev);
  201. if (ret < 0) {
  202. mutex_lock(&dev->struct_mutex);
  203. dev->irq_enabled = 0;
  204. mutex_unlock(&dev->struct_mutex);
  205. }
  206. return ret;
  207. }
  208. /**
  209. * Uninstall the IRQ handler.
  210. *
  211. * \param dev DRM device.
  212. *
  213. * Calls the driver's \c drm_driver_irq_uninstall() function, and stops the irq.
  214. */
  215. int drm_irq_uninstall(struct drm_device * dev)
  216. {
  217. int irq_enabled;
  218. if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
  219. return -EINVAL;
  220. mutex_lock(&dev->struct_mutex);
  221. irq_enabled = dev->irq_enabled;
  222. dev->irq_enabled = 0;
  223. mutex_unlock(&dev->struct_mutex);
  224. if (!irq_enabled)
  225. return -EINVAL;
  226. DRM_DEBUG("irq=%d\n", dev->irq);
  227. dev->driver->irq_uninstall(dev);
  228. free_irq(dev->irq, dev);
  229. drm_vblank_cleanup(dev);
  230. dev->locked_tasklet_func = NULL;
  231. return 0;
  232. }
  233. EXPORT_SYMBOL(drm_irq_uninstall);
  234. /**
  235. * IRQ control ioctl.
  236. *
  237. * \param inode device inode.
  238. * \param file_priv DRM file private.
  239. * \param cmd command.
  240. * \param arg user argument, pointing to a drm_control structure.
  241. * \return zero on success or a negative number on failure.
  242. *
  243. * Calls irq_install() or irq_uninstall() according to \p arg.
  244. */
  245. int drm_control(struct drm_device *dev, void *data,
  246. struct drm_file *file_priv)
  247. {
  248. struct drm_control *ctl = data;
  249. /* if we haven't irq we fallback for compatibility reasons - this used to be a separate function in drm_dma.h */
  250. switch (ctl->func) {
  251. case DRM_INST_HANDLER:
  252. if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
  253. return 0;
  254. if (dev->if_version < DRM_IF_VERSION(1, 2) &&
  255. ctl->irq != dev->irq)
  256. return -EINVAL;
  257. return drm_irq_install(dev);
  258. case DRM_UNINST_HANDLER:
  259. if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
  260. return 0;
  261. return drm_irq_uninstall(dev);
  262. default:
  263. return -EINVAL;
  264. }
  265. }
  266. /**
  267. * drm_vblank_count - retrieve "cooked" vblank counter value
  268. * @dev: DRM device
  269. * @crtc: which counter to retrieve
  270. *
  271. * Fetches the "cooked" vblank count value that represents the number of
  272. * vblank events since the system was booted, including lost events due to
  273. * modesetting activity.
  274. */
  275. u32 drm_vblank_count(struct drm_device *dev, int crtc)
  276. {
  277. return atomic_read(&dev->_vblank_count[crtc]) +
  278. dev->vblank_offset[crtc];
  279. }
  280. EXPORT_SYMBOL(drm_vblank_count);
/**
 * drm_update_vblank_count - update the master vblank counter
 * @dev: DRM device
 * @crtc: counter to update
 *
 * Call back into the driver to update the appropriate vblank counter
 * (specified by @crtc).  Deal with wraparound, if it occurred, and
 * update the last read value so we can deal with wraparound on the next
 * call if necessary.
 */
void drm_update_vblank_count(struct drm_device *dev, int crtc)
{
	unsigned long irqflags;
	u32 cur_vblank, diff;

	/*
	 * Interrupts were disabled prior to this call, so deal with counter
	 * wrap if needed.
	 * NOTE!  It's possible we lost a full dev->max_vblank_count events
	 * here if the register is small or we had vblank interrupts off for
	 * a long time.
	 */
	cur_vblank = dev->driver->get_vblank_counter(dev, crtc);
	/* vbl_lock keeps the read-modify-write of last_vblank atomic against
	 * concurrent updaters (e.g. the interrupt handler path). */
	spin_lock_irqsave(&dev->vbl_lock, irqflags);
	if (cur_vblank < dev->last_vblank[crtc]) {
		/* Hardware counter wrapped: count the events up to the wrap
		 * point, then those since zero. */
		diff = dev->max_vblank_count -
			dev->last_vblank[crtc];
		diff += cur_vblank;
	} else {
		diff = cur_vblank - dev->last_vblank[crtc];
	}
	dev->last_vblank[crtc] = cur_vblank;
	spin_unlock_irqrestore(&dev->vbl_lock, irqflags);

	/* Fold the delta into the monotonic "cooked" counter. */
	atomic_add(diff, &dev->_vblank_count[crtc]);
}
EXPORT_SYMBOL(drm_update_vblank_count);
/**
 * drm_vblank_get - get a reference count on vblank events
 * @dev: DRM device
 * @crtc: which CRTC to own
 *
 * Acquire a reference count on vblank events to avoid having them disabled
 * while in use.  Note callers will probably want to update the master counter
 * using drm_update_vblank_count() above before calling this routine so that
 * wakeups occur on the right vblank event.
 *
 * RETURNS
 * Zero on success, nonzero on failure.
 */
int drm_vblank_get(struct drm_device *dev, int crtc)
{
	unsigned long irqflags;
	int ret = 0;

	/* vbl_lock serializes against vblank_disable_fn() so the refcount
	 * test and the enable/disable of the interrupt stay consistent. */
	spin_lock_irqsave(&dev->vbl_lock, irqflags);
	/* Going from 0->1 means we have to enable interrupts again */
	if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1 &&
	    !dev->vblank_enabled[crtc]) {
		ret = dev->driver->enable_vblank(dev, crtc);
		if (ret)
			/* Roll the reference back so a failed enable does not
			 * leave a stale count behind. */
			atomic_dec(&dev->vblank_refcount[crtc]);
		else
			dev->vblank_enabled[crtc] = 1;
	}
	spin_unlock_irqrestore(&dev->vbl_lock, irqflags);

	return ret;
}
EXPORT_SYMBOL(drm_vblank_get);
  347. /**
  348. * drm_vblank_put - give up ownership of vblank events
  349. * @dev: DRM device
  350. * @crtc: which counter to give up
  351. *
  352. * Release ownership of a given vblank counter, turning off interrupts
  353. * if possible.
  354. */
  355. void drm_vblank_put(struct drm_device *dev, int crtc)
  356. {
  357. /* Last user schedules interrupt disable */
  358. if (atomic_dec_and_test(&dev->vblank_refcount[crtc]))
  359. mod_timer(&dev->vblank_disable_timer, jiffies + 5*DRM_HZ);
  360. }
  361. EXPORT_SYMBOL(drm_vblank_put);
  362. /**
  363. * drm_modeset_ctl - handle vblank event counter changes across mode switch
  364. * @DRM_IOCTL_ARGS: standard ioctl arguments
  365. *
  366. * Applications should call the %_DRM_PRE_MODESET and %_DRM_POST_MODESET
  367. * ioctls around modesetting so that any lost vblank events are accounted for.
  368. */
  369. int drm_modeset_ctl(struct drm_device *dev, void *data,
  370. struct drm_file *file_priv)
  371. {
  372. struct drm_modeset_ctl *modeset = data;
  373. int crtc, ret = 0;
  374. u32 new;
  375. crtc = modeset->arg;
  376. if (crtc >= dev->num_crtcs) {
  377. ret = -EINVAL;
  378. goto out;
  379. }
  380. switch (modeset->cmd) {
  381. case _DRM_PRE_MODESET:
  382. dev->vblank_premodeset[crtc] =
  383. dev->driver->get_vblank_counter(dev, crtc);
  384. break;
  385. case _DRM_POST_MODESET:
  386. new = dev->driver->get_vblank_counter(dev, crtc);
  387. dev->vblank_offset[crtc] = dev->vblank_premodeset[crtc] - new;
  388. break;
  389. default:
  390. ret = -EINVAL;
  391. break;
  392. }
  393. out:
  394. return ret;
  395. }
/**
 * Wait for VBLANK.
 *
 * \param dev DRM device.
 * \param data user argument, pointing to a drm_wait_vblank structure.
 * \param file_priv DRM file private.
 * \return zero on success or a negative number on failure.
 *
 * Verifies the IRQ is installed.
 *
 * If a signal is requested checks if this task has already scheduled the same signal
 * for the same vblank sequence number - nothing to be done in
 * that case.  If the number of tasks waiting for the interrupt exceeds 100 the
 * function fails.  Otherwise adds a new entry to drm_device::vbl_sigs for this
 * task.
 *
 * If a signal is not requested, then calls vblank_wait().
 */
int drm_wait_vblank(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	union drm_wait_vblank *vblwait = data;
	struct timeval now;
	int ret = 0;
	unsigned int flags, seq, crtc;

	if ((!dev->irq) || (!dev->irq_enabled))
		return -EINVAL;

	/* Reject any bits outside the known type and flag masks. */
	if (vblwait->request.type &
	    ~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK)) {
		DRM_ERROR("Unsupported type value 0x%x, supported mask 0x%x\n",
			  vblwait->request.type,
			  (_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK));
		return -EINVAL;
	}

	flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK;
	/* Only two pipes are addressable via this interface: 0 or
	 * (with _DRM_VBLANK_SECONDARY) 1. */
	crtc = flags & _DRM_VBLANK_SECONDARY ? 1 : 0;

	if (crtc >= dev->num_crtcs)
		return -EINVAL;

	/* Refresh the cooked counter before interpreting the request. */
	drm_update_vblank_count(dev, crtc);
	seq = drm_vblank_count(dev, crtc);

	switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) {
	case _DRM_VBLANK_RELATIVE:
		/* Convert a relative request into an absolute one ... */
		vblwait->request.sequence += seq;
		vblwait->request.type &= ~_DRM_VBLANK_RELATIVE;
		/* fallthrough - request is now absolute */
	case _DRM_VBLANK_ABSOLUTE:
		break;
	default:
		return -EINVAL;
	}

	/* The (1<<23) window distinguishes "already passed" from "still in
	 * the future" under 32-bit wraparound; if the target was missed,
	 * retarget to the next vblank. */
	if ((flags & _DRM_VBLANK_NEXTONMISS) &&
	    (seq - vblwait->request.sequence) <= (1<<23)) {
		vblwait->request.sequence = seq + 1;
	}

	if (flags & _DRM_VBLANK_SIGNAL) {
		/* Asynchronous path: queue a signal to be delivered by
		 * drm_vbl_send_signals() when the sequence is reached. */
		unsigned long irqflags;
		struct list_head *vbl_sigs = &dev->vbl_sigs[crtc];
		struct drm_vbl_sig *vbl_sig;

		spin_lock_irqsave(&dev->vbl_lock, irqflags);

		/* Check if this task has already scheduled the same signal
		 * for the same vblank sequence number; nothing to be done in
		 * that case
		 */
		list_for_each_entry(vbl_sig, vbl_sigs, head) {
			if (vbl_sig->sequence == vblwait->request.sequence
			    && vbl_sig->info.si_signo ==
			    vblwait->request.signal
			    && vbl_sig->task == current) {
				spin_unlock_irqrestore(&dev->vbl_lock,
						       irqflags);
				vblwait->reply.sequence = seq;
				goto done;
			}
		}

		/* Cap the total number of pending signals device-wide. */
		if (atomic_read(&dev->vbl_signal_pending) >= 100) {
			spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
			return -EBUSY;
		}

		spin_unlock_irqrestore(&dev->vbl_lock, irqflags);

		vbl_sig = drm_calloc(1, sizeof(struct drm_vbl_sig),
				     DRM_MEM_DRIVER);
		if (!vbl_sig)
			return -ENOMEM;

		/* Hold a vblank reference until the signal is sent (dropped
		 * by drm_vbl_send_signals()). */
		ret = drm_vblank_get(dev, crtc);
		if (ret) {
			drm_free(vbl_sig, sizeof(struct drm_vbl_sig),
				 DRM_MEM_DRIVER);
			return ret;
		}

		atomic_inc(&dev->vbl_signal_pending);

		vbl_sig->sequence = vblwait->request.sequence;
		vbl_sig->info.si_signo = vblwait->request.signal;
		vbl_sig->task = current;

		spin_lock_irqsave(&dev->vbl_lock, irqflags);

		list_add_tail(&vbl_sig->head, vbl_sigs);

		spin_unlock_irqrestore(&dev->vbl_lock, irqflags);

		vblwait->reply.sequence = seq;
	} else {
		/* Synchronous path: block (up to 3 s) until the cooked
		 * counter reaches the requested sequence. */
		unsigned long cur_vblank;

		ret = drm_vblank_get(dev, crtc);
		if (ret)
			return ret;
		DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ,
			    (((cur_vblank = drm_vblank_count(dev, crtc))
			      - vblwait->request.sequence) <= (1 << 23)));
		drm_vblank_put(dev, crtc);
		do_gettimeofday(&now);

		vblwait->reply.tval_sec = now.tv_sec;
		vblwait->reply.tval_usec = now.tv_usec;
		vblwait->reply.sequence = cur_vblank;
	}

done:
	return ret;
}
/**
 * Send the VBLANK signals.
 *
 * \param dev DRM device.
 * \param crtc CRTC where the vblank event occurred
 *
 * Sends a signal for each task in drm_device::vbl_sigs whose requested
 * sequence has been reached, and removes those entries from the list.
 */
static void drm_vbl_send_signals(struct drm_device * dev, int crtc)
{
	struct drm_vbl_sig *vbl_sig, *tmp;
	struct list_head *vbl_sigs;
	unsigned int vbl_seq;
	unsigned long flags;

	/* vbl_lock protects the per-CRTC signal list against concurrent
	 * additions from drm_wait_vblank(). */
	spin_lock_irqsave(&dev->vbl_lock, flags);

	vbl_sigs = &dev->vbl_sigs[crtc];
	vbl_seq = drm_vblank_count(dev, crtc);

	/* _safe variant: entries are deleted while walking. */
	list_for_each_entry_safe(vbl_sig, tmp, vbl_sigs, head) {
		/* (1<<23) window: true when the requested sequence has been
		 * reached, modulo 32-bit counter wraparound. */
		if ((vbl_seq - vbl_sig->sequence) <= (1 << 23)) {
			vbl_sig->info.si_code = vbl_seq;
			send_sig_info(vbl_sig->info.si_signo,
				      &vbl_sig->info, vbl_sig->task);

			list_del(&vbl_sig->head);

			drm_free(vbl_sig, sizeof(*vbl_sig),
				 DRM_MEM_DRIVER);
			atomic_dec(&dev->vbl_signal_pending);
			/* Drop the reference taken by drm_wait_vblank(). */
			drm_vblank_put(dev, crtc);
		}
	}

	spin_unlock_irqrestore(&dev->vbl_lock, flags);
}
/**
 * drm_handle_vblank - handle a vblank event
 * @dev: DRM device
 * @crtc: where this event occurred
 *
 * Drivers should call this routine in their vblank interrupt handlers to
 * update the vblank counter and send any signals that may be pending.
 */
void drm_handle_vblank(struct drm_device *dev, int crtc)
{
	/* Refresh the cooked counter first so waiters and signal recipients
	 * observe the sequence that includes this event. */
	drm_update_vblank_count(dev, crtc);
	/* Wake tasks blocked in drm_wait_vblank()'s synchronous path. */
	DRM_WAKEUP(&dev->vbl_queue[crtc]);
	/* Deliver any queued asynchronous signals that are now due. */
	drm_vbl_send_signals(dev, crtc);
}
EXPORT_SYMBOL(drm_handle_vblank);
/**
 * Tasklet wrapper function.
 *
 * \param data DRM device in disguise.
 *
 * Attempts to grab the HW lock and calls the driver callback on success.  On
 * failure, leave the lock marked as contended so the callback can be called
 * from drm_unlock().
 */
static void drm_locked_tasklet_func(unsigned long data)
{
	struct drm_device *dev = (struct drm_device *)data;
	unsigned long irqflags;

	spin_lock_irqsave(&dev->tasklet_lock, irqflags);

	/* Bail if the callback was cancelled (set to NULL) in the meantime,
	 * or if the HW lock is currently held by someone else. */
	if (!dev->locked_tasklet_func ||
	    !drm_lock_take(&dev->lock,
			   DRM_KERNEL_CONTEXT)) {
		spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
		return;
	}

	dev->lock.lock_time = jiffies;
	atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);

	/* Run the driver callback while holding the HW lock. */
	dev->locked_tasklet_func(dev);

	drm_lock_free(&dev->lock,
		      DRM_KERNEL_CONTEXT);

	/* Clear the slot so a new callback can be scheduled. */
	dev->locked_tasklet_func = NULL;

	spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
}
/**
 * Schedule a tasklet to call back a driver hook with the HW lock held.
 *
 * \param dev DRM device.
 * \param func Driver callback.
 *
 * This is intended for triggering actions that require the HW lock from an
 * interrupt handler.  The lock will be grabbed ASAP after the interrupt handler
 * completes.  Note that the callback may be called from interrupt or process
 * context, it must not make any assumptions about this.  Also, the HW lock will
 * be held with the kernel context or any client context.
 */
void drm_locked_tasklet(struct drm_device *dev, void (*func)(struct drm_device *))
{
	unsigned long irqflags;
	/* One tasklet shared across all DRM devices; its data field is
	 * repointed at the scheduling device below. */
	static DECLARE_TASKLET(drm_tasklet, drm_locked_tasklet_func, 0);

	/* Refuse if the device has no IRQ support, or if the tasklet is
	 * already scheduled (possibly for another device). */
	if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ) ||
	    test_bit(TASKLET_STATE_SCHED, &drm_tasklet.state))
		return;

	spin_lock_irqsave(&dev->tasklet_lock, irqflags);

	/* A callback is already pending for this device; don't overwrite. */
	if (dev->locked_tasklet_func) {
		spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
		return;
	}

	dev->locked_tasklet_func = func;

	spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);

	drm_tasklet.data = (unsigned long)dev;

	tasklet_hi_schedule(&drm_tasklet);
}
EXPORT_SYMBOL(drm_locked_tasklet);