i915_irq.c 22 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865
  1. /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
  2. */
  3. /*
  4. * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
  5. * All Rights Reserved.
  6. *
  7. * Permission is hereby granted, free of charge, to any person obtaining a
  8. * copy of this software and associated documentation files (the
  9. * "Software"), to deal in the Software without restriction, including
  10. * without limitation the rights to use, copy, modify, merge, publish,
  11. * distribute, sub license, and/or sell copies of the Software, and to
  12. * permit persons to whom the Software is furnished to do so, subject to
  13. * the following conditions:
  14. *
  15. * The above copyright notice and this permission notice (including the
  16. * next paragraph) shall be included in all copies or substantial portions
  17. * of the Software.
  18. *
  19. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
  20. * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  21. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
  22. * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
  23. * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
  24. * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
  25. * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  26. *
  27. */
  28. #include "drmP.h"
  29. #include "drm.h"
  30. #include "i915_drm.h"
  31. #include "i915_drv.h"
  32. #define MAX_NOPID ((u32)~0)
  33. /** These are the interrupts used by the driver */
  34. #define I915_INTERRUPT_ENABLE_MASK (I915_USER_INTERRUPT | \
  35. I915_ASLE_INTERRUPT | \
  36. I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \
  37. I915_DISPLAY_PIPE_B_EVENT_INTERRUPT)
/**
 * i915_enable_irq - unmask the given interrupt source(s) in IMR
 * @dev_priv: driver private structure
 * @mask: interrupt bits to unmask
 *
 * Clears @mask in the software copy of the interrupt mask register and
 * writes it to the hardware.  Skips the register access entirely when all
 * requested bits are already unmasked.
 */
void
i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	/* Only touch the hardware if at least one of the bits is masked. */
	if ((dev_priv->irq_mask_reg & mask) != 0) {
		dev_priv->irq_mask_reg &= ~mask;
		I915_WRITE(IMR, dev_priv->irq_mask_reg);
		/* Read back (value discarded) to flush the posted write. */
		(void) I915_READ(IMR);
	}
}
/**
 * i915_disable_irq - mask the given interrupt source(s) in IMR
 * @dev_priv: driver private structure
 * @mask: interrupt bits to mask
 *
 * Counterpart to i915_enable_irq(): sets @mask in the software copy of the
 * interrupt mask register and writes it to the hardware, skipping the
 * register access when every requested bit is already masked.
 */
static inline void
i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	/* Only touch the hardware if at least one of the bits is unmasked. */
	if ((dev_priv->irq_mask_reg & mask) != mask) {
		dev_priv->irq_mask_reg |= mask;
		I915_WRITE(IMR, dev_priv->irq_mask_reg);
		/* Read back (value discarded) to flush the posted write. */
		(void) I915_READ(IMR);
	}
}
/**
 * i915_get_pipe - return the pipe associated with a given plane
 * @dev: DRM device
 * @plane: plane to look for
 *
 * The Intel Mesa & 2D drivers call the vblank routines with a plane number
 * rather than a pipe number, since they may not always be equal.  This routine
 * maps the given @plane back to a pipe number.
 */
static int
i915_get_pipe(struct drm_device *dev, int plane)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 dspcntr;

	/* The plane control register records which pipe the plane is bound to. */
	dspcntr = plane ? I915_READ(DSPBCNTR) : I915_READ(DSPACNTR);

	return dspcntr & DISPPLANE_SEL_PIPE_MASK ? 1 : 0;
}
/**
 * i915_get_plane - return the plane associated with a given pipe
 * @dev: DRM device
 * @pipe: pipe to look for
 *
 * The Intel Mesa & 2D drivers call the vblank routines with a plane number
 * rather than a pipe number, since they may not always be equal.  This routine
 * maps the given @pipe back to a plane number.
 */
static int
i915_get_plane(struct drm_device *dev, int pipe)
{
	/* Only two planes exist: if plane 0 doesn't drive @pipe, plane 1 must. */
	if (i915_get_pipe(dev, 0) == pipe)
		return 0;
	return 1;
}
  89. /**
  90. * i915_pipe_enabled - check if a pipe is enabled
  91. * @dev: DRM device
  92. * @pipe: pipe to check
  93. *
  94. * Reading certain registers when the pipe is disabled can hang the chip.
  95. * Use this routine to make sure the PLL is running and the pipe is active
  96. * before reading such registers if unsure.
  97. */
  98. static int
  99. i915_pipe_enabled(struct drm_device *dev, int pipe)
  100. {
  101. drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
  102. unsigned long pipeconf = pipe ? PIPEBCONF : PIPEACONF;
  103. if (I915_READ(pipeconf) & PIPEACONF_ENABLE)
  104. return 1;
  105. return 0;
  106. }
  107. /**
  108. * Emit blits for scheduled buffer swaps.
  109. *
  110. * This function will be called with the HW lock held.
  111. */
  112. static void i915_vblank_tasklet(struct drm_device *dev)
  113. {
  114. drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
  115. unsigned long irqflags;
  116. struct list_head *list, *tmp, hits, *hit;
  117. int nhits, nrects, slice[2], upper[2], lower[2], i;
  118. unsigned counter[2];
  119. struct drm_drawable_info *drw;
  120. drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv;
  121. u32 cpp = dev_priv->cpp;
  122. u32 cmd = (cpp == 4) ? (XY_SRC_COPY_BLT_CMD |
  123. XY_SRC_COPY_BLT_WRITE_ALPHA |
  124. XY_SRC_COPY_BLT_WRITE_RGB)
  125. : XY_SRC_COPY_BLT_CMD;
  126. u32 src_pitch = sarea_priv->pitch * cpp;
  127. u32 dst_pitch = sarea_priv->pitch * cpp;
  128. u32 ropcpp = (0xcc << 16) | ((cpp - 1) << 24);
  129. RING_LOCALS;
  130. if (IS_I965G(dev) && sarea_priv->front_tiled) {
  131. cmd |= XY_SRC_COPY_BLT_DST_TILED;
  132. dst_pitch >>= 2;
  133. }
  134. if (IS_I965G(dev) && sarea_priv->back_tiled) {
  135. cmd |= XY_SRC_COPY_BLT_SRC_TILED;
  136. src_pitch >>= 2;
  137. }
  138. counter[0] = drm_vblank_count(dev, 0);
  139. counter[1] = drm_vblank_count(dev, 1);
  140. DRM_DEBUG("\n");
  141. INIT_LIST_HEAD(&hits);
  142. nhits = nrects = 0;
  143. spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
  144. /* Find buffer swaps scheduled for this vertical blank */
  145. list_for_each_safe(list, tmp, &dev_priv->vbl_swaps.head) {
  146. drm_i915_vbl_swap_t *vbl_swap =
  147. list_entry(list, drm_i915_vbl_swap_t, head);
  148. int pipe = i915_get_pipe(dev, vbl_swap->plane);
  149. if ((counter[pipe] - vbl_swap->sequence) > (1<<23))
  150. continue;
  151. list_del(list);
  152. dev_priv->swaps_pending--;
  153. drm_vblank_put(dev, pipe);
  154. spin_unlock(&dev_priv->swaps_lock);
  155. spin_lock(&dev->drw_lock);
  156. drw = drm_get_drawable_info(dev, vbl_swap->drw_id);
  157. if (!drw) {
  158. spin_unlock(&dev->drw_lock);
  159. drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER);
  160. spin_lock(&dev_priv->swaps_lock);
  161. continue;
  162. }
  163. list_for_each(hit, &hits) {
  164. drm_i915_vbl_swap_t *swap_cmp =
  165. list_entry(hit, drm_i915_vbl_swap_t, head);
  166. struct drm_drawable_info *drw_cmp =
  167. drm_get_drawable_info(dev, swap_cmp->drw_id);
  168. if (drw_cmp &&
  169. drw_cmp->rects[0].y1 > drw->rects[0].y1) {
  170. list_add_tail(list, hit);
  171. break;
  172. }
  173. }
  174. spin_unlock(&dev->drw_lock);
  175. /* List of hits was empty, or we reached the end of it */
  176. if (hit == &hits)
  177. list_add_tail(list, hits.prev);
  178. nhits++;
  179. spin_lock(&dev_priv->swaps_lock);
  180. }
  181. if (nhits == 0) {
  182. spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
  183. return;
  184. }
  185. spin_unlock(&dev_priv->swaps_lock);
  186. i915_kernel_lost_context(dev);
  187. if (IS_I965G(dev)) {
  188. BEGIN_LP_RING(4);
  189. OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
  190. OUT_RING(0);
  191. OUT_RING(((sarea_priv->width - 1) & 0xffff) | ((sarea_priv->height - 1) << 16));
  192. OUT_RING(0);
  193. ADVANCE_LP_RING();
  194. } else {
  195. BEGIN_LP_RING(6);
  196. OUT_RING(GFX_OP_DRAWRECT_INFO);
  197. OUT_RING(0);
  198. OUT_RING(0);
  199. OUT_RING(sarea_priv->width | sarea_priv->height << 16);
  200. OUT_RING(sarea_priv->width | sarea_priv->height << 16);
  201. OUT_RING(0);
  202. ADVANCE_LP_RING();
  203. }
  204. sarea_priv->ctxOwner = DRM_KERNEL_CONTEXT;
  205. upper[0] = upper[1] = 0;
  206. slice[0] = max(sarea_priv->pipeA_h / nhits, 1);
  207. slice[1] = max(sarea_priv->pipeB_h / nhits, 1);
  208. lower[0] = sarea_priv->pipeA_y + slice[0];
  209. lower[1] = sarea_priv->pipeB_y + slice[0];
  210. spin_lock(&dev->drw_lock);
  211. /* Emit blits for buffer swaps, partitioning both outputs into as many
  212. * slices as there are buffer swaps scheduled in order to avoid tearing
  213. * (based on the assumption that a single buffer swap would always
  214. * complete before scanout starts).
  215. */
  216. for (i = 0; i++ < nhits;
  217. upper[0] = lower[0], lower[0] += slice[0],
  218. upper[1] = lower[1], lower[1] += slice[1]) {
  219. if (i == nhits)
  220. lower[0] = lower[1] = sarea_priv->height;
  221. list_for_each(hit, &hits) {
  222. drm_i915_vbl_swap_t *swap_hit =
  223. list_entry(hit, drm_i915_vbl_swap_t, head);
  224. struct drm_clip_rect *rect;
  225. int num_rects, plane;
  226. unsigned short top, bottom;
  227. drw = drm_get_drawable_info(dev, swap_hit->drw_id);
  228. if (!drw)
  229. continue;
  230. rect = drw->rects;
  231. plane = swap_hit->plane;
  232. top = upper[plane];
  233. bottom = lower[plane];
  234. for (num_rects = drw->num_rects; num_rects--; rect++) {
  235. int y1 = max(rect->y1, top);
  236. int y2 = min(rect->y2, bottom);
  237. if (y1 >= y2)
  238. continue;
  239. BEGIN_LP_RING(8);
  240. OUT_RING(cmd);
  241. OUT_RING(ropcpp | dst_pitch);
  242. OUT_RING((y1 << 16) | rect->x1);
  243. OUT_RING((y2 << 16) | rect->x2);
  244. OUT_RING(sarea_priv->front_offset);
  245. OUT_RING((y1 << 16) | rect->x1);
  246. OUT_RING(src_pitch);
  247. OUT_RING(sarea_priv->back_offset);
  248. ADVANCE_LP_RING();
  249. }
  250. }
  251. }
  252. spin_unlock_irqrestore(&dev->drw_lock, irqflags);
  253. list_for_each_safe(hit, tmp, &hits) {
  254. drm_i915_vbl_swap_t *swap_hit =
  255. list_entry(hit, drm_i915_vbl_swap_t, head);
  256. list_del(hit);
  257. drm_free(swap_hit, sizeof(*swap_hit), DRM_MEM_DRIVER);
  258. }
  259. }
/* Return the current hardware frame count for the pipe driving @plane,
 * assembled from the split high/low frame-count register fields.
 * Returns 0 (with an error message) if the pipe is disabled, since reading
 * its registers would be unsafe (see i915_pipe_enabled). */
u32 i915_get_vblank_counter(struct drm_device *dev, int plane)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low, count;
	int pipe;

	pipe = i915_get_pipe(dev, plane);
	high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH;
	low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_ERROR("trying to get vblank count for disabled pipe %d\n", pipe);
		return 0;
	}

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
			 PIPE_FRAME_HIGH_SHIFT);
		low =  ((I915_READ(low_frame) & PIPE_FRAME_LOW_MASK) >>
			PIPE_FRAME_LOW_SHIFT);
		high2 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
			 PIPE_FRAME_HIGH_SHIFT);
	} while (high1 != high2);

	/* Low field contributes 8 bits below the high field. */
	count = (high1 << 8) | low;

	return count;
}
  290. void
  291. i915_gem_vblank_work_handler(struct work_struct *work)
  292. {
  293. drm_i915_private_t *dev_priv;
  294. struct drm_device *dev;
  295. dev_priv = container_of(work, drm_i915_private_t,
  296. mm.vblank_work);
  297. dev = dev_priv->dev;
  298. mutex_lock(&dev->struct_mutex);
  299. i915_vblank_tasklet(dev);
  300. mutex_unlock(&dev->struct_mutex);
  301. }
/* Top-level interrupt handler: acks pipe status and IIR, dispatches vblank,
 * user-interrupt and ASLE events, and kicks off any pending swap blits. */
irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir;
	u32 pipea_stats, pipeb_stats;
	int vblank = 0;

	atomic_inc(&dev_priv->irq_received);

	/* With MSI, mask everything while handling, restored before return. */
	if (dev->pdev->msi_enabled)
		I915_WRITE(IMR, ~0);
	iir = I915_READ(IIR);

	if (iir == 0) {
		/* Not our interrupt: undo the MSI masking and bail. */
		if (dev->pdev->msi_enabled) {
			I915_WRITE(IMR, dev_priv->irq_mask_reg);
			(void) I915_READ(IMR);
		}
		return IRQ_NONE;
	}

	/*
	 * Clear the PIPE(A|B)STAT regs before the IIR otherwise
	 * we may get extra interrupts.
	 */
	if (iir & I915_DISPLAY_PIPE_A_EVENT_INTERRUPT) {
		pipea_stats = I915_READ(PIPEASTAT);

		/* If vblank wasn't requested on pipe A, strip the enable bits
		 * from the value written back below. */
		if (!(dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_A))
			pipea_stats &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE |
					 PIPE_VBLANK_INTERRUPT_ENABLE);
		else if (pipea_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS|
					PIPE_VBLANK_INTERRUPT_STATUS)) {
			vblank++;
			drm_handle_vblank(dev, i915_get_plane(dev, 0));
		}

		I915_WRITE(PIPEASTAT, pipea_stats);
	}

	if (iir & I915_DISPLAY_PIPE_B_EVENT_INTERRUPT) {
		pipeb_stats = I915_READ(PIPEBSTAT);
		/* Ack the event */
		I915_WRITE(PIPEBSTAT, pipeb_stats);

		/* The vblank interrupt gets enabled even if we didn't ask for
		   it, so make sure it's shut down again */
		if (!(dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B))
			pipeb_stats &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE |
					 PIPE_VBLANK_INTERRUPT_ENABLE);
		else if (pipeb_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS|
					PIPE_VBLANK_INTERRUPT_STATUS)) {
			vblank++;
			drm_handle_vblank(dev, i915_get_plane(dev, 1));
		}

		if (pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS)
			opregion_asle_intr(dev);

		/* NOTE(review): PIPEBSTAT is written twice (ack above, then
		 * the possibly-modified value here) while PIPEASTAT is only
		 * written once — confirm the asymmetry is intentional. */
		I915_WRITE(PIPEBSTAT, pipeb_stats);
	}

	I915_WRITE(IIR, iir);
	if (dev->pdev->msi_enabled)
		I915_WRITE(IMR, dev_priv->irq_mask_reg);
	(void) I915_READ(IIR); /* Flush posted writes */

	if (dev_priv->sarea_priv)
		dev_priv->sarea_priv->last_dispatch =
			READ_BREADCRUMB(dev_priv);

	if (iir & I915_USER_INTERRUPT) {
		/* Record the seqno and wake anyone in i915_wait_irq / GEM waits. */
		dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev);
		DRM_WAKEUP(&dev_priv->irq_queue);
	}

	if (iir & I915_ASLE_INTERRUPT)
		opregion_asle_intr(dev);

	/* Run swap blits via the locked tasklet for the legacy ring, or the
	 * workqueue when the ring lives in a GEM object. */
	if (vblank && dev_priv->swaps_pending > 0) {
		if (dev_priv->ring.ring_obj == NULL)
			drm_locked_tasklet(dev, i915_vblank_tasklet);
		else
			schedule_work(&dev_priv->mm.vblank_work);
	}

	return IRQ_HANDLED;
}
/* Emit a breadcrumb store plus a user interrupt on the ring and return the
 * new breadcrumb value.  Called under dev->struct_mutex (see i915_irq_emit). */
static int i915_emit_irq(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;

	i915_kernel_lost_context(dev);

	DRM_DEBUG("\n");

	/* Monotonic counter, wrapped so it stays a positive 31-bit value. */
	dev_priv->counter++;
	if (dev_priv->counter > 0x7FFFFFFFUL)
		dev_priv->counter = 1;
	if (dev_priv->sarea_priv)
		dev_priv->sarea_priv->last_enqueue = dev_priv->counter;

	/* Store the counter into the breadcrumb slot, then raise the IRQ. */
	BEGIN_LP_RING(6);
	OUT_RING(MI_STORE_DWORD_INDEX);
	OUT_RING(5 << MI_STORE_DWORD_INDEX_SHIFT);
	OUT_RING(dev_priv->counter);
	OUT_RING(0);
	OUT_RING(0);
	OUT_RING(MI_USER_INTERRUPT);
	ADVANCE_LP_RING();

	return dev_priv->counter;
}
/* Take a reference on the user interrupt; the first reference unmasks
 * I915_USER_INTERRUPT.  Refcount is serialized by user_irq_lock. */
void i915_user_irq_get(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	spin_lock(&dev_priv->user_irq_lock);
	if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1))
		i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
	spin_unlock(&dev_priv->user_irq_lock);
}
/* Drop a reference on the user interrupt; the last reference re-masks
 * I915_USER_INTERRUPT.  Must balance a prior i915_user_irq_get(). */
void i915_user_irq_put(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	spin_lock(&dev_priv->user_irq_lock);
	/* Catch unbalanced put while the IRQ is installed. */
	BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
	if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0))
		i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
	spin_unlock(&dev_priv->user_irq_lock);
}
/* Block (up to 3 seconds) until the hardware breadcrumb reaches @irq_nr.
 * Returns 0 on success, -EBUSY on timeout, or another negative errno from
 * DRM_WAIT_ON.  Mirrors the final breadcrumb into the sarea for userspace. */
static int i915_wait_irq(struct drm_device * dev, int irq_nr)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int ret = 0;

	DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr,
		  READ_BREADCRUMB(dev_priv));

	/* Already past the requested breadcrumb — nothing to wait for. */
	if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
		if (dev_priv->sarea_priv) {
			dev_priv->sarea_priv->last_dispatch =
				READ_BREADCRUMB(dev_priv);
		}
		return 0;
	}

	if (dev_priv->sarea_priv)
		dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;

	/* Hold a user-interrupt reference so the breadcrumb IRQ can wake us. */
	i915_user_irq_get(dev);
	DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
		    READ_BREADCRUMB(dev_priv) >= irq_nr);
	i915_user_irq_put(dev);

	if (ret == -EBUSY) {
		DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
			  READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
	}

	if (dev_priv->sarea_priv)
		dev_priv->sarea_priv->last_dispatch =
			READ_BREADCRUMB(dev_priv);

	return ret;
}
/* Needs the lock as it touches the ring.
 *
 * DRM_IOCTL_I915_IRQ_EMIT handler: emits a breadcrumb + user interrupt and
 * copies the new sequence number back to userspace. */
int i915_irq_emit(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_irq_emit_t *emit = data;
	int result;

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	mutex_lock(&dev->struct_mutex);
	result = i915_emit_irq(dev);
	mutex_unlock(&dev->struct_mutex);

	/* Hand the new breadcrumb value back to userspace. */
	if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
		DRM_ERROR("copy_to_user\n");
		return -EFAULT;
	}

	return 0;
}
  463. /* Doesn't need the hardware lock.
  464. */
  465. int i915_irq_wait(struct drm_device *dev, void *data,
  466. struct drm_file *file_priv)
  467. {
  468. drm_i915_private_t *dev_priv = dev->dev_private;
  469. drm_i915_irq_wait_t *irqwait = data;
  470. if (!dev_priv) {
  471. DRM_ERROR("called with no initialization\n");
  472. return -EINVAL;
  473. }
  474. return i915_wait_irq(dev, irqwait->irq_seq);
  475. }
/* Enable vblank interrupt delivery for the pipe driving @plane.  Called by
 * the DRM core when someone starts waiting on vblanks.  Always returns 0. */
int i915_enable_vblank(struct drm_device *dev, int plane)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe = i915_get_pipe(dev, plane);
	u32 pipestat_reg = 0;
	u32 pipestat;

	switch (pipe) {
	case 0:
		pipestat_reg = PIPEASTAT;
		i915_enable_irq(dev_priv, I915_DISPLAY_PIPE_A_EVENT_INTERRUPT);
		break;
	case 1:
		pipestat_reg = PIPEBSTAT;
		i915_enable_irq(dev_priv, I915_DISPLAY_PIPE_B_EVENT_INTERRUPT);
		break;
	default:
		DRM_ERROR("tried to enable vblank on non-existent pipe %d\n",
			  pipe);
		break;
	}

	if (pipestat_reg) {
		pipestat = I915_READ(pipestat_reg);
		/* 965 uses the start-of-vblank event; older parts the legacy
		 * vblank event. */
		if (IS_I965G(dev))
			pipestat |= PIPE_START_VBLANK_INTERRUPT_ENABLE;
		else
			pipestat |= PIPE_VBLANK_INTERRUPT_ENABLE;
		/* Clear any stale interrupt status */
		pipestat |= (PIPE_START_VBLANK_INTERRUPT_STATUS |
			     PIPE_VBLANK_INTERRUPT_STATUS);
		I915_WRITE(pipestat_reg, pipestat);
	}

	return 0;
}
  509. void i915_disable_vblank(struct drm_device *dev, int plane)
  510. {
  511. drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
  512. int pipe = i915_get_pipe(dev, plane);
  513. u32 pipestat_reg = 0;
  514. u32 pipestat;
  515. switch (pipe) {
  516. case 0:
  517. pipestat_reg = PIPEASTAT;
  518. i915_disable_irq(dev_priv, I915_DISPLAY_PIPE_A_EVENT_INTERRUPT);
  519. break;
  520. case 1:
  521. pipestat_reg = PIPEBSTAT;
  522. i915_disable_irq(dev_priv, I915_DISPLAY_PIPE_B_EVENT_INTERRUPT);
  523. break;
  524. default:
  525. DRM_ERROR("tried to disable vblank on non-existent pipe %d\n",
  526. pipe);
  527. break;
  528. }
  529. if (pipestat_reg) {
  530. pipestat = I915_READ(pipestat_reg);
  531. pipestat &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE |
  532. PIPE_VBLANK_INTERRUPT_ENABLE);
  533. /* Clear any stale interrupt status */
  534. pipestat |= (PIPE_START_VBLANK_INTERRUPT_STATUS |
  535. PIPE_VBLANK_INTERRUPT_STATUS);
  536. I915_WRITE(pipestat_reg, pipestat);
  537. }
  538. }
  539. /* Set the vblank monitor pipe
  540. */
  541. int i915_vblank_pipe_set(struct drm_device *dev, void *data,
  542. struct drm_file *file_priv)
  543. {
  544. drm_i915_private_t *dev_priv = dev->dev_private;
  545. if (!dev_priv) {
  546. DRM_ERROR("called with no initialization\n");
  547. return -EINVAL;
  548. }
  549. return 0;
  550. }
  551. int i915_vblank_pipe_get(struct drm_device *dev, void *data,
  552. struct drm_file *file_priv)
  553. {
  554. drm_i915_private_t *dev_priv = dev->dev_private;
  555. drm_i915_vblank_pipe_t *pipe = data;
  556. if (!dev_priv) {
  557. DRM_ERROR("called with no initialization\n");
  558. return -EINVAL;
  559. }
  560. pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
  561. return 0;
  562. }
  563. /**
  564. * Schedule buffer swap at given vertical blank.
  565. */
  566. int i915_vblank_swap(struct drm_device *dev, void *data,
  567. struct drm_file *file_priv)
  568. {
  569. drm_i915_private_t *dev_priv = dev->dev_private;
  570. drm_i915_vblank_swap_t *swap = data;
  571. drm_i915_vbl_swap_t *vbl_swap;
  572. unsigned int pipe, seqtype, curseq, plane;
  573. unsigned long irqflags;
  574. struct list_head *list;
  575. int ret;
  576. if (!dev_priv || !dev_priv->sarea_priv) {
  577. DRM_ERROR("%s called with no initialization\n", __func__);
  578. return -EINVAL;
  579. }
  580. if (dev_priv->sarea_priv->rotation) {
  581. DRM_DEBUG("Rotation not supported\n");
  582. return -EINVAL;
  583. }
  584. if (swap->seqtype & ~(_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE |
  585. _DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS)) {
  586. DRM_ERROR("Invalid sequence type 0x%x\n", swap->seqtype);
  587. return -EINVAL;
  588. }
  589. plane = (swap->seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0;
  590. pipe = i915_get_pipe(dev, plane);
  591. seqtype = swap->seqtype & (_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE);
  592. if (!(dev_priv->vblank_pipe & (1 << pipe))) {
  593. DRM_ERROR("Invalid pipe %d\n", pipe);
  594. return -EINVAL;
  595. }
  596. spin_lock_irqsave(&dev->drw_lock, irqflags);
  597. if (!drm_get_drawable_info(dev, swap->drawable)) {
  598. spin_unlock_irqrestore(&dev->drw_lock, irqflags);
  599. DRM_DEBUG("Invalid drawable ID %d\n", swap->drawable);
  600. return -EINVAL;
  601. }
  602. spin_unlock_irqrestore(&dev->drw_lock, irqflags);
  603. /*
  604. * We take the ref here and put it when the swap actually completes
  605. * in the tasklet.
  606. */
  607. ret = drm_vblank_get(dev, pipe);
  608. if (ret)
  609. return ret;
  610. curseq = drm_vblank_count(dev, pipe);
  611. if (seqtype == _DRM_VBLANK_RELATIVE)
  612. swap->sequence += curseq;
  613. if ((curseq - swap->sequence) <= (1<<23)) {
  614. if (swap->seqtype & _DRM_VBLANK_NEXTONMISS) {
  615. swap->sequence = curseq + 1;
  616. } else {
  617. DRM_DEBUG("Missed target sequence\n");
  618. drm_vblank_put(dev, pipe);
  619. return -EINVAL;
  620. }
  621. }
  622. spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
  623. list_for_each(list, &dev_priv->vbl_swaps.head) {
  624. vbl_swap = list_entry(list, drm_i915_vbl_swap_t, head);
  625. if (vbl_swap->drw_id == swap->drawable &&
  626. vbl_swap->plane == plane &&
  627. vbl_swap->sequence == swap->sequence) {
  628. spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
  629. DRM_DEBUG("Already scheduled\n");
  630. return 0;
  631. }
  632. }
  633. spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
  634. if (dev_priv->swaps_pending >= 100) {
  635. DRM_DEBUG("Too many swaps queued\n");
  636. drm_vblank_put(dev, pipe);
  637. return -EBUSY;
  638. }
  639. vbl_swap = drm_calloc(1, sizeof(*vbl_swap), DRM_MEM_DRIVER);
  640. if (!vbl_swap) {
  641. DRM_ERROR("Failed to allocate memory to queue swap\n");
  642. drm_vblank_put(dev, pipe);
  643. return -ENOMEM;
  644. }
  645. DRM_DEBUG("\n");
  646. vbl_swap->drw_id = swap->drawable;
  647. vbl_swap->plane = plane;
  648. vbl_swap->sequence = swap->sequence;
  649. spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
  650. list_add_tail(&vbl_swap->head, &dev_priv->vbl_swaps.head);
  651. dev_priv->swaps_pending++;
  652. spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
  653. return 0;
  654. }
/* drm_dma.h hooks
*/
/* Quiesce interrupt state before the IRQ line is hooked up. */
void i915_driver_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	I915_WRITE(HWSTAM, 0xeffe);	/* hardware status mask */
	I915_WRITE(IMR, 0xffffffff);	/* mask all interrupt sources */
	I915_WRITE(IER, 0x0);		/* disable interrupt generation */
}
/* Finish interrupt setup after the IRQ line is installed: initialize the
 * swap queue, register with the DRM vblank core, and program IMR/IER.
 * Returns 0 on success or the error from drm_vblank_init(). */
int i915_driver_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int ret, num_pipes = 2;

	spin_lock_init(&dev_priv->swaps_lock);
	INIT_LIST_HEAD(&dev_priv->vbl_swaps.head);
	dev_priv->swaps_pending = 0;

	/* Set initial unmasked IRQs to just the selected vblank pipes. */
	dev_priv->irq_mask_reg = ~0;

	ret = drm_vblank_init(dev, num_pipes);
	if (ret)
		return ret;

	dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;

	dev_priv->irq_mask_reg &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
	dev_priv->irq_mask_reg &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */

	/* Never unmask anything outside the driver's enable set. */
	dev_priv->irq_mask_reg &= I915_INTERRUPT_ENABLE_MASK;

	I915_WRITE(IMR, dev_priv->irq_mask_reg);
	I915_WRITE(IER, I915_INTERRUPT_ENABLE_MASK);
	(void) I915_READ(IER);	/* flush posted writes */

	opregion_enable_asle(dev);
	DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);

	return 0;
}
/* Tear down interrupt state: mask and disable everything, then ack any
 * pending pipe status and IIR bits via read-then-write-back. */
void i915_driver_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 temp;

	if (!dev_priv)
		return;

	dev_priv->vblank_pipe = 0;

	I915_WRITE(HWSTAM, 0xffffffff);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	/* Writing the current status back clears any latched events. */
	temp = I915_READ(PIPEASTAT);
	I915_WRITE(PIPEASTAT, temp);
	temp = I915_READ(PIPEBSTAT);
	I915_WRITE(PIPEBSTAT, temp);
	temp = I915_READ(IIR);
	I915_WRITE(IIR, temp);
}