intel_overlay.c

  1. /*
  2. * Copyright © 2009
  3. *
  4. * Permission is hereby granted, free of charge, to any person obtaining a
  5. * copy of this software and associated documentation files (the "Software"),
  6. * to deal in the Software without restriction, including without limitation
  7. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8. * and/or sell copies of the Software, and to permit persons to whom the
  9. * Software is furnished to do so, subject to the following conditions:
  10. *
  11. * The above copyright notice and this permission notice (including the next
  12. * paragraph) shall be included in all copies or substantial portions of the
  13. * Software.
  14. *
  15. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  18. * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  20. * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  21. * SOFTWARE.
  22. *
  23. * Authors:
  24. * Daniel Vetter <daniel@ffwll.ch>
  25. *
  26. * Derived from Xorg ddx, xf86-video-intel, src/i830_video.c
  27. */
  28. #include <linux/seq_file.h>
  29. #include "drmP.h"
  30. #include "drm.h"
  31. #include "i915_drm.h"
  32. #include "i915_drv.h"
  33. #include "i915_reg.h"
  34. #include "intel_drv.h"
  35. /* Limits for overlay size. According to intel doc, the real limits are:
  36. * Y width: 4095, UV width (planar): 2047, Y height: 2047,
  37. * UV height (planar): 1023. But xorg assumes 2048 for both width and height. Use
  38. * the minimum of both. */
  39. #define IMAGE_MAX_WIDTH 2048
  40. #define IMAGE_MAX_HEIGHT 2046 /* 2 * 1023 */
  41. /* on 830 and 845 these large limits result in the card hanging */
  42. #define IMAGE_MAX_WIDTH_LEGACY 1024
  43. #define IMAGE_MAX_HEIGHT_LEGACY 1088
  44. /* overlay register definitions */
  45. /* OCMD register */
  46. #define OCMD_TILED_SURFACE (0x1<<19)
  47. #define OCMD_MIRROR_MASK (0x3<<17)
  48. #define OCMD_MIRROR_MODE (0x3<<17)
  49. #define OCMD_MIRROR_HORIZONTAL (0x1<<17)
  50. #define OCMD_MIRROR_VERTICAL (0x2<<17)
  51. #define OCMD_MIRROR_BOTH (0x3<<17)
  52. #define OCMD_BYTEORDER_MASK (0x3<<14) /* zero for YUYV or FOURCC YUY2 */
  53. #define OCMD_UV_SWAP (0x1<<14) /* YVYU */
  54. #define OCMD_Y_SWAP (0x2<<14) /* UYVY or FOURCC UYVY */
  55. #define OCMD_Y_AND_UV_SWAP (0x3<<14) /* VYUY */
  56. #define OCMD_SOURCE_FORMAT_MASK (0xf<<10)
  57. #define OCMD_RGB_888 (0x1<<10) /* not in i965 Intel docs */
  58. #define OCMD_RGB_555 (0x2<<10) /* not in i965 Intel docs */
  59. #define OCMD_RGB_565 (0x3<<10) /* not in i965 Intel docs */
  60. #define OCMD_YUV_422_PACKED (0x8<<10)
  61. #define OCMD_YUV_411_PACKED (0x9<<10) /* not in i965 Intel docs */
  62. #define OCMD_YUV_420_PLANAR (0xc<<10)
  63. #define OCMD_YUV_422_PLANAR (0xd<<10)
  64. #define OCMD_YUV_410_PLANAR (0xe<<10) /* also 411 */
  65. #define OCMD_TVSYNCFLIP_PARITY (0x1<<9)
  66. #define OCMD_TVSYNCFLIP_ENABLE (0x1<<7)
  67. #define OCMD_BUF_TYPE_MASK (0x1<<5)
  68. #define OCMD_BUF_TYPE_FRAME (0x0<<5)
  69. #define OCMD_BUF_TYPE_FIELD (0x1<<5)
  70. #define OCMD_TEST_MODE (0x1<<4)
  71. #define OCMD_BUFFER_SELECT (0x3<<2)
  72. #define OCMD_BUFFER0 (0x0<<2)
  73. #define OCMD_BUFFER1 (0x1<<2)
  74. #define OCMD_FIELD_SELECT (0x1<<2)
  75. #define OCMD_FIELD0 (0x0<<1)
  76. #define OCMD_FIELD1 (0x1<<1)
  77. #define OCMD_ENABLE (0x1<<0)
  78. /* OCONFIG register */
  79. #define OCONF_PIPE_MASK (0x1<<18)
  80. #define OCONF_PIPE_A (0x0<<18)
  81. #define OCONF_PIPE_B (0x1<<18)
  82. #define OCONF_GAMMA2_ENABLE (0x1<<16)
  83. #define OCONF_CSC_MODE_BT601 (0x0<<5)
  84. #define OCONF_CSC_MODE_BT709 (0x1<<5)
  85. #define OCONF_CSC_BYPASS (0x1<<4)
  86. #define OCONF_CC_OUT_8BIT (0x1<<3)
  87. #define OCONF_TEST_MODE (0x1<<2)
  88. #define OCONF_THREE_LINE_BUFFER (0x1<<0)
  89. #define OCONF_TWO_LINE_BUFFER (0x0<<0)
  90. /* DCLRKM (dst-key) register */
  91. #define DST_KEY_ENABLE (0x1<<31)
  92. #define CLK_RGB24_MASK 0x0
  93. #define CLK_RGB16_MASK 0x070307
  94. #define CLK_RGB15_MASK 0x070707
  95. #define CLK_RGB8I_MASK 0xffffff
  96. #define RGB16_TO_COLORKEY(c) \
  97. (((c & 0xF800) << 8) | ((c & 0x07E0) << 5) | ((c & 0x001F) << 3))
  98. #define RGB15_TO_COLORKEY(c) \
  99. (((c & 0x7c00) << 9) | ((c & 0x03E0) << 6) | ((c & 0x001F) << 3))
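/*
 * For illustration, RGB16_TO_COLORKEY() of a full-white RGB565 key (0xffff)
 * MSB-aligns each channel in its own byte of the 8:8:8 destination key:
 *
 *	(0xffff & 0xf800) << 8 = 0xf80000	(red,   bits 23:19)
 *	(0xffff & 0x07e0) << 5 = 0x00fc00	(green, bits 15:10)
 *	(0xffff & 0x001f) << 3 = 0x0000f8	(blue,  bits  7:3)
 *
 *	RGB16_TO_COLORKEY(0xffff) == 0xf8fcf8
 */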
  100. /* overlay flip addr flag */
  101. #define OFC_UPDATE 0x1
  102. /* polyphase filter coefficients */
  103. #define N_HORIZ_Y_TAPS 5
  104. #define N_VERT_Y_TAPS 3
  105. #define N_HORIZ_UV_TAPS 3
  106. #define N_VERT_UV_TAPS 3
  107. #define N_PHASES 17
  108. #define MAX_TAPS 5
  109. /* memory buffered overlay registers */
  110. struct overlay_registers {
  111. u32 OBUF_0Y;
  112. u32 OBUF_1Y;
  113. u32 OBUF_0U;
  114. u32 OBUF_0V;
  115. u32 OBUF_1U;
  116. u32 OBUF_1V;
  117. u32 OSTRIDE;
  118. u32 YRGB_VPH;
  119. u32 UV_VPH;
  120. u32 HORZ_PH;
  121. u32 INIT_PHS;
  122. u32 DWINPOS;
  123. u32 DWINSZ;
  124. u32 SWIDTH;
  125. u32 SWIDTHSW;
  126. u32 SHEIGHT;
  127. u32 YRGBSCALE;
  128. u32 UVSCALE;
  129. u32 OCLRC0;
  130. u32 OCLRC1;
  131. u32 DCLRKV;
  132. u32 DCLRKM;
  133. u32 SCLRKVH;
  134. u32 SCLRKVL;
  135. u32 SCLRKEN;
  136. u32 OCONFIG;
  137. u32 OCMD;
  138. u32 RESERVED1; /* 0x6C */
  139. u32 OSTART_0Y;
  140. u32 OSTART_1Y;
  141. u32 OSTART_0U;
  142. u32 OSTART_0V;
  143. u32 OSTART_1U;
  144. u32 OSTART_1V;
  145. u32 OTILEOFF_0Y;
  146. u32 OTILEOFF_1Y;
  147. u32 OTILEOFF_0U;
  148. u32 OTILEOFF_0V;
  149. u32 OTILEOFF_1U;
  150. u32 OTILEOFF_1V;
  151. u32 FASTHSCALE; /* 0xA0 */
  152. u32 UVSCALEV; /* 0xA4 */
  153. u32 RESERVEDC[(0x200 - 0xA8) / 4]; /* 0xA8 - 0x1FC */
  154. u16 Y_VCOEFS[N_VERT_Y_TAPS * N_PHASES]; /* 0x200 */
  155. u16 RESERVEDD[0x100 / 2 - N_VERT_Y_TAPS * N_PHASES];
  156. u16 Y_HCOEFS[N_HORIZ_Y_TAPS * N_PHASES]; /* 0x300 */
  157. u16 RESERVEDE[0x200 / 2 - N_HORIZ_Y_TAPS * N_PHASES];
  158. u16 UV_VCOEFS[N_VERT_UV_TAPS * N_PHASES]; /* 0x500 */
  159. u16 RESERVEDF[0x100 / 2 - N_VERT_UV_TAPS * N_PHASES];
  160. u16 UV_HCOEFS[N_HORIZ_UV_TAPS * N_PHASES]; /* 0x600 */
  161. u16 RESERVEDG[0x100 / 2 - N_HORIZ_UV_TAPS * N_PHASES];
  162. };
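/*
 * The offsets noted above imply that the register file spans 0x000-0x6ff,
 * so sizeof(struct overlay_registers) should come out to 0x700 bytes. One
 * could assert this at build time from a function (a hypothetical addition,
 * e.g. in intel_setup_overlay()):
 *
 *	BUILD_BUG_ON(sizeof(struct overlay_registers) != 0x700);
 */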
  165. static struct overlay_registers *intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
  166. {
  167. drm_i915_private_t *dev_priv = overlay->dev->dev_private;
  168. struct overlay_registers *regs;
  169. /* no recursive mappings */
  170. BUG_ON(overlay->virt_addr);
  171. if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) {
  172. regs = overlay->reg_bo->phys_obj->handle->vaddr;
  173. } else {
  174. regs = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
  175. overlay->reg_bo->gtt_offset,
  176. KM_USER0);
  177. if (!regs) {
  178. DRM_ERROR("failed to map overlay regs in GTT\n");
  179. return NULL;
  180. }
  181. }
  182. return overlay->virt_addr = regs;
  183. }
  184. static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay)
  185. {
  186. if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev))
  187. io_mapping_unmap_atomic(overlay->virt_addr, KM_USER0);
  188. overlay->virt_addr = NULL;
  189. return;
  190. }
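/*
 * Typical update sequence with these two helpers, as used further down in
 * this file: map, modify a few register fields, unmap, then kick the
 * hardware. The mapping is atomic (io_mapping_map_atomic_wc), so the code
 * in between must not sleep:
 *
 *	regs = intel_overlay_map_regs_atomic(overlay);
 *	if (regs) {
 *		regs->OCMD = 0;
 *		intel_overlay_unmap_regs_atomic(overlay);
 *	}
 */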
  191. /* overlay needs to be disabled in the OCMD reg */
  192. static int intel_overlay_on(struct intel_overlay *overlay)
  193. {
  194. struct drm_device *dev = overlay->dev;
  195. int ret;
  196. drm_i915_private_t *dev_priv = dev->dev_private;
  197. BUG_ON(overlay->active);
  198. overlay->active = 1;
  199. overlay->hw_wedged = NEEDS_WAIT_FOR_FLIP;
  200. BEGIN_LP_RING(4);
  201. OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_ON);
  202. OUT_RING(overlay->flip_addr | OFC_UPDATE);
  203. OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
  204. OUT_RING(MI_NOOP);
  205. ADVANCE_LP_RING();
  206. overlay->last_flip_req =
  207. i915_add_request(dev, NULL, &dev_priv->render_ring);
  208. if (overlay->last_flip_req == 0)
  209. return -ENOMEM;
  210. ret = i915_do_wait_request(dev,
  211. overlay->last_flip_req, true,
  212. &dev_priv->render_ring);
  213. if (ret != 0)
  214. return ret;
  215. overlay->hw_wedged = 0;
  216. overlay->last_flip_req = 0;
  217. return 0;
  218. }
  219. /* overlay needs to be enabled in OCMD reg */
  220. static void intel_overlay_continue(struct intel_overlay *overlay,
  221. bool load_polyphase_filter)
  222. {
  223. struct drm_device *dev = overlay->dev;
  224. drm_i915_private_t *dev_priv = dev->dev_private;
  225. u32 flip_addr = overlay->flip_addr;
  226. u32 tmp;
  227. BUG_ON(!overlay->active);
  228. if (load_polyphase_filter)
  229. flip_addr |= OFC_UPDATE;
  230. /* check for underruns */
  231. tmp = I915_READ(DOVSTA);
  232. if (tmp & (1 << 17))
  233. DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
  234. BEGIN_LP_RING(2);
  235. OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
  236. OUT_RING(flip_addr);
  237. ADVANCE_LP_RING();
  238. overlay->last_flip_req =
  239. i915_add_request(dev, NULL, &dev_priv->render_ring);
  240. }
  241. static int intel_overlay_wait_flip(struct intel_overlay *overlay)
  242. {
  243. struct drm_device *dev = overlay->dev;
  244. drm_i915_private_t *dev_priv = dev->dev_private;
  245. int ret;
  246. u32 tmp;
  247. if (overlay->last_flip_req != 0) {
  248. ret = i915_do_wait_request(dev,
  249. overlay->last_flip_req, true,
  250. &dev_priv->render_ring);
  251. if (ret == 0) {
  252. overlay->last_flip_req = 0;
  253. tmp = I915_READ(ISR);
  254. if (!(tmp & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT))
  255. return 0;
  256. }
  257. }
  258. /* synchronous slowpath */
  259. overlay->hw_wedged = RELEASE_OLD_VID;
  260. BEGIN_LP_RING(2);
  261. OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
  262. OUT_RING(MI_NOOP);
  263. ADVANCE_LP_RING();
  264. overlay->last_flip_req =
  265. i915_add_request(dev, NULL, &dev_priv->render_ring);
  266. if (overlay->last_flip_req == 0)
  267. return -ENOMEM;
  268. ret = i915_do_wait_request(dev,
  269. overlay->last_flip_req, true,
  270. &dev_priv->render_ring);
  271. if (ret != 0)
  272. return ret;
  273. overlay->hw_wedged = 0;
  274. overlay->last_flip_req = 0;
  275. return 0;
  276. }
  277. /* overlay needs to be disabled in OCMD reg */
  278. static int intel_overlay_off(struct intel_overlay *overlay)
  279. {
  280. u32 flip_addr = overlay->flip_addr;
  281. struct drm_device *dev = overlay->dev;
  282. drm_i915_private_t *dev_priv = dev->dev_private;
  283. int ret;
  284. BUG_ON(!overlay->active);
  285. /* According to intel docs the overlay hw may hang (when switching
  286. * off) without loading the filter coeffs. It is however unclear whether
  287. * this applies to the disabling of the overlay or to the switching off
  288. * of the hw. Do it in both cases */
  289. flip_addr |= OFC_UPDATE;
  290. /* wait for overlay to go idle */
  291. overlay->hw_wedged = SWITCH_OFF_STAGE_1;
  292. BEGIN_LP_RING(4);
  293. OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
  294. OUT_RING(flip_addr);
  295. OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
  296. OUT_RING(MI_NOOP);
  297. ADVANCE_LP_RING();
  298. overlay->last_flip_req =
  299. i915_add_request(dev, NULL, &dev_priv->render_ring);
  300. if (overlay->last_flip_req == 0)
  301. return -ENOMEM;
  302. ret = i915_do_wait_request(dev,
  303. overlay->last_flip_req, true,
  304. &dev_priv->render_ring);
  305. if (ret != 0)
  306. return ret;
  307. /* turn overlay off */
  308. overlay->hw_wedged = SWITCH_OFF_STAGE_2;
  309. BEGIN_LP_RING(4);
  310. OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
  311. OUT_RING(flip_addr);
  312. OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
  313. OUT_RING(MI_NOOP);
  314. ADVANCE_LP_RING();
  315. overlay->last_flip_req =
  316. i915_add_request(dev, NULL, &dev_priv->render_ring);
  317. if (overlay->last_flip_req == 0)
  318. return -ENOMEM;
  319. ret = i915_do_wait_request(dev,
  320. overlay->last_flip_req, true,
  321. &dev_priv->render_ring);
  322. if (ret != 0)
  323. return ret;
  324. overlay->hw_wedged = 0;
  325. overlay->last_flip_req = 0;
  326. return ret;
  327. }
  328. static void intel_overlay_off_tail(struct intel_overlay *overlay)
  329. {
  330. struct drm_gem_object *obj;
  331. /* never have the overlay hw on without showing a frame */
  332. BUG_ON(!overlay->vid_bo);
  333. obj = &overlay->vid_bo->base;
  334. i915_gem_object_unpin(obj);
  335. drm_gem_object_unreference(obj);
  336. overlay->vid_bo = NULL;
  337. overlay->crtc->overlay = NULL;
  338. overlay->crtc = NULL;
  339. overlay->active = 0;
  340. }
  341. /* Recover from an interruption due to a signal.
  342. * We have to be careful not to repeat work forever and to make forward progress. */
  343. int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
  344. bool interruptible)
  345. {
  346. struct drm_device *dev = overlay->dev;
  347. struct drm_gem_object *obj;
  348. drm_i915_private_t *dev_priv = dev->dev_private;
  349. u32 flip_addr;
  350. int ret;
  351. if (overlay->hw_wedged == HW_WEDGED)
  352. return -EIO;
  353. if (overlay->last_flip_req == 0) {
  354. overlay->last_flip_req =
  355. i915_add_request(dev, NULL, &dev_priv->render_ring);
  356. if (overlay->last_flip_req == 0)
  357. return -ENOMEM;
  358. }
  359. ret = i915_do_wait_request(dev, overlay->last_flip_req,
  360. interruptible, &dev_priv->render_ring);
  361. if (ret != 0)
  362. return ret;
  363. switch (overlay->hw_wedged) {
  364. case RELEASE_OLD_VID:
  365. obj = &overlay->old_vid_bo->base;
  366. i915_gem_object_unpin(obj);
  367. drm_gem_object_unreference(obj);
  368. overlay->old_vid_bo = NULL;
  369. break;
  370. case SWITCH_OFF_STAGE_1:
  371. flip_addr = overlay->flip_addr;
  372. flip_addr |= OFC_UPDATE;
  373. overlay->hw_wedged = SWITCH_OFF_STAGE_2;
  374. BEGIN_LP_RING(4);
  375. OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
  376. OUT_RING(flip_addr);
  377. OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
  378. OUT_RING(MI_NOOP);
  379. ADVANCE_LP_RING();
  380. overlay->last_flip_req =
  381. i915_add_request(dev, NULL,
  382. &dev_priv->render_ring);
  383. if (overlay->last_flip_req == 0)
  384. return -ENOMEM;
  385. ret = i915_do_wait_request(dev, overlay->last_flip_req,
  386. interruptible,
  387. &dev_priv->render_ring);
  388. if (ret != 0)
  389. return ret;
  390. case SWITCH_OFF_STAGE_2:
  391. intel_overlay_off_tail(overlay);
  392. break;
  393. default:
  394. BUG_ON(overlay->hw_wedged != NEEDS_WAIT_FOR_FLIP);
  395. }
  396. overlay->hw_wedged = 0;
  397. overlay->last_flip_req = 0;
  398. return 0;
  399. }
  400. /* Wait for pending overlay flip and release old frame.
  401. * Needs to be called before the overlay registers are changed
  402. * via intel_overlay_(un)map_regs_atomic */
  403. static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
  404. {
  405. int ret;
  406. struct drm_gem_object *obj;
  407. /* only wait if there is actually an old frame to release to
  408. * guarantee forward progress */
  409. if (!overlay->old_vid_bo)
  410. return 0;
  411. ret = intel_overlay_wait_flip(overlay);
  412. if (ret != 0)
  413. return ret;
  414. obj = &overlay->old_vid_bo->base;
  415. i915_gem_object_unpin(obj);
  416. drm_gem_object_unreference(obj);
  417. overlay->old_vid_bo = NULL;
  418. return 0;
  419. }
  420. struct put_image_params {
  421. int format;
  422. short dst_x;
  423. short dst_y;
  424. short dst_w;
  425. short dst_h;
  426. short src_w;
  427. short src_scan_h;
  428. short src_scan_w;
  429. short src_h;
  430. short stride_Y;
  431. short stride_UV;
  432. int offset_Y;
  433. int offset_U;
  434. int offset_V;
  435. };
  436. static int packed_depth_bytes(u32 format)
  437. {
  438. switch (format & I915_OVERLAY_DEPTH_MASK) {
  439. case I915_OVERLAY_YUV422:
  440. return 4;
  441. case I915_OVERLAY_YUV411:
  442. /* return 6; not implemented */
  443. default:
  444. return -EINVAL;
  445. }
  446. }
  447. static int packed_width_bytes(u32 format, short width)
  448. {
  449. switch (format & I915_OVERLAY_DEPTH_MASK) {
  450. case I915_OVERLAY_YUV422:
  451. return width << 1;
  452. default:
  453. return -EINVAL;
  454. }
  455. }
  456. static int uv_hsubsampling(u32 format)
  457. {
  458. switch (format & I915_OVERLAY_DEPTH_MASK) {
  459. case I915_OVERLAY_YUV422:
  460. case I915_OVERLAY_YUV420:
  461. return 2;
  462. case I915_OVERLAY_YUV411:
  463. case I915_OVERLAY_YUV410:
  464. return 4;
  465. default:
  466. return -EINVAL;
  467. }
  468. }
  469. static int uv_vsubsampling(u32 format)
  470. {
  471. switch (format & I915_OVERLAY_DEPTH_MASK) {
  472. case I915_OVERLAY_YUV420:
  473. case I915_OVERLAY_YUV410:
  474. return 2;
  475. case I915_OVERLAY_YUV422:
  476. case I915_OVERLAY_YUV411:
  477. return 1;
  478. default:
  479. return -EINVAL;
  480. }
  481. }
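/*
 * For reference, the subsampling factors the two helpers above return per
 * source format:
 *
 *	format		uv_hsubsampling		uv_vsubsampling
 *	YUV422		2			1
 *	YUV420		2			2
 *	YUV411		4			1
 *	YUV410		4			2
 */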
  482. static u32 calc_swidthsw(struct drm_device *dev, u32 offset, u32 width)
  483. {
  484. u32 mask, shift, ret;
  485. if (IS_I9XX(dev)) {
  486. mask = 0x3f;
  487. shift = 6;
  488. } else {
  489. mask = 0x1f;
  490. shift = 5;
  491. }
  492. ret = ((offset + width + mask) >> shift) - (offset >> shift);
  493. if (IS_I9XX(dev))
  494. ret <<= 1;
  495. ret -= 1;
  496. return ret << 2;
  497. }
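/*
 * Worked example for calc_swidthsw() on an i9xx chip (mask 0x3f, shift 6),
 * with offset 0 and a 1024-byte wide Y line:
 *
 *	((0 + 1024 + 63) >> 6) - (0 >> 6) = 16
 *	16 << 1 = 32;  32 - 1 = 31;  31 << 2 = 124 = 0x7c
 *
 * 0x7c is the value that ends up in SWIDTHSW for that plane.
 */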
  498. static const u16 y_static_hcoeffs[N_HORIZ_Y_TAPS * N_PHASES] = {
  499. 0x3000, 0xb4a0, 0x1930, 0x1920, 0xb4a0,
  500. 0x3000, 0xb500, 0x19d0, 0x1880, 0xb440,
  501. 0x3000, 0xb540, 0x1a88, 0x2f80, 0xb3e0,
  502. 0x3000, 0xb580, 0x1b30, 0x2e20, 0xb380,
  503. 0x3000, 0xb5c0, 0x1bd8, 0x2cc0, 0xb320,
  504. 0x3020, 0xb5e0, 0x1c60, 0x2b80, 0xb2c0,
  505. 0x3020, 0xb5e0, 0x1cf8, 0x2a20, 0xb260,
  506. 0x3020, 0xb5e0, 0x1d80, 0x28e0, 0xb200,
  507. 0x3020, 0xb5c0, 0x1e08, 0x3f40, 0xb1c0,
  508. 0x3020, 0xb580, 0x1e78, 0x3ce0, 0xb160,
  509. 0x3040, 0xb520, 0x1ed8, 0x3aa0, 0xb120,
  510. 0x3040, 0xb4a0, 0x1f30, 0x3880, 0xb0e0,
  511. 0x3040, 0xb400, 0x1f78, 0x3680, 0xb0a0,
  512. 0x3020, 0xb340, 0x1fb8, 0x34a0, 0xb060,
  513. 0x3020, 0xb240, 0x1fe0, 0x32e0, 0xb040,
  514. 0x3020, 0xb140, 0x1ff8, 0x3160, 0xb020,
  515. 0xb000, 0x3000, 0x0800, 0x3000, 0xb000
  516. };
  517. static const u16 uv_static_hcoeffs[N_HORIZ_UV_TAPS * N_PHASES] = {
  518. 0x3000, 0x1800, 0x1800, 0xb000, 0x18d0, 0x2e60,
  519. 0xb000, 0x1990, 0x2ce0, 0xb020, 0x1a68, 0x2b40,
  520. 0xb040, 0x1b20, 0x29e0, 0xb060, 0x1bd8, 0x2880,
  521. 0xb080, 0x1c88, 0x3e60, 0xb0a0, 0x1d28, 0x3c00,
  522. 0xb0c0, 0x1db8, 0x39e0, 0xb0e0, 0x1e40, 0x37e0,
  523. 0xb100, 0x1eb8, 0x3620, 0xb100, 0x1f18, 0x34a0,
  524. 0xb100, 0x1f68, 0x3360, 0xb0e0, 0x1fa8, 0x3240,
  525. 0xb0c0, 0x1fe0, 0x3140, 0xb060, 0x1ff0, 0x30a0,
  526. 0x3000, 0x0800, 0x3000
  527. };
  528. static void update_polyphase_filter(struct overlay_registers *regs)
  529. {
  530. memcpy(regs->Y_HCOEFS, y_static_hcoeffs, sizeof(y_static_hcoeffs));
  531. memcpy(regs->UV_HCOEFS, uv_static_hcoeffs, sizeof(uv_static_hcoeffs));
  532. }
  533. static bool update_scaling_factors(struct intel_overlay *overlay,
  534. struct overlay_registers *regs,
  535. struct put_image_params *params)
  536. {
  537. /* fixed point with a 12 bit shift */
  538. u32 xscale, yscale, xscale_UV, yscale_UV;
  539. #define FP_SHIFT 12
  540. #define FRACT_MASK 0xfff
  541. bool scale_changed = false;
  542. int uv_hscale = uv_hsubsampling(params->format);
  543. int uv_vscale = uv_vsubsampling(params->format);
  544. if (params->dst_w > 1)
  545. xscale = ((params->src_scan_w - 1) << FP_SHIFT)
  546. /(params->dst_w);
  547. else
  548. xscale = 1 << FP_SHIFT;
  549. if (params->dst_h > 1)
  550. yscale = ((params->src_scan_h - 1) << FP_SHIFT)
  551. /(params->dst_h);
  552. else
  553. yscale = 1 << FP_SHIFT;
  554. /*if (params->format & I915_OVERLAY_YUV_PLANAR) {*/
  555. xscale_UV = xscale/uv_hscale;
  556. yscale_UV = yscale/uv_vscale;
  557. /* make the Y scale to UV scale ratio an exact multiple */
  558. xscale = xscale_UV * uv_hscale;
  559. yscale = yscale_UV * uv_vscale;
  560. /*} else {
  561. xscale_UV = 0;
  562. yscale_UV = 0;
  563. }*/
  564. if (xscale != overlay->old_xscale || yscale != overlay->old_yscale)
  565. scale_changed = true;
  566. overlay->old_xscale = xscale;
  567. overlay->old_yscale = yscale;
  568. regs->YRGBSCALE = (((yscale & FRACT_MASK) << 20) |
  569. ((xscale >> FP_SHIFT) << 16) |
  570. ((xscale & FRACT_MASK) << 3));
  571. regs->UVSCALE = (((yscale_UV & FRACT_MASK) << 20) |
  572. ((xscale_UV >> FP_SHIFT) << 16) |
  573. ((xscale_UV & FRACT_MASK) << 3));
  574. regs->UVSCALEV = ((((yscale >> FP_SHIFT) << 16) |
  575. ((yscale_UV >> FP_SHIFT) << 0)));
  576. if (scale_changed)
  577. update_polyphase_filter(regs);
  578. return scale_changed;
  579. }
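/*
 * Illustrative numbers for update_scaling_factors(): scanning a 1280-pixel
 * wide source into a 640-pixel wide destination with a 4:2:0 format
 * (uv_hscale == 2):
 *
 *	xscale    = ((1280 - 1) << 12) / 640 = 8185	(~2.0 in .12 fixed point)
 *	xscale_UV = 8185 / 2                 = 4092
 *	xscale    = 4092 * 2                 = 8184	(rounded to a UV multiple)
 *
 * The integer part (xscale >> FP_SHIFT) and fraction (xscale & FRACT_MASK)
 * are then packed into YRGBSCALE/UVSCALE as above.
 */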
  580. static void update_colorkey(struct intel_overlay *overlay,
  581. struct overlay_registers *regs)
  582. {
  583. u32 key = overlay->color_key;
  584. switch (overlay->crtc->base.fb->bits_per_pixel) {
  585. case 8:
  586. regs->DCLRKV = 0;
  587. regs->DCLRKM = CLK_RGB8I_MASK | DST_KEY_ENABLE;
  588. break;
  589. case 16:
  590. if (overlay->crtc->base.fb->depth == 15) {
  591. regs->DCLRKV = RGB15_TO_COLORKEY(key);
  592. regs->DCLRKM = CLK_RGB15_MASK | DST_KEY_ENABLE;
  593. } else {
  594. regs->DCLRKV = RGB16_TO_COLORKEY(key);
  595. regs->DCLRKM = CLK_RGB16_MASK | DST_KEY_ENABLE;
  596. }
  597. break;
  598. case 24:
  599. case 32:
  600. regs->DCLRKV = key;
  601. regs->DCLRKM = CLK_RGB24_MASK | DST_KEY_ENABLE;
  602. break;
  603. }
  604. }
  605. static u32 overlay_cmd_reg(struct put_image_params *params)
  606. {
  607. u32 cmd = OCMD_ENABLE | OCMD_BUF_TYPE_FRAME | OCMD_BUFFER0;
  608. if (params->format & I915_OVERLAY_YUV_PLANAR) {
  609. switch (params->format & I915_OVERLAY_DEPTH_MASK) {
  610. case I915_OVERLAY_YUV422:
  611. cmd |= OCMD_YUV_422_PLANAR;
  612. break;
  613. case I915_OVERLAY_YUV420:
  614. cmd |= OCMD_YUV_420_PLANAR;
  615. break;
  616. case I915_OVERLAY_YUV411:
  617. case I915_OVERLAY_YUV410:
  618. cmd |= OCMD_YUV_410_PLANAR;
  619. break;
  620. }
  621. } else { /* YUV packed */
  622. switch (params->format & I915_OVERLAY_DEPTH_MASK) {
  623. case I915_OVERLAY_YUV422:
  624. cmd |= OCMD_YUV_422_PACKED;
  625. break;
  626. case I915_OVERLAY_YUV411:
  627. cmd |= OCMD_YUV_411_PACKED;
  628. break;
  629. }
  630. switch (params->format & I915_OVERLAY_SWAP_MASK) {
  631. case I915_OVERLAY_NO_SWAP:
  632. break;
  633. case I915_OVERLAY_UV_SWAP:
  634. cmd |= OCMD_UV_SWAP;
  635. break;
  636. case I915_OVERLAY_Y_SWAP:
  637. cmd |= OCMD_Y_SWAP;
  638. break;
  639. case I915_OVERLAY_Y_AND_UV_SWAP:
  640. cmd |= OCMD_Y_AND_UV_SWAP;
  641. break;
  642. }
  643. }
  644. return cmd;
  645. }
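/*
 * Example OCMD value: a packed YUYV frame (YUV422 packed, no swap) yields
 *
 *	OCMD_ENABLE | OCMD_BUF_TYPE_FRAME | OCMD_BUFFER0 | OCMD_YUV_422_PACKED
 *	  = 0x1 | 0x0 | 0x0 | (0x8 << 10) = 0x2001
 */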
  646. int intel_overlay_do_put_image(struct intel_overlay *overlay,
  647. struct drm_gem_object *new_bo,
  648. struct put_image_params *params)
  649. {
  650. int ret, tmp_width;
  651. struct overlay_registers *regs;
  652. bool scale_changed = false;
  653. struct drm_i915_gem_object *bo_priv = to_intel_bo(new_bo);
  654. struct drm_device *dev = overlay->dev;
  655. BUG_ON(!mutex_is_locked(&dev->struct_mutex));
  656. BUG_ON(!mutex_is_locked(&dev->mode_config.mutex));
  657. BUG_ON(!overlay);
  658. ret = intel_overlay_release_old_vid(overlay);
  659. if (ret != 0)
  660. return ret;
  661. ret = i915_gem_object_pin(new_bo, PAGE_SIZE);
  662. if (ret != 0)
  663. return ret;
  664. ret = i915_gem_object_set_to_gtt_domain(new_bo, 0);
  665. if (ret != 0)
  666. goto out_unpin;
  667. if (!overlay->active) {
  668. regs = intel_overlay_map_regs_atomic(overlay);
  669. if (!regs) {
  670. ret = -ENOMEM;
  671. goto out_unpin;
  672. }
  673. regs->OCONFIG = OCONF_CC_OUT_8BIT;
  674. if (IS_I965GM(overlay->dev))
  675. regs->OCONFIG |= OCONF_CSC_MODE_BT709;
  676. regs->OCONFIG |= overlay->crtc->pipe == 0 ?
  677. OCONF_PIPE_A : OCONF_PIPE_B;
  678. intel_overlay_unmap_regs_atomic(overlay);
  679. ret = intel_overlay_on(overlay);
  680. if (ret != 0)
  681. goto out_unpin;
  682. }
  683. regs = intel_overlay_map_regs_atomic(overlay);
  684. if (!regs) {
  685. ret = -ENOMEM;
  686. goto out_unpin;
  687. }
  688. regs->DWINPOS = (params->dst_y << 16) | params->dst_x;
  689. regs->DWINSZ = (params->dst_h << 16) | params->dst_w;
  690. if (params->format & I915_OVERLAY_YUV_PACKED)
  691. tmp_width = packed_width_bytes(params->format, params->src_w);
  692. else
  693. tmp_width = params->src_w;
  694. regs->SWIDTH = params->src_w;
  695. regs->SWIDTHSW = calc_swidthsw(overlay->dev,
  696. params->offset_Y, tmp_width);
  697. regs->SHEIGHT = params->src_h;
  698. regs->OBUF_0Y = bo_priv->gtt_offset + params->offset_Y;
  699. regs->OSTRIDE = params->stride_Y;
  700. if (params->format & I915_OVERLAY_YUV_PLANAR) {
  701. int uv_hscale = uv_hsubsampling(params->format);
  702. int uv_vscale = uv_vsubsampling(params->format);
  703. u32 tmp_U, tmp_V;
  704. regs->SWIDTH |= (params->src_w/uv_hscale) << 16;
  705. tmp_U = calc_swidthsw(overlay->dev, params->offset_U,
  706. params->src_w/uv_hscale);
  707. tmp_V = calc_swidthsw(overlay->dev, params->offset_V,
  708. params->src_w/uv_hscale);
  709. regs->SWIDTHSW |= max_t(u32, tmp_U, tmp_V) << 16;
  710. regs->SHEIGHT |= (params->src_h/uv_vscale) << 16;
  711. regs->OBUF_0U = bo_priv->gtt_offset + params->offset_U;
  712. regs->OBUF_0V = bo_priv->gtt_offset + params->offset_V;
  713. regs->OSTRIDE |= params->stride_UV << 16;
  714. }
  715. scale_changed = update_scaling_factors(overlay, regs, params);
  716. update_colorkey(overlay, regs);
  717. regs->OCMD = overlay_cmd_reg(params);
  718. intel_overlay_unmap_regs_atomic(overlay);
  719. intel_overlay_continue(overlay, scale_changed);
  720. overlay->old_vid_bo = overlay->vid_bo;
  721. overlay->vid_bo = to_intel_bo(new_bo);
  722. return 0;
  723. out_unpin:
  724. i915_gem_object_unpin(new_bo);
  725. return ret;
  726. }
  727. int intel_overlay_switch_off(struct intel_overlay *overlay)
  728. {
  729. int ret;
  730. struct overlay_registers *regs;
  731. struct drm_device *dev = overlay->dev;
  732. BUG_ON(!mutex_is_locked(&dev->struct_mutex));
  733. BUG_ON(!mutex_is_locked(&dev->mode_config.mutex));
  734. if (overlay->hw_wedged) {
  735. ret = intel_overlay_recover_from_interrupt(overlay, 1);
  736. if (ret != 0)
  737. return ret;
  738. }
  739. if (!overlay->active)
  740. return 0;
  741. ret = intel_overlay_release_old_vid(overlay);
  742. if (ret != 0)
  743. return ret;
  744. regs = intel_overlay_map_regs_atomic(overlay);
  745. regs->OCMD = 0;
  746. intel_overlay_unmap_regs_atomic(overlay);
  747. ret = intel_overlay_off(overlay);
  748. if (ret != 0)
  749. return ret;
  750. intel_overlay_off_tail(overlay);
  751. return 0;
  752. }
  753. static int check_overlay_possible_on_crtc(struct intel_overlay *overlay,
  754. struct intel_crtc *crtc)
  755. {
  756. drm_i915_private_t *dev_priv = overlay->dev->dev_private;
  757. u32 pipeconf;
  758. int pipeconf_reg = (crtc->pipe == 0) ? PIPEACONF : PIPEBCONF;
  759. if (!crtc->base.enabled || crtc->dpms_mode != DRM_MODE_DPMS_ON)
  760. return -EINVAL;
  761. pipeconf = I915_READ(pipeconf_reg);
  762. /* can't use the overlay with double wide pipe */
  763. if (!IS_I965G(overlay->dev) && pipeconf & PIPEACONF_DOUBLE_WIDE)
  764. return -EINVAL;
  765. return 0;
  766. }
  767. static void update_pfit_vscale_ratio(struct intel_overlay *overlay)
  768. {
  769. struct drm_device *dev = overlay->dev;
  770. drm_i915_private_t *dev_priv = dev->dev_private;
  771. u32 ratio;
  772. u32 pfit_control = I915_READ(PFIT_CONTROL);
  773. /* XXX: This is not the same logic as in the xorg driver, but more in
  774. * line with the intel documentation for the i965 */
  775. if (!IS_I965G(dev) && (pfit_control & VERT_AUTO_SCALE)) {
  776. ratio = I915_READ(PFIT_AUTO_RATIOS) >> PFIT_VERT_SCALE_SHIFT;
  777. } else { /* on i965 use the PGM reg to read out the autoscaler values */
  778. ratio = I915_READ(PFIT_PGM_RATIOS);
  779. if (IS_I965G(dev))
  780. ratio >>= PFIT_VERT_SCALE_SHIFT_965;
  781. else
  782. ratio >>= PFIT_VERT_SCALE_SHIFT;
  783. }
  784. overlay->pfit_vscale_ratio = ratio;
  785. }
  786. static int check_overlay_dst(struct intel_overlay *overlay,
  787. struct drm_intel_overlay_put_image *rec)
  788. {
  789. struct drm_display_mode *mode = &overlay->crtc->base.mode;
  790. if (rec->dst_x < mode->crtc_hdisplay &&
  791. rec->dst_x + rec->dst_width <= mode->crtc_hdisplay &&
  792. rec->dst_y < mode->crtc_vdisplay &&
  793. rec->dst_y + rec->dst_height <= mode->crtc_vdisplay)
  794. return 0;
  795. else
  796. return -EINVAL;
  797. }
  798. static int check_overlay_scaling(struct put_image_params *rec)
  799. {
  800. u32 tmp;
  801. /* downscaling limit is 8.0 */
  802. tmp = ((rec->src_scan_h << 16) / rec->dst_h) >> 16;
  803. if (tmp > 7)
  804. return -EINVAL;
  805. tmp = ((rec->src_scan_w << 16) / rec->dst_w) >> 16;
  806. if (tmp > 7)
  807. return -EINVAL;
  808. return 0;
  809. }
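/*
 * Example: a 960-line source scan into a 100-line destination gives
 * ((960 << 16) / 100) >> 16 = 9, which is larger than 7 and thus rejected;
 * anything with an integer ratio of at most 7 (i.e. just under 8x
 * downscaling) passes.
 */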
  810. static int check_overlay_src(struct drm_device *dev,
  811. struct drm_intel_overlay_put_image *rec,
  812. struct drm_gem_object *new_bo)
  813. {
  814. u32 stride_mask;
  815. int depth;
  816. int uv_hscale = uv_hsubsampling(rec->flags);
  817. int uv_vscale = uv_vsubsampling(rec->flags);
  818. size_t tmp;
  819. /* check src dimensions */
  820. if (IS_845G(dev) || IS_I830(dev)) {
  821. if (rec->src_height > IMAGE_MAX_HEIGHT_LEGACY ||
  822. rec->src_width > IMAGE_MAX_WIDTH_LEGACY)
  823. return -EINVAL;
  824. } else {
  825. if (rec->src_height > IMAGE_MAX_HEIGHT ||
  826. rec->src_width > IMAGE_MAX_WIDTH)
  827. return -EINVAL;
  828. }
  829. /* better safe than sorry, use 4 as the maximal subsampling ratio */
  830. if (rec->src_height < N_VERT_Y_TAPS*4 ||
  831. rec->src_width < N_HORIZ_Y_TAPS*4)
  832. return -EINVAL;
  833. /* check alignment constraints */
  834. switch (rec->flags & I915_OVERLAY_TYPE_MASK) {
  835. case I915_OVERLAY_RGB:
  836. /* not implemented */
  837. return -EINVAL;
  838. case I915_OVERLAY_YUV_PACKED:
  839. depth = packed_depth_bytes(rec->flags);
  840. if (uv_vscale != 1)
  841. return -EINVAL;
  842. if (depth < 0)
  843. return depth;
  844. /* ignore UV planes */
  845. rec->stride_UV = 0;
  846. rec->offset_U = 0;
  847. rec->offset_V = 0;
  848. /* check pixel alignment */
  849. if (rec->offset_Y % depth)
  850. return -EINVAL;
  851. break;
  852. case I915_OVERLAY_YUV_PLANAR:
  853. if (uv_vscale < 0 || uv_hscale < 0)
  854. return -EINVAL;
  855. /* no offset restrictions for planar formats */
  856. break;
  857. default:
  858. return -EINVAL;
  859. }
  860. if (rec->src_width % uv_hscale)
  861. return -EINVAL;
  862. /* stride checking */
  863. if (IS_I830(dev) || IS_845G(dev))
  864. stride_mask = 255;
  865. else
  866. stride_mask = 63;
  867. if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask)
  868. return -EINVAL;
  869. if (IS_I965G(dev) && rec->stride_Y < 512)
  870. return -EINVAL;
  871. tmp = (rec->flags & I915_OVERLAY_TYPE_MASK) == I915_OVERLAY_YUV_PLANAR ?
  872. 4 : 8;
  873. if (rec->stride_Y > tmp*1024 || rec->stride_UV > 2*1024)
  874. return -EINVAL;
  875. /* check buffer dimensions */
  876. switch (rec->flags & I915_OVERLAY_TYPE_MASK) {
  877. case I915_OVERLAY_RGB:
  878. case I915_OVERLAY_YUV_PACKED:
  879. /* always 4 Y values per depth pixels */
  880. if (packed_width_bytes(rec->flags, rec->src_width) > rec->stride_Y)
  881. return -EINVAL;
  882. tmp = rec->stride_Y*rec->src_height;
  883. if (rec->offset_Y + tmp > new_bo->size)
  884. return -EINVAL;
  885. break;
  886. case I915_OVERLAY_YUV_PLANAR:
  887. if (rec->src_width > rec->stride_Y)
  888. return -EINVAL;
  889. if (rec->src_width/uv_hscale > rec->stride_UV)
  890. return -EINVAL;
  891. tmp = rec->stride_Y*rec->src_height;
  892. if (rec->offset_Y + tmp > new_bo->size)
  893. return -EINVAL;
  894. tmp = rec->stride_UV*rec->src_height;
  895. tmp /= uv_vscale;
  896. if (rec->offset_U + tmp > new_bo->size ||
  897. rec->offset_V + tmp > new_bo->size)
  898. return -EINVAL;
  899. break;
  900. }
  901. return 0;
  902. }
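/*
 * An illustrative parameter set that passes the checks above: a 720x480
 * 4:2:0 planar frame with 64-byte aligned strides,
 *
 *	stride_Y = 768, stride_UV = 384
 *	offset_Y = 0
 *	offset_U = 768 * 480             = 368640
 *	offset_V = offset_U + 384 * 240  = 460800
 *
 * which needs a GEM object of at least 460800 + 384 * 240 = 552960 bytes
 * (exactly 135 pages).
 */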
  903. int intel_overlay_put_image(struct drm_device *dev, void *data,
  904. struct drm_file *file_priv)
  905. {
  906. struct drm_intel_overlay_put_image *put_image_rec = data;
  907. drm_i915_private_t *dev_priv = dev->dev_private;
  908. struct intel_overlay *overlay;
  909. struct drm_mode_object *drmmode_obj;
  910. struct intel_crtc *crtc;
  911. struct drm_gem_object *new_bo;
  912. struct put_image_params *params;
  913. int ret;
  914. if (!dev_priv) {
  915. DRM_ERROR("called with no initialization\n");
  916. return -EINVAL;
  917. }
  918. overlay = dev_priv->overlay;
  919. if (!overlay) {
  920. DRM_DEBUG("userspace bug: no overlay\n");
  921. return -ENODEV;
  922. }
  923. if (!(put_image_rec->flags & I915_OVERLAY_ENABLE)) {
  924. mutex_lock(&dev->mode_config.mutex);
  925. mutex_lock(&dev->struct_mutex);
  926. ret = intel_overlay_switch_off(overlay);
  927. mutex_unlock(&dev->struct_mutex);
  928. mutex_unlock(&dev->mode_config.mutex);
  929. return ret;
  930. }
  931. params = kmalloc(sizeof(struct put_image_params), GFP_KERNEL);
  932. if (!params)
  933. return -ENOMEM;
  934. drmmode_obj = drm_mode_object_find(dev, put_image_rec->crtc_id,
  935. DRM_MODE_OBJECT_CRTC);
  936. if (!drmmode_obj) {
  937. ret = -ENOENT;
  938. goto out_free;
  939. }
  940. crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
  941. new_bo = drm_gem_object_lookup(dev, file_priv,
  942. put_image_rec->bo_handle);
  943. if (!new_bo) {
  944. ret = -ENOENT;
  945. goto out_free;
  946. }
  947. mutex_lock(&dev->mode_config.mutex);
  948. mutex_lock(&dev->struct_mutex);
  949. if (overlay->hw_wedged) {
  950. ret = intel_overlay_recover_from_interrupt(overlay, 1);
  951. if (ret != 0)
  952. goto out_unlock;
  953. }
  954. if (overlay->crtc != crtc) {
  955. struct drm_display_mode *mode = &crtc->base.mode;
  956. ret = intel_overlay_switch_off(overlay);
  957. if (ret != 0)
  958. goto out_unlock;
  959. ret = check_overlay_possible_on_crtc(overlay, crtc);
  960. if (ret != 0)
  961. goto out_unlock;
  962. overlay->crtc = crtc;
  963. crtc->overlay = overlay;
  964. if (intel_panel_fitter_pipe(dev) == crtc->pipe
  965. /* and line too wide, i.e. one-line-mode */
  966. && mode->hdisplay > 1024) {
  967. overlay->pfit_active = 1;
  968. update_pfit_vscale_ratio(overlay);
  969. } else
  970. overlay->pfit_active = 0;
  971. }
  972. ret = check_overlay_dst(overlay, put_image_rec);
  973. if (ret != 0)
  974. goto out_unlock;
  975. if (overlay->pfit_active) {
  976. params->dst_y = ((((u32)put_image_rec->dst_y) << 12) /
  977. overlay->pfit_vscale_ratio);
  978. /* shifting right rounds downwards, so add 1 */
  979. params->dst_h = ((((u32)put_image_rec->dst_height) << 12) /
  980. overlay->pfit_vscale_ratio) + 1;
  981. } else {
  982. params->dst_y = put_image_rec->dst_y;
  983. params->dst_h = put_image_rec->dst_height;
  984. }
  985. params->dst_x = put_image_rec->dst_x;
  986. params->dst_w = put_image_rec->dst_width;
  987. params->src_w = put_image_rec->src_width;
  988. params->src_h = put_image_rec->src_height;
  989. params->src_scan_w = put_image_rec->src_scan_width;
  990. params->src_scan_h = put_image_rec->src_scan_height;
  991. if (params->src_scan_h > params->src_h ||
  992. params->src_scan_w > params->src_w) {
  993. ret = -EINVAL;
  994. goto out_unlock;
  995. }
  996. ret = check_overlay_src(dev, put_image_rec, new_bo);
  997. if (ret != 0)
  998. goto out_unlock;
  999. params->format = put_image_rec->flags & ~I915_OVERLAY_FLAGS_MASK;
  1000. params->stride_Y = put_image_rec->stride_Y;
  1001. params->stride_UV = put_image_rec->stride_UV;
  1002. params->offset_Y = put_image_rec->offset_Y;
  1003. params->offset_U = put_image_rec->offset_U;
  1004. params->offset_V = put_image_rec->offset_V;
  1005. /* Check scaling after src size to prevent a divide-by-zero. */
  1006. ret = check_overlay_scaling(params);
  1007. if (ret != 0)
  1008. goto out_unlock;
  1009. ret = intel_overlay_do_put_image(overlay, new_bo, params);
  1010. if (ret != 0)
  1011. goto out_unlock;
  1012. mutex_unlock(&dev->struct_mutex);
  1013. mutex_unlock(&dev->mode_config.mutex);
  1014. kfree(params);
  1015. return 0;
  1016. out_unlock:
  1017. mutex_unlock(&dev->struct_mutex);
  1018. mutex_unlock(&dev->mode_config.mutex);
  1019. drm_gem_object_unreference_unlocked(new_bo);
  1020. out_free:
  1021. kfree(params);
  1022. return ret;
  1023. }
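/*
 * Rough userspace sketch for this ioctl (the request struct and the ioctl
 * number come from i915_drm.h; bo_handle is a GEM handle holding the frame
 * and the other values are only illustrative):
 *
 *	struct drm_intel_overlay_put_image req = {
 *		.flags = I915_OVERLAY_ENABLE | I915_OVERLAY_YUV_PLANAR |
 *			 I915_OVERLAY_YUV420,
 *		.bo_handle = handle,
 *		.crtc_id = crtc_id,
 *		.stride_Y = 768, .stride_UV = 384,
 *		.offset_Y = 0, .offset_U = 368640, .offset_V = 460800,
 *		.src_width = 720, .src_height = 480,
 *		.src_scan_width = 720, .src_scan_height = 480,
 *		.dst_x = 0, .dst_y = 0, .dst_width = 720, .dst_height = 480,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_OVERLAY_PUT_IMAGE, &req);
 */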
  1024. static void update_reg_attrs(struct intel_overlay *overlay,
  1025. struct overlay_registers *regs)
  1026. {
  1027. regs->OCLRC0 = (overlay->contrast << 18) | (overlay->brightness & 0xff);
  1028. regs->OCLRC1 = overlay->saturation;
  1029. }
  1030. static bool check_gamma_bounds(u32 gamma1, u32 gamma2)
  1031. {
  1032. int i;
  1033. if (gamma1 & 0xff000000 || gamma2 & 0xff000000)
  1034. return false;
  1035. for (i = 0; i < 3; i++) {
  1036. if (((gamma1 >> i*8) & 0xff) >= ((gamma2 >> i*8) & 0xff))
  1037. return false;
  1038. }
  1039. return true;
  1040. }
  1041. static bool check_gamma5_errata(u32 gamma5)
  1042. {
  1043. int i;
  1044. for (i = 0; i < 3; i++) {
  1045. if (((gamma5 >> i*8) & 0xff) == 0x80)
  1046. return false;
  1047. }
  1048. return true;
  1049. }
  1050. static int check_gamma(struct drm_intel_overlay_attrs *attrs)
  1051. {
  1052. if (!check_gamma_bounds(0, attrs->gamma0) ||
  1053. !check_gamma_bounds(attrs->gamma0, attrs->gamma1) ||
  1054. !check_gamma_bounds(attrs->gamma1, attrs->gamma2) ||
  1055. !check_gamma_bounds(attrs->gamma2, attrs->gamma3) ||
  1056. !check_gamma_bounds(attrs->gamma3, attrs->gamma4) ||
  1057. !check_gamma_bounds(attrs->gamma4, attrs->gamma5) ||
  1058. !check_gamma_bounds(attrs->gamma5, 0x00ffffff))
  1059. return -EINVAL;
  1060. if (!check_gamma5_errata(attrs->gamma5))
  1061. return -EINVAL;
  1062. return 0;
  1063. }
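/*
 * Example of a gamma ramp accepted by check_gamma(): every per-channel byte
 * strictly increases from one register to the next, stays below 0x01000000,
 * and no byte of gamma5 equals 0x80:
 *
 *	gamma0..gamma5 = 0x101010, 0x202020, 0x303030,
 *			 0x404040, 0x505050, 0x606060
 */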
  1064. int intel_overlay_attrs(struct drm_device *dev, void *data,
  1065. struct drm_file *file_priv)
  1066. {
  1067. struct drm_intel_overlay_attrs *attrs = data;
  1068. drm_i915_private_t *dev_priv = dev->dev_private;
  1069. struct intel_overlay *overlay;
  1070. struct overlay_registers *regs;
  1071. int ret;
  1072. if (!dev_priv) {
  1073. DRM_ERROR("called with no initialization\n");
  1074. return -EINVAL;
  1075. }
  1076. overlay = dev_priv->overlay;
  1077. if (!overlay) {
  1078. DRM_DEBUG("userspace bug: no overlay\n");
  1079. return -ENODEV;
  1080. }
  1081. mutex_lock(&dev->mode_config.mutex);
  1082. mutex_lock(&dev->struct_mutex);
  1083. if (!(attrs->flags & I915_OVERLAY_UPDATE_ATTRS)) {
  1084. attrs->color_key = overlay->color_key;
  1085. attrs->brightness = overlay->brightness;
  1086. attrs->contrast = overlay->contrast;
  1087. attrs->saturation = overlay->saturation;
  1088. if (IS_I9XX(dev)) {
  1089. attrs->gamma0 = I915_READ(OGAMC0);
  1090. attrs->gamma1 = I915_READ(OGAMC1);
  1091. attrs->gamma2 = I915_READ(OGAMC2);
  1092. attrs->gamma3 = I915_READ(OGAMC3);
  1093. attrs->gamma4 = I915_READ(OGAMC4);
  1094. attrs->gamma5 = I915_READ(OGAMC5);
  1095. }
  1096. ret = 0;
  1097. } else {
  1098. overlay->color_key = attrs->color_key;
  1099. if (attrs->brightness >= -128 && attrs->brightness <= 127) {
  1100. overlay->brightness = attrs->brightness;
  1101. } else {
  1102. ret = -EINVAL;
  1103. goto out_unlock;
  1104. }
  1105. if (attrs->contrast <= 255) {
  1106. overlay->contrast = attrs->contrast;
  1107. } else {
  1108. ret = -EINVAL;
  1109. goto out_unlock;
  1110. }
  1111. if (attrs->saturation <= 1023) {
  1112. overlay->saturation = attrs->saturation;
  1113. } else {
  1114. ret = -EINVAL;
  1115. goto out_unlock;
  1116. }
  1117. regs = intel_overlay_map_regs_atomic(overlay);
  1118. if (!regs) {
  1119. ret = -ENOMEM;
  1120. goto out_unlock;
  1121. }
  1122. update_reg_attrs(overlay, regs);
  1123. intel_overlay_unmap_regs_atomic(overlay);
  1124. if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) {
  1125. if (!IS_I9XX(dev)) {
  1126. ret = -EINVAL;
  1127. goto out_unlock;
  1128. }
  1129. if (overlay->active) {
  1130. ret = -EBUSY;
  1131. goto out_unlock;
  1132. }
  1133. ret = check_gamma(attrs);
  1134. if (ret != 0)
  1135. goto out_unlock;
  1136. I915_WRITE(OGAMC0, attrs->gamma0);
  1137. I915_WRITE(OGAMC1, attrs->gamma1);
  1138. I915_WRITE(OGAMC2, attrs->gamma2);
  1139. I915_WRITE(OGAMC3, attrs->gamma3);
  1140. I915_WRITE(OGAMC4, attrs->gamma4);
  1141. I915_WRITE(OGAMC5, attrs->gamma5);
  1142. }
  1143. ret = 0;
  1144. }
  1145. out_unlock:
  1146. mutex_unlock(&dev->struct_mutex);
  1147. mutex_unlock(&dev->mode_config.mutex);
  1148. return ret;
  1149. }
  1150. void intel_setup_overlay(struct drm_device *dev)
  1151. {
  1152. drm_i915_private_t *dev_priv = dev->dev_private;
  1153. struct intel_overlay *overlay;
  1154. struct drm_gem_object *reg_bo;
  1155. struct overlay_registers *regs;
  1156. int ret;
  1157. if (!HAS_OVERLAY(dev))
  1158. return;
  1159. overlay = kzalloc(sizeof(struct intel_overlay), GFP_KERNEL);
  1160. if (!overlay)
  1161. return;
  1162. overlay->dev = dev;
  1163. reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE);
  1164. if (!reg_bo)
  1165. goto out_free;
  1166. overlay->reg_bo = to_intel_bo(reg_bo);
  1167. if (OVERLAY_NEEDS_PHYSICAL(dev)) {
  1168. ret = i915_gem_attach_phys_object(dev, reg_bo,
  1169. I915_GEM_PHYS_OVERLAY_REGS,
  1170. 0);
  1171. if (ret) {
  1172. DRM_ERROR("failed to attach phys overlay regs\n");
  1173. goto out_free_bo;
  1174. }
  1175. overlay->flip_addr = overlay->reg_bo->phys_obj->handle->busaddr;
  1176. } else {
  1177. ret = i915_gem_object_pin(reg_bo, PAGE_SIZE);
  1178. if (ret) {
  1179. DRM_ERROR("failed to pin overlay register bo\n");
  1180. goto out_free_bo;
  1181. }
  1182. overlay->flip_addr = overlay->reg_bo->gtt_offset;
  1183. ret = i915_gem_object_set_to_gtt_domain(reg_bo, true);
  1184. if (ret) {
  1185. DRM_ERROR("failed to move overlay register bo into the GTT\n");
  1186. goto out_unpin_bo;
  1187. }
  1188. }
  1189. /* init all values */
  1190. overlay->color_key = 0x0101fe;
  1191. overlay->brightness = -19;
  1192. overlay->contrast = 75;
  1193. overlay->saturation = 146;
  1194. regs = intel_overlay_map_regs_atomic(overlay);
  1195. if (!regs)
  1196. goto out_free_bo;
  1197. memset(regs, 0, sizeof(struct overlay_registers));
  1198. update_polyphase_filter(regs);
  1199. update_reg_attrs(overlay, regs);
  1200. intel_overlay_unmap_regs_atomic(overlay);
  1201. dev_priv->overlay = overlay;
  1202. DRM_INFO("initialized overlay support\n");
  1203. return;
  1204. out_unpin_bo:
  1205. i915_gem_object_unpin(reg_bo);
  1206. out_free_bo:
  1207. drm_gem_object_unreference(reg_bo);
  1208. out_free:
  1209. kfree(overlay);
  1210. return;
  1211. }
  1212. void intel_cleanup_overlay(struct drm_device *dev)
  1213. {
  1214. drm_i915_private_t *dev_priv = dev->dev_private;
  1215. if (dev_priv->overlay) {
  1216. /* The BOs should already have been freed by the generic code.
  1217. * Furthermore, modesetting teardown happens beforehand, so the
  1218. * hardware should be off already */
  1219. BUG_ON(dev_priv->overlay->active);
  1220. kfree(dev_priv->overlay);
  1221. }
  1222. }
  1223. struct intel_overlay_error_state {
  1224. struct overlay_registers regs;
  1225. unsigned long base;
  1226. u32 dovsta;
  1227. u32 isr;
  1228. };
  1229. struct intel_overlay_error_state *
  1230. intel_overlay_capture_error_state(struct drm_device *dev)
  1231. {
  1232. drm_i915_private_t *dev_priv = dev->dev_private;
  1233. struct intel_overlay *overlay = dev_priv->overlay;
  1234. struct intel_overlay_error_state *error;
  1235. struct overlay_registers __iomem *regs;
  1236. if (!overlay || !overlay->active)
  1237. return NULL;
  1238. error = kmalloc(sizeof(*error), GFP_ATOMIC);
  1239. if (error == NULL)
  1240. return NULL;
  1241. error->dovsta = I915_READ(DOVSTA);
  1242. error->isr = I915_READ(ISR);
  1243. if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
  1244. error->base = (long) overlay->reg_bo->phys_obj->handle->vaddr;
  1245. else
  1246. error->base = (long) overlay->reg_bo->gtt_offset;
  1247. regs = intel_overlay_map_regs_atomic(overlay);
  1248. if (!regs)
  1249. goto err;
  1250. memcpy_fromio(&error->regs, regs, sizeof(struct overlay_registers));
  1251. intel_overlay_unmap_regs_atomic(overlay);
  1252. return error;
  1253. err:
  1254. kfree(error);
  1255. return NULL;
  1256. }
  1257. void
  1258. intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_state *error)
  1259. {
  1260. seq_printf(m, "Overlay, status: 0x%08x, interrupt: 0x%08x\n",
  1261. error->dovsta, error->isr);
  1262. seq_printf(m, " Register file at 0x%08lx:\n",
  1263. error->base);
  1264. #define P(x) seq_printf(m, " " #x ": 0x%08x\n", error->regs.x)
  1265. P(OBUF_0Y);
  1266. P(OBUF_1Y);
  1267. P(OBUF_0U);
  1268. P(OBUF_0V);
  1269. P(OBUF_1U);
  1270. P(OBUF_1V);
  1271. P(OSTRIDE);
  1272. P(YRGB_VPH);
  1273. P(UV_VPH);
  1274. P(HORZ_PH);
  1275. P(INIT_PHS);
  1276. P(DWINPOS);
  1277. P(DWINSZ);
  1278. P(SWIDTH);
  1279. P(SWIDTHSW);
  1280. P(SHEIGHT);
  1281. P(YRGBSCALE);
  1282. P(UVSCALE);
  1283. P(OCLRC0);
  1284. P(OCLRC1);
  1285. P(DCLRKV);
  1286. P(DCLRKM);
  1287. P(SCLRKVH);
  1288. P(SCLRKVL);
  1289. P(SCLRKEN);
  1290. P(OCONFIG);
  1291. P(OCMD);
  1292. P(OSTART_0Y);
  1293. P(OSTART_1Y);
  1294. P(OSTART_0U);
  1295. P(OSTART_0V);
  1296. P(OSTART_1U);
  1297. P(OSTART_1V);
  1298. P(OTILEOFF_0Y);
  1299. P(OTILEOFF_1Y);
  1300. P(OTILEOFF_0U);
  1301. P(OTILEOFF_0V);
  1302. P(OTILEOFF_1U);
  1303. P(OTILEOFF_1V);
  1304. P(FASTHSCALE);
  1305. P(UVSCALEV);
  1306. #undef P
  1307. }