/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#ifndef _I915_DRV_H_
#define _I915_DRV_H_

#include <uapi/drm/i915_drm.h>

#include "i915_reg.h"
#include "intel_bios.h"
#include "intel_ringbuffer.h"
#include <linux/io-mapping.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <drm/intel-gtt.h>
#include <linux/backlight.h>
#include <linux/intel-iommu.h>
#include <linux/kref.h>
#include <linux/pm_qos.h>

/* General customization:
 */

#define DRIVER_AUTHOR "Tungsten Graphics, Inc."

#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
#define DRIVER_DATE "20080730"
enum pipe {
	PIPE_A = 0,
	PIPE_B,
	PIPE_C,
	I915_MAX_PIPES
};
#define pipe_name(p) ((p) + 'A')

enum transcoder {
	TRANSCODER_A = 0,
	TRANSCODER_B,
	TRANSCODER_C,
	TRANSCODER_EDP = 0xF,
};
#define transcoder_name(t) ((t) + 'A')

enum plane {
	PLANE_A = 0,
	PLANE_B,
	PLANE_C,
};
#define plane_name(p) ((p) + 'A')

#define sprite_name(p, s) ((p) * dev_priv->num_plane + (s) + 'A')

enum port {
	PORT_A = 0,
	PORT_B,
	PORT_C,
	PORT_D,
	PORT_E,
	I915_MAX_PORTS
};
#define port_name(p) ((p) + 'A')

enum intel_display_power_domain {
	POWER_DOMAIN_PIPE_A,
	POWER_DOMAIN_PIPE_B,
	POWER_DOMAIN_PIPE_C,
	POWER_DOMAIN_PIPE_A_PANEL_FITTER,
	POWER_DOMAIN_PIPE_B_PANEL_FITTER,
	POWER_DOMAIN_PIPE_C_PANEL_FITTER,
	POWER_DOMAIN_TRANSCODER_A,
	POWER_DOMAIN_TRANSCODER_B,
	POWER_DOMAIN_TRANSCODER_C,
	POWER_DOMAIN_TRANSCODER_EDP,
	POWER_DOMAIN_VGA,
	POWER_DOMAIN_INIT,

	POWER_DOMAIN_NUM,
};

#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)

#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
		((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
#define POWER_DOMAIN_TRANSCODER(tran) \
	((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \
	 (tran) + POWER_DOMAIN_TRANSCODER_A)

#define HSW_ALWAYS_ON_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PIPE_A) |		\
	BIT(POWER_DOMAIN_TRANSCODER_EDP))
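
/*
 * Illustrative sketch (not part of the driver): each power domain is a bit
 * position, so the set of domains a piece of hardware needs is just a
 * bitmask that can be checked against POWER_DOMAIN_MASK. The mask built
 * below is hypothetical.
 *
 *	unsigned long domains = BIT(POWER_DOMAIN_PIPE(PIPE_B)) |
 *				BIT(POWER_DOMAIN_TRANSCODER(TRANSCODER_B));
 *
 *	if (WARN_ON(domains & ~POWER_DOMAIN_MASK))
 *		return;
 */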
enum hpd_pin {
	HPD_NONE = 0,
	HPD_PORT_A = HPD_NONE, /* PORT_A is internal */
	HPD_TV = HPD_NONE,     /* TV is known to be unreliable */
	HPD_CRT,
	HPD_SDVO_B,
	HPD_SDVO_C,
	HPD_PORT_B,
	HPD_PORT_C,
	HPD_PORT_D,
	HPD_NUM_PINS
};

#define I915_GEM_GPU_DOMAINS \
	(I915_GEM_DOMAIN_RENDER | \
	 I915_GEM_DOMAIN_SAMPLER | \
	 I915_GEM_DOMAIN_COMMAND | \
	 I915_GEM_DOMAIN_INSTRUCTION | \
	 I915_GEM_DOMAIN_VERTEX)

#define for_each_pipe(p) for ((p) = 0; (p) < INTEL_INFO(dev)->num_pipes; (p)++)

#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
	list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
		if ((intel_encoder)->base.crtc == (__crtc))
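
/*
 * Usage sketch (illustrative only): both iterators expand to plain for/if
 * statements, so they take a normal statement body. Note for_each_pipe()
 * assumes a variable named "dev" is in scope, since it expands to
 * INTEL_INFO(dev).
 *
 *	int pipe;
 *	struct intel_encoder *encoder;
 *
 *	for_each_pipe(pipe)
 *		DRM_DEBUG_KMS("found pipe %c\n", pipe_name(pipe));
 *
 *	for_each_encoder_on_crtc(dev, crtc, encoder)
 *		DRM_DEBUG_KMS("encoder %d on this crtc\n", encoder->type);
 */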
struct drm_i915_private;

enum intel_dpll_id {
	DPLL_ID_PRIVATE = -1, /* non-shared dpll in use */
	/* real shared dpll ids must be >= 0 */
	DPLL_ID_PCH_PLL_A,
	DPLL_ID_PCH_PLL_B,
};
#define I915_NUM_PLLS 2

struct intel_dpll_hw_state {
	uint32_t dpll;
	uint32_t dpll_md;
	uint32_t fp0;
	uint32_t fp1;
};

struct intel_shared_dpll {
	int refcount; /* count of number of CRTCs sharing this PLL */
	int active; /* count of number of active CRTCs (i.e. DPMS on) */
	bool on; /* is the PLL actually active? Disabled during modeset */
	const char *name;
	/* should match the index in the dev_priv->shared_dplls array */
	enum intel_dpll_id id;
	struct intel_dpll_hw_state hw_state;
	void (*mode_set)(struct drm_i915_private *dev_priv,
			 struct intel_shared_dpll *pll);
	void (*enable)(struct drm_i915_private *dev_priv,
		       struct intel_shared_dpll *pll);
	void (*disable)(struct drm_i915_private *dev_priv,
			struct intel_shared_dpll *pll);
	bool (*get_hw_state)(struct drm_i915_private *dev_priv,
			     struct intel_shared_dpll *pll,
			     struct intel_dpll_hw_state *hw_state);
};

/* Used by dp and fdi links */
struct intel_link_m_n {
	uint32_t tu;
	uint32_t gmch_m;
	uint32_t gmch_n;
	uint32_t link_m;
	uint32_t link_n;
};

void intel_link_compute_m_n(int bpp, int nlanes,
			    int pixel_clock, int link_clock,
			    struct intel_link_m_n *m_n);
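
/*
 * Rough sketch of what the M/N pairs represent (per the DisplayPort spec;
 * the exact fixed-point scaling lives in intel_link_compute_m_n itself,
 * so treat these as approximations):
 *
 *	gmch_m / gmch_n ~= (pixel_clock * bpp) / (link_clock * nlanes * 8)
 *	link_m / link_n ~= pixel_clock / link_clock
 */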
struct intel_ddi_plls {
	int spll_refcount;
	int wrpll1_refcount;
	int wrpll2_refcount;
};

/* Interface history:
 *
 * 1.1: Original.
 * 1.2: Add Power Management
 * 1.3: Add vblank support
 * 1.4: Fix cmdbuffer path, add heap destroy
 * 1.5: Add vblank pipe configuration
 * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
 *      - Support vertical blank on secondary display pipe
 */
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 6
#define DRIVER_PATCHLEVEL 0

#define WATCH_LISTS 0
#define WATCH_GTT 0

#define I915_GEM_PHYS_CURSOR_0 1
#define I915_GEM_PHYS_CURSOR_1 2
#define I915_GEM_PHYS_OVERLAY_REGS 3
#define I915_MAX_PHYS_OBJECT (I915_GEM_PHYS_OVERLAY_REGS)

struct drm_i915_gem_phys_object {
	int id;
	struct page **page_list;
	drm_dma_handle_t *handle;
	struct drm_i915_gem_object *cur_obj;
};

struct opregion_header;
struct opregion_acpi;
struct opregion_swsci;
struct opregion_asle;

struct intel_opregion {
	struct opregion_header __iomem *header;
	struct opregion_acpi __iomem *acpi;
	struct opregion_swsci __iomem *swsci;
	u32 swsci_gbda_sub_functions;
	u32 swsci_sbcb_sub_functions;
	struct opregion_asle __iomem *asle;
	void __iomem *vbt;
	u32 __iomem *lid_state;
};
#define OPREGION_SIZE (8*1024)

struct intel_overlay;
struct intel_overlay_error_state;

struct drm_i915_master_private {
	drm_local_map_t *sarea;
	struct _drm_i915_sarea *sarea_priv;
};
#define I915_FENCE_REG_NONE -1
#define I915_MAX_NUM_FENCES 32
/* 32 fences + sign bit for FENCE_REG_NONE */
#define I915_MAX_NUM_FENCE_BITS 6

struct drm_i915_fence_reg {
	struct list_head lru_list;
	struct drm_i915_gem_object *obj;
	int pin_count;
};

struct sdvo_device_mapping {
	u8 initialized;
	u8 dvo_port;
	u8 slave_addr;
	u8 dvo_wiring;
	u8 i2c_pin;
	u8 ddc_pin;
};

struct intel_display_error_state;
struct drm_i915_error_state {
	struct kref ref;
	u32 eir;
	u32 pgtbl_er;
	u32 ier;
	u32 ccid;
	u32 derrmr;
	u32 forcewake;
	bool waiting[I915_NUM_RINGS];
	u32 pipestat[I915_MAX_PIPES];
	u32 tail[I915_NUM_RINGS];
	u32 head[I915_NUM_RINGS];
	u32 ctl[I915_NUM_RINGS];
	u32 ipeir[I915_NUM_RINGS];
	u32 ipehr[I915_NUM_RINGS];
	u32 instdone[I915_NUM_RINGS];
	u32 acthd[I915_NUM_RINGS];
	u32 semaphore_mboxes[I915_NUM_RINGS][I915_NUM_RINGS - 1];
	u32 semaphore_seqno[I915_NUM_RINGS][I915_NUM_RINGS - 1];
	u32 rc_psmi[I915_NUM_RINGS]; /* sleep state */
	/* our own tracking of ring head and tail */
	u32 cpu_ring_head[I915_NUM_RINGS];
	u32 cpu_ring_tail[I915_NUM_RINGS];
	u32 error; /* gen6+ */
	u32 err_int; /* gen7 */
	u32 bbstate[I915_NUM_RINGS];
	u32 instpm[I915_NUM_RINGS];
	u32 instps[I915_NUM_RINGS];
	u32 extra_instdone[I915_NUM_INSTDONE_REG];
	u32 seqno[I915_NUM_RINGS];
	u64 bbaddr;
	u32 fault_reg[I915_NUM_RINGS];
	u32 done_reg;
	u32 faddr[I915_NUM_RINGS];
	u64 fence[I915_MAX_NUM_FENCES];
	struct timeval time;
	struct drm_i915_error_ring {
		struct drm_i915_error_object {
			int page_count;
			u32 gtt_offset;
			u32 *pages[0];
		} *ringbuffer, *batchbuffer, *ctx;
		struct drm_i915_error_request {
			long jiffies;
			u32 seqno;
			u32 tail;
		} *requests;
		int num_requests;
	} ring[I915_NUM_RINGS];
	struct drm_i915_error_buffer {
		u32 size;
		u32 name;
		u32 rseqno, wseqno;
		u32 gtt_offset;
		u32 read_domains;
		u32 write_domain;
		s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
		s32 pinned:2;
		u32 tiling:2;
		u32 dirty:1;
		u32 purgeable:1;
		s32 ring:4;
		u32 cache_level:3;
	} **active_bo, **pinned_bo;
	u32 *active_bo_count, *pinned_bo_count;
	struct intel_overlay_error_state *overlay;
	struct intel_display_error_state *display;
	int hangcheck_score[I915_NUM_RINGS];
	enum intel_ring_hangcheck_action hangcheck_action[I915_NUM_RINGS];
};
struct intel_crtc_config;
struct intel_crtc;
struct intel_limit;
struct dpll;

struct drm_i915_display_funcs {
	bool (*fbc_enabled)(struct drm_device *dev);
	void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval);
	void (*disable_fbc)(struct drm_device *dev);
	int (*get_display_clock_speed)(struct drm_device *dev);
	int (*get_fifo_size)(struct drm_device *dev, int plane);
	/**
	 * find_dpll() - Find the best values for the PLL
	 * @limit: limits for the PLL
	 * @crtc: current CRTC
	 * @target: target frequency in kHz
	 * @refclk: reference clock frequency in kHz
	 * @match_clock: if provided, @best_clock P divider must
	 *               match the P divider from @match_clock
	 *               used for LVDS downclocking
	 * @best_clock: best PLL values found
	 *
	 * Returns true on success, false on failure.
	 */
	bool (*find_dpll)(const struct intel_limit *limit,
			  struct drm_crtc *crtc,
			  int target, int refclk,
			  struct dpll *match_clock,
			  struct dpll *best_clock);
	void (*update_wm)(struct drm_crtc *crtc);
	void (*update_sprite_wm)(struct drm_plane *plane,
				 struct drm_crtc *crtc,
				 uint32_t sprite_width, int pixel_size,
				 bool enable, bool scaled);
	void (*modeset_global_resources)(struct drm_device *dev);
	/* Returns the active state of the crtc, and if the crtc is active,
	 * fills out the pipe-config with the hw state. */
	bool (*get_pipe_config)(struct intel_crtc *,
				struct intel_crtc_config *);
	int (*crtc_mode_set)(struct drm_crtc *crtc,
			     int x, int y,
			     struct drm_framebuffer *old_fb);
	void (*crtc_enable)(struct drm_crtc *crtc);
	void (*crtc_disable)(struct drm_crtc *crtc);
	void (*off)(struct drm_crtc *crtc);
	void (*write_eld)(struct drm_connector *connector,
			  struct drm_crtc *crtc,
			  struct drm_display_mode *mode);
	void (*fdi_link_train)(struct drm_crtc *crtc);
	void (*init_clock_gating)(struct drm_device *dev);
	int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
			  struct drm_framebuffer *fb,
			  struct drm_i915_gem_object *obj,
			  uint32_t flags);
	int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
			    int x, int y);
	void (*hpd_irq_setup)(struct drm_device *dev);
	/* clock updates for mode set */
	/* cursor updates */
	/* render clock increase/decrease */
	/* display clock increase/decrease */
	/* pll clock increase/decrease */
};
struct intel_uncore_funcs {
	void (*force_wake_get)(struct drm_i915_private *dev_priv);
	void (*force_wake_put)(struct drm_i915_private *dev_priv);

	uint8_t (*mmio_readb)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
	uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
	uint32_t (*mmio_readl)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
	uint64_t (*mmio_readq)(struct drm_i915_private *dev_priv, off_t offset, bool trace);

	void (*mmio_writeb)(struct drm_i915_private *dev_priv, off_t offset,
				uint8_t val, bool trace);
	void (*mmio_writew)(struct drm_i915_private *dev_priv, off_t offset,
				uint16_t val, bool trace);
	void (*mmio_writel)(struct drm_i915_private *dev_priv, off_t offset,
				uint32_t val, bool trace);
	void (*mmio_writeq)(struct drm_i915_private *dev_priv, off_t offset,
				uint64_t val, bool trace);
};

struct intel_uncore {
	spinlock_t lock; /** lock is also taken in irq contexts. */

	struct intel_uncore_funcs funcs;

	unsigned fifo_count;
	unsigned forcewake_count;

	struct delayed_work force_wake_work;
};
#define DEV_INFO_FOR_EACH_FLAG(func, sep) \
	func(is_mobile) sep \
	func(is_i85x) sep \
	func(is_i915g) sep \
	func(is_i945gm) sep \
	func(is_g33) sep \
	func(need_gfx_hws) sep \
	func(is_g4x) sep \
	func(is_pineview) sep \
	func(is_broadwater) sep \
	func(is_crestline) sep \
	func(is_ivybridge) sep \
	func(is_valleyview) sep \
	func(is_haswell) sep \
	func(is_preliminary) sep \
	func(has_fbc) sep \
	func(has_pipe_cxsr) sep \
	func(has_hotplug) sep \
	func(cursor_needs_physical) sep \
	func(has_overlay) sep \
	func(overlay_needs_physical) sep \
	func(supports_tv) sep \
	func(has_llc) sep \
	func(has_ddi) sep \
	func(has_fpga_dbg)

#define DEFINE_FLAG(name) u8 name:1
#define SEP_SEMICOLON ;

struct intel_device_info {
	u32 display_mmio_offset;
	u8 num_pipes:3;
	u8 gen;
	u8 ring_mask; /* Rings supported by the HW */
	DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
};

#undef DEFINE_FLAG
#undef SEP_SEMICOLON
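
/*
 * Illustrative note (not part of the driver): DEV_INFO_FOR_EACH_FLAG is an
 * x-macro. With the (DEFINE_FLAG, SEP_SEMICOLON) pair above, the invocation
 * inside struct intel_device_info expands to a run of one-bit fields:
 *
 *	u8 is_mobile:1 ; u8 is_i85x:1 ; ... ; u8 has_fpga_dbg:1;
 *
 * Other (func, sep) pairs can walk the same flag list for different
 * purposes, e.g. stringifying each flag name for debug output.
 */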
enum i915_cache_level {
	I915_CACHE_NONE = 0,
	I915_CACHE_LLC, /* also used for snoopable memory on non-LLC */
	I915_CACHE_L3_LLC, /* gen7+, L3 sits between the domain specific
			      caches, eg sampler/render caches, and the
			      large Last-Level-Cache. LLC is coherent with
			      the CPU, but L3 is only visible to the GPU. */
	I915_CACHE_WT, /* hsw:gt3e WriteThrough for scanouts */
};
typedef uint32_t gen6_gtt_pte_t;

struct i915_address_space {
	struct drm_mm mm;
	struct drm_device *dev;
	struct list_head global_link;
	unsigned long start; /* Start offset always 0 for dri2 */
	size_t total; /* size addr space maps (ex. 2GB for ggtt) */

	struct {
		dma_addr_t addr;
		struct page *page;
	} scratch;

	/**
	 * List of objects currently involved in rendering.
	 *
	 * Includes buffers having the contents of their GPU caches
	 * flushed, not necessarily primitives. last_rendering_seqno
	 * represents when the rendering involved will be completed.
	 *
	 * A reference is held on the buffer while on this list.
	 */
	struct list_head active_list;

	/**
	 * LRU list of objects which are not in the ringbuffer and
	 * are ready to unbind, but are still in the GTT.
	 *
	 * last_rendering_seqno is 0 while an object is in this list.
	 *
	 * A reference is not held on the buffer while on this list,
	 * as merely being GTT-bound shouldn't prevent its being
	 * freed, and we'll pull it off the list in the free path.
	 */
	struct list_head inactive_list;

	/* FIXME: Need a more generic return type */
	gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr,
				     enum i915_cache_level level,
				     bool valid); /* Create a valid PTE */
	void (*clear_range)(struct i915_address_space *vm,
			    unsigned int first_entry,
			    unsigned int num_entries,
			    bool use_scratch);
	void (*insert_entries)(struct i915_address_space *vm,
			       struct sg_table *st,
			       unsigned int first_entry,
			       enum i915_cache_level cache_level);
	void (*cleanup)(struct i915_address_space *vm);
};
/* The Graphics Translation Table is the way in which GEN hardware translates a
 * Graphics Virtual Address into a Physical Address. In addition to the normal
 * collateral associated with any va->pa translations GEN hardware also has a
 * portion of the GTT which can be mapped by the CPU and remain both coherent
 * and correct (in cases like swizzling). That region is referred to as GMADR in
 * the spec.
 */
struct i915_gtt {
	struct i915_address_space base;
	size_t stolen_size; /* Total size of stolen memory */

	unsigned long mappable_end; /* End offset that we can CPU map */
	struct io_mapping *mappable; /* Mapping to our CPU mappable region */
	phys_addr_t mappable_base; /* PA of our GMADR */

	/** "Graphics Stolen Memory" holds the global PTEs */
	void __iomem *gsm;

	bool do_idle_maps;

	int mtrr;

	/* global gtt ops */
	int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total,
			 size_t *stolen, phys_addr_t *mappable_base,
			 unsigned long *mappable_end);
};
#define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT)
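
/*
 * Illustrative arithmetic (not from the driver): with PAGE_SHIFT == 12, a
 * 2 GiB global GTT yields 2^31 >> 12 = 524288 PTEs, one per 4 KiB page of
 * address space.
 */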
struct i915_hw_ppgtt {
	struct i915_address_space base;
	unsigned num_pd_entries;
	union {
		struct page **pt_pages;
		struct page *gen8_pt_pages;
	};
	struct page *pd_pages;
	int num_pd_pages;
	int num_pt_pages;
	union {
		uint32_t pd_offset;
		dma_addr_t pd_dma_addr[4];
	};
	union {
		dma_addr_t *pt_dma_addr;
		dma_addr_t *gen8_pt_dma_addr[4];
	};
	int (*enable)(struct drm_device *dev);
};
/**
 * A VMA represents a GEM BO that is bound into an address space. Therefore, a
 * VMA's presence cannot be guaranteed before binding, or after unbinding the
 * object into/from the address space.
 *
 * To make things as simple as possible (ie. no refcounting), a VMA's lifetime
 * will always be <= an object's lifetime. So object refcounting should cover us.
 */
struct i915_vma {
	struct drm_mm_node node;
	struct drm_i915_gem_object *obj;
	struct i915_address_space *vm;

	/** This object's place on the active/inactive lists */
	struct list_head mm_list;

	struct list_head vma_link; /* Link in the object's VMA list */

	/** This vma's place in the batchbuffer or on the eviction list */
	struct list_head exec_list;

	/**
	 * Used for performing relocations during execbuffer insertion.
	 */
	struct hlist_node exec_node;
	unsigned long exec_handle;
	struct drm_i915_gem_exec_object2 *exec_entry;
};
struct i915_ctx_hang_stats {
	/* This context had batch pending when hang was declared */
	unsigned batch_pending;

	/* This context had batch active when hang was declared */
	unsigned batch_active;

	/* Time when this context was last blamed for a GPU reset */
	unsigned long guilty_ts;

	/* This context is banned from submitting more work */
	bool banned;
};
/* This must match up with the value previously used for execbuf2.rsvd1. */
#define DEFAULT_CONTEXT_ID 0
struct i915_hw_context {
	struct kref ref;
	int id;
	bool is_initialized;
	uint8_t remap_slice;
	struct drm_i915_file_private *file_priv;
	struct intel_ring_buffer *ring;
	struct drm_i915_gem_object *obj;
	struct i915_ctx_hang_stats hang_stats;

	struct list_head link;
};

struct i915_fbc {
	unsigned long size;
	unsigned int fb_id;
	enum plane plane;
	int y;

	struct drm_mm_node *compressed_fb;
	struct drm_mm_node *compressed_llb;

	struct intel_fbc_work {
		struct delayed_work work;
		struct drm_crtc *crtc;
		struct drm_framebuffer *fb;
		int interval;
	} *fbc_work;

	enum no_fbc_reason {
		FBC_OK, /* FBC is enabled */
		FBC_UNSUPPORTED, /* FBC is not supported by this chipset */
		FBC_NO_OUTPUT, /* no outputs enabled to compress */
		FBC_STOLEN_TOO_SMALL, /* not enough space for buffers */
		FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
		FBC_MODE_TOO_LARGE, /* mode too large for compression */
		FBC_BAD_PLANE, /* fbc not supported on plane */
		FBC_NOT_TILED, /* buffer not tiled */
		FBC_MULTIPLE_PIPES, /* more than one pipe active */
		FBC_MODULE_PARAM,
		FBC_CHIP_DEFAULT, /* disabled by default on this chip */
	} no_fbc_reason;
};

struct i915_psr {
	bool sink_support;
	bool source_ok;
};

enum intel_pch {
	PCH_NONE = 0,	/* No PCH present */
	PCH_IBX,	/* Ibexpeak PCH */
	PCH_CPT,	/* Cougarpoint PCH */
	PCH_LPT,	/* Lynxpoint PCH */
	PCH_NOP,
};

enum intel_sbi_destination {
	SBI_ICLK,
	SBI_MPHY,
};

#define QUIRK_PIPEA_FORCE (1<<0)
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
#define QUIRK_NO_PCH_PWM_ENABLE (1<<3)

struct intel_fbdev;
struct intel_fbc_work;

struct intel_gmbus {
	struct i2c_adapter adapter;
	u32 force_bit;
	u32 reg0;
	u32 gpio_reg;
	struct i2c_algo_bit_data bit_algo;
	struct drm_i915_private *dev_priv;
};
struct i915_suspend_saved_registers {
	u8 saveLBB;
	u32 saveDSPACNTR;
	u32 saveDSPBCNTR;
	u32 saveDSPARB;
	u32 savePIPEACONF;
	u32 savePIPEBCONF;
	u32 savePIPEASRC;
	u32 savePIPEBSRC;
	u32 saveFPA0;
	u32 saveFPA1;
	u32 saveDPLL_A;
	u32 saveDPLL_A_MD;
	u32 saveHTOTAL_A;
	u32 saveHBLANK_A;
	u32 saveHSYNC_A;
	u32 saveVTOTAL_A;
	u32 saveVBLANK_A;
	u32 saveVSYNC_A;
	u32 saveBCLRPAT_A;
	u32 saveTRANSACONF;
	u32 saveTRANS_HTOTAL_A;
	u32 saveTRANS_HBLANK_A;
	u32 saveTRANS_HSYNC_A;
	u32 saveTRANS_VTOTAL_A;
	u32 saveTRANS_VBLANK_A;
	u32 saveTRANS_VSYNC_A;
	u32 savePIPEASTAT;
	u32 saveDSPASTRIDE;
	u32 saveDSPASIZE;
	u32 saveDSPAPOS;
	u32 saveDSPAADDR;
	u32 saveDSPASURF;
	u32 saveDSPATILEOFF;
	u32 savePFIT_PGM_RATIOS;
	u32 saveBLC_HIST_CTL;
	u32 saveBLC_PWM_CTL;
	u32 saveBLC_PWM_CTL2;
	u32 saveBLC_CPU_PWM_CTL;
	u32 saveBLC_CPU_PWM_CTL2;
	u32 saveFPB0;
	u32 saveFPB1;
	u32 saveDPLL_B;
	u32 saveDPLL_B_MD;
	u32 saveHTOTAL_B;
	u32 saveHBLANK_B;
	u32 saveHSYNC_B;
	u32 saveVTOTAL_B;
	u32 saveVBLANK_B;
	u32 saveVSYNC_B;
	u32 saveBCLRPAT_B;
	u32 saveTRANSBCONF;
	u32 saveTRANS_HTOTAL_B;
	u32 saveTRANS_HBLANK_B;
	u32 saveTRANS_HSYNC_B;
	u32 saveTRANS_VTOTAL_B;
	u32 saveTRANS_VBLANK_B;
	u32 saveTRANS_VSYNC_B;
	u32 savePIPEBSTAT;
	u32 saveDSPBSTRIDE;
	u32 saveDSPBSIZE;
	u32 saveDSPBPOS;
	u32 saveDSPBADDR;
	u32 saveDSPBSURF;
	u32 saveDSPBTILEOFF;
	u32 saveVGA0;
	u32 saveVGA1;
	u32 saveVGA_PD;
	u32 saveVGACNTRL;
	u32 saveADPA;
	u32 saveLVDS;
	u32 savePP_ON_DELAYS;
	u32 savePP_OFF_DELAYS;
	u32 saveDVOA;
	u32 saveDVOB;
	u32 saveDVOC;
	u32 savePP_ON;
	u32 savePP_OFF;
	u32 savePP_CONTROL;
	u32 savePP_DIVISOR;
	u32 savePFIT_CONTROL;
	u32 save_palette_a[256];
	u32 save_palette_b[256];
	u32 saveDPFC_CB_BASE;
	u32 saveFBC_CFB_BASE;
	u32 saveFBC_LL_BASE;
	u32 saveFBC_CONTROL;
	u32 saveFBC_CONTROL2;
	u32 saveIER;
	u32 saveIIR;
	u32 saveIMR;
	u32 saveDEIER;
	u32 saveDEIMR;
	u32 saveGTIER;
	u32 saveGTIMR;
	u32 saveFDI_RXA_IMR;
	u32 saveFDI_RXB_IMR;
	u32 saveCACHE_MODE_0;
	u32 saveMI_ARB_STATE;
	u32 saveSWF0[16];
	u32 saveSWF1[16];
	u32 saveSWF2[3];
	u8 saveMSR;
	u8 saveSR[8];
	u8 saveGR[25];
	u8 saveAR_INDEX;
	u8 saveAR[21];
	u8 saveDACMASK;
	u8 saveCR[37];
	uint64_t saveFENCE[I915_MAX_NUM_FENCES];
	u32 saveCURACNTR;
	u32 saveCURAPOS;
	u32 saveCURABASE;
	u32 saveCURBCNTR;
	u32 saveCURBPOS;
	u32 saveCURBBASE;
	u32 saveCURSIZE;
	u32 saveDP_B;
	u32 saveDP_C;
	u32 saveDP_D;
	u32 savePIPEA_GMCH_DATA_M;
	u32 savePIPEB_GMCH_DATA_M;
	u32 savePIPEA_GMCH_DATA_N;
	u32 savePIPEB_GMCH_DATA_N;
	u32 savePIPEA_DP_LINK_M;
	u32 savePIPEB_DP_LINK_M;
	u32 savePIPEA_DP_LINK_N;
	u32 savePIPEB_DP_LINK_N;
	u32 saveFDI_RXA_CTL;
	u32 saveFDI_TXA_CTL;
	u32 saveFDI_RXB_CTL;
	u32 saveFDI_TXB_CTL;
	u32 savePFA_CTL_1;
	u32 savePFB_CTL_1;
	u32 savePFA_WIN_SZ;
	u32 savePFB_WIN_SZ;
	u32 savePFA_WIN_POS;
	u32 savePFB_WIN_POS;
	u32 savePCH_DREF_CONTROL;
	u32 saveDISP_ARB_CTL;
	u32 savePIPEA_DATA_M1;
	u32 savePIPEA_DATA_N1;
	u32 savePIPEA_LINK_M1;
	u32 savePIPEA_LINK_N1;
	u32 savePIPEB_DATA_M1;
	u32 savePIPEB_DATA_N1;
	u32 savePIPEB_LINK_M1;
	u32 savePIPEB_LINK_N1;
	u32 saveMCHBAR_RENDER_STANDBY;
	u32 savePCH_PORT_HOTPLUG;
};
struct intel_gen6_power_mgmt {
	/* work and pm_iir are protected by dev_priv->irq_lock */
	struct work_struct work;
	u32 pm_iir;

	/* The below variables and all the rps hw state are protected by
	 * dev->struct_mutex. */
	u8 cur_delay;
	u8 min_delay;
	u8 max_delay;
	u8 rpe_delay;
	u8 rp1_delay;
	u8 rp0_delay;
	u8 hw_max;

	int last_adj;
	enum { LOW_POWER, BETWEEN, HIGH_POWER } power;

	bool enabled;
	struct delayed_work delayed_resume_work;

	/*
	 * Protects RPS/RC6 register access and PCU communication.
	 * Must be taken after struct_mutex if nested.
	 */
	struct mutex hw_lock;
};
/* defined in intel_pm.c */
extern spinlock_t mchdev_lock;
struct intel_ilk_power_mgmt {
	u8 cur_delay;
	u8 min_delay;
	u8 max_delay;
	u8 fmax;
	u8 fstart;

	u64 last_count1;
	unsigned long last_time1;
	unsigned long chipset_power;
	u64 last_count2;
	struct timespec last_time2;
	unsigned long gfx_power;
	u8 corr;

	int c_m;
	int r_t;

	struct drm_i915_gem_object *pwrctx;
	struct drm_i915_gem_object *renderctx;
};

/* Power well structure for haswell */
struct i915_power_well {
	/* power well enable/disable usage count */
	int count;
};

#define I915_MAX_POWER_WELLS 1

struct i915_power_domains {
	/*
	 * Power wells needed for initialization at driver init and suspend
	 * time are on. They are kept on until after the first modeset.
	 */
	bool init_power_on;
	struct mutex lock;
	struct i915_power_well power_wells[I915_MAX_POWER_WELLS];
};
struct i915_dri1_state {
	unsigned allow_batchbuffer : 1;
	u32 __iomem *gfx_hws_cpu_addr;

	unsigned int cpp;
	int back_offset;
	int front_offset;
	int current_page;
	int page_flipping;

	uint32_t counter;
};

struct i915_ums_state {
	/**
	 * Flag if the X Server, and thus DRM, is not currently in
	 * control of the device.
	 *
	 * This is set between LeaveVT and EnterVT. It needs to be
	 * replaced with a semaphore. It also needs to be
	 * transitioned away from for kernel modesetting.
	 */
	int mm_suspended;
};
#define MAX_L3_SLICES 2
struct intel_l3_parity {
	u32 *remap_info[MAX_L3_SLICES];
	struct work_struct error_work;
	int which_slice;
};

struct i915_gem_mm {
	/** Memory allocator for GTT stolen memory */
	struct drm_mm stolen;
	/** List of all objects in gtt_space. Used to restore gtt
	 *  mappings on resume */
	struct list_head bound_list;
	/**
	 * List of objects which are not bound to the GTT (thus are idle
	 * and not used by the GPU) but still have (presumably uncached)
	 * pages attached.
	 */
	struct list_head unbound_list;
	/** Usable portion of the GTT for GEM */
	unsigned long stolen_base; /* limited to low memory (32-bit) */

	/** PPGTT used for aliasing the PPGTT with the GTT */
	struct i915_hw_ppgtt *aliasing_ppgtt;

	struct shrinker inactive_shrinker;
	bool shrinker_no_lock_stealing;

	/** LRU list of objects with fence regs on them. */
	struct list_head fence_list;

	/**
	 * We leave the user IRQ off as much as possible,
	 * but this means that requests will finish and never
	 * be retired once the system goes idle. Set a timer to
	 * fire periodically while the ring is running. When it
	 * fires, go retire requests.
	 */
	struct delayed_work retire_work;

	/**
	 * When we detect an idle GPU, we want to turn on
	 * powersaving features. So once we see that there
	 * are no more requests outstanding and no more
	 * arrive within a small period of time, we fire
	 * off the idle_work.
	 */
	struct delayed_work idle_work;

	/**
	 * Are we in a non-interruptible section of code like
	 * modesetting?
	 */
	bool interruptible;

	/** Bit 6 swizzling required for X tiling */
	uint32_t bit_6_swizzle_x;
	/** Bit 6 swizzling required for Y tiling */
	uint32_t bit_6_swizzle_y;

	/* storage for physical objects */
	struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];

	/* accounting, useful for userland debugging */
	spinlock_t object_stat_lock;
	size_t object_memory;
	u32 object_count;
};
struct drm_i915_error_state_buf {
	unsigned bytes;
	unsigned size;
	int err;
	u8 *buf;
	loff_t start;
	loff_t pos;
};

struct i915_error_state_file_priv {
	struct drm_device *dev;
	struct drm_i915_error_state *error;
};

struct i915_gpu_error {
	/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
	/* Hang gpu twice in this window and your context gets banned */
#define DRM_I915_CTX_BAN_PERIOD DIV_ROUND_UP(8*DRM_I915_HANGCHECK_PERIOD, 1000)
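	/* With the default 1500 ms hangcheck period this works out to
	 * DIV_ROUND_UP(8 * 1500, 1000) = 12, i.e. a 12 second ban window. */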
	struct timer_list hangcheck_timer;

	/* For reset and error_state handling. */
	spinlock_t lock;
	/* Protected by the above dev->gpu_error.lock. */
	struct drm_i915_error_state *first_error;
	struct work_struct work;

	unsigned long missed_irq_rings;
	/**
	 * State variable and reset counter controlling the reset flow
	 *
	 * Upper bits are for the reset counter. This counter is used by the
	 * wait_seqno code to notice, race-free, that a reset event happened
	 * and that it needs to restart the entire ioctl (since most likely
	 * the seqno it waited for won't ever signal anytime soon).
	 *
	 * This is important for lock-free wait paths, where no contended lock
	 * naturally enforces the correct ordering between the bail-out of the
	 * waiter and the gpu reset work code.
	 *
	 * Lowest bit controls the reset state machine: Set means a reset is in
	 * progress. This state will (presuming we don't have any bugs) decay
	 * into either unset (successful reset) or the special WEDGED value (hw
	 * terminally sour). All waiters on the reset_queue will be woken when
	 * that happens.
	 */
	atomic_t reset_counter;

	/**
	 * Special values/flags for reset_counter
	 *
	 * Note that the code relies on
	 *	I915_WEDGED & I915_RESET_IN_PROGRESS_FLAG
	 * being true.
	 */
#define I915_RESET_IN_PROGRESS_FLAG	1
#define I915_WEDGED			0xffffffff
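	/*
	 * Sketch of a lock-free wait using the counter (illustrative,
	 * simplified from the wait_seqno flow described above): sample the
	 * counter before sleeping, bail out if a reset is pending, and
	 * treat a changed counter after waking as a completed reset.
	 *
	 *	unsigned reset = atomic_read(&error->reset_counter);
	 *	if (reset & I915_RESET_IN_PROGRESS_FLAG)
	 *		return reset == I915_WEDGED ? -EIO : -EAGAIN;
	 *	... sleep on the seqno ...
	 *	if (reset != atomic_read(&error->reset_counter))
	 *		return -EAGAIN;
	 */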
	/**
	 * Waitqueue to signal when the reset has completed. Used by clients
	 * that wait for dev_priv->mm.wedged to settle.
	 */
	wait_queue_head_t reset_queue;

	/* For gpu hang simulation. */
	unsigned int stop_rings;

	/* For missed irq/seqno simulation. */
	unsigned int test_irq_rings;
};
enum modeset_restore {
	MODESET_ON_LID_OPEN,
	MODESET_DONE,
	MODESET_SUSPENDED,
};

struct ddi_vbt_port_info {
	uint8_t hdmi_level_shift;

	uint8_t supports_dvi:1;
	uint8_t supports_hdmi:1;
	uint8_t supports_dp:1;
};

struct intel_vbt_data {
	struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
	struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */

	/* Feature bits */
	unsigned int int_tv_support:1;
	unsigned int lvds_dither:1;
	unsigned int lvds_vbt:1;
	unsigned int int_crt_support:1;
	unsigned int lvds_use_ssc:1;
	unsigned int display_clock_mode:1;
	unsigned int fdi_rx_polarity_inverted:1;
	int lvds_ssc_freq;
	unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */

	/* eDP */
	int edp_rate;
	int edp_lanes;
	int edp_preemphasis;
	int edp_vswing;
	bool edp_initialized;
	bool edp_support;
	int edp_bpp;
	struct edp_power_seq edp_pps;

	/* MIPI DSI */
	struct {
		u16 panel_id;
	} dsi;

	int crt_ddc_pin;

	int child_dev_num;
	union child_device_config *child_dev;

	struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS];
};

enum intel_ddb_partitioning {
	INTEL_DDB_PART_1_2,
	INTEL_DDB_PART_5_6, /* IVB+ */
};

struct intel_wm_level {
	bool enable;
	uint32_t pri_val;
	uint32_t spr_val;
	uint32_t cur_val;
	uint32_t fbc_val;
};

struct hsw_wm_values {
	uint32_t wm_pipe[3];
	uint32_t wm_lp[3];
	uint32_t wm_lp_spr[3];
	uint32_t wm_linetime[3];
	bool enable_fbc_wm;
	enum intel_ddb_partitioning partitioning;
};
/*
 * This struct tracks the state needed for the Package C8+ feature.
 *
 * Package states C8 and deeper are really deep PC states that can only be
 * reached when all the devices on the system allow it, so even if the graphics
 * device allows PC8+, it doesn't mean the system will actually get to these
 * states.
 *
 * Our driver only allows PC8+ when all the outputs are disabled, the power well
 * is disabled and the GPU is idle. When these conditions are met, we manually
 * do the other conditions: disable the interrupts, clocks and switch LCPLL
 * refclk to Fclk.
 *
 * When we really reach PC8 or deeper states (not just when we allow it) we lose
 * the state of some registers, so when we come back from PC8+ we need to
 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
 * need to take care of the registers kept by RC6.
 *
 * The interrupt disabling is part of the requirements. We can only leave the
 * PCH HPD interrupts enabled. If we're in PC8+ and we get another interrupt we
 * can lock the machine.
 *
 * Ideally every piece of our code that needs PC8+ disabled would call
 * hsw_disable_package_c8, which would increment disable_count and prevent the
 * system from reaching PC8+. But we don't have a symmetric way to do this for
 * everything, so we have the requirements_met and gpu_idle variables. When we
 * switch requirements_met or gpu_idle to true we decrease disable_count, and
 * increase it in the opposite case. The requirements_met variable is true when
 * all the CRTCs, encoders and the power well are disabled. The gpu_idle
 * variable is true when the GPU is idle.
 *
 * In addition to everything, we only actually enable PC8+ if disable_count
 * stays at zero for at least some seconds. This is implemented with the
 * enable_work delayed work. We do this so we don't enable/disable PC8 dozens
 * of consecutive times when all screens are disabled and some background app
 * queries the state of our connectors, or we have some application constantly
 * waking up to use the GPU. Only after the enable_work function actually
 * enables PC8+ does the "enabled" variable become true, which means that it
 * can be false even if disable_count is 0.
 *
 * The irqs_disabled variable becomes true exactly after we disable the IRQs
 * and goes back to false exactly before we reenable the IRQs. We use this
 * variable to check if someone is trying to enable/disable IRQs while they're
 * supposed to be disabled. This shouldn't happen and we'll print some error
 * messages in case it happens, but if it actually happens we'll also update
 * the variables inside struct regsave so when we restore the IRQs they will
 * contain the latest expected values.
 *
 * For more, read "Display Sequences for Package C8" in our documentation.
 */
struct i915_package_c8 {
	bool requirements_met;
	bool gpu_idle;
	bool irqs_disabled;
	/* Only true after the delayed work task actually enables it. */
	bool enabled;
	int disable_count;
	struct mutex lock;
	struct delayed_work enable_work;

	struct {
		uint32_t deimr;
		uint32_t sdeimr;
		uint32_t gtimr;
		uint32_t gtier;
		uint32_t gen6_pmimr;
	} regsave;
};
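
/*
 * Usage sketch (illustrative; the comment above names hsw_disable_package_c8
 * as the real entry point, and a symmetric enable call is assumed here):
 * code that cannot tolerate PC8+ brackets its critical section with the
 * disable/enable pair, which adjusts disable_count under the pc8 lock.
 *
 *	hsw_disable_package_c8(dev_priv);
 *	... touch state that PC8+ would lose ...
 *	hsw_enable_package_c8(dev_priv);
 */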
enum intel_pipe_crc_source {
	INTEL_PIPE_CRC_SOURCE_NONE,
	INTEL_PIPE_CRC_SOURCE_PLANE1,
	INTEL_PIPE_CRC_SOURCE_PLANE2,
	INTEL_PIPE_CRC_SOURCE_PF,
	INTEL_PIPE_CRC_SOURCE_PIPE,
	/* TV/DP on pre-gen5/vlv can't use the pipe source. */
	INTEL_PIPE_CRC_SOURCE_TV,
	INTEL_PIPE_CRC_SOURCE_DP_B,
	INTEL_PIPE_CRC_SOURCE_DP_C,
	INTEL_PIPE_CRC_SOURCE_DP_D,
	INTEL_PIPE_CRC_SOURCE_AUTO,
	INTEL_PIPE_CRC_SOURCE_MAX,
};

struct intel_pipe_crc_entry {
	uint32_t frame;
	uint32_t crc[5];
};

#define INTEL_PIPE_CRC_ENTRIES_NR	128
struct intel_pipe_crc {
	spinlock_t lock;
	bool opened;		/* exclusive access to the result file */
	struct intel_pipe_crc_entry *entries;
	enum intel_pipe_crc_source source;
	int head, tail;
	wait_queue_head_t wq;
};
typedef struct drm_i915_private {
	struct drm_device *dev;
	struct kmem_cache *slab;

	const struct intel_device_info *info;

	int relative_constants_mode;

	void __iomem *regs;

	struct intel_uncore uncore;

	struct intel_gmbus gmbus[GMBUS_NUM_PORTS];

	/** gmbus_mutex protects against concurrent usage of the single hw gmbus
	 * controller on different i2c buses. */
	struct mutex gmbus_mutex;

	/**
	 * Base address of the gmbus and gpio block.
	 */
	uint32_t gpio_mmio_base;

	wait_queue_head_t gmbus_wait_queue;

	struct pci_dev *bridge_dev;
	struct intel_ring_buffer ring[I915_NUM_RINGS];
	uint32_t last_seqno, next_seqno;

	drm_dma_handle_t *status_page_dmah;
	struct resource mch_res;

	atomic_t irq_received;

	/* protects the irq masks */
	spinlock_t irq_lock;

	/* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
	struct pm_qos_request pm_qos;

	/* DPIO indirect register protection */
	struct mutex dpio_lock;

	/** Cached value of IMR to avoid reads in updating the bitfield */
	union {
		u32 irq_mask;
		u32 de_irq_mask[I915_MAX_PIPES];
	};
	u32 gt_irq_mask;
	u32 pm_irq_mask;

	struct work_struct hotplug_work;
	bool enable_hotplug_processing;
	struct {
		unsigned long hpd_last_jiffies;
		int hpd_cnt;
		enum {
			HPD_ENABLED = 0,
			HPD_DISABLED = 1,
			HPD_MARK_DISABLED = 2
		} hpd_mark;
	} hpd_stats[HPD_NUM_PINS];
	u32 hpd_event_bits;
	struct timer_list hotplug_reenable_timer;

	int num_plane;

	struct i915_fbc fbc;
	struct intel_opregion opregion;
	struct intel_vbt_data vbt;

	/* overlay */
	struct intel_overlay *overlay;
	unsigned int sprite_scaling_enabled;

	/* backlight */
	struct {
		int level;
		bool enabled;
		spinlock_t lock; /* bl registers and the above bl fields */
		struct backlight_device *device;
	} backlight;

	/* LVDS info */
	bool no_aux_handshake;

	struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
	int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
	int num_fence_regs; /* 8 on pre-965, 16 otherwise */

	unsigned int fsb_freq, mem_freq, is_ddr3;

	/**
	 * wq - Driver workqueue for GEM.
	 *
	 * NOTE: Work items scheduled here are not allowed to grab any modeset
	 * locks, for otherwise the flushing done in the pageflip code will
	 * result in deadlocks.
	 */
	struct workqueue_struct *wq;

	/* Display functions */
	struct drm_i915_display_funcs display;

	/* PCH chipset type */
	enum intel_pch pch_type;
	unsigned short pch_id;

	unsigned long quirks;

	enum modeset_restore modeset_restore;
	struct mutex modeset_restore_lock;

	struct list_head vm_list; /* Global list of all address spaces */
	struct i915_gtt gtt; /* VMA representing the global address space */

	struct i915_gem_mm mm;

	/* Kernel Modesetting */
	struct sdvo_device_mapping sdvo_mappings[2];

	struct drm_crtc *plane_to_crtc_mapping[3];
	struct drm_crtc *pipe_to_crtc_mapping[3];
	wait_queue_head_t pending_flip_queue;

#ifdef CONFIG_DEBUG_FS
	struct intel_pipe_crc pipe_crc[I915_MAX_PIPES];
#endif

	int num_shared_dpll;
	struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
	struct intel_ddi_plls ddi_plls;

	/* Reclocking support */
	bool render_reclock_avail;
	bool lvds_downclock_avail;
	/* indicates the reduced downclock for LVDS */
	int lvds_downclock;
	u16 orig_clock;

	bool mchbar_need_disable;

	struct intel_l3_parity l3_parity;

	/* Cannot be determined by PCIID. You must always read a register. */
	size_t ellc_size;

	/* gen6+ rps state */
	struct intel_gen6_power_mgmt rps;

	/* ilk-only ips/rps state. Everything in here is protected by the global
	 * mchdev_lock in intel_pm.c */
	struct intel_ilk_power_mgmt ips;

	struct i915_power_domains power_domains;

	struct i915_psr psr;

	struct i915_gpu_error gpu_error;

	struct drm_i915_gem_object *vlv_pctx;
#ifdef CONFIG_DRM_I915_FBDEV
	/* the fbdev registered on this device */
	struct intel_fbdev *fbdev;
#endif
	/*
	 * The console may be contended at resume, but we don't want resume
	 * to block on it.
	 */
	struct work_struct console_resume_work;
	struct drm_property *broadcast_rgb_property;
	struct drm_property *force_audio_property;

	bool hw_contexts_disabled;
	uint32_t hw_context_size;
	struct list_head context_list;

	u32 fdi_rx_config;

	struct i915_suspend_saved_registers regfile;

	struct {
		/*
		 * Raw watermark latency values:
		 * in 0.1us units for WM0,
		 * in 0.5us units for WM1+.
		 */
		/* primary */
		uint16_t pri_latency[5];
		/* sprite */
		uint16_t spr_latency[5];
		/* cursor */
		uint16_t cur_latency[5];

		/* current hardware state */
		struct hsw_wm_values hw;
	} wm;

	struct i915_package_c8 pc8;

	/* Old dri1 support infrastructure, beware the dragons ya fools entering
	 * here! */
	struct i915_dri1_state dri1;
	/* Old ums support infrastructure, same warning applies. */
	struct i915_ums_state ums;
} drm_i915_private_t;
static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
{
	return dev->dev_private;
}

/* Iterate over initialised rings */
#define for_each_ring(ring__, dev_priv__, i__) \
	for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
		if (((ring__) = &(dev_priv__)->ring[(i__)]), intel_ring_initialized((ring__)))
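
/*
 * Usage sketch (illustrative only): the comma expression assigns ring__ and
 * then tests intel_ring_initialized(), so uninitialised rings are skipped
 * without an explicit check in the loop body.
 *
 *	struct intel_ring_buffer *ring;
 *	int i;
 *
 *	for_each_ring(ring, dev_priv, i)
 *		DRM_DEBUG("ring %d: %s\n", i, ring->name);
 */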
enum hdmi_force_audio {
	HDMI_AUDIO_OFF_DVI = -2,	/* no aux data for HDMI-DVI converter */
	HDMI_AUDIO_OFF,			/* force turn off HDMI audio */
	HDMI_AUDIO_AUTO,		/* trust EDID */
	HDMI_AUDIO_ON,			/* force turn on HDMI audio */
};

#define I915_GTT_OFFSET_NONE ((u32)-1)
struct drm_i915_gem_object_ops {
        /* Interface between the GEM object and its backing storage.
         * get_pages() is called once prior to binding the associated set
         * of pages into the GTT, and put_pages() is called after we no
         * longer need them. As we expect there to be an associated cost
         * with migrating pages between the backing storage and making
         * them available for the GPU (e.g. clflush), we may hold onto the
         * pages after they are no longer referenced by the GPU in case
         * they may be used again shortly (for example migrating the pages
         * to a different memory domain within the GTT). put_pages() will
         * therefore most likely be called when the object itself is being
         * released or under memory pressure (where we attempt to reap
         * pages for the shrinker).
         */
        int (*get_pages)(struct drm_i915_gem_object *);
        void (*put_pages)(struct drm_i915_gem_object *);
};
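
/*
 * Minimal sketch of an ops table (illustrative only; foo_get_pages()
 * and foo_put_pages() are hypothetical backends that populate and
 * release obj->pages respectively):
 *
 *      static const struct drm_i915_gem_object_ops foo_gem_object_ops = {
 *              .get_pages = foo_get_pages,
 *              .put_pages = foo_put_pages,
 *      };
 *
 *      i915_gem_object_init(obj, &foo_gem_object_ops);
 */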
struct drm_i915_gem_object {
        struct drm_gem_object base;

        const struct drm_i915_gem_object_ops *ops;

        /** List of VMAs backed by this object */
        struct list_head vma_list;

        /** Stolen memory for this object, instead of being backed by shmem. */
        struct drm_mm_node *stolen;
        struct list_head global_list;

        struct list_head ring_list;
        /** Used in execbuf to temporarily hold a ref */
        struct list_head obj_exec_link;
        /**
         * This is set if the object is on the active lists (has pending
         * rendering and so a non-zero seqno), and is not set if it is on
         * the inactive (ready to be unbound) list.
         */
        unsigned int active:1;
        /**
         * This is set if the object has been written to since last bound
         * to the GTT
         */
        unsigned int dirty:1;

        /**
         * Fence register bits (if any) for this object. Will be set
         * as needed when mapped into the GTT.
         * Protected by dev->struct_mutex.
         */
        signed int fence_reg:I915_MAX_NUM_FENCE_BITS;

        /**
         * Advice: are the backing pages purgeable?
         */
        unsigned int madv:2;

        /**
         * Current tiling mode for the object.
         */
        unsigned int tiling_mode:2;
        /**
         * Whether the tiling parameters for the currently associated fence
         * register have changed. Note that for the purposes of tracking
         * tiling changes we also treat the unfenced register, the register
         * slot that the object occupies whilst it executes a fenced
         * command (such as BLT on gen2/3), as a "fence".
         */
        unsigned int fence_dirty:1;

        /** How many users have pinned this object in GTT space. The following
         * users can each hold at most one reference: pwrite/pread, pin_ioctl
         * (via user_pin_count), execbuffer (objects are not allowed multiple
         * times for the same batchbuffer), and the framebuffer code. When
         * switching/pageflipping, the framebuffer code has at most two buffers
         * pinned per crtc.
         *
         * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
         * bits with absolutely no headroom. So use 4 bits. */
        unsigned int pin_count:4;
#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf

        /**
         * Is the object at the current location in the gtt mappable and
         * fenceable? Used to avoid costly recalculations.
         */
        unsigned int map_and_fenceable:1;
        /**
         * Whether the current gtt mapping needs to be mappable (and isn't just
         * mappable by accident). Track pin and fault separately for a more
         * accurate mappable working set.
         */
        unsigned int fault_mappable:1;
        unsigned int pin_mappable:1;
        unsigned int pin_display:1;

        /*
         * Is the GPU currently using a fence to access this buffer?
         */
        unsigned int pending_fenced_gpu_access:1;
        unsigned int fenced_gpu_access:1;

        unsigned int cache_level:3;

        unsigned int has_aliasing_ppgtt_mapping:1;
        unsigned int has_global_gtt_mapping:1;
        unsigned int has_dma_mapping:1;

        struct sg_table *pages;
        int pages_pin_count;

        /* prime dma-buf support */
        void *dma_buf_vmapping;
        int vmapping_count;

        struct intel_ring_buffer *ring;

        /** Breadcrumb of last rendering to the buffer. */
        uint32_t last_read_seqno;
        uint32_t last_write_seqno;
        /** Breadcrumb of last fenced GPU access to the buffer. */
        uint32_t last_fenced_seqno;

        /** Current tiling stride for the object, if it's tiled. */
        uint32_t stride;

        /** References from framebuffers, locks out tiling changes. */
        unsigned long framebuffer_references;

        /** Record of address bit 17 of each page at last unbind. */
        unsigned long *bit_17;

        /** User space pin count and filp owning the pin */
        unsigned long user_pin_count;
        struct drm_file *pin_filp;

        /** for phy allocated objects */
        struct drm_i915_gem_phys_object *phys_obj;
};
#define to_gem_object(obj) (&((struct drm_i915_gem_object *)(obj))->base)
#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)

/**
 * Request queue structure.
 *
 * The request queue allows us to note sequence numbers that have been emitted
 * and may be associated with active buffers to be retired.
 *
 * By keeping this list, we can avoid having to do questionable
 * sequence-number comparisons on buffer last_rendering_seqnos, and associate
 * an emission time with seqnos for tracking how far ahead of the GPU we are.
 */
struct drm_i915_gem_request {
        /** On which ring this request was generated */
        struct intel_ring_buffer *ring;

        /** GEM sequence number associated with this request. */
        uint32_t seqno;

        /** Position in the ringbuffer of the start of the request */
        u32 head;

        /** Position in the ringbuffer of the end of the request */
        u32 tail;

        /** Context related to this request */
        struct i915_hw_context *ctx;

        /** Batch buffer related to this request if any */
        struct drm_i915_gem_object *batch_obj;

        /** Time at which this request was emitted, in jiffies. */
        unsigned long emitted_jiffies;

        /** global list entry for this request */
        struct list_head list;

        struct drm_i915_file_private *file_priv;
        /** file_priv list entry for this request */
        struct list_head client_list;
};
struct drm_i915_file_private {
        struct drm_i915_private *dev_priv;

        struct {
                spinlock_t lock;
                struct list_head request_list;
                struct delayed_work idle_work;
        } mm;
        struct idr context_idr;

        struct i915_ctx_hang_stats hang_stats;
        atomic_t rps_wait_boost;
};

#define INTEL_INFO(dev) (to_i915(dev)->info)

#define IS_I830(dev) ((dev)->pdev->device == 0x3577)
#define IS_845G(dev) ((dev)->pdev->device == 0x2562)
#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x)
#define IS_I865G(dev) ((dev)->pdev->device == 0x2572)
#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
#define IS_I915GM(dev) ((dev)->pdev->device == 0x2592)
#define IS_I945G(dev) ((dev)->pdev->device == 0x2772)
#define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm)
#define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater)
#define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline)
#define IS_GM45(dev) ((dev)->pdev->device == 0x2A42)
#define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x)
#define IS_PINEVIEW_G(dev) ((dev)->pdev->device == 0xa001)
#define IS_PINEVIEW_M(dev) ((dev)->pdev->device == 0xa011)
#define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview)
#define IS_G33(dev) (INTEL_INFO(dev)->is_g33)
#define IS_IRONLAKE_M(dev) ((dev)->pdev->device == 0x0046)
#define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge)
#define IS_IVB_GT1(dev) ((dev)->pdev->device == 0x0156 || \
                         (dev)->pdev->device == 0x0152 || \
                         (dev)->pdev->device == 0x015a)
#define IS_SNB_GT1(dev) ((dev)->pdev->device == 0x0102 || \
                         (dev)->pdev->device == 0x0106 || \
                         (dev)->pdev->device == 0x010A)
#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview)
#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
#define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \
                               ((dev)->pdev->device & 0xFF00) == 0x0C00)
#define IS_ULT(dev) (IS_HASWELL(dev) && \
                     ((dev)->pdev->device & 0xFF00) == 0x0A00)
#define IS_HSW_GT3(dev) (IS_HASWELL(dev) && \
                         ((dev)->pdev->device & 0x00F0) == 0x0020)
#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)

/*
 * The genX designation typically refers to the render engine, so render
 * capability related checks should use IS_GEN, while display and other checks
 * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular
 * chips, etc.).
 */
#define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2)
#define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3)
#define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4)
#define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5)
#define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6)
#define IS_GEN7(dev) (INTEL_INFO(dev)->gen == 7)
#define IS_GEN8(dev) (INTEL_INFO(dev)->gen == 8)

#define RENDER_RING (1<<RCS)
#define BSD_RING (1<<VCS)
#define BLT_RING (1<<BCS)
#define VEBOX_RING (1<<VECS)
#define HAS_BSD(dev) (INTEL_INFO(dev)->ring_mask & BSD_RING)
#define HAS_BLT(dev) (INTEL_INFO(dev)->ring_mask & BLT_RING)
#define HAS_VEBOX(dev) (INTEL_INFO(dev)->ring_mask & VEBOX_RING)
#define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc)
#define HAS_WT(dev) (IS_HASWELL(dev) && to_i915(dev)->ellc_size)
#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)

#define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6)
#define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev))

#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay)
#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical)

/* Early gen2 have a totally busted CS tlb and require pinned batches. */
#define HAS_BROKEN_CS_TLB(dev) (IS_I830(dev) || IS_845G(dev))

/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
 * rows, which changed the alignment requirements and fence programming.
 */
#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
                                                       IS_I915GM(dev)))
#define SUPPORTS_DIGITAL_OUTPUTS(dev) (!IS_GEN2(dev) && !IS_PINEVIEW(dev))
#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_GEN5(dev))
#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_GEN5(dev))
#define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv)
#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)

#define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)

#define HAS_IPS(dev) (IS_ULT(dev))

#define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi)
#define HAS_POWER_WELL(dev) (IS_HASWELL(dev))
#define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg)
#define HAS_PSR(dev) (IS_HASWELL(dev))

#define INTEL_PCH_DEVICE_ID_MASK 0xff00
#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00
#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00

#define INTEL_PCH_TYPE(dev) (to_i915(dev)->pch_type)
#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
#define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP)
#define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE)

/* DPF == dynamic parity feature */
#define HAS_L3_DPF(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
#define NUM_L3_SLICES(dev) (IS_HSW_GT3(dev) ? 2 : HAS_L3_DPF(dev))

#define GT_FREQUENCY_MULTIPLIER 50

#include "i915_trace.h"

extern const struct drm_ioctl_desc i915_ioctls[];
extern int i915_max_ioctl;
extern unsigned int i915_fbpercrtc __always_unused;
extern int i915_panel_ignore_lid __read_mostly;
extern unsigned int i915_powersave __read_mostly;
extern int i915_semaphores __read_mostly;
extern unsigned int i915_lvds_downclock __read_mostly;
extern int i915_lvds_channel_mode __read_mostly;
extern int i915_panel_use_ssc __read_mostly;
extern int i915_vbt_sdvo_panel_type __read_mostly;
extern int i915_enable_rc6 __read_mostly;
extern int i915_enable_fbc __read_mostly;
extern bool i915_enable_hangcheck __read_mostly;
extern int i915_enable_ppgtt __read_mostly;
extern int i915_enable_psr __read_mostly;
extern unsigned int i915_preliminary_hw_support __read_mostly;
extern int i915_disable_power_well __read_mostly;
extern int i915_enable_ips __read_mostly;
extern bool i915_fastboot __read_mostly;
extern int i915_enable_pc8 __read_mostly;
extern int i915_pc8_timeout __read_mostly;
extern bool i915_prefault_disable __read_mostly;

extern int i915_suspend(struct drm_device *dev, pm_message_t state);
extern int i915_resume(struct drm_device *dev);
extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);

/* i915_dma.c */
void i915_update_dri1_breadcrumb(struct drm_device *dev);
extern void i915_kernel_lost_context(struct drm_device *dev);
extern int i915_driver_load(struct drm_device *, unsigned long flags);
extern int i915_driver_unload(struct drm_device *);
extern int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv);
extern void i915_driver_lastclose(struct drm_device *dev);
extern void i915_driver_preclose(struct drm_device *dev,
                                 struct drm_file *file_priv);
extern void i915_driver_postclose(struct drm_device *dev,
                                  struct drm_file *file_priv);
extern int i915_driver_device_is_agp(struct drm_device *dev);
#ifdef CONFIG_COMPAT
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
                              unsigned long arg);
#endif
extern int i915_emit_box(struct drm_device *dev,
                         struct drm_clip_rect *box,
                         int DR1, int DR4);
extern int intel_gpu_reset(struct drm_device *dev);
extern int i915_reset(struct drm_device *dev);
extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);

extern void intel_console_resume(struct work_struct *work);

/* i915_irq.c */
void i915_queue_hangcheck(struct drm_device *dev);
void i915_handle_error(struct drm_device *dev, bool wedged);

extern void intel_irq_init(struct drm_device *dev);
extern void intel_pm_init(struct drm_device *dev);
extern void intel_hpd_init(struct drm_device *dev);
extern void intel_uncore_sanitize(struct drm_device *dev);
extern void intel_uncore_early_sanitize(struct drm_device *dev);
extern void intel_uncore_init(struct drm_device *dev);
extern void intel_uncore_clear_errors(struct drm_device *dev);
extern void intel_uncore_check_errors(struct drm_device *dev);
extern void intel_uncore_fini(struct drm_device *dev);

void
i915_enable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask);

void
i915_disable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask);

/* i915_gem.c */
int i915_gem_init_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
int i915_gem_create_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *file_priv);
int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *file_priv);
int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *file_priv);
int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *file_priv);
int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv);
int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file_priv);
int i915_gem_execbuffer(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
int i915_gem_execbuffer2(struct drm_device *dev, void *data,
                         struct drm_file *file_priv);
int i915_gem_pin_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv);
int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *file_priv);
int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
                               struct drm_file *file);
int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
                               struct drm_file *file);
int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *file_priv);
int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv);
int i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv);
int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv);
int i915_gem_set_tiling(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
int i915_gem_get_tiling(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv);
int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
void i915_gem_load(struct drm_device *dev);
void *i915_gem_object_alloc(struct drm_device *dev);
void i915_gem_object_free(struct drm_i915_gem_object *obj);
void i915_gem_object_init(struct drm_i915_gem_object *obj,
                          const struct drm_i915_gem_object_ops *ops);
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
                                                  size_t size);
void i915_gem_free_object(struct drm_gem_object *obj);
void i915_gem_vma_destroy(struct i915_vma *vma);

int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
                                     struct i915_address_space *vm,
                                     uint32_t alignment,
                                     bool map_and_fenceable,
                                     bool nonblocking);
void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
int __must_check i915_vma_unbind(struct i915_vma *vma);
int __must_check i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj);
int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
void i915_gem_lastclose(struct drm_device *dev);

int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
{
        struct sg_page_iter sg_iter;

        /* Start the sg walk at page n and return the first page we hit. */
        for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, n)
                return sg_page_iter_page(&sg_iter);

        return NULL;
}
static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
        BUG_ON(obj->pages == NULL);
        obj->pages_pin_count++;
}

static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
        BUG_ON(obj->pages_pin_count == 0);
        obj->pages_pin_count--;
}

int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
int i915_gem_object_sync(struct drm_i915_gem_object *obj,
                         struct intel_ring_buffer *to);
void i915_vma_move_to_active(struct i915_vma *vma,
                             struct intel_ring_buffer *ring);
int i915_gem_dumb_create(struct drm_file *file_priv,
                         struct drm_device *dev,
                         struct drm_mode_create_dumb *args);
int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
                      uint32_t handle, uint64_t *offset);

/**
 * Returns true if seq1 is later than seq2.
 */
static inline bool
i915_seqno_passed(uint32_t seq1, uint32_t seq2)
{
        return (int32_t)(seq1 - seq2) >= 0;
}
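
/*
 * The signed-difference trick above remains correct across u32 seqno
 * wraparound. Illustrative values (sketch only):
 *
 *      i915_seqno_passed(2, 0xfffffffe) is true, because
 *      (int32_t)(2 - 0xfffffffe) == 4 >= 0,
 *
 * even though 2 < 0xfffffffe as plain unsigned numbers.
 */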
int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);

static inline bool
i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
{
        if (obj->fence_reg != I915_FENCE_REG_NONE) {
                struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
                dev_priv->fence_regs[obj->fence_reg].pin_count++;
                return true;
        } else
                return false;
}

static inline void
i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
{
        if (obj->fence_reg != I915_FENCE_REG_NONE) {
                struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
                WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
                dev_priv->fence_regs[obj->fence_reg].pin_count--;
        }
}
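
/*
 * Illustrative pairing (sketch only): pin the fence register around an
 * access that must not lose it, then drop the pin when done.
 *
 *      if (i915_gem_object_pin_fence(obj)) {
 *              ... access the object through its fenced GTT mapping ...
 *              i915_gem_object_unpin_fence(obj);
 *      }
 */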
bool i915_gem_retire_requests(struct drm_device *dev);
void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
                                      bool interruptible);

static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
{
        return unlikely(atomic_read(&error->reset_counter)
                        & I915_RESET_IN_PROGRESS_FLAG);
}

static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
{
        return atomic_read(&error->reset_counter) == I915_WEDGED;
}

void i915_gem_reset(struct drm_device *dev);
bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
int __must_check i915_gem_init(struct drm_device *dev);
int __must_check i915_gem_init_hw(struct drm_device *dev);
int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice);
void i915_gem_init_swizzling(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
int __must_check i915_gpu_idle(struct drm_device *dev);
int __must_check i915_gem_suspend(struct drm_device *dev);
int __i915_add_request(struct intel_ring_buffer *ring,
                       struct drm_file *file,
                       struct drm_i915_gem_object *batch_obj,
                       u32 *seqno);
#define i915_add_request(ring, seqno) \
        __i915_add_request(ring, NULL, NULL, seqno)
int __must_check i915_wait_seqno(struct intel_ring_buffer *ring,
                                 uint32_t seqno);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
                                  bool write);
int __must_check
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
                                     u32 alignment,
                                     struct intel_ring_buffer *pipelined);
void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj);
int i915_gem_attach_phys_object(struct drm_device *dev,
                                struct drm_i915_gem_object *obj,
                                int id,
                                int align);
void i915_gem_detach_phys_object(struct drm_device *dev,
                                 struct drm_i915_gem_object *obj);
void i915_gem_free_all_phys_object(struct drm_device *dev);
int i915_gem_open(struct drm_device *dev, struct drm_file *file);
void i915_gem_release(struct drm_device *dev, struct drm_file *file);

uint32_t
i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode);
uint32_t
i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
                           int tiling_mode, bool fenced);

int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
                                    enum i915_cache_level cache_level);

struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
                                             struct dma_buf *dma_buf);
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
                                      struct drm_gem_object *gem_obj, int flags);

void i915_gem_restore_fences(struct drm_device *dev);

unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
                                  struct i915_address_space *vm);
bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o);
bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
                        struct i915_address_space *vm);
unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
                                struct i915_address_space *vm);
struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
                                     struct i915_address_space *vm);
struct i915_vma *
i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
                                  struct i915_address_space *vm);

struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj);

/* Some GGTT VM helpers */
#define obj_to_ggtt(obj) \
        (&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base)
static inline bool i915_is_ggtt(struct i915_address_space *vm)
{
        struct i915_address_space *ggtt =
                &((struct drm_i915_private *)(vm)->dev->dev_private)->gtt.base;
        return vm == ggtt;
}

static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj)
{
        return i915_gem_obj_bound(obj, obj_to_ggtt(obj));
}

static inline unsigned long
i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *obj)
{
        return i915_gem_obj_offset(obj, obj_to_ggtt(obj));
}

static inline unsigned long
i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj)
{
        return i915_gem_obj_size(obj, obj_to_ggtt(obj));
}

static inline int __must_check
i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
                      uint32_t alignment,
                      bool map_and_fenceable,
                      bool nonblocking)
{
        return i915_gem_object_pin(obj, obj_to_ggtt(obj), alignment,
                                   map_and_fenceable, nonblocking);
}
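
/*
 * Illustrative use (sketch only): pin an object into the global GTT with
 * page alignment, requiring a mappable and fenceable placement, allowing
 * eviction to block; unpin when done.
 *
 *      ret = i915_gem_obj_ggtt_pin(obj, 4096, true, false);
 *      if (ret == 0) {
 *              unsigned long offset = i915_gem_obj_ggtt_offset(obj);
 *              ...
 *              i915_gem_object_unpin(obj);
 *      }
 */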
/* i915_gem_context.c */
void i915_gem_context_init(struct drm_device *dev);
void i915_gem_context_fini(struct drm_device *dev);
void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
int i915_switch_context(struct intel_ring_buffer *ring,
                        struct drm_file *file, int to_id);
void i915_gem_context_free(struct kref *ctx_ref);
static inline void i915_gem_context_reference(struct i915_hw_context *ctx)
{
        kref_get(&ctx->ref);
}

static inline void i915_gem_context_unreference(struct i915_hw_context *ctx)
{
        kref_put(&ctx->ref, i915_gem_context_free);
}

struct i915_ctx_hang_stats * __must_check
i915_gem_context_get_hang_stats(struct drm_device *dev,
                                struct drm_file *file,
                                u32 id);
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
                                  struct drm_file *file);
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
                                   struct drm_file *file);

/* i915_gem_gtt.c */
void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev);
void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
                            struct drm_i915_gem_object *obj,
                            enum i915_cache_level cache_level);
void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
                              struct drm_i915_gem_object *obj);

void i915_check_and_clear_faults(struct drm_device *dev);
void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
void i915_gem_restore_gtt_mappings(struct drm_device *dev);
int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
                              enum i915_cache_level cache_level);
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
void i915_gem_init_global_gtt(struct drm_device *dev);
void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
                               unsigned long mappable_end, unsigned long end);
int i915_gem_gtt_init(struct drm_device *dev);
static inline void i915_gem_chipset_flush(struct drm_device *dev)
{
        if (INTEL_INFO(dev)->gen < 6)
                intel_gtt_chipset_flush();
}

/* i915_gem_evict.c */
int __must_check i915_gem_evict_something(struct drm_device *dev,
                                          struct i915_address_space *vm,
                                          int min_size,
                                          unsigned alignment,
                                          unsigned cache_level,
                                          bool mappable,
                                          bool nonblock);
int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
int i915_gem_evict_everything(struct drm_device *dev);

/* i915_gem_stolen.c */
int i915_gem_init_stolen(struct drm_device *dev);
int i915_gem_stolen_setup_compression(struct drm_device *dev, int size);
void i915_gem_stolen_cleanup_compression(struct drm_device *dev);
void i915_gem_cleanup_stolen(struct drm_device *dev);
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_device *dev, u32 size);
struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
                                               u32 stolen_offset,
                                               u32 gtt_offset,
                                               u32 size);
void i915_gem_object_release_stolen(struct drm_i915_gem_object *obj);

/* i915_gem_tiling.c */
static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
        drm_i915_private_t *dev_priv = obj->base.dev->dev_private;

        return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
                obj->tiling_mode != I915_TILING_NONE;
}

void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);

/* i915_gem_debug.c */
#if WATCH_LISTS
int i915_verify_lists(struct drm_device *dev);
#else
#define i915_verify_lists(dev) 0
#endif

/* i915_debugfs.c */
int i915_debugfs_init(struct drm_minor *minor);
void i915_debugfs_cleanup(struct drm_minor *minor);
#ifdef CONFIG_DEBUG_FS
void intel_display_crc_init(struct drm_device *dev);
#else
static inline void intel_display_crc_init(struct drm_device *dev) {}
#endif

/* i915_gpu_error.c */
__printf(2, 3)
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
int i915_error_state_to_str(struct drm_i915_error_state_buf *estr,
                            const struct i915_error_state_file_priv *error);
int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb,
                              size_t count, loff_t pos);
static inline void i915_error_state_buf_release(
        struct drm_i915_error_state_buf *eb)
{
        kfree(eb->buf);
}
void i915_capture_error_state(struct drm_device *dev);
void i915_error_state_get(struct drm_device *dev,
                          struct i915_error_state_file_priv *error_priv);
void i915_error_state_put(struct i915_error_state_file_priv *error_priv);
void i915_destroy_error_state(struct drm_device *dev);

void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone);
const char *i915_cache_level_str(int type);

/* i915_suspend.c */
extern int i915_save_state(struct drm_device *dev);
extern int i915_restore_state(struct drm_device *dev);

/* i915_ums.c */
void i915_save_display_reg(struct drm_device *dev);
void i915_restore_display_reg(struct drm_device *dev);

/* i915_sysfs.c */
void i915_setup_sysfs(struct drm_device *dev_priv);
void i915_teardown_sysfs(struct drm_device *dev_priv);

/* intel_i2c.c */
extern int intel_setup_gmbus(struct drm_device *dev);
extern void intel_teardown_gmbus(struct drm_device *dev);
static inline bool intel_gmbus_is_port_valid(unsigned port)
{
        return (port >= GMBUS_PORT_SSC && port <= GMBUS_PORT_DPD);
}

extern struct i2c_adapter *intel_gmbus_get_adapter(
                struct drm_i915_private *dev_priv, unsigned port);
extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
{
        return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
}
extern void intel_i2c_reset(struct drm_device *dev);

/* intel_opregion.c */
struct intel_encoder;
extern int intel_opregion_setup(struct drm_device *dev);
#ifdef CONFIG_ACPI
extern void intel_opregion_init(struct drm_device *dev);
extern void intel_opregion_fini(struct drm_device *dev);
extern void intel_opregion_asle_intr(struct drm_device *dev);
extern int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
                                         bool enable);
extern int intel_opregion_notify_adapter(struct drm_device *dev,
                                         pci_power_t state);
#else
static inline void intel_opregion_init(struct drm_device *dev) { return; }
static inline void intel_opregion_fini(struct drm_device *dev) { return; }
static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; }
static inline int
intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable)
{
        return 0;
}
static inline int
intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state)
{
        return 0;
}
#endif

/* intel_acpi.c */
#ifdef CONFIG_ACPI
extern void intel_register_dsm_handler(void);
extern void intel_unregister_dsm_handler(void);
#else
static inline void intel_register_dsm_handler(void) { return; }
static inline void intel_unregister_dsm_handler(void) { return; }
#endif /* CONFIG_ACPI */

/* modesetting */
extern void intel_modeset_init_hw(struct drm_device *dev);
extern void intel_modeset_suspend_hw(struct drm_device *dev);
extern void intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_gem_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
extern void intel_modeset_setup_hw_state(struct drm_device *dev,
                                         bool force_restore);
extern void i915_redisable_vga(struct drm_device *dev);
extern bool intel_fbc_enabled(struct drm_device *dev);
extern void intel_disable_fbc(struct drm_device *dev);
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
extern void intel_init_pch_refclk(struct drm_device *dev);
extern void gen6_set_rps(struct drm_device *dev, u8 val);
extern void valleyview_set_rps(struct drm_device *dev, u8 val);
extern int valleyview_rps_max_freq(struct drm_i915_private *dev_priv);
extern int valleyview_rps_min_freq(struct drm_i915_private *dev_priv);
extern void intel_detect_pch(struct drm_device *dev);
extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
extern int intel_enable_rc6(const struct drm_device *dev);

extern bool i915_semaphore_is_enabled(struct drm_device *dev);
int i915_reg_read_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file);

/* overlay */
extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
                                            struct intel_overlay_error_state *error);

extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev);
extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
                                            struct drm_device *dev,
                                            struct intel_display_error_state *error);
/*
 * On the SNB platform, the forcewake bit must be set before reading ring
 * registers, to prevent the GT core from powering down and returning
 * stale values.
 */
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
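
/*
 * Illustrative forcewake pattern (sketch only; RING_ACTHD() is the ring
 * head register macro from i915_reg.h): hold forcewake across a raw ring
 * register read so the GT cannot power down mid-access.
 *
 *      gen6_gt_force_wake_get(dev_priv);
 *      acthd = I915_READ(RING_ACTHD(ring->mmio_base));
 *      gen6_gt_force_wake_put(dev_priv);
 */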
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val);
int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val);

/* intel_sideband.c */
u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr);
void vlv_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val);
u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr);
u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg);
void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val);
u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
                   enum intel_sbi_destination destination);
void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
                     enum intel_sbi_destination destination);

int vlv_gpu_freq(int ddr_freq, int val);
int vlv_freq_opcode(int ddr_freq, int val);

#define I915_READ8(reg) dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true)
#define I915_WRITE8(reg, val) dev_priv->uncore.funcs.mmio_writeb(dev_priv, (reg), (val), true)

#define I915_READ16(reg) dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), true)
#define I915_WRITE16(reg, val) dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), true)
#define I915_READ16_NOTRACE(reg) dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), false)
#define I915_WRITE16_NOTRACE(reg, val) dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), false)

#define I915_READ(reg) dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), true)
#define I915_WRITE(reg, val) dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), true)
#define I915_READ_NOTRACE(reg) dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), false)
#define I915_WRITE_NOTRACE(reg, val) dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), false)

#define I915_WRITE64(reg, val) dev_priv->uncore.funcs.mmio_writeq(dev_priv, (reg), (val), true)
#define I915_READ64(reg) dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)

#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg)
#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg)
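
/*
 * Typical write-then-flush pattern with the accessors above (sketch
 * only; IMR and dev_priv->irq_mask are used as in i915_irq.c). The
 * POSTING_READ() forces the preceding write to be posted to the
 * hardware without generating a trace event.
 *
 *      I915_WRITE(IMR, dev_priv->irq_mask);
 *      POSTING_READ(IMR);
 */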
/* "Broadcast RGB" property */
#define INTEL_BROADCAST_RGB_AUTO 0
#define INTEL_BROADCAST_RGB_FULL 1
#define INTEL_BROADCAST_RGB_LIMITED 2

static inline uint32_t i915_vgacntrl_reg(struct drm_device *dev)
{
        if (HAS_PCH_SPLIT(dev))
                return CPU_VGACNTRL;
        else if (IS_VALLEYVIEW(dev))
                return VLV_VGACNTRL;
        else
                return VGACNTRL;
}

static inline void __user *to_user_ptr(u64 address)
{
        return (void __user *)(uintptr_t)address;
}
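
/*
 * Illustrative use (sketch only): ioctl structures carry user pointers
 * as u64 so the layout is identical for 32- and 64-bit userspace;
 * to_user_ptr() recovers a properly tagged pointer. data_ptr and size
 * stand in for fields of such an ioctl argument struct.
 *
 *      if (copy_from_user(buf, to_user_ptr(args->data_ptr), args->size))
 *              return -EFAULT;
 */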
static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
{
        unsigned long j = msecs_to_jiffies(m);

        return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
}
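
/*
 * Illustrative use (sketch only): the "+ 1" above guarantees a timeout
 * that cannot be rounded down to zero jiffies by the conversion, e.g.
 * when passed on to a core wait helper:
 *
 *      ret = wait_for_completion_timeout(&done,
 *                                        msecs_to_jiffies_timeout(10));
 */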
static inline unsigned long
timespec_to_jiffies_timeout(const struct timespec *value)
{
        unsigned long j = timespec_to_jiffies(value);

        return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
}

#endif