i915_drv.h
  1. /* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
  2. */
  3. /*
  4. *
  5. * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
  6. * All Rights Reserved.
  7. *
  8. * Permission is hereby granted, free of charge, to any person obtaining a
  9. * copy of this software and associated documentation files (the
  10. * "Software"), to deal in the Software without restriction, including
  11. * without limitation the rights to use, copy, modify, merge, publish,
  12. * distribute, sub license, and/or sell copies of the Software, and to
  13. * permit persons to whom the Software is furnished to do so, subject to
  14. * the following conditions:
  15. *
  16. * The above copyright notice and this permission notice (including the
  17. * next paragraph) shall be included in all copies or substantial portions
  18. * of the Software.
  19. *
  20. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
  21. * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  22. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
  23. * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
  24. * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
  25. * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
  26. * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  27. *
  28. */
  29. #ifndef _I915_DRV_H_
  30. #define _I915_DRV_H_
  31. #include <uapi/drm/i915_drm.h>
  32. #include "i915_reg.h"
  33. #include "intel_bios.h"
  34. #include "intel_ringbuffer.h"
  35. #include <linux/io-mapping.h>
  36. #include <linux/i2c.h>
  37. #include <linux/i2c-algo-bit.h>
  38. #include <drm/intel-gtt.h>
  39. #include <linux/backlight.h>
  40. #include <linux/intel-iommu.h>
  41. #include <linux/kref.h>
  42. #include <linux/pm_qos.h>
  43. /* General customization:
  44. */
  45. #define DRIVER_AUTHOR "Tungsten Graphics, Inc."
  46. #define DRIVER_NAME "i915"
  47. #define DRIVER_DESC "Intel Graphics"
  48. #define DRIVER_DATE "20080730"
  49. enum pipe {
  50. PIPE_A = 0,
  51. PIPE_B,
  52. PIPE_C,
  53. I915_MAX_PIPES
  54. };
  55. #define pipe_name(p) ((p) + 'A')
  56. enum transcoder {
  57. TRANSCODER_A = 0,
  58. TRANSCODER_B,
  59. TRANSCODER_C,
  60. TRANSCODER_EDP = 0xF,
  61. };
  62. #define transcoder_name(t) ((t) + 'A')
  63. enum plane {
  64. PLANE_A = 0,
  65. PLANE_B,
  66. PLANE_C,
  67. };
  68. #define plane_name(p) ((p) + 'A')
  69. #define sprite_name(p, s) ((p) * dev_priv->num_plane + (s) + 'A')
  70. enum port {
  71. PORT_A = 0,
  72. PORT_B,
  73. PORT_C,
  74. PORT_D,
  75. PORT_E,
  76. I915_MAX_PORTS
  77. };
  78. #define port_name(p) ((p) + 'A')
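/*
 * For example, the *_name() macros above just offset the enum value from
 * 'A', so PIPE_B prints as 'B' and PORT_D as 'D'.  A minimal, hypothetical
 * helper using them might look like this:
 */
static inline char i915_example_port_char(enum port port)
{
	/* hypothetical helper, for illustration only: PORT_C (2) yields 'C' */
	return port_name(port);
}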
  79. enum intel_display_power_domain {
  80. POWER_DOMAIN_PIPE_A,
  81. POWER_DOMAIN_PIPE_B,
  82. POWER_DOMAIN_PIPE_C,
  83. POWER_DOMAIN_PIPE_A_PANEL_FITTER,
  84. POWER_DOMAIN_PIPE_B_PANEL_FITTER,
  85. POWER_DOMAIN_PIPE_C_PANEL_FITTER,
  86. POWER_DOMAIN_TRANSCODER_A,
  87. POWER_DOMAIN_TRANSCODER_B,
  88. POWER_DOMAIN_TRANSCODER_C,
  89. POWER_DOMAIN_TRANSCODER_EDP = POWER_DOMAIN_TRANSCODER_A + 0xF,
  90. };
  91. #define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
  92. #define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
  93. ((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
  94. #define POWER_DOMAIN_TRANSCODER(tran) ((tran) + POWER_DOMAIN_TRANSCODER_A)
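/*
 * The conversion macros above rely on the pipe/transcoder enums and the
 * power domain enum being laid out in the same order, so
 * POWER_DOMAIN_PIPE(PIPE_B) evaluates to POWER_DOMAIN_PIPE_B.  A hypothetical
 * sketch of the mapping:
 */
static inline enum intel_display_power_domain
i915_example_pipe_power_domain(enum pipe pipe)
{
	/* hypothetical helper, for illustration only */
	return POWER_DOMAIN_PIPE(pipe);
}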
  95. enum hpd_pin {
  96. HPD_NONE = 0,
  97. HPD_PORT_A = HPD_NONE, /* PORT_A is internal */
  98. HPD_TV = HPD_NONE, /* TV is known to be unreliable */
  99. HPD_CRT,
  100. HPD_SDVO_B,
  101. HPD_SDVO_C,
  102. HPD_PORT_B,
  103. HPD_PORT_C,
  104. HPD_PORT_D,
  105. HPD_NUM_PINS
  106. };
  107. #define I915_GEM_GPU_DOMAINS \
  108. (I915_GEM_DOMAIN_RENDER | \
  109. I915_GEM_DOMAIN_SAMPLER | \
  110. I915_GEM_DOMAIN_COMMAND | \
  111. I915_GEM_DOMAIN_INSTRUCTION | \
  112. I915_GEM_DOMAIN_VERTEX)
  113. #define for_each_pipe(p) for ((p) = 0; (p) < INTEL_INFO(dev)->num_pipes; (p)++)
  114. #define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
  115. list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
  116. if ((intel_encoder)->base.crtc == (__crtc))
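/*
 * Usage sketch (hypothetical caller): for_each_pipe() expects a variable
 * named "dev" to be in scope, since it expands to a loop bounded by
 * INTEL_INFO(dev)->num_pipes (INTEL_INFO() is defined further down in this
 * header):
 *
 *	int pipe;
 *	for_each_pipe(pipe)
 *		DRM_DEBUG_KMS("checking pipe %c\n", pipe_name(pipe));
 */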
  117. struct drm_i915_private;
  118. enum intel_dpll_id {
  119. DPLL_ID_PRIVATE = -1, /* non-shared dpll in use */
  120. /* real shared dpll ids must be >= 0 */
  121. DPLL_ID_PCH_PLL_A,
  122. DPLL_ID_PCH_PLL_B,
  123. };
  124. #define I915_NUM_PLLS 2
  125. struct intel_dpll_hw_state {
  126. uint32_t dpll;
  127. uint32_t fp0;
  128. uint32_t fp1;
  129. };
  130. struct intel_shared_dpll {
  131. int refcount; /* count of number of CRTCs sharing this PLL */
  132. int active; /* count of number of active CRTCs (i.e. DPMS on) */
  133. bool on; /* is the PLL actually active? Disabled during modeset */
  134. const char *name;
  135. /* should match the index in the dev_priv->shared_dplls array */
  136. enum intel_dpll_id id;
  137. struct intel_dpll_hw_state hw_state;
  138. void (*enable)(struct drm_i915_private *dev_priv,
  139. struct intel_shared_dpll *pll);
  140. void (*disable)(struct drm_i915_private *dev_priv,
  141. struct intel_shared_dpll *pll);
  142. bool (*get_hw_state)(struct drm_i915_private *dev_priv,
  143. struct intel_shared_dpll *pll,
  144. struct intel_dpll_hw_state *hw_state);
  145. };
  146. /* Used by dp and fdi links */
  147. struct intel_link_m_n {
  148. uint32_t tu;
  149. uint32_t gmch_m;
  150. uint32_t gmch_n;
  151. uint32_t link_m;
  152. uint32_t link_n;
  153. };
  154. void intel_link_compute_m_n(int bpp, int nlanes,
  155. int pixel_clock, int link_clock,
  156. struct intel_link_m_n *m_n);
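/*
 * Worked example (a sketch of the ratios involved, based on the usual DP
 * link bandwidth relation rather than anything stated in this header): the
 * data M/N pair expresses roughly (pixel_clock * bpp) / (link_clock * nlanes * 8)
 * and the link M/N pair roughly pixel_clock / link_clock.  E.g. a 148500 kHz
 * mode at 24 bpp over 4 lanes at 270000 kHz gives a data ratio of
 * 148500 * 24 / (270000 * 4 * 8) ~= 0.41.
 */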
  157. struct intel_ddi_plls {
  158. int spll_refcount;
  159. int wrpll1_refcount;
  160. int wrpll2_refcount;
  161. };
  162. /* Interface history:
  163. *
  164. * 1.1: Original.
  165. * 1.2: Add Power Management
  166. * 1.3: Add vblank support
  167. * 1.4: Fix cmdbuffer path, add heap destroy
  168. * 1.5: Add vblank pipe configuration
  169. * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
  170. * - Support vertical blank on secondary display pipe
  171. */
  172. #define DRIVER_MAJOR 1
  173. #define DRIVER_MINOR 6
  174. #define DRIVER_PATCHLEVEL 0
  175. #define WATCH_COHERENCY 0
  176. #define WATCH_LISTS 0
  177. #define WATCH_GTT 0
  178. #define I915_GEM_PHYS_CURSOR_0 1
  179. #define I915_GEM_PHYS_CURSOR_1 2
  180. #define I915_GEM_PHYS_OVERLAY_REGS 3
  181. #define I915_MAX_PHYS_OBJECT (I915_GEM_PHYS_OVERLAY_REGS)
  182. struct drm_i915_gem_phys_object {
  183. int id;
  184. struct page **page_list;
  185. drm_dma_handle_t *handle;
  186. struct drm_i915_gem_object *cur_obj;
  187. };
  188. struct opregion_header;
  189. struct opregion_acpi;
  190. struct opregion_swsci;
  191. struct opregion_asle;
  192. struct intel_opregion {
  193. struct opregion_header __iomem *header;
  194. struct opregion_acpi __iomem *acpi;
  195. struct opregion_swsci __iomem *swsci;
  196. struct opregion_asle __iomem *asle;
  197. void __iomem *vbt;
  198. u32 __iomem *lid_state;
  199. };
  200. #define OPREGION_SIZE (8*1024)
  201. struct intel_overlay;
  202. struct intel_overlay_error_state;
  203. struct drm_i915_master_private {
  204. drm_local_map_t *sarea;
  205. struct _drm_i915_sarea *sarea_priv;
  206. };
  207. #define I915_FENCE_REG_NONE -1
  208. #define I915_MAX_NUM_FENCES 32
  209. /* 32 fences + sign bit for FENCE_REG_NONE */
  210. #define I915_MAX_NUM_FENCE_BITS 6
  211. struct drm_i915_fence_reg {
  212. struct list_head lru_list;
  213. struct drm_i915_gem_object *obj;
  214. int pin_count;
  215. };
  216. struct sdvo_device_mapping {
  217. u8 initialized;
  218. u8 dvo_port;
  219. u8 slave_addr;
  220. u8 dvo_wiring;
  221. u8 i2c_pin;
  222. u8 ddc_pin;
  223. };
  224. struct intel_display_error_state;
  225. struct drm_i915_error_state {
  226. struct kref ref;
  227. u32 eir;
  228. u32 pgtbl_er;
  229. u32 ier;
  230. u32 ccid;
  231. u32 derrmr;
  232. u32 forcewake;
  233. bool waiting[I915_NUM_RINGS];
  234. u32 pipestat[I915_MAX_PIPES];
  235. u32 tail[I915_NUM_RINGS];
  236. u32 head[I915_NUM_RINGS];
  237. u32 ctl[I915_NUM_RINGS];
  238. u32 ipeir[I915_NUM_RINGS];
  239. u32 ipehr[I915_NUM_RINGS];
  240. u32 instdone[I915_NUM_RINGS];
  241. u32 acthd[I915_NUM_RINGS];
  242. u32 semaphore_mboxes[I915_NUM_RINGS][I915_NUM_RINGS - 1];
  243. u32 semaphore_seqno[I915_NUM_RINGS][I915_NUM_RINGS - 1];
  244. u32 rc_psmi[I915_NUM_RINGS]; /* sleep state */
  245. /* our own tracking of ring head and tail */
  246. u32 cpu_ring_head[I915_NUM_RINGS];
  247. u32 cpu_ring_tail[I915_NUM_RINGS];
  248. u32 error; /* gen6+ */
  249. u32 err_int; /* gen7 */
  250. u32 instpm[I915_NUM_RINGS];
  251. u32 instps[I915_NUM_RINGS];
  252. u32 extra_instdone[I915_NUM_INSTDONE_REG];
  253. u32 seqno[I915_NUM_RINGS];
  254. u64 bbaddr;
  255. u32 fault_reg[I915_NUM_RINGS];
  256. u32 done_reg;
  257. u32 faddr[I915_NUM_RINGS];
  258. u64 fence[I915_MAX_NUM_FENCES];
  259. struct timeval time;
  260. struct drm_i915_error_ring {
  261. struct drm_i915_error_object {
  262. int page_count;
  263. u32 gtt_offset;
  264. u32 *pages[0];
  265. } *ringbuffer, *batchbuffer, *ctx;
  266. struct drm_i915_error_request {
  267. long jiffies;
  268. u32 seqno;
  269. u32 tail;
  270. } *requests;
  271. int num_requests;
  272. } ring[I915_NUM_RINGS];
  273. struct drm_i915_error_buffer {
  274. u32 size;
  275. u32 name;
  276. u32 rseqno, wseqno;
  277. u32 gtt_offset;
  278. u32 read_domains;
  279. u32 write_domain;
  280. s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
  281. s32 pinned:2;
  282. u32 tiling:2;
  283. u32 dirty:1;
  284. u32 purgeable:1;
  285. s32 ring:4;
  286. u32 cache_level:2;
  287. } *active_bo, *pinned_bo;
  288. u32 active_bo_count, pinned_bo_count;
  289. struct intel_overlay_error_state *overlay;
  290. struct intel_display_error_state *display;
  291. };
  292. struct intel_crtc_config;
  293. struct intel_crtc;
  294. struct intel_limit;
  295. struct dpll;
  296. struct drm_i915_display_funcs {
  297. bool (*fbc_enabled)(struct drm_device *dev);
  298. void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval);
  299. void (*disable_fbc)(struct drm_device *dev);
  300. int (*get_display_clock_speed)(struct drm_device *dev);
  301. int (*get_fifo_size)(struct drm_device *dev, int plane);
  302. /**
  303. * find_dpll() - Find the best values for the PLL
  304. * @limit: limits for the PLL
  305. * @crtc: current CRTC
  306. * @target: target frequency in kHz
  307. * @refclk: reference clock frequency in kHz
  308. * @match_clock: if provided, @best_clock P divider must
  309. * match the P divider from @match_clock
  310. * used for LVDS downclocking
  311. * @best_clock: best PLL values found
  312. *
  313. * Returns true on success, false on failure.
  314. */
  315. bool (*find_dpll)(const struct intel_limit *limit,
  316. struct drm_crtc *crtc,
  317. int target, int refclk,
  318. struct dpll *match_clock,
  319. struct dpll *best_clock);
  320. void (*update_wm)(struct drm_device *dev);
  321. void (*update_sprite_wm)(struct drm_device *dev, int pipe,
  322. uint32_t sprite_width, int pixel_size,
  323. bool enable);
  324. void (*modeset_global_resources)(struct drm_device *dev);
  325. /* Returns the active state of the crtc, and if the crtc is active,
  326. * fills out the pipe-config with the hw state. */
  327. bool (*get_pipe_config)(struct intel_crtc *,
  328. struct intel_crtc_config *);
  329. int (*crtc_mode_set)(struct drm_crtc *crtc,
  330. int x, int y,
  331. struct drm_framebuffer *old_fb);
  332. void (*crtc_enable)(struct drm_crtc *crtc);
  333. void (*crtc_disable)(struct drm_crtc *crtc);
  334. void (*off)(struct drm_crtc *crtc);
  335. void (*write_eld)(struct drm_connector *connector,
  336. struct drm_crtc *crtc);
  337. void (*fdi_link_train)(struct drm_crtc *crtc);
  338. void (*init_clock_gating)(struct drm_device *dev);
  339. int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
  340. struct drm_framebuffer *fb,
  341. struct drm_i915_gem_object *obj);
  342. int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
  343. int x, int y);
  344. void (*hpd_irq_setup)(struct drm_device *dev);
  345. /* clock updates for mode set */
  346. /* cursor updates */
  347. /* render clock increase/decrease */
  348. /* display clock increase/decrease */
  349. /* pll clock increase/decrease */
  350. };
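/*
 * Calling convention sketch (hypothetical caller): this struct is the
 * per-platform vtable stored in dev_priv->display, so generic code checks
 * for the optional hooks before calling through them, e.g.
 *
 *	struct drm_i915_private *dev_priv = dev->dev_private;
 *
 *	if (dev_priv->display.update_wm)
 *		dev_priv->display.update_wm(dev);
 */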
  351. struct drm_i915_gt_funcs {
  352. void (*force_wake_get)(struct drm_i915_private *dev_priv);
  353. void (*force_wake_put)(struct drm_i915_private *dev_priv);
  354. };
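/*
 * Usage sketch (hypothetical, using the dev_priv->gt member defined further
 * down): a forcewake reference is taken around register accesses that need
 * the GT awake and dropped afterwards:
 *
 *	dev_priv->gt.force_wake_get(dev_priv);
 *	... read/write GT registers ...
 *	dev_priv->gt.force_wake_put(dev_priv);
 */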
  355. #define DEV_INFO_FOR_EACH_FLAG(func, sep) \
  356. func(is_mobile) sep \
  357. func(is_i85x) sep \
  358. func(is_i915g) sep \
  359. func(is_i945gm) sep \
  360. func(is_g33) sep \
  361. func(need_gfx_hws) sep \
  362. func(is_g4x) sep \
  363. func(is_pineview) sep \
  364. func(is_broadwater) sep \
  365. func(is_crestline) sep \
  366. func(is_ivybridge) sep \
  367. func(is_valleyview) sep \
  368. func(is_haswell) sep \
  369. func(has_force_wake) sep \
  370. func(has_fbc) sep \
  371. func(has_pipe_cxsr) sep \
  372. func(has_hotplug) sep \
  373. func(cursor_needs_physical) sep \
  374. func(has_overlay) sep \
  375. func(overlay_needs_physical) sep \
  376. func(supports_tv) sep \
  377. func(has_bsd_ring) sep \
  378. func(has_blt_ring) sep \
  379. func(has_vebox_ring) sep \
  380. func(has_llc) sep \
  381. func(has_ddi) sep \
  382. func(has_fpga_dbg)
  383. #define DEFINE_FLAG(name) u8 name:1
  384. #define SEP_SEMICOLON ;
  385. struct intel_device_info {
  386. u32 display_mmio_offset;
  387. u8 num_pipes:3;
  388. u8 gen;
  389. DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
  390. };
  391. #undef DEFINE_FLAG
  392. #undef SEP_SEMICOLON
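/*
 * With DEFINE_FLAG and SEP_SEMICOLON plugged into DEV_INFO_FOR_EACH_FLAG,
 * the invocation inside struct intel_device_info expands into a run of
 * one-bit fields, roughly:
 *
 *	u8 is_mobile:1;
 *	u8 is_i85x:1;
 *	...
 *	u8 has_fpga_dbg:1;
 *
 * so each feature flag costs a single bit in struct intel_device_info.
 */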
  393. enum i915_cache_level {
  394. I915_CACHE_NONE = 0,
  395. I915_CACHE_LLC,
  396. I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */
  397. };
  398. typedef uint32_t gen6_gtt_pte_t;
  399. /* The Graphics Translation Table is the way in which GEN hardware translates a
  400. * Graphics Virtual Address into a Physical Address. In addition to the normal
401. * collateral associated with any va->pa translations, GEN hardware also has a
  402. * portion of the GTT which can be mapped by the CPU and remain both coherent
  403. * and correct (in cases like swizzling). That region is referred to as GMADR in
  404. * the spec.
  405. */
  406. struct i915_gtt {
  407. unsigned long start; /* Start offset of used GTT */
  408. size_t total; /* Total size GTT can map */
  409. size_t stolen_size; /* Total size of stolen memory */
  410. unsigned long mappable_end; /* End offset that we can CPU map */
  411. struct io_mapping *mappable; /* Mapping to our CPU mappable region */
  412. phys_addr_t mappable_base; /* PA of our GMADR */
  413. /** "Graphics Stolen Memory" holds the global PTEs */
  414. void __iomem *gsm;
  415. bool do_idle_maps;
  416. dma_addr_t scratch_page_dma;
  417. struct page *scratch_page;
  418. /* global gtt ops */
  419. int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total,
  420. size_t *stolen, phys_addr_t *mappable_base,
  421. unsigned long *mappable_end);
  422. void (*gtt_remove)(struct drm_device *dev);
  423. void (*gtt_clear_range)(struct drm_device *dev,
  424. unsigned int first_entry,
  425. unsigned int num_entries);
  426. void (*gtt_insert_entries)(struct drm_device *dev,
  427. struct sg_table *st,
  428. unsigned int pg_start,
  429. enum i915_cache_level cache_level);
  430. gen6_gtt_pte_t (*pte_encode)(struct drm_device *dev,
  431. dma_addr_t addr,
  432. enum i915_cache_level level);
  433. };
  434. #define gtt_total_entries(gtt) ((gtt).total >> PAGE_SHIFT)
  435. #define I915_PPGTT_PD_ENTRIES 512
  436. #define I915_PPGTT_PT_ENTRIES 1024
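/*
 * Size arithmetic sketch (assuming 4 KiB pages): gtt_total_entries() is just
 * the total GTT size divided by the page size, so a 2 GiB GTT has
 * 2 GiB / 4 KiB = 524288 PTEs.  Likewise a full gen6-style PPGTT covers
 * I915_PPGTT_PD_ENTRIES * I915_PPGTT_PT_ENTRIES * 4 KiB
 * = 512 * 1024 * 4 KiB = 2 GiB of GPU address space.
 */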
  437. struct i915_hw_ppgtt {
  438. struct drm_device *dev;
  439. unsigned num_pd_entries;
  440. struct page **pt_pages;
  441. uint32_t pd_offset;
  442. dma_addr_t *pt_dma_addr;
  443. dma_addr_t scratch_page_dma_addr;
  444. /* pte functions, mirroring the interface of the global gtt. */
  445. void (*clear_range)(struct i915_hw_ppgtt *ppgtt,
  446. unsigned int first_entry,
  447. unsigned int num_entries);
  448. void (*insert_entries)(struct i915_hw_ppgtt *ppgtt,
  449. struct sg_table *st,
  450. unsigned int pg_start,
  451. enum i915_cache_level cache_level);
  452. gen6_gtt_pte_t (*pte_encode)(struct drm_device *dev,
  453. dma_addr_t addr,
  454. enum i915_cache_level level);
  455. int (*enable)(struct drm_device *dev);
  456. void (*cleanup)(struct i915_hw_ppgtt *ppgtt);
  457. };
  458. struct i915_ctx_hang_stats {
  459. /* This context had batch pending when hang was declared */
  460. unsigned batch_pending;
  461. /* This context had batch active when hang was declared */
  462. unsigned batch_active;
  463. };
  464. /* This must match up with the value previously used for execbuf2.rsvd1. */
  465. #define DEFAULT_CONTEXT_ID 0
  466. struct i915_hw_context {
  467. struct kref ref;
  468. int id;
  469. bool is_initialized;
  470. struct drm_i915_file_private *file_priv;
  471. struct intel_ring_buffer *ring;
  472. struct drm_i915_gem_object *obj;
  473. struct i915_ctx_hang_stats hang_stats;
  474. };
  475. enum no_fbc_reason {
  476. FBC_NO_OUTPUT, /* no outputs enabled to compress */
  477. FBC_STOLEN_TOO_SMALL, /* not enough space to hold compressed buffers */
  478. FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
  479. FBC_MODE_TOO_LARGE, /* mode too large for compression */
  480. FBC_BAD_PLANE, /* fbc not supported on plane */
  481. FBC_NOT_TILED, /* buffer not tiled */
  482. FBC_MULTIPLE_PIPES, /* more than one pipe active */
  483. FBC_MODULE_PARAM,
  484. };
  485. enum intel_pch {
  486. PCH_NONE = 0, /* No PCH present */
  487. PCH_IBX, /* Ibexpeak PCH */
  488. PCH_CPT, /* Cougarpoint PCH */
  489. PCH_LPT, /* Lynxpoint PCH */
  490. PCH_NOP,
  491. };
  492. enum intel_sbi_destination {
  493. SBI_ICLK,
  494. SBI_MPHY,
  495. };
  496. #define QUIRK_PIPEA_FORCE (1<<0)
  497. #define QUIRK_LVDS_SSC_DISABLE (1<<1)
  498. #define QUIRK_INVERT_BRIGHTNESS (1<<2)
  499. #define QUIRK_NO_PCH_PWM_ENABLE (1<<3)
  500. struct intel_fbdev;
  501. struct intel_fbc_work;
  502. struct intel_gmbus {
  503. struct i2c_adapter adapter;
  504. u32 force_bit;
  505. u32 reg0;
  506. u32 gpio_reg;
  507. struct i2c_algo_bit_data bit_algo;
  508. struct drm_i915_private *dev_priv;
  509. };
  510. struct i915_suspend_saved_registers {
  511. u8 saveLBB;
  512. u32 saveDSPACNTR;
  513. u32 saveDSPBCNTR;
  514. u32 saveDSPARB;
  515. u32 savePIPEACONF;
  516. u32 savePIPEBCONF;
  517. u32 savePIPEASRC;
  518. u32 savePIPEBSRC;
  519. u32 saveFPA0;
  520. u32 saveFPA1;
  521. u32 saveDPLL_A;
  522. u32 saveDPLL_A_MD;
  523. u32 saveHTOTAL_A;
  524. u32 saveHBLANK_A;
  525. u32 saveHSYNC_A;
  526. u32 saveVTOTAL_A;
  527. u32 saveVBLANK_A;
  528. u32 saveVSYNC_A;
  529. u32 saveBCLRPAT_A;
  530. u32 saveTRANSACONF;
  531. u32 saveTRANS_HTOTAL_A;
  532. u32 saveTRANS_HBLANK_A;
  533. u32 saveTRANS_HSYNC_A;
  534. u32 saveTRANS_VTOTAL_A;
  535. u32 saveTRANS_VBLANK_A;
  536. u32 saveTRANS_VSYNC_A;
  537. u32 savePIPEASTAT;
  538. u32 saveDSPASTRIDE;
  539. u32 saveDSPASIZE;
  540. u32 saveDSPAPOS;
  541. u32 saveDSPAADDR;
  542. u32 saveDSPASURF;
  543. u32 saveDSPATILEOFF;
  544. u32 savePFIT_PGM_RATIOS;
  545. u32 saveBLC_HIST_CTL;
  546. u32 saveBLC_PWM_CTL;
  547. u32 saveBLC_PWM_CTL2;
  548. u32 saveBLC_CPU_PWM_CTL;
  549. u32 saveBLC_CPU_PWM_CTL2;
  550. u32 saveFPB0;
  551. u32 saveFPB1;
  552. u32 saveDPLL_B;
  553. u32 saveDPLL_B_MD;
  554. u32 saveHTOTAL_B;
  555. u32 saveHBLANK_B;
  556. u32 saveHSYNC_B;
  557. u32 saveVTOTAL_B;
  558. u32 saveVBLANK_B;
  559. u32 saveVSYNC_B;
  560. u32 saveBCLRPAT_B;
  561. u32 saveTRANSBCONF;
  562. u32 saveTRANS_HTOTAL_B;
  563. u32 saveTRANS_HBLANK_B;
  564. u32 saveTRANS_HSYNC_B;
  565. u32 saveTRANS_VTOTAL_B;
  566. u32 saveTRANS_VBLANK_B;
  567. u32 saveTRANS_VSYNC_B;
  568. u32 savePIPEBSTAT;
  569. u32 saveDSPBSTRIDE;
  570. u32 saveDSPBSIZE;
  571. u32 saveDSPBPOS;
  572. u32 saveDSPBADDR;
  573. u32 saveDSPBSURF;
  574. u32 saveDSPBTILEOFF;
  575. u32 saveVGA0;
  576. u32 saveVGA1;
  577. u32 saveVGA_PD;
  578. u32 saveVGACNTRL;
  579. u32 saveADPA;
  580. u32 saveLVDS;
  581. u32 savePP_ON_DELAYS;
  582. u32 savePP_OFF_DELAYS;
  583. u32 saveDVOA;
  584. u32 saveDVOB;
  585. u32 saveDVOC;
  586. u32 savePP_ON;
  587. u32 savePP_OFF;
  588. u32 savePP_CONTROL;
  589. u32 savePP_DIVISOR;
  590. u32 savePFIT_CONTROL;
  591. u32 save_palette_a[256];
  592. u32 save_palette_b[256];
  593. u32 saveDPFC_CB_BASE;
  594. u32 saveFBC_CFB_BASE;
  595. u32 saveFBC_LL_BASE;
  596. u32 saveFBC_CONTROL;
  597. u32 saveFBC_CONTROL2;
  598. u32 saveIER;
  599. u32 saveIIR;
  600. u32 saveIMR;
  601. u32 saveDEIER;
  602. u32 saveDEIMR;
  603. u32 saveGTIER;
  604. u32 saveGTIMR;
  605. u32 saveFDI_RXA_IMR;
  606. u32 saveFDI_RXB_IMR;
  607. u32 saveCACHE_MODE_0;
  608. u32 saveMI_ARB_STATE;
  609. u32 saveSWF0[16];
  610. u32 saveSWF1[16];
  611. u32 saveSWF2[3];
  612. u8 saveMSR;
  613. u8 saveSR[8];
  614. u8 saveGR[25];
  615. u8 saveAR_INDEX;
  616. u8 saveAR[21];
  617. u8 saveDACMASK;
  618. u8 saveCR[37];
  619. uint64_t saveFENCE[I915_MAX_NUM_FENCES];
  620. u32 saveCURACNTR;
  621. u32 saveCURAPOS;
  622. u32 saveCURABASE;
  623. u32 saveCURBCNTR;
  624. u32 saveCURBPOS;
  625. u32 saveCURBBASE;
  626. u32 saveCURSIZE;
  627. u32 saveDP_B;
  628. u32 saveDP_C;
  629. u32 saveDP_D;
  630. u32 savePIPEA_GMCH_DATA_M;
  631. u32 savePIPEB_GMCH_DATA_M;
  632. u32 savePIPEA_GMCH_DATA_N;
  633. u32 savePIPEB_GMCH_DATA_N;
  634. u32 savePIPEA_DP_LINK_M;
  635. u32 savePIPEB_DP_LINK_M;
  636. u32 savePIPEA_DP_LINK_N;
  637. u32 savePIPEB_DP_LINK_N;
  638. u32 saveFDI_RXA_CTL;
  639. u32 saveFDI_TXA_CTL;
  640. u32 saveFDI_RXB_CTL;
  641. u32 saveFDI_TXB_CTL;
  642. u32 savePFA_CTL_1;
  643. u32 savePFB_CTL_1;
  644. u32 savePFA_WIN_SZ;
  645. u32 savePFB_WIN_SZ;
  646. u32 savePFA_WIN_POS;
  647. u32 savePFB_WIN_POS;
  648. u32 savePCH_DREF_CONTROL;
  649. u32 saveDISP_ARB_CTL;
  650. u32 savePIPEA_DATA_M1;
  651. u32 savePIPEA_DATA_N1;
  652. u32 savePIPEA_LINK_M1;
  653. u32 savePIPEA_LINK_N1;
  654. u32 savePIPEB_DATA_M1;
  655. u32 savePIPEB_DATA_N1;
  656. u32 savePIPEB_LINK_M1;
  657. u32 savePIPEB_LINK_N1;
  658. u32 saveMCHBAR_RENDER_STANDBY;
  659. u32 savePCH_PORT_HOTPLUG;
  660. };
  661. struct intel_gen6_power_mgmt {
  662. struct work_struct work;
  663. struct delayed_work vlv_work;
  664. u32 pm_iir;
665. /* lock - irqsave spinlock that protects the work_struct and
666. * pm_iir. */
  667. spinlock_t lock;
668. /* The below variables and all the rps hw state are protected by
669. * dev->struct_mutex. */
  670. u8 cur_delay;
  671. u8 min_delay;
  672. u8 max_delay;
  673. u8 rpe_delay;
  674. u8 hw_max;
  675. struct delayed_work delayed_resume_work;
  676. /*
  677. * Protects RPS/RC6 register access and PCU communication.
  678. * Must be taken after struct_mutex if nested.
  679. */
  680. struct mutex hw_lock;
  681. };
682. /* defined in intel_pm.c */
  683. extern spinlock_t mchdev_lock;
  684. struct intel_ilk_power_mgmt {
  685. u8 cur_delay;
  686. u8 min_delay;
  687. u8 max_delay;
  688. u8 fmax;
  689. u8 fstart;
  690. u64 last_count1;
  691. unsigned long last_time1;
  692. unsigned long chipset_power;
  693. u64 last_count2;
  694. struct timespec last_time2;
  695. unsigned long gfx_power;
  696. u8 corr;
  697. int c_m;
  698. int r_t;
  699. struct drm_i915_gem_object *pwrctx;
  700. struct drm_i915_gem_object *renderctx;
  701. };
  702. /* Power well structure for haswell */
  703. struct i915_power_well {
  704. struct drm_device *device;
  705. spinlock_t lock;
  706. /* power well enable/disable usage count */
  707. int count;
  708. int i915_request;
  709. };
  710. struct i915_dri1_state {
  711. unsigned allow_batchbuffer : 1;
  712. u32 __iomem *gfx_hws_cpu_addr;
  713. unsigned int cpp;
  714. int back_offset;
  715. int front_offset;
  716. int current_page;
  717. int page_flipping;
  718. uint32_t counter;
  719. };
  720. struct intel_l3_parity {
  721. u32 *remap_info;
  722. struct work_struct error_work;
  723. };
  724. struct i915_gem_mm {
  725. /** Memory allocator for GTT stolen memory */
  726. struct drm_mm stolen;
  727. /** Memory allocator for GTT */
  728. struct drm_mm gtt_space;
  729. /** List of all objects in gtt_space. Used to restore gtt
  730. * mappings on resume */
  731. struct list_head bound_list;
  732. /**
  733. * List of objects which are not bound to the GTT (thus
  734. * are idle and not used by the GPU) but still have
  735. * (presumably uncached) pages still attached.
  736. */
  737. struct list_head unbound_list;
  738. /** Usable portion of the GTT for GEM */
  739. unsigned long stolen_base; /* limited to low memory (32-bit) */
  740. int gtt_mtrr;
  741. /** PPGTT used for aliasing the PPGTT with the GTT */
  742. struct i915_hw_ppgtt *aliasing_ppgtt;
  743. struct shrinker inactive_shrinker;
  744. bool shrinker_no_lock_stealing;
  745. /**
  746. * List of objects currently involved in rendering.
  747. *
  748. * Includes buffers having the contents of their GPU caches
  749. * flushed, not necessarily primitives. last_rendering_seqno
  750. * represents when the rendering involved will be completed.
  751. *
  752. * A reference is held on the buffer while on this list.
  753. */
  754. struct list_head active_list;
  755. /**
  756. * LRU list of objects which are not in the ringbuffer and
  757. * are ready to unbind, but are still in the GTT.
  758. *
  759. * last_rendering_seqno is 0 while an object is in this list.
  760. *
  761. * A reference is not held on the buffer while on this list,
  762. * as merely being GTT-bound shouldn't prevent its being
  763. * freed, and we'll pull it off the list in the free path.
  764. */
  765. struct list_head inactive_list;
  766. /** LRU list of objects with fence regs on them. */
  767. struct list_head fence_list;
  768. /**
  769. * We leave the user IRQ off as much as possible,
  770. * but this means that requests will finish and never
  771. * be retired once the system goes idle. Set a timer to
  772. * fire periodically while the ring is running. When it
  773. * fires, go retire requests.
  774. */
  775. struct delayed_work retire_work;
  776. /**
  777. * Are we in a non-interruptible section of code like
  778. * modesetting?
  779. */
  780. bool interruptible;
  781. /**
  782. * Flag if the X Server, and thus DRM, is not currently in
  783. * control of the device.
  784. *
  785. * This is set between LeaveVT and EnterVT. It needs to be
  786. * replaced with a semaphore. It also needs to be
  787. * transitioned away from for kernel modesetting.
  788. */
  789. int suspended;
  790. /** Bit 6 swizzling required for X tiling */
  791. uint32_t bit_6_swizzle_x;
  792. /** Bit 6 swizzling required for Y tiling */
  793. uint32_t bit_6_swizzle_y;
  794. /* storage for physical objects */
  795. struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
  796. /* accounting, useful for userland debugging */
  797. size_t object_memory;
  798. u32 object_count;
  799. };
  800. struct drm_i915_error_state_buf {
  801. unsigned bytes;
  802. unsigned size;
  803. int err;
  804. u8 *buf;
  805. loff_t start;
  806. loff_t pos;
  807. };
  808. struct i915_gpu_error {
  809. /* For hangcheck timer */
  810. #define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
  811. #define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
  812. struct timer_list hangcheck_timer;
  813. /* For reset and error_state handling. */
  814. spinlock_t lock;
  815. /* Protected by the above dev->gpu_error.lock. */
  816. struct drm_i915_error_state *first_error;
  817. struct work_struct work;
  818. unsigned long last_reset;
  819. /**
  820. * State variable and reset counter controlling the reset flow
  821. *
  822. * Upper bits are for the reset counter. This counter is used by the
823. * wait_seqno code to notice, in a race-free manner, that a reset event happened and
  824. * that it needs to restart the entire ioctl (since most likely the
  825. * seqno it waited for won't ever signal anytime soon).
  826. *
  827. * This is important for lock-free wait paths, where no contended lock
  828. * naturally enforces the correct ordering between the bail-out of the
  829. * waiter and the gpu reset work code.
  830. *
  831. * Lowest bit controls the reset state machine: Set means a reset is in
  832. * progress. This state will (presuming we don't have any bugs) decay
  833. * into either unset (successful reset) or the special WEDGED value (hw
  834. * terminally sour). All waiters on the reset_queue will be woken when
  835. * that happens.
  836. */
  837. atomic_t reset_counter;
  838. /**
  839. * Special values/flags for reset_counter
  840. *
  841. * Note that the code relies on
  842. * I915_WEDGED & I915_RESET_IN_PROGRESS_FLAG
  843. * being true.
  844. */
  845. #define I915_RESET_IN_PROGRESS_FLAG 1
  846. #define I915_WEDGED 0xffffffff
  847. /**
  848. * Waitqueue to signal when the reset has completed. Used by clients
  849. * that wait for dev_priv->mm.wedged to settle.
  850. */
  851. wait_queue_head_t reset_queue;
  852. /* For gpu hang simulation. */
  853. unsigned int stop_rings;
  854. };
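/*
 * A minimal sketch of how the reset_counter flag bits above are meant to be
 * consumed by waiters (hypothetical helpers, written here only to illustrate
 * the encoding):
 */
static inline bool i915_example_reset_in_progress(struct i915_gpu_error *error)
{
	/* hypothetical helper: lowest bit set means a reset is underway */
	return atomic_read(&error->reset_counter) & I915_RESET_IN_PROGRESS_FLAG;
}

static inline bool i915_example_wedged(struct i915_gpu_error *error)
{
	/* hypothetical helper: the special all-ones value means the hw is gone */
	return atomic_read(&error->reset_counter) == I915_WEDGED;
}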
  855. enum modeset_restore {
  856. MODESET_ON_LID_OPEN,
  857. MODESET_DONE,
  858. MODESET_SUSPENDED,
  859. };
  860. struct intel_vbt_data {
  861. struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
  862. struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
  863. /* Feature bits */
  864. unsigned int int_tv_support:1;
  865. unsigned int lvds_dither:1;
  866. unsigned int lvds_vbt:1;
  867. unsigned int int_crt_support:1;
  868. unsigned int lvds_use_ssc:1;
  869. unsigned int display_clock_mode:1;
  870. unsigned int fdi_rx_polarity_inverted:1;
  871. int lvds_ssc_freq;
  872. unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
  873. /* eDP */
  874. int edp_rate;
  875. int edp_lanes;
  876. int edp_preemphasis;
  877. int edp_vswing;
  878. bool edp_initialized;
  879. bool edp_support;
  880. int edp_bpp;
  881. struct edp_power_seq edp_pps;
  882. int crt_ddc_pin;
  883. int child_dev_num;
  884. struct child_device_config *child_dev;
  885. };
  886. typedef struct drm_i915_private {
  887. struct drm_device *dev;
  888. struct kmem_cache *slab;
  889. const struct intel_device_info *info;
  890. int relative_constants_mode;
  891. void __iomem *regs;
  892. struct drm_i915_gt_funcs gt;
  893. /** gt_fifo_count and the subsequent register write are synchronized
  894. * with dev->struct_mutex. */
  895. unsigned gt_fifo_count;
  896. /** forcewake_count is protected by gt_lock */
  897. unsigned forcewake_count;
  898. /** gt_lock is also taken in irq contexts. */
  899. spinlock_t gt_lock;
  900. struct intel_gmbus gmbus[GMBUS_NUM_PORTS];
  901. /** gmbus_mutex protects against concurrent usage of the single hw gmbus
  902. * controller on different i2c buses. */
  903. struct mutex gmbus_mutex;
  904. /**
  905. * Base address of the gmbus and gpio block.
  906. */
  907. uint32_t gpio_mmio_base;
  908. wait_queue_head_t gmbus_wait_queue;
  909. struct pci_dev *bridge_dev;
  910. struct intel_ring_buffer ring[I915_NUM_RINGS];
  911. uint32_t last_seqno, next_seqno;
  912. drm_dma_handle_t *status_page_dmah;
  913. struct resource mch_res;
  914. atomic_t irq_received;
  915. /* protects the irq masks */
  916. spinlock_t irq_lock;
  917. /* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
  918. struct pm_qos_request pm_qos;
  919. /* DPIO indirect register protection */
  920. struct mutex dpio_lock;
  921. /** Cached value of IMR to avoid reads in updating the bitfield */
  922. u32 irq_mask;
  923. u32 gt_irq_mask;
  924. struct work_struct hotplug_work;
  925. bool enable_hotplug_processing;
  926. struct {
  927. unsigned long hpd_last_jiffies;
  928. int hpd_cnt;
  929. enum {
  930. HPD_ENABLED = 0,
  931. HPD_DISABLED = 1,
  932. HPD_MARK_DISABLED = 2
  933. } hpd_mark;
  934. } hpd_stats[HPD_NUM_PINS];
  935. u32 hpd_event_bits;
  936. struct timer_list hotplug_reenable_timer;
  937. int num_plane;
  938. unsigned long cfb_size;
  939. unsigned int cfb_fb;
  940. enum plane cfb_plane;
  941. int cfb_y;
  942. struct intel_fbc_work *fbc_work;
  943. struct intel_opregion opregion;
  944. struct intel_vbt_data vbt;
  945. /* overlay */
  946. struct intel_overlay *overlay;
  947. unsigned int sprite_scaling_enabled;
  948. /* backlight */
  949. struct {
  950. int level;
  951. bool enabled;
  952. spinlock_t lock; /* bl registers and the above bl fields */
  953. struct backlight_device *device;
  954. } backlight;
  955. /* LVDS info */
  956. struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
  957. struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
  958. bool no_aux_handshake;
  959. struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
  960. int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
  961. int num_fence_regs; /* 8 on pre-965, 16 otherwise */
  962. unsigned int fsb_freq, mem_freq, is_ddr3;
  963. struct workqueue_struct *wq;
  964. /* Display functions */
  965. struct drm_i915_display_funcs display;
  966. /* PCH chipset type */
  967. enum intel_pch pch_type;
  968. unsigned short pch_id;
  969. unsigned long quirks;
  970. enum modeset_restore modeset_restore;
  971. struct mutex modeset_restore_lock;
  972. struct i915_gtt gtt;
  973. struct i915_gem_mm mm;
  974. /* Kernel Modesetting */
  975. struct sdvo_device_mapping sdvo_mappings[2];
  976. struct drm_crtc *plane_to_crtc_mapping[3];
  977. struct drm_crtc *pipe_to_crtc_mapping[3];
  978. wait_queue_head_t pending_flip_queue;
  979. int num_shared_dpll;
  980. struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
  981. struct intel_ddi_plls ddi_plls;
  982. /* Reclocking support */
  983. bool render_reclock_avail;
  984. bool lvds_downclock_avail;
985. /* indicates the reduced downclock for LVDS */
  986. int lvds_downclock;
  987. u16 orig_clock;
  988. bool mchbar_need_disable;
  989. struct intel_l3_parity l3_parity;
  990. /* gen6+ rps state */
  991. struct intel_gen6_power_mgmt rps;
  992. /* ilk-only ips/rps state. Everything in here is protected by the global
  993. * mchdev_lock in intel_pm.c */
  994. struct intel_ilk_power_mgmt ips;
  995. /* Haswell power well */
  996. struct i915_power_well power_well;
  997. enum no_fbc_reason no_fbc_reason;
  998. struct drm_mm_node *compressed_fb;
  999. struct drm_mm_node *compressed_llb;
  1000. struct i915_gpu_error gpu_error;
  1001. struct drm_i915_gem_object *vlv_pctx;
  1002. /* list of fbdev register on this device */
  1003. struct intel_fbdev *fbdev;
  1004. /*
1005. * The console may be contended at resume, but we don't
1006. * want resume to block on it.
  1007. */
  1008. struct work_struct console_resume_work;
  1009. struct drm_property *broadcast_rgb_property;
  1010. struct drm_property *force_audio_property;
  1011. bool hw_contexts_disabled;
  1012. uint32_t hw_context_size;
  1013. u32 fdi_rx_config;
  1014. struct i915_suspend_saved_registers regfile;
  1015. /* Old dri1 support infrastructure, beware the dragons ya fools entering
  1016. * here! */
  1017. struct i915_dri1_state dri1;
  1018. } drm_i915_private_t;
  1019. /* Iterate over initialised rings */
  1020. #define for_each_ring(ring__, dev_priv__, i__) \
  1021. for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
  1022. if (((ring__) = &(dev_priv__)->ring[(i__)]), intel_ring_initialized((ring__)))
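/*
 * Usage sketch (hypothetical helper): for_each_ring() walks only the rings
 * that were actually initialised for this device, so a caller can simply do:
 */
static inline u32 i915_example_count_rings(drm_i915_private_t *dev_priv)
{
	/* hypothetical helper, for illustration only */
	struct intel_ring_buffer *ring;
	u32 count = 0;
	int i;

	for_each_ring(ring, dev_priv, i)
		count++;

	return count;
}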
  1023. enum hdmi_force_audio {
  1024. HDMI_AUDIO_OFF_DVI = -2, /* no aux data for HDMI-DVI converter */
  1025. HDMI_AUDIO_OFF, /* force turn off HDMI audio */
  1026. HDMI_AUDIO_AUTO, /* trust EDID */
  1027. HDMI_AUDIO_ON, /* force turn on HDMI audio */
  1028. };
  1029. #define I915_GTT_RESERVED ((struct drm_mm_node *)0x1)
  1030. struct drm_i915_gem_object_ops {
  1031. /* Interface between the GEM object and its backing storage.
  1032. * get_pages() is called once prior to the use of the associated set
1033. * of pages, before binding them into the GTT, and put_pages() is
  1034. * called after we no longer need them. As we expect there to be
  1035. * associated cost with migrating pages between the backing storage
  1036. * and making them available for the GPU (e.g. clflush), we may hold
  1037. * onto the pages after they are no longer referenced by the GPU
  1038. * in case they may be used again shortly (for example migrating the
  1039. * pages to a different memory domain within the GTT). put_pages()
  1040. * will therefore most likely be called when the object itself is
  1041. * being released or under memory pressure (where we attempt to
  1042. * reap pages for the shrinker).
  1043. */
  1044. int (*get_pages)(struct drm_i915_gem_object *);
  1045. void (*put_pages)(struct drm_i915_gem_object *);
  1046. };
  1047. struct drm_i915_gem_object {
  1048. struct drm_gem_object base;
  1049. const struct drm_i915_gem_object_ops *ops;
  1050. /** Current space allocated to this object in the GTT, if any. */
  1051. struct drm_mm_node *gtt_space;
  1052. /** Stolen memory for this object, instead of being backed by shmem. */
  1053. struct drm_mm_node *stolen;
  1054. struct list_head global_list;
  1055. /** This object's place on the active/inactive lists */
  1056. struct list_head ring_list;
  1057. struct list_head mm_list;
  1058. /** This object's place in the batchbuffer or on the eviction list */
  1059. struct list_head exec_list;
  1060. /**
  1061. * This is set if the object is on the active lists (has pending
1062. * rendering and so a non-zero seqno), and is not set if it is on
  1063. * inactive (ready to be unbound) list.
  1064. */
  1065. unsigned int active:1;
  1066. /**
  1067. * This is set if the object has been written to since last bound
  1068. * to the GTT
  1069. */
  1070. unsigned int dirty:1;
  1071. /**
  1072. * Fence register bits (if any) for this object. Will be set
  1073. * as needed when mapped into the GTT.
  1074. * Protected by dev->struct_mutex.
  1075. */
  1076. signed int fence_reg:I915_MAX_NUM_FENCE_BITS;
  1077. /**
  1078. * Advice: are the backing pages purgeable?
  1079. */
  1080. unsigned int madv:2;
  1081. /**
  1082. * Current tiling mode for the object.
  1083. */
  1084. unsigned int tiling_mode:2;
  1085. /**
  1086. * Whether the tiling parameters for the currently associated fence
  1087. * register have changed. Note that for the purposes of tracking
  1088. * tiling changes we also treat the unfenced register, the register
  1089. * slot that the object occupies whilst it executes a fenced
  1090. * command (such as BLT on gen2/3), as a "fence".
  1091. */
  1092. unsigned int fence_dirty:1;
  1093. /** How many users have pinned this object in GTT space. The following
  1094. * users can each hold at most one reference: pwrite/pread, pin_ioctl
  1095. * (via user_pin_count), execbuffer (objects are not allowed multiple
  1096. * times for the same batchbuffer), and the framebuffer code. When
  1097. * switching/pageflipping, the framebuffer code has at most two buffers
  1098. * pinned per crtc.
  1099. *
  1100. * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
  1101. * bits with absolutely no headroom. So use 4 bits. */
  1102. unsigned int pin_count:4;
  1103. #define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
  1104. /**
  1105. * Is the object at the current location in the gtt mappable and
  1106. * fenceable? Used to avoid costly recalculations.
  1107. */
  1108. unsigned int map_and_fenceable:1;
  1109. /**
  1110. * Whether the current gtt mapping needs to be mappable (and isn't just
1111. * mappable by accident). Track pin and fault separately for a more
  1112. * accurate mappable working set.
  1113. */
  1114. unsigned int fault_mappable:1;
  1115. unsigned int pin_mappable:1;
  1116. /*
1117. * Is the GPU currently using a fence to access this buffer?
  1118. */
  1119. unsigned int pending_fenced_gpu_access:1;
  1120. unsigned int fenced_gpu_access:1;
  1121. unsigned int cache_level:2;
  1122. unsigned int has_aliasing_ppgtt_mapping:1;
  1123. unsigned int has_global_gtt_mapping:1;
  1124. unsigned int has_dma_mapping:1;
  1125. struct sg_table *pages;
  1126. int pages_pin_count;
  1127. /* prime dma-buf support */
  1128. void *dma_buf_vmapping;
  1129. int vmapping_count;
  1130. /**
  1131. * Used for performing relocations during execbuffer insertion.
  1132. */
  1133. struct hlist_node exec_node;
  1134. unsigned long exec_handle;
  1135. struct drm_i915_gem_exec_object2 *exec_entry;
  1136. /**
  1137. * Current offset of the object in GTT space.
  1138. *
  1139. * This is the same as gtt_space->start
  1140. */
  1141. uint32_t gtt_offset;
  1142. struct intel_ring_buffer *ring;
  1143. /** Breadcrumb of last rendering to the buffer. */
  1144. uint32_t last_read_seqno;
  1145. uint32_t last_write_seqno;
  1146. /** Breadcrumb of last fenced GPU access to the buffer. */
  1147. uint32_t last_fenced_seqno;
  1148. /** Current tiling stride for the object, if it's tiled. */
  1149. uint32_t stride;
  1150. /** Record of address bit 17 of each page at last unbind. */
  1151. unsigned long *bit_17;
  1152. /** User space pin count and filp owning the pin */
  1153. uint32_t user_pin_count;
  1154. struct drm_file *pin_filp;
  1155. /** for phy allocated objects */
  1156. struct drm_i915_gem_phys_object *phys_obj;
  1157. };
  1158. #define to_gem_object(obj) (&((struct drm_i915_gem_object *)(obj))->base)
  1159. #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
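/*
 * Conversion sketch (hypothetical helper): to_intel_bo() is just
 * container_of() on the embedded base GEM object, so going from a
 * struct drm_gem_object seen by the DRM core back to our object is:
 */
static inline struct drm_i915_gem_object *
i915_example_lookup_bo(struct drm_gem_object *gem_obj)
{
	/* hypothetical helper, for illustration only */
	return to_intel_bo(gem_obj);
}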
  1160. /**
  1161. * Request queue structure.
  1162. *
  1163. * The request queue allows us to note sequence numbers that have been emitted
  1164. * and may be associated with active buffers to be retired.
  1165. *
  1166. * By keeping this list, we can avoid having to do questionable
  1167. * sequence-number comparisons on buffer last_rendering_seqnos, and associate
  1168. * an emission time with seqnos for tracking how far ahead of the GPU we are.
  1169. */
  1170. struct drm_i915_gem_request {
1171. /** On which ring this request was generated */
  1172. struct intel_ring_buffer *ring;
  1173. /** GEM sequence number associated with this request. */
  1174. uint32_t seqno;
  1175. /** Position in the ringbuffer of the start of the request */
  1176. u32 head;
  1177. /** Position in the ringbuffer of the end of the request */
  1178. u32 tail;
  1179. /** Context related to this request */
  1180. struct i915_hw_context *ctx;
  1181. /** Batch buffer related to this request if any */
  1182. struct drm_i915_gem_object *batch_obj;
  1183. /** Time at which this request was emitted, in jiffies. */
  1184. unsigned long emitted_jiffies;
  1185. /** global list entry for this request */
  1186. struct list_head list;
  1187. struct drm_i915_file_private *file_priv;
  1188. /** file_priv list entry for this request */
  1189. struct list_head client_list;
  1190. };
  1191. struct drm_i915_file_private {
  1192. struct {
  1193. spinlock_t lock;
  1194. struct list_head request_list;
  1195. } mm;
  1196. struct idr context_idr;
  1197. struct i915_ctx_hang_stats hang_stats;
  1198. };
  1199. #define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info)
  1200. #define IS_I830(dev) ((dev)->pci_device == 0x3577)
  1201. #define IS_845G(dev) ((dev)->pci_device == 0x2562)
  1202. #define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x)
  1203. #define IS_I865G(dev) ((dev)->pci_device == 0x2572)
  1204. #define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
  1205. #define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
  1206. #define IS_I945G(dev) ((dev)->pci_device == 0x2772)
  1207. #define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm)
  1208. #define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater)
  1209. #define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline)
  1210. #define IS_GM45(dev) ((dev)->pci_device == 0x2A42)
  1211. #define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x)
  1212. #define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001)
  1213. #define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011)
  1214. #define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview)
  1215. #define IS_G33(dev) (INTEL_INFO(dev)->is_g33)
  1216. #define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042)
  1217. #define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
  1218. #define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge)
  1219. #define IS_IVB_GT1(dev) ((dev)->pci_device == 0x0156 || \
  1220. (dev)->pci_device == 0x0152 || \
  1221. (dev)->pci_device == 0x015a)
  1222. #define IS_SNB_GT1(dev) ((dev)->pci_device == 0x0102 || \
  1223. (dev)->pci_device == 0x0106 || \
  1224. (dev)->pci_device == 0x010A)
  1225. #define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview)
  1226. #define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
  1227. #define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
  1228. #define IS_ULT(dev) (IS_HASWELL(dev) && \
  1229. ((dev)->pci_device & 0xFF00) == 0x0A00)
  1230. /*
  1231. * The genX designation typically refers to the render engine, so render
  1232. * capability related checks should use IS_GEN, while display and other checks
  1233. * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular
  1234. * chips, etc.).
  1235. */
  1236. #define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2)
  1237. #define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3)
  1238. #define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4)
  1239. #define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5)
  1240. #define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6)
  1241. #define IS_GEN7(dev) (INTEL_INFO(dev)->gen == 7)
  1242. #define HAS_BSD(dev) (INTEL_INFO(dev)->has_bsd_ring)
  1243. #define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring)
  1244. #define HAS_VEBOX(dev) (INTEL_INFO(dev)->has_vebox_ring)
  1245. #define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc)
  1246. #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
  1247. #define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6)
  1248. #define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >=6 && !IS_VALLEYVIEW(dev))
  1249. #define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay)
  1250. #define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical)
  1251. /* Early gen2 have a totally busted CS tlb and require pinned batches. */
  1252. #define HAS_BROKEN_CS_TLB(dev) (IS_I830(dev) || IS_845G(dev))
  1253. /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
  1254. * rows, which changed the alignment requirements and fence programming.
  1255. */
  1256. #define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
  1257. IS_I915GM(dev)))
  1258. #define SUPPORTS_DIGITAL_OUTPUTS(dev) (!IS_GEN2(dev) && !IS_PINEVIEW(dev))
  1259. #define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_GEN5(dev))
  1260. #define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_GEN5(dev))
  1261. #define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev))
  1262. #define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv)
  1263. #define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)
  1264. /* dsparb controlled by hw only */
  1265. #define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
  1266. #define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
  1267. #define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
  1268. #define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
  1269. #define HAS_IPS(dev) (IS_ULT(dev))
  1270. #define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5)
  1271. #define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi)
  1272. #define HAS_POWER_WELL(dev) (IS_HASWELL(dev))
  1273. #define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg)
  1274. #define INTEL_PCH_DEVICE_ID_MASK 0xff00
  1275. #define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
  1276. #define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
  1277. #define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
  1278. #define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00
  1279. #define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00
  1280. #define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
  1281. #define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
  1282. #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
  1283. #define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
  1284. #define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP)
  1285. #define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE)
  1286. #define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake)
  1287. #define HAS_L3_GPU_CACHE(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
  1288. #define GT_FREQUENCY_MULTIPLIER 50
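/*
 * The RPS delay values in intel_gen6_power_mgmt above are expressed in
 * hardware units of 50 MHz, so multiplying by GT_FREQUENCY_MULTIPLIER
 * converts them to MHz: e.g. a cur_delay of 22 corresponds to
 * 22 * 50 = 1100 MHz (a sketch of the unit conversion only).
 */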
  1289. #include "i915_trace.h"
  1290. /**
1291. * RC6 is a special power stage which allows the GPU to enter a very
1292. * low-voltage mode when idle, using down to 0V while at this stage. This
1293. * stage is entered automatically when the GPU is idle and RC6 support is
1294. * enabled, and as soon as a new workload arises the GPU wakes up automatically as well.
  1295. *
1296. * There are different RC6 modes available on Intel GPUs, which differ from
1297. * each other in the latency required to enter and leave RC6 and in the
1298. * voltage consumed by the GPU in different states.
  1299. *
  1300. * The combination of the following flags define which states GPU is allowed
  1301. * to enter, while RC6 is the normal RC6 state, RC6p is the deep RC6, and
  1302. * RC6pp is deepest RC6. Their support by hardware varies according to the
  1303. * GPU, BIOS, chipset and platform. RC6 is usually the safest one and the one
  1304. * which brings the most power savings; deeper states save more power, but
  1305. * require higher latency to switch to and wake up.
  1306. */
  1307. #define INTEL_RC6_ENABLE (1<<0)
  1308. #define INTEL_RC6p_ENABLE (1<<1)
  1309. #define INTEL_RC6pp_ENABLE (1<<2)
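/*
 * Illustrative sketch (editorial note, not part of the original header): the
 * flags above are meant to be OR'd into a single mask describing which RC6
 * states may be entered. The snippet below only demonstrates combining and
 * testing the bits; enable_deep_rc6() is a hypothetical helper used for
 * illustration only.
 *
 *	int rc6_mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE;
 *
 *	if (rc6_mask & INTEL_RC6p_ENABLE)
 *		enable_deep_rc6();	(hypothetical helper)
 */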
extern struct drm_ioctl_desc i915_ioctls[];
extern int i915_max_ioctl;
extern unsigned int i915_fbpercrtc __always_unused;
extern int i915_panel_ignore_lid __read_mostly;
extern unsigned int i915_powersave __read_mostly;
extern int i915_semaphores __read_mostly;
extern unsigned int i915_lvds_downclock __read_mostly;
extern int i915_lvds_channel_mode __read_mostly;
extern int i915_panel_use_ssc __read_mostly;
extern int i915_vbt_sdvo_panel_type __read_mostly;
extern int i915_enable_rc6 __read_mostly;
extern int i915_enable_fbc __read_mostly;
extern bool i915_enable_hangcheck __read_mostly;
extern int i915_enable_ppgtt __read_mostly;
extern unsigned int i915_preliminary_hw_support __read_mostly;
extern int i915_disable_power_well __read_mostly;
extern int i915_enable_ips __read_mostly;

extern int i915_suspend(struct drm_device *dev, pm_message_t state);
extern int i915_resume(struct drm_device *dev);
extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);

/* i915_dma.c */
void i915_update_dri1_breadcrumb(struct drm_device *dev);
extern void i915_kernel_lost_context(struct drm_device *dev);
extern int i915_driver_load(struct drm_device *, unsigned long flags);
extern int i915_driver_unload(struct drm_device *);
extern int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv);
extern void i915_driver_lastclose(struct drm_device *dev);
extern void i915_driver_preclose(struct drm_device *dev,
				 struct drm_file *file_priv);
extern void i915_driver_postclose(struct drm_device *dev,
				  struct drm_file *file_priv);
extern int i915_driver_device_is_agp(struct drm_device *dev);
#ifdef CONFIG_COMPAT
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
			      unsigned long arg);
#endif
extern int i915_emit_box(struct drm_device *dev,
			 struct drm_clip_rect *box,
			 int DR1, int DR4);
extern int intel_gpu_reset(struct drm_device *dev);
extern int i915_reset(struct drm_device *dev);
extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);

extern void intel_console_resume(struct work_struct *work);
/* i915_irq.c */
void i915_hangcheck_elapsed(unsigned long data);
void i915_handle_error(struct drm_device *dev, bool wedged);

extern void intel_irq_init(struct drm_device *dev);
extern void intel_hpd_init(struct drm_device *dev);
extern void intel_gt_init(struct drm_device *dev);
extern void intel_gt_sanitize(struct drm_device *dev);

void i915_error_state_free(struct kref *error_ref);

void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);

void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);

#ifdef CONFIG_DEBUG_FS
extern void i915_destroy_error_state(struct drm_device *dev);
#else
#define i915_destroy_error_state(x)
#endif
/* i915_gem.c */
int i915_gem_init_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_create_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv);
int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv);
int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv);
int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
int i915_gem_execbuffer(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_execbuffer2(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int i915_gem_pin_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv);
int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file);
int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file);
int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv);
int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv);
int i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv);
int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv);
int i915_gem_set_tiling(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_get_tiling(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
void i915_gem_load(struct drm_device *dev);
void *i915_gem_object_alloc(struct drm_device *dev);
void i915_gem_object_free(struct drm_i915_gem_object *obj);
int i915_gem_init_object(struct drm_gem_object *obj);
void i915_gem_object_init(struct drm_i915_gem_object *obj,
			  const struct drm_i915_gem_object_ops *ops);
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
						  size_t size);
void i915_gem_free_object(struct drm_gem_object *obj);
int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
				     uint32_t alignment,
				     bool map_and_fenceable,
				     bool nonblocking);
void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj);
int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
void i915_gem_lastclose(struct drm_device *dev);

int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
{
	struct sg_page_iter sg_iter;

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, n)
		return sg_page_iter_page(&sg_iter);

	return NULL;
}

static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
	BUG_ON(obj->pages == NULL);
	obj->pages_pin_count++;
}

static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
	BUG_ON(obj->pages_pin_count == 0);
	obj->pages_pin_count--;
}
int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
int i915_gem_object_sync(struct drm_i915_gem_object *obj,
			 struct intel_ring_buffer *to);
void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
				    struct intel_ring_buffer *ring);

int i915_gem_dumb_create(struct drm_file *file_priv,
			 struct drm_device *dev,
			 struct drm_mode_create_dumb *args);
int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
		      uint32_t handle, uint64_t *offset);
int i915_gem_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev,
			  uint32_t handle);
/**
 * Returns true if seq1 is later than seq2.
 */
static inline bool
i915_seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}
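/*
 * Worked example (editorial note, not in the original header): the signed
 * subtraction makes the comparison robust against 32-bit seqno wraparound.
 * With seq1 = 0x00000002 and seq2 = 0xfffffffd, seq1 - seq2 = 0x00000005,
 * which is >= 0 as int32_t, so seq1 is correctly treated as "later" even
 * though it is numerically smaller. Conversely, with seq1 = 0xfffffffd and
 * seq2 = 0x00000002 the difference is 0xfffffffb (-5 as int32_t), so the
 * function returns false.
 */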
int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);

static inline bool
i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
{
	if (obj->fence_reg != I915_FENCE_REG_NONE) {
		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
		dev_priv->fence_regs[obj->fence_reg].pin_count++;
		return true;
	} else
		return false;
}

static inline void
i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
{
	if (obj->fence_reg != I915_FENCE_REG_NONE) {
		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
		WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
		dev_priv->fence_regs[obj->fence_reg].pin_count--;
	}
}
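/*
 * Usage sketch (editorial note, not in the original header): pin_fence and
 * unpin_fence are meant to bracket code that relies on the object's fence
 * register staying programmed, e.g.:
 *
 *	if (i915_gem_object_pin_fence(obj)) {
 *		... issue work that depends on the fence ...
 *		i915_gem_object_unpin_fence(obj);
 *	}
 *
 * pin_fence() returns false when the object currently has no fence register,
 * in which case no matching unpin is required.
 */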
void i915_gem_retire_requests(struct drm_device *dev);
void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
				      bool interruptible);

static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
{
	return unlikely(atomic_read(&error->reset_counter)
			& I915_RESET_IN_PROGRESS_FLAG);
}

static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
{
	return atomic_read(&error->reset_counter) == I915_WEDGED;
}

void i915_gem_reset(struct drm_device *dev);
void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj,
					    uint32_t read_domains,
					    uint32_t write_domain);
int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
int __must_check i915_gem_init(struct drm_device *dev);
int __must_check i915_gem_init_hw(struct drm_device *dev);
void i915_gem_l3_remap(struct drm_device *dev);
void i915_gem_init_swizzling(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
int __must_check i915_gpu_idle(struct drm_device *dev);
int __must_check i915_gem_idle(struct drm_device *dev);
int __i915_add_request(struct intel_ring_buffer *ring,
		       struct drm_file *file,
		       struct drm_i915_gem_object *batch_obj,
		       u32 *seqno);
#define i915_add_request(ring, seqno) \
	__i915_add_request(ring, NULL, NULL, seqno)
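/*
 * Note (editorial, not in the original header): i915_add_request() is a
 * convenience wrapper around __i915_add_request() for callers that have no
 * originating file or batch object to associate with the request; it simply
 * passes NULL for both.
 */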
int __must_check i915_wait_seqno(struct intel_ring_buffer *ring,
				 uint32_t seqno);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
				  bool write);
int __must_check
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
				     u32 alignment,
				     struct intel_ring_buffer *pipelined);
int i915_gem_attach_phys_object(struct drm_device *dev,
				struct drm_i915_gem_object *obj,
				int id,
				int align);
void i915_gem_detach_phys_object(struct drm_device *dev,
				 struct drm_i915_gem_object *obj);
void i915_gem_free_all_phys_object(struct drm_device *dev);
void i915_gem_release(struct drm_device *dev, struct drm_file *file);

uint32_t
i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode);
uint32_t
i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
			   int tiling_mode, bool fenced);

int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
				    enum i915_cache_level cache_level);

struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf);

struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
				      struct drm_gem_object *gem_obj, int flags);

void i915_gem_restore_fences(struct drm_device *dev);
/* i915_gem_context.c */
void i915_gem_context_init(struct drm_device *dev);
void i915_gem_context_fini(struct drm_device *dev);
void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
int i915_switch_context(struct intel_ring_buffer *ring,
			struct drm_file *file, int to_id);
void i915_gem_context_free(struct kref *ctx_ref);

static inline void i915_gem_context_reference(struct i915_hw_context *ctx)
{
	kref_get(&ctx->ref);
}

static inline void i915_gem_context_unreference(struct i915_hw_context *ctx)
{
	kref_put(&ctx->ref, i915_gem_context_free);
}

struct i915_ctx_hang_stats * __must_check
i915_gem_context_get_hang_stats(struct intel_ring_buffer *ring,
				struct drm_file *file,
				u32 id);
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file);
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file);
/* i915_gem_gtt.c */
void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev);
void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
			    struct drm_i915_gem_object *obj,
			    enum i915_cache_level cache_level);
void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
			      struct drm_i915_gem_object *obj);

void i915_gem_restore_gtt_mappings(struct drm_device *dev);
int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
			      enum i915_cache_level cache_level);
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
void i915_gem_init_global_gtt(struct drm_device *dev);
void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
			       unsigned long mappable_end, unsigned long end);
int i915_gem_gtt_init(struct drm_device *dev);

static inline void i915_gem_chipset_flush(struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen < 6)
		intel_gtt_chipset_flush();
}
/* i915_gem_evict.c */
int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size,
					  unsigned alignment,
					  unsigned cache_level,
					  bool mappable,
					  bool nonblock);
int i915_gem_evict_everything(struct drm_device *dev);

/* i915_gem_stolen.c */
int i915_gem_init_stolen(struct drm_device *dev);
int i915_gem_stolen_setup_compression(struct drm_device *dev, int size);
void i915_gem_stolen_cleanup_compression(struct drm_device *dev);
void i915_gem_cleanup_stolen(struct drm_device *dev);
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_device *dev, u32 size);
struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
					       u32 stolen_offset,
					       u32 gtt_offset,
					       u32 size);
void i915_gem_object_release_stolen(struct drm_i915_gem_object *obj);
/* i915_gem_tiling.c */
static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
	drm_i915_private_t *dev_priv = obj->base.dev->dev_private;

	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
		obj->tiling_mode != I915_TILING_NONE;
}

void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
/* i915_gem_debug.c */
void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
			  const char *where, uint32_t mark);
#if WATCH_LISTS
int i915_verify_lists(struct drm_device *dev);
#else
#define i915_verify_lists(dev) 0
#endif
void i915_gem_object_check_coherency(struct drm_i915_gem_object *obj,
				     int handle);
/* i915_debugfs.c */
int i915_debugfs_init(struct drm_minor *minor);
void i915_debugfs_cleanup(struct drm_minor *minor);
__printf(2, 3)
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);

/* i915_suspend.c */
extern int i915_save_state(struct drm_device *dev);
extern int i915_restore_state(struct drm_device *dev);

/* i915_ums.c */
void i915_save_display_reg(struct drm_device *dev);
void i915_restore_display_reg(struct drm_device *dev);

/* i915_sysfs.c */
void i915_setup_sysfs(struct drm_device *dev_priv);
void i915_teardown_sysfs(struct drm_device *dev_priv);

/* intel_i2c.c */
extern int intel_setup_gmbus(struct drm_device *dev);
extern void intel_teardown_gmbus(struct drm_device *dev);
static inline bool intel_gmbus_is_port_valid(unsigned port)
{
	return (port >= GMBUS_PORT_SSC && port <= GMBUS_PORT_DPD);
}

extern struct i2c_adapter *intel_gmbus_get_adapter(
		struct drm_i915_private *dev_priv, unsigned port);
extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
{
	return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
}
extern void intel_i2c_reset(struct drm_device *dev);
/* intel_opregion.c */
extern int intel_opregion_setup(struct drm_device *dev);
#ifdef CONFIG_ACPI
extern void intel_opregion_init(struct drm_device *dev);
extern void intel_opregion_fini(struct drm_device *dev);
extern void intel_opregion_asle_intr(struct drm_device *dev);
#else
static inline void intel_opregion_init(struct drm_device *dev) { return; }
static inline void intel_opregion_fini(struct drm_device *dev) { return; }
static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; }
#endif

/* intel_acpi.c */
#ifdef CONFIG_ACPI
extern void intel_register_dsm_handler(void);
extern void intel_unregister_dsm_handler(void);
#else
static inline void intel_register_dsm_handler(void) { return; }
static inline void intel_unregister_dsm_handler(void) { return; }
#endif /* CONFIG_ACPI */
/* modesetting */
extern void intel_modeset_init_hw(struct drm_device *dev);
extern void intel_modeset_suspend_hw(struct drm_device *dev);
extern void intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_gem_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
extern void intel_modeset_setup_hw_state(struct drm_device *dev,
					 bool force_restore);
extern void i915_redisable_vga(struct drm_device *dev);
extern bool intel_fbc_enabled(struct drm_device *dev);
extern void intel_disable_fbc(struct drm_device *dev);
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
extern void intel_init_pch_refclk(struct drm_device *dev);
extern void gen6_set_rps(struct drm_device *dev, u8 val);
extern void valleyview_set_rps(struct drm_device *dev, u8 val);
extern int valleyview_rps_max_freq(struct drm_i915_private *dev_priv);
extern int valleyview_rps_min_freq(struct drm_i915_private *dev_priv);
extern void intel_detect_pch(struct drm_device *dev);
extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
extern int intel_enable_rc6(const struct drm_device *dev);

extern bool i915_semaphore_is_enabled(struct drm_device *dev);
int i915_reg_read_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file);

/* overlay */
#ifdef CONFIG_DEBUG_FS
extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
					    struct intel_overlay_error_state *error);

extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev);
extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
					    struct drm_device *dev,
					    struct intel_display_error_state *error);
#endif
/* On SNB platforms, the forcewake bit must be set before reading ring
 * registers, to keep the GT core from powering down and returning stale
 * values.
 */
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
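/*
 * Usage sketch (editorial note, not in the original header): the get/put
 * calls are expected to bracket the register accesses they protect, e.g.:
 *
 *	gen6_gt_force_wake_get(dev_priv);
 *	... read the ring registers of interest ...
 *	gen6_gt_force_wake_put(dev_priv);
 */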
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val);
int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val);

/* intel_sideband.c */
u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr);
void vlv_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val);
u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr);
u32 vlv_dpio_read(struct drm_i915_private *dev_priv, int reg);
void vlv_dpio_write(struct drm_i915_private *dev_priv, int reg, u32 val);
u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
		   enum intel_sbi_destination destination);
void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
		     enum intel_sbi_destination destination);

int vlv_gpu_freq(int ddr_freq, int val);
int vlv_freq_opcode(int ddr_freq, int val);
#define __i915_read(x, y) \
	u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);

__i915_read(8, b)
__i915_read(16, w)
__i915_read(32, l)
__i915_read(64, q)
#undef __i915_read

#define __i915_write(x, y) \
	void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val);

__i915_write(8, b)
__i915_write(16, w)
__i915_write(32, l)
__i915_write(64, q)
#undef __i915_write

#define I915_READ8(reg) i915_read8(dev_priv, (reg))
#define I915_WRITE8(reg, val) i915_write8(dev_priv, (reg), (val))

#define I915_READ16(reg) i915_read16(dev_priv, (reg))
#define I915_WRITE16(reg, val) i915_write16(dev_priv, (reg), (val))
#define I915_READ16_NOTRACE(reg) readw(dev_priv->regs + (reg))
#define I915_WRITE16_NOTRACE(reg, val) writew(val, dev_priv->regs + (reg))

#define I915_READ(reg) i915_read32(dev_priv, (reg))
#define I915_WRITE(reg, val) i915_write32(dev_priv, (reg), (val))
#define I915_READ_NOTRACE(reg) readl(dev_priv->regs + (reg))
#define I915_WRITE_NOTRACE(reg, val) writel(val, dev_priv->regs + (reg))

#define I915_WRITE64(reg, val) i915_write64(dev_priv, (reg), (val))
#define I915_READ64(reg) i915_read64(dev_priv, (reg))

#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg)
#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg)
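/*
 * Usage sketch (editorial note, not in the original header): these macros
 * assume a local "dev_priv" pointer is in scope. A typical write-then-flush
 * sequence looks like:
 *
 *	I915_WRITE(SOME_REG, value);	(SOME_REG is a placeholder register)
 *	POSTING_READ(SOME_REG);		(read back to flush the posted write)
 *
 * The _NOTRACE variants access the mapped register space directly and bypass
 * the i915_read32()/i915_write32() helpers, so they generate no tracepoints.
 */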
/* "Broadcast RGB" property */
#define INTEL_BROADCAST_RGB_AUTO 0
#define INTEL_BROADCAST_RGB_FULL 1
#define INTEL_BROADCAST_RGB_LIMITED 2

static inline uint32_t i915_vgacntrl_reg(struct drm_device *dev)
{
	if (HAS_PCH_SPLIT(dev))
		return CPU_VGACNTRL;
	else if (IS_VALLEYVIEW(dev))
		return VLV_VGACNTRL;
	else
		return VGACNTRL;
}
static inline void __user *to_user_ptr(u64 address)
{
	return (void __user *)(uintptr_t)address;
}
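/*
 * Editorial note (not in the original header): the two timeout helpers below
 * add one jiffy on top of the converted value. Our understanding is that this
 * guards against the current jiffy expiring almost immediately after the call,
 * so callers are guaranteed to wait at least the requested duration, while
 * MAX_JIFFY_OFFSET caps the result so the +1 cannot exceed the valid range.
 */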
static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
{
	unsigned long j = msecs_to_jiffies(m);

	return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
}

static inline unsigned long
timespec_to_jiffies_timeout(const struct timespec *value)
{
	unsigned long j = timespec_to_jiffies(value);

	return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
}

#endif