/* vmwgfx_drv.h */
  1. /**************************************************************************
  2. *
  3. * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
  4. * All Rights Reserved.
  5. *
  6. * Permission is hereby granted, free of charge, to any person obtaining a
  7. * copy of this software and associated documentation files (the
  8. * "Software"), to deal in the Software without restriction, including
  9. * without limitation the rights to use, copy, modify, merge, publish,
  10. * distribute, sub license, and/or sell copies of the Software, and to
  11. * permit persons to whom the Software is furnished to do so, subject to
  12. * the following conditions:
  13. *
  14. * The above copyright notice and this permission notice (including the
  15. * next paragraph) shall be included in all copies or substantial portions
  16. * of the Software.
  17. *
  18. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  19. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  20. * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  21. * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  22. * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  23. * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  24. * USE OR OTHER DEALINGS IN THE SOFTWARE.
  25. *
  26. **************************************************************************/
  27. #ifndef _VMWGFX_DRV_H_
  28. #define _VMWGFX_DRV_H_
  29. #include "vmwgfx_reg.h"
  30. #include <drm/drmP.h>
  31. #include <drm/vmwgfx_drm.h>
  32. #include <drm/drm_hashtab.h>
  33. #include <linux/suspend.h>
  34. #include <drm/ttm/ttm_bo_driver.h>
  35. #include <drm/ttm/ttm_object.h>
  36. #include <drm/ttm/ttm_lock.h>
  37. #include <drm/ttm/ttm_execbuf_util.h>
  38. #include <drm/ttm/ttm_module.h>
  39. #include "vmwgfx_fence.h"
/* Driver version information exported via the DRM driver structure. */
#define VMWGFX_DRIVER_DATE "20120209"
#define VMWGFX_DRIVER_MAJOR 2
#define VMWGFX_DRIVER_MINOR 4
#define VMWGFX_DRIVER_PATCHLEVEL 0
/* Start of the mmap()-able buffer-object address space for user space. */
#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
/* Fixed-size relocation / validation arrays in struct vmw_sw_context. */
#define VMWGFX_MAX_RELOCATIONS 2048
#define VMWGFX_MAX_VALIDATIONS 2048
#define VMWGFX_MAX_DISPLAYS 16
/* Initial size of the execbuf command bounce buffer; grown on demand. */
#define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768
/* Map the driver's GMR memory type onto TTM's first private placement. */
#define VMW_PL_GMR TTM_PL_PRIV0
#define VMW_PL_FLAG_GMR TTM_PL_FLAG_PRIV0
/* TTM object types used for the driver's user-space visible resources. */
#define VMW_RES_CONTEXT ttm_driver_type0
#define VMW_RES_SURFACE ttm_driver_type1
#define VMW_RES_STREAM ttm_driver_type2
#define VMW_RES_FENCE ttm_driver_type3
/**
 * struct vmw_fpriv - Per-file driver-private data.
 *
 * @locked_master: The master this file holds locked, if any.
 * @tfile: TTM object file providing this file's object name space.
 * @fence_events: List head for fence events pending on this file.
 */
struct vmw_fpriv {
	struct drm_master *locked_master;
	struct ttm_object_file *tfile;
	struct list_head fence_events;
};
/**
 * struct vmw_dma_buffer - TTM buffer object with driver bookkeeping.
 *
 * @base: The embedded TTM buffer object.
 * @res_list: List head; presumably links resources that use this buffer
 *            as their backup store (see struct vmw_resource::mob_head) —
 *            confirm against vmwgfx_resource.c.
 */
struct vmw_dma_buffer {
	struct ttm_buffer_object base;
	struct list_head res_list;
};
/**
 * struct vmw_validate_buffer - Carries validation info about buffers.
 *
 * @base: Validation info for TTM.
 * @hash: Hash entry for quick lookup of the TTM buffer object.
 *
 * This structure contains also driver private validation info
 * on top of the info needed by TTM.
 */
struct vmw_validate_buffer {
	struct ttm_validate_buffer base;
	struct drm_hash_item hash;
};
struct vmw_res_func;

/**
 * struct vmw_resource - Base class for hardware resources.
 *
 * @kref: Reference count.
 * @dev_priv: Pointer back to the owning device.
 * @id: Device id of the resource, or -1 if not yet assigned.
 * @avail: Whether the resource is visible to user-space lookups.
 * @backup_size: Size of the backup buffer needed, in bytes.
 * @backup: Backup buffer holding the resource contents when evicted.
 * @backup_offset: Offset of this resource's data within @backup.
 * @func: Type-specific resource operations.
 * @res_free: Destructor releasing the memory of this object.
 * @hw_destroy: Hook that destroys the hardware side of the resource.
 */
struct vmw_resource {
	struct kref kref;
	struct vmw_private *dev_priv;
	int id;
	bool avail;
	unsigned long backup_size;
	bool res_dirty; /* Protected by backup buffer reserved */
	bool backup_dirty; /* Protected by backup buffer reserved */
	struct vmw_dma_buffer *backup;
	unsigned long backup_offset;
	const struct vmw_res_func *func;
	struct list_head lru_head; /* Protected by the resource lock */
	struct list_head mob_head; /* Protected by @backup reserved */
	void (*res_free) (struct vmw_resource *res);
	void (*hw_destroy) (struct vmw_resource *res);
};
/*
 * Resource types managed by the driver. Also used to index the
 * per-type idr and lru arrays in struct vmw_private, so vmw_res_max
 * must remain the last entry.
 */
enum vmw_res_type {
	vmw_res_context,
	vmw_res_surface,
	vmw_res_stream,
	vmw_res_max
};
/**
 * struct vmw_cursor_snooper - State for snooping cursor surface updates.
 *
 * @crtc: CRTC whose cursor is being snooped.
 * @age: Generation counter of the snooped image.
 * @image: Copy of the snooped cursor image data.
 */
struct vmw_cursor_snooper {
	struct drm_crtc *crtc;
	size_t age;
	uint32_t *image;
};
struct vmw_framebuffer;
struct vmw_surface_offset;

/**
 * struct vmw_surface - Surface resource.
 *
 * @res: The embedded base resource.
 * @flags: SVGA3D surface flags.
 * @format: SVGA3D surface format.
 * @mip_levels: Number of mip levels per face.
 * @base_size: Size of the base (largest) mip level.
 * @sizes: Array of per-mip-level sizes, @num_sizes entries long.
 * @scanout: Whether this surface may be used for scanout.
 * @snooper: Cursor snooping state for this surface.
 * @offsets: Per-mip-level byte offsets into the backing store.
 * @autogen_filter: Filter used for auto-generated mip levels.
 * @multisample_count: Multisample count of the surface.
 */
struct vmw_surface {
	struct vmw_resource res;
	uint32_t flags;
	uint32_t format;
	uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
	struct drm_vmw_size base_size;
	struct drm_vmw_size *sizes;
	uint32_t num_sizes;
	bool scanout;
	/* TODO so far just an extra pointer */
	struct vmw_cursor_snooper snooper;
	struct vmw_surface_offset *offsets;
	SVGA3dTextureFilter autogen_filter;
	uint32_t multisample_count;
};
/**
 * struct vmw_marker_queue - Queue of submitted markers used for throttling.
 *
 * @head: List of outstanding markers.
 * @lag: Current estimated device lag.
 * @lag_time: Time at which @lag was last updated.
 * @lock: Protects all members of this struct.
 */
struct vmw_marker_queue {
	struct list_head head;
	struct timespec lag;
	struct timespec lag_time;
	spinlock_t lock;
};
/**
 * struct vmw_fifo_state - Software state of the device command FIFO.
 *
 * @reserved_size: Number of bytes currently reserved but not committed.
 * @dynamic_buffer: Heap bounce buffer for reservations too large for
 *                  @static_buffer.
 * @static_buffer: Preallocated bounce buffer of @static_buffer_size bytes.
 * @using_bounce_buffer: Whether the current reservation uses a bounce
 *                       buffer rather than FIFO memory directly.
 * @capabilities: FIFO capability bits read from the device.
 * @fifo_mutex: Serializes FIFO reserve/commit state changes.
 * @rwsem: Semaphore guarding FIFO access.
 * @marker_queue: Markers used for lag estimation / throttling.
 */
struct vmw_fifo_state {
	unsigned long reserved_size;
	__le32 *dynamic_buffer;
	__le32 *static_buffer;
	unsigned long static_buffer_size;
	bool using_bounce_buffer;
	uint32_t capabilities;
	struct mutex fifo_mutex;
	struct rw_semaphore rwsem;
	struct vmw_marker_queue marker_queue;
};
/**
 * struct vmw_relocation - Buffer-object relocation in a command stream.
 *
 * @location: Pointer to the guest pointer to patch in the command buffer.
 * @index: Index into the validation buffer array identifying the buffer
 *         whose address should be patched in.
 */
struct vmw_relocation {
	SVGAGuestPtr *location;
	uint32_t index;
};
/**
 * struct vmw_res_cache_entry - resource information cache entry
 *
 * @valid: Whether the entry is valid, which also implies that the execbuf
 * code holds a reference to the resource, and it's placed on the
 * validation list.
 * @handle: User-space handle of a resource.
 * @res: Non-ref-counted pointer to the resource.
 * @node: Validation-list node for @res, set up by the execbuf code.
 *
 * Used to avoid frequent repeated user-space handle lookups of the
 * same resource.
 */
struct vmw_res_cache_entry {
	bool valid;
	uint32_t handle;
	struct vmw_resource *res;
	struct vmw_resource_val_node *node;
};
/**
 * enum vmw_dma_map_mode - indicate how to perform TTM page dma mappings.
 *
 * vmw_dma_map_max is a sentinel and must remain the last entry.
 */
enum vmw_dma_map_mode {
	vmw_dma_phys,           /* Use physical page addresses */
	vmw_dma_alloc_coherent, /* Use TTM coherent pages */
	vmw_dma_map_populate,   /* Unmap from DMA just after unpopulate */
	vmw_dma_map_bind,       /* Unmap from DMA just before unbind */
	vmw_dma_map_max
};
/**
 * struct vmw_sg_table - Scatter/gather table for binding, with additional
 * device-specific information.
 *
 * @mode: DMA mapping mode in effect; determines which of @pages, @addrs
 * and @sgt is used.
 * @pages: Array of page pointers backing the buffer.
 * @addrs: Array of DMA addresses, used for coherent pages.
 * @sgt: Pointer to a struct sg_table with binding information
 * @num_regions: Number of regions with device-address contiguous pages
 * @num_pages: Total number of pages described by this table.
 */
struct vmw_sg_table {
	enum vmw_dma_map_mode mode;
	struct page **pages;
	const dma_addr_t *addrs;
	struct sg_table *sgt;
	unsigned long num_regions;
	unsigned long num_pages;
};
/**
 * struct vmw_piter - Page iterator that iterates over a list of pages
 * and DMA addresses that could be either a scatter-gather list or
 * arrays
 *
 * @pages: Array of page pointers to the pages.
 * @addrs: DMA addresses to the pages if coherent pages are used.
 * @iter: Scatter-gather page iterator. Current position in SG list.
 * @i: Current position in arrays.
 * @num_pages: Number of pages total.
 * @next: Function to advance the iterator. Returns false if past the list
 * of pages, true otherwise.
 * @dma_address: Function to return the DMA address of the current page.
 * @page: Function to return a pointer to the current page.
 */
struct vmw_piter {
	struct page **pages;
	const dma_addr_t *addrs;
	struct sg_page_iter iter;
	unsigned long i;
	unsigned long num_pages;
	bool (*next)(struct vmw_piter *);
	dma_addr_t (*dma_address)(struct vmw_piter *);
	struct page *(*page)(struct vmw_piter *);
};
/**
 * struct vmw_sw_context - Per-device command submission (execbuf) state.
 *
 * All members are protected by the cmdbuf mutex of the owning
 * struct vmw_private (see its "Execbuf" section).
 */
struct vmw_sw_context{
	struct drm_open_hash res_ht;
	bool res_ht_initialized;
	bool kernel; /**< is the call made from the kernel */
	struct ttm_object_file *tfile;
	struct list_head validate_nodes;
	/* Buffer-object relocations collected while parsing commands. */
	struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS];
	uint32_t cur_reloc;
	/* Buffers to validate before command submission. */
	struct vmw_validate_buffer val_bufs[VMWGFX_MAX_VALIDATIONS];
	uint32_t cur_val_buf;
	/* Bounce buffer for copying in user-space command streams. */
	uint32_t *cmd_bounce;
	uint32_t cmd_bounce_size;
	struct list_head resource_list;
	uint32_t fence_flags;
	struct ttm_buffer_object *cur_query_bo;
	struct list_head res_relocations;
	uint32_t *buf_start;
	/* Per-type cache to avoid repeated handle lookups. */
	struct vmw_res_cache_entry res_cache[vmw_res_max];
	struct vmw_resource *last_query_ctx;
	bool needs_post_query_barrier;
	struct vmw_resource *error_resource;
};
struct vmw_legacy_display;
struct vmw_overlay;

/**
 * struct vmw_master - Per-master driver-private data.
 *
 * @lock: TTM lock held by this master.
 * @fb_surf_mutex: Protects @fb_surf.
 * @fb_surf: List of framebuffer surfaces belonging to this master.
 */
struct vmw_master {
	struct ttm_lock lock;
	struct mutex fb_surf_mutex;
	struct list_head fb_surf;
};
/**
 * struct vmw_vga_topology_state - Saved per-display VGA topology.
 *
 * @width: Display width in pixels.
 * @height: Display height in pixels.
 * @primary: Whether this display is the primary one.
 * @pos_x: Horizontal position of the display in the topology.
 * @pos_y: Vertical position of the display in the topology.
 */
struct vmw_vga_topology_state {
	uint32_t width;
	uint32_t height;
	uint32_t primary;
	uint32_t pos_x;
	uint32_t pos_y;
};
/**
 * struct vmw_private - Per-device driver-private data.
 *
 * Embeds the TTM buffer-object device, FIFO state and execbuf context,
 * and caches device limits read at load time. Locking rules for
 * individual members are noted inline where they are not obvious.
 */
struct vmw_private {
	struct ttm_bo_device bdev;
	struct ttm_bo_global_ref bo_global_ref;
	struct drm_global_reference mem_global_ref;

	struct vmw_fifo_state fifo;

	struct drm_device *dev;
	unsigned long vmw_chipset;
	/* Base of the index/value register I/O port pair (vmw_read/vmw_write). */
	unsigned int io_start;
	uint32_t vram_start;
	uint32_t vram_size;
	uint32_t mmio_start;
	uint32_t mmio_size;
	uint32_t fb_max_width;
	uint32_t fb_max_height;
	uint32_t initial_width;
	uint32_t initial_height;
	__le32 __iomem *mmio_virt;
	int mmio_mtrr;
	uint32_t capabilities;
	uint32_t max_gmr_descriptors;
	uint32_t max_gmr_ids;
	uint32_t max_gmr_pages;
	uint32_t memory_size;
	bool has_gmr;
	struct mutex hw_mutex;

	/*
	 * VGA registers.
	 */

	struct vmw_vga_topology_state vga_save[VMWGFX_MAX_DISPLAYS];
	uint32_t vga_width;
	uint32_t vga_height;
	uint32_t vga_bpp;
	uint32_t vga_bpl;
	uint32_t vga_pitchlock;

	uint32_t num_displays;

	/*
	 * Framebuffer info.
	 */

	void *fb_info;
	struct vmw_legacy_display *ldu_priv;
	struct vmw_screen_object_display *sou_priv;
	struct vmw_overlay *overlay_priv;

	/*
	 * Context and surface management.
	 */

	rwlock_t resource_lock;
	/* One id space per resource type; indexed by enum vmw_res_type. */
	struct idr res_idr[vmw_res_max];
	/*
	 * Block lastclose from racing with firstopen.
	 */

	struct mutex init_mutex;

	/*
	 * A resource manager for kernel-only surfaces and
	 * contexts.
	 */

	struct ttm_object_device *tdev;

	/*
	 * Fencing and IRQs.
	 */

	atomic_t marker_seq;
	wait_queue_head_t fence_queue;
	wait_queue_head_t fifo_queue;
	int fence_queue_waiters; /* Protected by hw_mutex */
	int goal_queue_waiters; /* Protected by hw_mutex */
	atomic_t fifo_queue_waiters;
	uint32_t last_read_seqno;
	spinlock_t irq_lock;
	struct vmw_fence_manager *fman;
	uint32_t irq_mask;

	/*
	 * Device state
	 */

	uint32_t traces_state;
	uint32_t enable_state;
	uint32_t config_done_state;

	/**
	 * Execbuf
	 */
	/**
	 * Protected by the cmdbuf mutex.
	 */

	struct vmw_sw_context ctx;
	struct mutex cmdbuf_mutex;

	/**
	 * Operating mode.
	 */

	bool stealth;
	bool enable_fb;

	/**
	 * Master management.
	 */

	struct vmw_master *active_master;
	struct vmw_master fbdev_master;
	struct notifier_block pm_nb;
	bool suspended;

	struct mutex release_mutex;
	uint32_t num_3d_resources;

	/*
	 * Query processing. These members
	 * are protected by the cmdbuf mutex.
	 */

	struct ttm_buffer_object *dummy_query_bo;
	struct ttm_buffer_object *pinned_bo;
	uint32_t query_cid;
	uint32_t query_cid_valid;
	bool dummy_query_bo_pinned;

	/*
	 * Surface swapping. The "surface_lru" list is protected by the
	 * resource lock in order to be able to destroy a surface and take
	 * it off the lru atomically. "used_memory_size" is currently
	 * protected by the cmdbuf mutex for simplicity.
	 */

	struct list_head res_lru[vmw_res_max];
	uint32_t used_memory_size;

	/*
	 * DMA mapping stuff.
	 */
	enum vmw_dma_map_mode map_mode;
};
  366. static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
  367. {
  368. return container_of(res, struct vmw_surface, res);
  369. }
  370. static inline struct vmw_private *vmw_priv(struct drm_device *dev)
  371. {
  372. return (struct vmw_private *)dev->dev_private;
  373. }
  374. static inline struct vmw_fpriv *vmw_fpriv(struct drm_file *file_priv)
  375. {
  376. return (struct vmw_fpriv *)file_priv->driver_priv;
  377. }
  378. static inline struct vmw_master *vmw_master(struct drm_master *master)
  379. {
  380. return (struct vmw_master *) master->driver_priv;
  381. }
/**
 * vmw_write - Write a device register through the index/value port pair.
 *
 * @dev_priv: Device holding the I/O port base in @io_start.
 * @offset: Register index to select.
 * @value: Value to write to the selected register.
 *
 * The index port must be written before the value port; the order of
 * the two outl() calls is significant.
 * NOTE(review): no locking here — callers appear responsible for
 * serializing register access (e.g. via hw_mutex); confirm at call sites.
 */
static inline void vmw_write(struct vmw_private *dev_priv,
			     unsigned int offset, uint32_t value)
{
	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
	outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT);
}
/**
 * vmw_read - Read a device register through the index/value port pair.
 *
 * @dev_priv: Device holding the I/O port base in @io_start.
 * @offset: Register index to select.
 *
 * Returns the value read from the selected register. The index port
 * write must precede the value port read.
 */
static inline uint32_t vmw_read(struct vmw_private *dev_priv,
				unsigned int offset)
{
	uint32_t val;

	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
	val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT);
	return val;
}
  396. int vmw_3d_resource_inc(struct vmw_private *dev_priv, bool unhide_svga);
  397. void vmw_3d_resource_dec(struct vmw_private *dev_priv, bool hide_svga);
  398. /**
  399. * GMR utilities - vmwgfx_gmr.c
  400. */
  401. extern int vmw_gmr_bind(struct vmw_private *dev_priv,
  402. const struct vmw_sg_table *vsgt,
  403. unsigned long num_pages,
  404. int gmr_id);
  405. extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);
  406. /**
  407. * Resource utilities - vmwgfx_resource.c
  408. */
  409. struct vmw_user_resource_conv;
  410. extern const struct vmw_user_resource_conv *user_surface_converter;
  411. extern const struct vmw_user_resource_conv *user_context_converter;
  412. extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv);
  413. extern void vmw_resource_unreference(struct vmw_resource **p_res);
  414. extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
  415. extern int vmw_resource_validate(struct vmw_resource *res);
  416. extern int vmw_resource_reserve(struct vmw_resource *res, bool no_backup);
  417. extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
  418. extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
  419. struct drm_file *file_priv);
  420. extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
  421. struct drm_file *file_priv);
  422. extern int vmw_context_check(struct vmw_private *dev_priv,
  423. struct ttm_object_file *tfile,
  424. int id,
  425. struct vmw_resource **p_res);
  426. extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
  427. struct ttm_object_file *tfile,
  428. uint32_t handle,
  429. struct vmw_surface **out_surf,
  430. struct vmw_dma_buffer **out_buf);
  431. extern int vmw_user_resource_lookup_handle(
  432. struct vmw_private *dev_priv,
  433. struct ttm_object_file *tfile,
  434. uint32_t handle,
  435. const struct vmw_user_resource_conv *converter,
  436. struct vmw_resource **p_res);
  437. extern void vmw_surface_res_free(struct vmw_resource *res);
  438. extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
  439. struct drm_file *file_priv);
  440. extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
  441. struct drm_file *file_priv);
  442. extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
  443. struct drm_file *file_priv);
  444. extern int vmw_surface_check(struct vmw_private *dev_priv,
  445. struct ttm_object_file *tfile,
  446. uint32_t handle, int *id);
  447. extern int vmw_surface_validate(struct vmw_private *dev_priv,
  448. struct vmw_surface *srf);
  449. extern void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo);
  450. extern int vmw_dmabuf_init(struct vmw_private *dev_priv,
  451. struct vmw_dma_buffer *vmw_bo,
  452. size_t size, struct ttm_placement *placement,
  453. bool interuptable,
  454. void (*bo_free) (struct ttm_buffer_object *bo));
  455. extern int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
  456. struct ttm_object_file *tfile);
  457. extern int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
  458. struct drm_file *file_priv);
  459. extern int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
  460. struct drm_file *file_priv);
  461. extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
  462. uint32_t cur_validate_node);
  463. extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo);
  464. extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
  465. uint32_t id, struct vmw_dma_buffer **out);
  466. extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
  467. struct drm_file *file_priv);
  468. extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
  469. struct drm_file *file_priv);
  470. extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
  471. struct ttm_object_file *tfile,
  472. uint32_t *inout_id,
  473. struct vmw_resource **out);
  474. extern void vmw_resource_unreserve(struct vmw_resource *res,
  475. struct vmw_dma_buffer *new_backup,
  476. unsigned long new_backup_offset);
  477. extern void vmw_resource_move_notify(struct ttm_buffer_object *bo,
  478. struct ttm_mem_reg *mem);
  479. extern void vmw_fence_single_bo(struct ttm_buffer_object *bo,
  480. struct vmw_fence_obj *fence);
  481. extern void vmw_resource_evict_all(struct vmw_private *dev_priv);
  482. /**
  483. * DMA buffer helper routines - vmwgfx_dmabuf.c
  484. */
  485. extern int vmw_dmabuf_to_placement(struct vmw_private *vmw_priv,
  486. struct vmw_dma_buffer *bo,
  487. struct ttm_placement *placement,
  488. bool interruptible);
  489. extern int vmw_dmabuf_to_vram(struct vmw_private *dev_priv,
  490. struct vmw_dma_buffer *buf,
  491. bool pin, bool interruptible);
  492. extern int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
  493. struct vmw_dma_buffer *buf,
  494. bool pin, bool interruptible);
  495. extern int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
  496. struct vmw_dma_buffer *bo,
  497. bool pin, bool interruptible);
  498. extern int vmw_dmabuf_unpin(struct vmw_private *vmw_priv,
  499. struct vmw_dma_buffer *bo,
  500. bool interruptible);
  501. extern void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf,
  502. SVGAGuestPtr *ptr);
  503. extern void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin);
  504. /**
  505. * Misc Ioctl functionality - vmwgfx_ioctl.c
  506. */
  507. extern int vmw_getparam_ioctl(struct drm_device *dev, void *data,
  508. struct drm_file *file_priv);
  509. extern int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
  510. struct drm_file *file_priv);
  511. extern int vmw_present_ioctl(struct drm_device *dev, void *data,
  512. struct drm_file *file_priv);
  513. extern int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
  514. struct drm_file *file_priv);
  515. extern unsigned int vmw_fops_poll(struct file *filp,
  516. struct poll_table_struct *wait);
  517. extern ssize_t vmw_fops_read(struct file *filp, char __user *buffer,
  518. size_t count, loff_t *offset);
  519. /**
  520. * Fifo utilities - vmwgfx_fifo.c
  521. */
  522. extern int vmw_fifo_init(struct vmw_private *dev_priv,
  523. struct vmw_fifo_state *fifo);
  524. extern void vmw_fifo_release(struct vmw_private *dev_priv,
  525. struct vmw_fifo_state *fifo);
  526. extern void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes);
  527. extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes);
  528. extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
  529. uint32_t *seqno);
  530. extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
  531. extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv);
  532. extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv);
  533. extern int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
  534. uint32_t cid);
  535. /**
  536. * TTM glue - vmwgfx_ttm_glue.c
  537. */
  538. extern int vmw_ttm_global_init(struct vmw_private *dev_priv);
  539. extern void vmw_ttm_global_release(struct vmw_private *dev_priv);
  540. extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
  541. /**
  542. * TTM buffer object driver - vmwgfx_buffer.c
  543. */
  544. extern const size_t vmw_tt_size;
  545. extern struct ttm_placement vmw_vram_placement;
  546. extern struct ttm_placement vmw_vram_ne_placement;
  547. extern struct ttm_placement vmw_vram_sys_placement;
  548. extern struct ttm_placement vmw_vram_gmr_placement;
  549. extern struct ttm_placement vmw_vram_gmr_ne_placement;
  550. extern struct ttm_placement vmw_sys_placement;
  551. extern struct ttm_placement vmw_evictable_placement;
  552. extern struct ttm_placement vmw_srf_placement;
  553. extern struct ttm_bo_driver vmw_bo_driver;
  554. extern int vmw_dma_quiescent(struct drm_device *dev);
  555. extern void vmw_piter_start(struct vmw_piter *viter,
  556. const struct vmw_sg_table *vsgt,
  557. unsigned long p_offs);
/**
 * vmw_piter_next - Advance the iterator one page.
 *
 * @viter: Pointer to the iterator to advance.
 *
 * Returns false if past the list of pages, true otherwise.
 * Dispatches to the mode-specific @next function installed at
 * iterator-start time.
 */
static inline bool vmw_piter_next(struct vmw_piter *viter)
{
	return viter->next(viter);
}
/**
 * vmw_piter_dma_addr - Return the DMA address of the current page.
 *
 * @viter: Pointer to the iterator
 *
 * Returns the DMA address of the page pointed to by @viter.
 */
static inline dma_addr_t vmw_piter_dma_addr(struct vmw_piter *viter)
{
	return viter->dma_address(viter);
}
/**
 * vmw_piter_page - Return a pointer to the current page.
 *
 * @viter: Pointer to the iterator
 *
 * Returns a struct page pointer to the page pointed to by @viter.
 * (Previous comment said "DMA address" — copy-paste from
 * vmw_piter_dma_addr; this accessor returns the page itself.)
 */
static inline struct page *vmw_piter_page(struct vmw_piter *viter)
{
	return viter->page(viter);
}
  591. /**
  592. * Command submission - vmwgfx_execbuf.c
  593. */
  594. extern int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
  595. struct drm_file *file_priv);
  596. extern int vmw_execbuf_process(struct drm_file *file_priv,
  597. struct vmw_private *dev_priv,
  598. void __user *user_commands,
  599. void *kernel_commands,
  600. uint32_t command_size,
  601. uint64_t throttle_us,
  602. struct drm_vmw_fence_rep __user
  603. *user_fence_rep,
  604. struct vmw_fence_obj **out_fence);
  605. extern void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
  606. struct vmw_fence_obj *fence);
  607. extern void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv);
  608. extern int vmw_execbuf_fence_commands(struct drm_file *file_priv,
  609. struct vmw_private *dev_priv,
  610. struct vmw_fence_obj **p_fence,
  611. uint32_t *p_handle);
  612. extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
  613. struct vmw_fpriv *vmw_fp,
  614. int ret,
  615. struct drm_vmw_fence_rep __user
  616. *user_fence_rep,
  617. struct vmw_fence_obj *fence,
  618. uint32_t fence_handle);
  619. /**
  620. * IRQs and wating - vmwgfx_irq.c
  621. */
  622. extern irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS);
  623. extern int vmw_wait_seqno(struct vmw_private *dev_priv, bool lazy,
  624. uint32_t seqno, bool interruptible,
  625. unsigned long timeout);
  626. extern void vmw_irq_preinstall(struct drm_device *dev);
  627. extern int vmw_irq_postinstall(struct drm_device *dev);
  628. extern void vmw_irq_uninstall(struct drm_device *dev);
  629. extern bool vmw_seqno_passed(struct vmw_private *dev_priv,
  630. uint32_t seqno);
  631. extern int vmw_fallback_wait(struct vmw_private *dev_priv,
  632. bool lazy,
  633. bool fifo_idle,
  634. uint32_t seqno,
  635. bool interruptible,
  636. unsigned long timeout);
  637. extern void vmw_update_seqno(struct vmw_private *dev_priv,
  638. struct vmw_fifo_state *fifo_state);
  639. extern void vmw_seqno_waiter_add(struct vmw_private *dev_priv);
  640. extern void vmw_seqno_waiter_remove(struct vmw_private *dev_priv);
  641. extern void vmw_goal_waiter_add(struct vmw_private *dev_priv);
  642. extern void vmw_goal_waiter_remove(struct vmw_private *dev_priv);
  643. /**
  644. * Rudimentary fence-like objects currently used only for throttling -
  645. * vmwgfx_marker.c
  646. */
  647. extern void vmw_marker_queue_init(struct vmw_marker_queue *queue);
  648. extern void vmw_marker_queue_takedown(struct vmw_marker_queue *queue);
  649. extern int vmw_marker_push(struct vmw_marker_queue *queue,
  650. uint32_t seqno);
  651. extern int vmw_marker_pull(struct vmw_marker_queue *queue,
  652. uint32_t signaled_seqno);
  653. extern int vmw_wait_lag(struct vmw_private *dev_priv,
  654. struct vmw_marker_queue *queue, uint32_t us);
  655. /**
  656. * Kernel framebuffer - vmwgfx_fb.c
  657. */
  658. int vmw_fb_init(struct vmw_private *vmw_priv);
  659. int vmw_fb_close(struct vmw_private *dev_priv);
  660. int vmw_fb_off(struct vmw_private *vmw_priv);
  661. int vmw_fb_on(struct vmw_private *vmw_priv);
  662. /**
  663. * Kernel modesetting - vmwgfx_kms.c
  664. */
  665. int vmw_kms_init(struct vmw_private *dev_priv);
  666. int vmw_kms_close(struct vmw_private *dev_priv);
  667. int vmw_kms_save_vga(struct vmw_private *vmw_priv);
  668. int vmw_kms_restore_vga(struct vmw_private *vmw_priv);
  669. int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
  670. struct drm_file *file_priv);
  671. void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv);
  672. void vmw_kms_cursor_snoop(struct vmw_surface *srf,
  673. struct ttm_object_file *tfile,
  674. struct ttm_buffer_object *bo,
  675. SVGA3dCmdHeader *header);
  676. int vmw_kms_write_svga(struct vmw_private *vmw_priv,
  677. unsigned width, unsigned height, unsigned pitch,
  678. unsigned bpp, unsigned depth);
  679. void vmw_kms_idle_workqueues(struct vmw_master *vmaster);
  680. bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
  681. uint32_t pitch,
  682. uint32_t height);
  683. u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc);
  684. int vmw_enable_vblank(struct drm_device *dev, int crtc);
  685. void vmw_disable_vblank(struct drm_device *dev, int crtc);
  686. int vmw_kms_present(struct vmw_private *dev_priv,
  687. struct drm_file *file_priv,
  688. struct vmw_framebuffer *vfb,
  689. struct vmw_surface *surface,
  690. uint32_t sid, int32_t destX, int32_t destY,
  691. struct drm_vmw_rect *clips,
  692. uint32_t num_clips);
  693. int vmw_kms_readback(struct vmw_private *dev_priv,
  694. struct drm_file *file_priv,
  695. struct vmw_framebuffer *vfb,
  696. struct drm_vmw_fence_rep __user *user_fence_rep,
  697. struct drm_vmw_rect *clips,
  698. uint32_t num_clips);
  699. int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
  700. struct drm_file *file_priv);
  701. int vmw_dumb_create(struct drm_file *file_priv,
  702. struct drm_device *dev,
  703. struct drm_mode_create_dumb *args);
  704. int vmw_dumb_map_offset(struct drm_file *file_priv,
  705. struct drm_device *dev, uint32_t handle,
  706. uint64_t *offset);
  707. int vmw_dumb_destroy(struct drm_file *file_priv,
  708. struct drm_device *dev,
  709. uint32_t handle);
  710. /**
  711. * Overlay control - vmwgfx_overlay.c
  712. */
  713. int vmw_overlay_init(struct vmw_private *dev_priv);
  714. int vmw_overlay_close(struct vmw_private *dev_priv);
  715. int vmw_overlay_ioctl(struct drm_device *dev, void *data,
  716. struct drm_file *file_priv);
  717. int vmw_overlay_stop_all(struct vmw_private *dev_priv);
  718. int vmw_overlay_resume_all(struct vmw_private *dev_priv);
  719. int vmw_overlay_pause_all(struct vmw_private *dev_priv);
  720. int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out);
  721. int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id);
  722. int vmw_overlay_num_overlays(struct vmw_private *dev_priv);
  723. int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv);
  724. /**
  725. * GMR Id manager
  726. */
  727. extern const struct ttm_mem_type_manager_func vmw_gmrid_manager_func;
  728. /**
  729. * Prime - vmwgfx_prime.c
  730. */
  731. extern const struct dma_buf_ops vmw_prime_dmabuf_ops;
  732. extern int vmw_prime_fd_to_handle(struct drm_device *dev,
  733. struct drm_file *file_priv,
  734. int fd, u32 *handle);
  735. extern int vmw_prime_handle_to_fd(struct drm_device *dev,
  736. struct drm_file *file_priv,
  737. uint32_t handle, uint32_t flags,
  738. int *prime_fd);
  739. /**
  740. * Inline helper functions
  741. */
  742. static inline void vmw_surface_unreference(struct vmw_surface **srf)
  743. {
  744. struct vmw_surface *tmp_srf = *srf;
  745. struct vmw_resource *res = &tmp_srf->res;
  746. *srf = NULL;
  747. vmw_resource_unreference(&res);
  748. }
  749. static inline struct vmw_surface *vmw_surface_reference(struct vmw_surface *srf)
  750. {
  751. (void) vmw_resource_reference(&srf->res);
  752. return srf;
  753. }
  754. static inline void vmw_dmabuf_unreference(struct vmw_dma_buffer **buf)
  755. {
  756. struct vmw_dma_buffer *tmp_buf = *buf;
  757. *buf = NULL;
  758. if (tmp_buf != NULL) {
  759. struct ttm_buffer_object *bo = &tmp_buf->base;
  760. ttm_bo_unref(&bo);
  761. }
  762. }
  763. static inline struct vmw_dma_buffer *vmw_dmabuf_reference(struct vmw_dma_buffer *buf)
  764. {
  765. if (ttm_bo_reference(&buf->base))
  766. return buf;
  767. return NULL;
  768. }
  769. static inline struct ttm_mem_global *vmw_mem_glob(struct vmw_private *dev_priv)
  770. {
  771. return (struct ttm_mem_global *) dev_priv->mem_global_ref.object;
  772. }
  773. #endif