vmwgfx_fb.c

/**************************************************************************
 *
 * Copyright © 2007 David Airlie
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "drmP.h"
#include "vmwgfx_drv.h"

#include "ttm/ttm_placement.h"

#define VMW_DIRTY_DELAY (HZ / 30)
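
/*
 * Per-fbdev state: the vmalloc'd shadow framebuffer, the VRAM-backed
 * TTM buffer object it is flushed into, and a spinlock-protected dirty
 * rectangle accumulated by the drawing hooks and deferred I/O below.
 */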
struct vmw_fb_par {
	struct vmw_private *vmw_priv;

	void *vmalloc;

	struct vmw_dma_buffer *vmw_bo;
	struct ttm_bo_kmap_obj map;

	u32 pseudo_palette[17];

	unsigned depth;
	unsigned bpp;

	unsigned max_width;
	unsigned max_height;

	void *bo_ptr;
	unsigned bo_size;
	bool bo_iowrite;

	struct {
		spinlock_t lock;
		bool active;
		unsigned x1;
		unsigned y1;
		unsigned x2;
		unsigned y2;
	} dirty;
};
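
/*
 * Store one entry in the 16-entry pseudo palette used by the
 * framebuffer console.  Only the truecolor 24/32 bpp layouts are
 * supported.
 */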
static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
			    unsigned blue, unsigned transp,
			    struct fb_info *info)
{
	struct vmw_fb_par *par = info->par;
	u32 *pal = par->pseudo_palette;

	if (regno > 15) {
		DRM_ERROR("Bad regno %u.\n", regno);
		return 1;
	}

	switch (par->depth) {
	case 24:
	case 32:
		pal[regno] = ((red & 0xff00) << 8) |
			      (green & 0xff00) |
			     ((blue & 0xff00) >> 8);
		break;
	default:
		DRM_ERROR("Bad depth %u, bpp %u.\n", par->depth, par->bpp);
		return 1;
	}

	return 0;
}
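
/*
 * Validate a requested video mode.  Only 32 bits per pixel is
 * accepted; depth is derived from the transparency length.  Without
 * multimon the resolution must match the size chosen at init time,
 * and it may never exceed the maximum framebuffer dimensions.
 */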
static int vmw_fb_check_var(struct fb_var_screeninfo *var,
			    struct fb_info *info)
{
	int depth = var->bits_per_pixel;
	struct vmw_fb_par *par = info->par;
	struct vmw_private *vmw_priv = par->vmw_priv;

	switch (var->bits_per_pixel) {
	case 32:
		depth = (var->transp.length > 0) ? 32 : 24;
		break;
	default:
		DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
		return -EINVAL;
	}

	switch (depth) {
	case 24:
		var->red.offset = 16;
		var->green.offset = 8;
		var->blue.offset = 0;
		var->red.length = 8;
		var->green.length = 8;
		var->blue.length = 8;
		var->transp.length = 0;
		var->transp.offset = 0;
		break;
	case 32:
		var->red.offset = 16;
		var->green.offset = 8;
		var->blue.offset = 0;
		var->red.length = 8;
		var->green.length = 8;
		var->blue.length = 8;
		var->transp.length = 8;
		var->transp.offset = 24;
		break;
	default:
		DRM_ERROR("Bad depth %u.\n", depth);
		return -EINVAL;
	}

	/* without multimon it's hard to resize */
	if (!(vmw_priv->capabilities & SVGA_CAP_MULTIMON) &&
	    (var->xres != par->max_width ||
	     var->yres != par->max_height)) {
		DRM_ERROR("Tried to resize, but we don't have multimon\n");
		return -EINVAL;
	}

	if (var->xres > par->max_width ||
	    var->yres > par->max_height) {
		DRM_ERROR("Requested geom can not fit in framebuffer\n");
		return -EINVAL;
	}

	return 0;
}
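
/*
 * Apply the current mode to the device.  With multimon, guest display
 * 0 is first cleared, the host framebuffer is set up at the maximum
 * size, and the display is then redefined with the geometry from
 * info->var.  Without multimon only width and height are programmed.
 */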
static int vmw_fb_set_par(struct fb_info *info)
{
	struct vmw_fb_par *par = info->par;
	struct vmw_private *vmw_priv = par->vmw_priv;

	if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) {
		vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, 0);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, 0);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, 0);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, 0);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);

		vmw_write(vmw_priv, SVGA_REG_ENABLE, 1);
		vmw_write(vmw_priv, SVGA_REG_WIDTH, par->max_width);
		vmw_write(vmw_priv, SVGA_REG_HEIGHT, par->max_height);
		vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, par->bpp);
		vmw_write(vmw_priv, SVGA_REG_DEPTH, par->depth);
		vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000);
		vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
		vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff);

		/* TODO check if pitch and offset changes */

		vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, info->var.xoffset);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, info->var.yoffset);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, info->var.xres);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, info->var.yres);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
	} else {
		vmw_write(vmw_priv, SVGA_REG_WIDTH, info->var.xres);
		vmw_write(vmw_priv, SVGA_REG_HEIGHT, info->var.yres);

		/* TODO check if pitch and offset changes */
	}

	return 0;
}
static int vmw_fb_pan_display(struct fb_var_screeninfo *var,
			      struct fb_info *info)
{
	return 0;
}

static int vmw_fb_blank(int blank, struct fb_info *info)
{
	return 0;
}
/*
 * Dirty code
 */
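
/*
 * Copy the pending dirty region from the shadow framebuffer into the
 * kmapped VRAM buffer and emit an SVGA_CMD_UPDATE for that rectangle
 * through the command FIFO.  Does nothing while dirty tracking is
 * inactive.
 */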
static void vmw_fb_dirty_flush(struct vmw_fb_par *par)
{
	struct vmw_private *vmw_priv = par->vmw_priv;
	struct fb_info *info = vmw_priv->fb_info;
	int stride = (info->fix.line_length / 4);
	int *src = (int *)info->screen_base;
	__le32 __iomem *vram_mem = par->bo_ptr;
	unsigned long flags;
	unsigned x, y, w, h;
	int i, k;
	struct {
		uint32_t header;
		SVGAFifoCmdUpdate body;
	} *cmd;

	spin_lock_irqsave(&par->dirty.lock, flags);
	if (!par->dirty.active) {
		spin_unlock_irqrestore(&par->dirty.lock, flags);
		return;
	}
	x = par->dirty.x1;
	y = par->dirty.y1;
	w = min(par->dirty.x2, info->var.xres) - x;
	h = min(par->dirty.y2, info->var.yres) - y;
	par->dirty.x1 = par->dirty.x2 = 0;
	par->dirty.y1 = par->dirty.y2 = 0;
	spin_unlock_irqrestore(&par->dirty.lock, flags);

	for (i = y * stride; i < info->fix.smem_len / 4; i += stride) {
		for (k = i+x; k < i+x+w && k < info->fix.smem_len / 4; k++)
			iowrite32(src[k], vram_mem + k);
	}

#if 0
	DRM_INFO("%s, (%u, %u) (%ux%u)\n", __func__, x, y, w, h);
#endif

	cmd = vmw_fifo_reserve(vmw_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		return;
	}

	cmd->header = cpu_to_le32(SVGA_CMD_UPDATE);
	cmd->body.x = cpu_to_le32(x);
	cmd->body.y = cpu_to_le32(y);
	cmd->body.width = cpu_to_le32(w);
	cmd->body.height = cpu_to_le32(h);
	vmw_fifo_commit(vmw_priv, sizeof(*cmd));
}
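
/*
 * Extend the pending dirty rectangle to cover the given area.  If the
 * rectangle was empty and dirty tracking is active, schedule the
 * deferred I/O work so the region gets flushed after VMW_DIRTY_DELAY.
 */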
static void vmw_fb_dirty_mark(struct vmw_fb_par *par,
			      unsigned x1, unsigned y1,
			      unsigned width, unsigned height)
{
	struct fb_info *info = par->vmw_priv->fb_info;
	unsigned long flags;
	unsigned x2 = x1 + width;
	unsigned y2 = y1 + height;

	spin_lock_irqsave(&par->dirty.lock, flags);
	if (par->dirty.x1 == par->dirty.x2) {
		par->dirty.x1 = x1;
		par->dirty.y1 = y1;
		par->dirty.x2 = x2;
		par->dirty.y2 = y2;
		/* if we are active start the dirty work
		 * we share the work with the defio system */
		if (par->dirty.active)
			schedule_delayed_work(&info->deferred_work, VMW_DIRTY_DELAY);
	} else {
		if (x1 < par->dirty.x1)
			par->dirty.x1 = x1;
		if (y1 < par->dirty.y1)
			par->dirty.y1 = y1;
		if (x2 > par->dirty.x2)
			par->dirty.x2 = x2;
		if (y2 > par->dirty.y2)
			par->dirty.y2 = y2;
	}
	spin_unlock_irqrestore(&par->dirty.lock, flags);
}
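
/*
 * Deferred I/O callback: turn the list of written pages into a
 * full-width span of dirty scanlines and flush it to the device.
 */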
static void vmw_deferred_io(struct fb_info *info,
			    struct list_head *pagelist)
{
	struct vmw_fb_par *par = info->par;
	unsigned long start, end, min, max;
	unsigned long flags;
	struct page *page;
	int y1, y2;

	min = ULONG_MAX;
	max = 0;
	list_for_each_entry(page, pagelist, lru) {
		start = page->index << PAGE_SHIFT;
		end = start + PAGE_SIZE - 1;
		min = min(min, start);
		max = max(max, end);
	}

	if (min < max) {
		y1 = min / info->fix.line_length;
		y2 = (max / info->fix.line_length) + 1;

		spin_lock_irqsave(&par->dirty.lock, flags);
		par->dirty.x1 = 0;
		par->dirty.y1 = y1;
		par->dirty.x2 = info->var.xres;
		par->dirty.y2 = y2;
		spin_unlock_irqrestore(&par->dirty.lock, flags);
	}

	vmw_fb_dirty_flush(par);
};

struct fb_deferred_io vmw_defio = {
	.delay = VMW_DIRTY_DELAY,
	.deferred_io = vmw_deferred_io,
};
/*
 * Draw code
 */
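
/*
 * The drawing hooks wrap the generic cfb_* helpers, which render into
 * the shadow framebuffer, and then mark the touched rectangle dirty so
 * it is eventually copied to VRAM and updated on screen.
 */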
static void vmw_fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
	cfb_fillrect(info, rect);
	vmw_fb_dirty_mark(info->par, rect->dx, rect->dy,
			  rect->width, rect->height);
}

static void vmw_fb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
	cfb_copyarea(info, region);
	vmw_fb_dirty_mark(info->par, region->dx, region->dy,
			  region->width, region->height);
}

static void vmw_fb_imageblit(struct fb_info *info, const struct fb_image *image)
{
	cfb_imageblit(info, image);
	vmw_fb_dirty_mark(info->par, image->dx, image->dy,
			  image->width, image->height);
}
/*
 * Bring up code
 */

static struct fb_ops vmw_fb_ops = {
	.owner = THIS_MODULE,
	.fb_check_var = vmw_fb_check_var,
	.fb_set_par = vmw_fb_set_par,
	.fb_setcolreg = vmw_fb_setcolreg,
	.fb_fillrect = vmw_fb_fillrect,
	.fb_copyarea = vmw_fb_copyarea,
	.fb_imageblit = vmw_fb_imageblit,
	.fb_pan_display = vmw_fb_pan_display,
	.fb_blank = vmw_fb_blank,
};
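
/*
 * Allocate the buffer object backing the framebuffer, constrained to
 * the first 'size' bytes of VRAM with a non-evictable placement.  The
 * fbdev master lock is held across the allocation.
 */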
static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
			    size_t size, struct vmw_dma_buffer **out)
{
	struct vmw_dma_buffer *vmw_bo;
	struct ttm_placement ne_placement = vmw_vram_ne_placement;
	int ret;

	ne_placement.lpfn = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	/* interruptible? */
	ret = ttm_write_lock(&vmw_priv->fbdev_master.lock, false);
	if (unlikely(ret != 0))
		return ret;

	vmw_bo = kmalloc(sizeof(*vmw_bo), GFP_KERNEL);
	if (!vmw_bo) {
		ret = -ENOMEM;
		goto err_unlock;
	}

	ret = vmw_dmabuf_init(vmw_priv, vmw_bo, size,
			      &ne_placement,
			      false,
			      &vmw_dmabuf_bo_free);
	if (unlikely(ret != 0))
		goto err_unlock; /* init frees the buffer on failure */

	*out = vmw_bo;

	ttm_write_unlock(&vmw_priv->fbdev_master.lock);

	return 0;

err_unlock:
	ttm_write_unlock(&vmw_priv->fbdev_master.lock);
	return ret;
}
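
/*
 * Bring up fbdev emulation: program an initial mode, allocate the
 * shadow buffer and the VRAM buffer object, fill in the fixed and
 * variable fb_info data, hook up deferred I/O and register the
 * framebuffer.
 */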
int vmw_fb_init(struct vmw_private *vmw_priv)
{
	struct device *device = &vmw_priv->dev->pdev->dev;
	struct vmw_fb_par *par;
	struct fb_info *info;
	unsigned initial_width, initial_height;
	unsigned fb_width, fb_height;
	unsigned fb_bbp, fb_depth, fb_offset, fb_pitch, fb_size;
	int ret;

	initial_width = 800;
	initial_height = 600;

	fb_bbp = 32;
	fb_depth = 24;

	if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) {
		fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
		fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);
	} else {
		fb_width = min(vmw_priv->fb_max_width, initial_width);
		fb_height = min(vmw_priv->fb_max_height, initial_height);
	}

	initial_width = min(fb_width, initial_width);
	initial_height = min(fb_height, initial_height);

	vmw_write(vmw_priv, SVGA_REG_WIDTH, fb_width);
	vmw_write(vmw_priv, SVGA_REG_HEIGHT, fb_height);
	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, fb_bbp);
	vmw_write(vmw_priv, SVGA_REG_DEPTH, fb_depth);
	vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000);
	vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
	vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff);

	fb_size = vmw_read(vmw_priv, SVGA_REG_FB_SIZE);
	fb_offset = vmw_read(vmw_priv, SVGA_REG_FB_OFFSET);
	fb_pitch = vmw_read(vmw_priv, SVGA_REG_BYTES_PER_LINE);

	DRM_DEBUG("width %u\n", vmw_read(vmw_priv, SVGA_REG_MAX_WIDTH));
	DRM_DEBUG("height %u\n", vmw_read(vmw_priv, SVGA_REG_MAX_HEIGHT));
	DRM_DEBUG("width %u\n", vmw_read(vmw_priv, SVGA_REG_WIDTH));
	DRM_DEBUG("height %u\n", vmw_read(vmw_priv, SVGA_REG_HEIGHT));
	DRM_DEBUG("bpp %u\n", vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL));
	DRM_DEBUG("depth %u\n", vmw_read(vmw_priv, SVGA_REG_DEPTH));
	DRM_DEBUG("bpl %u\n", vmw_read(vmw_priv, SVGA_REG_BYTES_PER_LINE));
	DRM_DEBUG("r mask %08x\n", vmw_read(vmw_priv, SVGA_REG_RED_MASK));
	DRM_DEBUG("g mask %08x\n", vmw_read(vmw_priv, SVGA_REG_GREEN_MASK));
	DRM_DEBUG("b mask %08x\n", vmw_read(vmw_priv, SVGA_REG_BLUE_MASK));
	DRM_DEBUG("fb_offset 0x%08x\n", fb_offset);
	DRM_DEBUG("fb_pitch %u\n", fb_pitch);
	DRM_DEBUG("fb_size %u kiB\n", fb_size / 1024);

	info = framebuffer_alloc(sizeof(*par), device);
	if (!info)
		return -ENOMEM;
	/*
	 * Par
	 */
	vmw_priv->fb_info = info;
	par = info->par;
	par->vmw_priv = vmw_priv;
	par->depth = fb_depth;
	par->bpp = fb_bbp;
	par->vmalloc = NULL;
	par->max_width = fb_width;
	par->max_height = fb_height;

	/*
	 * Create buffers and alloc memory
	 */
	par->vmalloc = vmalloc(fb_size);
	if (unlikely(par->vmalloc == NULL)) {
		ret = -ENOMEM;
		goto err_free;
	}

	ret = vmw_fb_create_bo(vmw_priv, fb_size, &par->vmw_bo);
	if (unlikely(ret != 0))
		goto err_free;

	ret = ttm_bo_kmap(&par->vmw_bo->base,
			  0,
			  par->vmw_bo->base.num_pages,
			  &par->map);
	if (unlikely(ret != 0))
		goto err_unref;
	par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
	par->bo_size = fb_size;

	/*
	 * Fixed and var
	 */
	strcpy(info->fix.id, "svgadrmfb");
	info->fix.type = FB_TYPE_PACKED_PIXELS;
	info->fix.visual = FB_VISUAL_TRUECOLOR;
	info->fix.type_aux = 0;
	info->fix.xpanstep = 1; /* doing it in hw */
	info->fix.ypanstep = 1; /* doing it in hw */
	info->fix.ywrapstep = 0;
	info->fix.accel = FB_ACCEL_NONE;
	info->fix.line_length = fb_pitch;

	info->fix.smem_start = 0;
	info->fix.smem_len = fb_size;

	info->fix.mmio_start = 0;
	info->fix.mmio_len = 0;

	info->pseudo_palette = par->pseudo_palette;
	info->screen_base = par->vmalloc;
	info->screen_size = fb_size;

	info->flags = FBINFO_DEFAULT;
	info->fbops = &vmw_fb_ops;

	/* 24 depth per default */
	info->var.red.offset = 16;
	info->var.green.offset = 8;
	info->var.blue.offset = 0;
	info->var.red.length = 8;
	info->var.green.length = 8;
	info->var.blue.length = 8;
	info->var.transp.offset = 0;
	info->var.transp.length = 0;

	info->var.xres_virtual = fb_width;
	info->var.yres_virtual = fb_height;
	info->var.bits_per_pixel = par->bpp;
	info->var.xoffset = 0;
	info->var.yoffset = 0;
	info->var.activate = FB_ACTIVATE_NOW;
	info->var.height = -1;
	info->var.width = -1;

	info->var.xres = initial_width;
	info->var.yres = initial_height;

#if 0
	info->pixmap.size = 64*1024;
	info->pixmap.buf_align = 8;
	info->pixmap.access_align = 32;
	info->pixmap.flags = FB_PIXMAP_SYSTEM;
	info->pixmap.scan_align = 1;
#else
	info->pixmap.size = 0;
	info->pixmap.buf_align = 8;
	info->pixmap.access_align = 32;
	info->pixmap.flags = FB_PIXMAP_SYSTEM;
	info->pixmap.scan_align = 1;
#endif
	/*
	 * Dirty & Deferred IO
	 */
	par->dirty.x1 = par->dirty.x2 = 0;
	par->dirty.y1 = par->dirty.y2 = 0;
	par->dirty.active = true;
	spin_lock_init(&par->dirty.lock);
	info->fbdefio = &vmw_defio;
	fb_deferred_io_init(info);

	ret = register_framebuffer(info);
	if (unlikely(ret != 0))
		goto err_defio;

	return 0;

err_defio:
	fb_deferred_io_cleanup(info);
	ttm_bo_kunmap(&par->map);
err_unref:
	ttm_bo_unref((struct ttm_buffer_object **)&par->vmw_bo);
err_free:
	vfree(par->vmalloc);
	framebuffer_release(info);
	vmw_priv->fb_info = NULL;
	return ret;
}
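
/*
 * Tear down fbdev emulation: stop deferred I/O, unregister the
 * framebuffer and release the buffer object and the shadow buffer.
 */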
int vmw_fb_close(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;
	struct ttm_buffer_object *bo;

	if (!vmw_priv->fb_info)
		return 0;

	info = vmw_priv->fb_info;
	par = info->par;
	bo = &par->vmw_bo->base;
	par->vmw_bo = NULL;

	/* ??? order */
	fb_deferred_io_cleanup(info);
	unregister_framebuffer(info);

	ttm_bo_kunmap(&par->map);
	ttm_bo_unref(&bo);

	vfree(par->vmalloc);
	framebuffer_release(info);

	return 0;
}
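
/*
 * Move the buffer object out of VRAM into system memory.
 */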
int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv,
			 struct vmw_dma_buffer *vmw_bo)
{
	struct ttm_buffer_object *bo = &vmw_bo->base;
	int ret = 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_validate(bo, &vmw_sys_placement, false, false);
	ttm_bo_unreserve(bo);

	return ret;
}
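
/*
 * Move the buffer object into the first pages of VRAM with a
 * non-evictable placement, unbinding it from any GMR first.  The
 * active master lock is held across the move.
 */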
int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
				struct vmw_dma_buffer *vmw_bo)
{
	struct ttm_buffer_object *bo = &vmw_bo->base;
	struct ttm_placement ne_placement = vmw_vram_ne_placement;
	int ret = 0;

	ne_placement.lpfn = bo->num_pages;

	/* interruptible? */
	ret = ttm_write_lock(&vmw_priv->active_master->lock, false);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (unlikely(ret != 0))
		goto err_unlock;

	if (vmw_bo->gmr_bound) {
		vmw_gmr_unbind(vmw_priv, vmw_bo->gmr_id);
		spin_lock(&bo->glob->lru_lock);
		ida_remove(&vmw_priv->gmr_ida, vmw_bo->gmr_id);
		spin_unlock(&bo->glob->lru_lock);
		vmw_bo->gmr_bound = false;
	}

	ret = ttm_bo_validate(bo, &ne_placement, false, false);
	ttm_bo_unreserve(bo);
err_unlock:
	ttm_write_unlock(&vmw_priv->active_master->lock);

	return ret;
}
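
/*
 * Stop fbdev output: disable dirty flushing, wait for pending deferred
 * work, unmap the buffer object and move it out of VRAM.
 */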
int vmw_fb_off(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;
	unsigned long flags;

	if (!vmw_priv->fb_info)
		return -EINVAL;

	info = vmw_priv->fb_info;
	par = info->par;

	spin_lock_irqsave(&par->dirty.lock, flags);
	par->dirty.active = false;
	spin_unlock_irqrestore(&par->dirty.lock, flags);

	flush_scheduled_work();

	par->bo_ptr = NULL;
	ttm_bo_kunmap(&par->map);

	vmw_dmabuf_from_vram(vmw_priv, par->vmw_bo);

	return 0;
}
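
/*
 * Restart fbdev output: stop all overlays, move the buffer object back
 * to the start of VRAM, map it, re-enable dirty tracking and flush the
 * whole screen.
 */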
int vmw_fb_on(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;
	unsigned long flags;
	bool dummy;
	int ret;

	if (!vmw_priv->fb_info)
		return -EINVAL;

	info = vmw_priv->fb_info;
	par = info->par;

	/* we are already active */
	if (par->bo_ptr != NULL)
		return 0;

	/* Make sure that all overlays are stopped when we take over */
	vmw_overlay_stop_all(vmw_priv);

	ret = vmw_dmabuf_to_start_of_vram(vmw_priv, par->vmw_bo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("could not move buffer to start of VRAM\n");
		goto err_no_buffer;
	}

	ret = ttm_bo_kmap(&par->vmw_bo->base,
			  0,
			  par->vmw_bo->base.num_pages,
			  &par->map);
	BUG_ON(ret != 0);
	par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &dummy);

	spin_lock_irqsave(&par->dirty.lock, flags);
	par->dirty.active = true;
	spin_unlock_irqrestore(&par->dirty.lock, flags);

err_no_buffer:
	vmw_fb_set_par(info);

	vmw_fb_dirty_mark(par, 0, 0, info->var.xres, info->var.yres);

	/* If there already was stuff dirty we won't
	 * schedule a new work, so let's do it now */
	schedule_delayed_work(&info->deferred_work, 0);

	return 0;
}