vmwgfx_fb.c
/**************************************************************************
 *
 * Copyright © 2007 David Airlie
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "drmP.h"
#include "vmwgfx_drv.h"

#include "ttm/ttm_placement.h"

#define VMW_DIRTY_DELAY (HZ / 30)

struct vmw_fb_par {
        struct vmw_private *vmw_priv;

        void *vmalloc;

        struct vmw_dma_buffer *vmw_bo;
        struct ttm_bo_kmap_obj map;

        u32 pseudo_palette[17];

        unsigned depth;
        unsigned bpp;

        unsigned max_width;
        unsigned max_height;

        void *bo_ptr;
        unsigned bo_size;
        bool bo_iowrite;

        struct {
                spinlock_t lock;
                bool active;
                unsigned x1;
                unsigned y1;
                unsigned x2;
                unsigned y2;
        } dirty;
};
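
/*
 * fbdev palette hook. Only truecolor is exposed, so just the 16
 * console pseudo-palette entries are filled in, and only for
 * depth 24/32.
 */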
static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
                            unsigned blue, unsigned transp,
                            struct fb_info *info)
{
        struct vmw_fb_par *par = info->par;
        u32 *pal = par->pseudo_palette;

        if (regno > 15) {
                DRM_ERROR("Bad regno %u.\n", regno);
                return 1;
        }

        switch (par->depth) {
        case 24:
        case 32:
                pal[regno] = ((red & 0xff00) << 8) |
                              (green & 0xff00) |
                             ((blue & 0xff00) >> 8);
                break;
        default:
                DRM_ERROR("Bad depth %u, bpp %u.\n", par->depth, par->bpp);
                return 1;
        }

        return 0;
}
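
/*
 * Validate a mode request from the fbdev core. Only 32 bpp is
 * accepted (interpreted as depth 24 or 32 depending on whether an
 * alpha channel was asked for), panning requires the display
 * topology capability, and the resulting geometry must fit both the
 * framebuffer and the available VRAM.
 */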
static int vmw_fb_check_var(struct fb_var_screeninfo *var,
                            struct fb_info *info)
{
        int depth = var->bits_per_pixel;
        struct vmw_fb_par *par = info->par;
        struct vmw_private *vmw_priv = par->vmw_priv;

        switch (var->bits_per_pixel) {
        case 32:
                depth = (var->transp.length > 0) ? 32 : 24;
                break;
        default:
                DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
                return -EINVAL;
        }

        switch (depth) {
        case 24:
                var->red.offset = 16;
                var->green.offset = 8;
                var->blue.offset = 0;
                var->red.length = 8;
                var->green.length = 8;
                var->blue.length = 8;
                var->transp.length = 0;
                var->transp.offset = 0;
                break;
        case 32:
                var->red.offset = 16;
                var->green.offset = 8;
                var->blue.offset = 0;
                var->red.length = 8;
                var->green.length = 8;
                var->blue.length = 8;
                var->transp.length = 8;
                var->transp.offset = 24;
                break;
        default:
                DRM_ERROR("Bad depth %u.\n", depth);
                return -EINVAL;
        }

        if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
            (var->xoffset != 0 || var->yoffset != 0)) {
                DRM_ERROR("Can not handle panning without display topology\n");
                return -EINVAL;
        }

        if ((var->xoffset + var->xres) > par->max_width ||
            (var->yoffset + var->yres) > par->max_height) {
                DRM_ERROR("Requested geom can not fit in framebuffer\n");
                return -EINVAL;
        }

        if (!vmw_kms_validate_mode_vram(vmw_priv,
                                        info->fix.line_length,
                                        var->yoffset + var->yres)) {
                DRM_ERROR("Requested geom can not fit in framebuffer\n");
                return -EINVAL;
        }

        return 0;
}
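
/*
 * Program the device with the mode in info->var. With
 * SVGA_CAP_DISPLAY_TOPOLOGY the single guest display is also
 * (re)positioned according to the panning offsets.
 */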
static int vmw_fb_set_par(struct fb_info *info)
{
        struct vmw_fb_par *par = info->par;
        struct vmw_private *vmw_priv = par->vmw_priv;

        vmw_kms_write_svga(vmw_priv, info->var.xres, info->var.yres,
                           info->fix.line_length,
                           par->bpp, par->depth);
        if (vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) {
                /* TODO: check whether pitch and offset have changed */
                vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
                vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
                vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
                vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, info->var.xoffset);
                vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, info->var.yoffset);
                vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, info->var.xres);
                vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, info->var.yres);
                vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
        }

        /* This warning is really helpful, since if the check fails the
         * user can probably not see anything on the screen.
         */
        WARN_ON(vmw_read(vmw_priv, SVGA_REG_FB_OFFSET) != 0);

        return 0;
}

/* The fbdev core requires these hooks; panning takes effect through
 * vmw_fb_set_par() and blanking is not supported, so both are no-ops.
 */
static int vmw_fb_pan_display(struct fb_var_screeninfo *var,
                              struct fb_info *info)
{
        return 0;
}

static int vmw_fb_blank(int blank, struct fb_info *info)
{
        return 0;
}

/*
 * Dirty code
 */
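
/*
 * Flush the pending dirty rectangle: copy it from the system-memory
 * shadow into the buffer object mapped at the start of VRAM, then
 * send an SVGA_CMD_UPDATE through the FIFO so the host refreshes
 * that region of the screen.
 */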
static void vmw_fb_dirty_flush(struct vmw_fb_par *par)
{
        struct vmw_private *vmw_priv = par->vmw_priv;
        struct fb_info *info = vmw_priv->fb_info;
        int stride = (info->fix.line_length / 4);
        int *src = (int *)info->screen_base;
        __le32 __iomem *vram_mem = par->bo_ptr;
        unsigned long flags;
        unsigned x, y, w, h;
        int i, k;
        struct {
                uint32_t header;
                SVGAFifoCmdUpdate body;
        } *cmd;

        if (vmw_priv->suspended)
                return;

        spin_lock_irqsave(&par->dirty.lock, flags);
        if (!par->dirty.active) {
                spin_unlock_irqrestore(&par->dirty.lock, flags);
                return;
        }
        x = par->dirty.x1;
        y = par->dirty.y1;
        w = min(par->dirty.x2, info->var.xres) - x;
        h = min(par->dirty.y2, info->var.yres) - y;
        par->dirty.x1 = par->dirty.x2 = 0;
        par->dirty.y1 = par->dirty.y2 = 0;
        spin_unlock_irqrestore(&par->dirty.lock, flags);

        /* Note: this copies every row from y down to the end of the
         * shadow buffer, not just the h dirty rows; only the host
         * update sent below is clipped to the dirty rectangle.
         */
        for (i = y * stride; i < info->fix.smem_len / 4; i += stride) {
                for (k = i+x; k < i+x+w && k < info->fix.smem_len / 4; k++)
                        iowrite32(src[k], vram_mem + k);
        }

#if 0
        DRM_INFO("%s, (%u, %u) (%ux%u)\n", __func__, x, y, w, h);
#endif

        cmd = vmw_fifo_reserve(vmw_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Fifo reserve failed.\n");
                return;
        }

        cmd->header = cpu_to_le32(SVGA_CMD_UPDATE);
        cmd->body.x = cpu_to_le32(x);
        cmd->body.y = cpu_to_le32(y);
        cmd->body.width = cpu_to_le32(w);
        cmd->body.height = cpu_to_le32(h);
        vmw_fifo_commit(vmw_priv, sizeof(*cmd));
}
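
/*
 * Merge a newly dirtied rectangle into the pending one under the
 * dirty lock. An empty pending rectangle (x1 == x2) is initialized
 * from scratch and, if flushing is active, the shared deferred-io
 * work is scheduled to do the actual flush.
 */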
static void vmw_fb_dirty_mark(struct vmw_fb_par *par,
                              unsigned x1, unsigned y1,
                              unsigned width, unsigned height)
{
        struct fb_info *info = par->vmw_priv->fb_info;
        unsigned long flags;
        unsigned x2 = x1 + width;
        unsigned y2 = y1 + height;

        spin_lock_irqsave(&par->dirty.lock, flags);
        if (par->dirty.x1 == par->dirty.x2) {
                par->dirty.x1 = x1;
                par->dirty.y1 = y1;
                par->dirty.x2 = x2;
                par->dirty.y2 = y2;
                /* If we are active, start the dirty work;
                 * we share the work with the defio system. */
                if (par->dirty.active)
                        schedule_delayed_work(&info->deferred_work, VMW_DIRTY_DELAY);
        } else {
                if (x1 < par->dirty.x1)
                        par->dirty.x1 = x1;
                if (y1 < par->dirty.y1)
                        par->dirty.y1 = y1;
                if (x2 > par->dirty.x2)
                        par->dirty.x2 = x2;
                if (y2 > par->dirty.y2)
                        par->dirty.y2 = y2;
        }
        spin_unlock_irqrestore(&par->dirty.lock, flags);
}
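
/*
 * Deferred-io callback for writes through the mmap'ed shadow. Page
 * indices only tell us a byte range, so whole scanlines are marked
 * dirty and flushed immediately.
 */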
static void vmw_deferred_io(struct fb_info *info,
                            struct list_head *pagelist)
{
        struct vmw_fb_par *par = info->par;
        unsigned long start, end, min, max;
        unsigned long flags;
        struct page *page;
        int y1, y2;

        min = ULONG_MAX;
        max = 0;
        list_for_each_entry(page, pagelist, lru) {
                start = page->index << PAGE_SHIFT;
                end = start + PAGE_SIZE - 1;
                min = min(min, start);
                max = max(max, end);
        }

        if (min < max) {
                y1 = min / info->fix.line_length;
                y2 = (max / info->fix.line_length) + 1;

                spin_lock_irqsave(&par->dirty.lock, flags);
                par->dirty.x1 = 0;
                par->dirty.y1 = y1;
                par->dirty.x2 = info->var.xres;
                par->dirty.y2 = y2;
                spin_unlock_irqrestore(&par->dirty.lock, flags);
        }

        vmw_fb_dirty_flush(par);
}

struct fb_deferred_io vmw_defio = {
        .delay          = VMW_DIRTY_DELAY,
        .deferred_io    = vmw_deferred_io,
};

/*
 * Draw code
 */

static void vmw_fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
        cfb_fillrect(info, rect);
        vmw_fb_dirty_mark(info->par, rect->dx, rect->dy,
                          rect->width, rect->height);
}

static void vmw_fb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
        cfb_copyarea(info, region);
        vmw_fb_dirty_mark(info->par, region->dx, region->dy,
                          region->width, region->height);
}

static void vmw_fb_imageblit(struct fb_info *info, const struct fb_image *image)
{
        cfb_imageblit(info, image);
        vmw_fb_dirty_mark(info->par, image->dx, image->dy,
                          image->width, image->height);
}

/*
 * Bring up code
 */

static struct fb_ops vmw_fb_ops = {
        .owner = THIS_MODULE,
        .fb_check_var = vmw_fb_check_var,
        .fb_set_par = vmw_fb_set_par,
        .fb_setcolreg = vmw_fb_setcolreg,
        .fb_fillrect = vmw_fb_fillrect,
        .fb_copyarea = vmw_fb_copyarea,
        .fb_imageblit = vmw_fb_imageblit,
        .fb_pan_display = vmw_fb_pan_display,
        .fb_blank = vmw_fb_blank,
};
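
/*
 * Allocate a buffer object for the framebuffer, pinned (no_evict
 * placement) to the first @size bytes of VRAM. The fbdev master lock
 * is held across the allocation.
 */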
static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
                            size_t size, struct vmw_dma_buffer **out)
{
        struct vmw_dma_buffer *vmw_bo;
        struct ttm_placement ne_placement = vmw_vram_ne_placement;
        int ret;

        ne_placement.lpfn = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

        /* interruptible? */
        ret = ttm_write_lock(&vmw_priv->fbdev_master.lock, false);
        if (unlikely(ret != 0))
                return ret;

        vmw_bo = kmalloc(sizeof(*vmw_bo), GFP_KERNEL);
        if (!vmw_bo) {
                ret = -ENOMEM;
                goto err_unlock;
        }

        ret = vmw_dmabuf_init(vmw_priv, vmw_bo, size,
                              &ne_placement,
                              false,
                              &vmw_dmabuf_bo_free);
        if (unlikely(ret != 0))
                goto err_unlock; /* init frees the buffer on failure */

        *out = vmw_bo;

        ttm_write_unlock(&vmw_priv->fbdev_master.lock);

        return 0;

err_unlock:
        ttm_write_unlock(&vmw_priv->fbdev_master.lock);
        return ret;
}
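
/*
 * Bring-up entry point: allocate the vmalloc shadow and the VRAM
 * buffer object, fill in the fixed and variable screen info, wire up
 * deferred io and register the framebuffer device.
 */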
int vmw_fb_init(struct vmw_private *vmw_priv)
{
        struct device *device = &vmw_priv->dev->pdev->dev;
        struct vmw_fb_par *par;
        struct fb_info *info;
        unsigned initial_width, initial_height;
        unsigned fb_width, fb_height;
        unsigned fb_bpp, fb_depth, fb_offset, fb_pitch, fb_size;
        int ret;

        /* XXX These shouldn't be hardcoded. */
        initial_width = 800;
        initial_height = 600;

        fb_bpp = 32;
        fb_depth = 24;

        /* XXX These shouldn't be hardcoded either. */
        fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
        fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);

        initial_width = min(fb_width, initial_width);
        initial_height = min(fb_height, initial_height);

        fb_pitch = fb_width * fb_bpp / 8;
        fb_size = fb_pitch * fb_height;
        fb_offset = vmw_read(vmw_priv, SVGA_REG_FB_OFFSET);

        info = framebuffer_alloc(sizeof(*par), device);
        if (!info)
                return -ENOMEM;

        /*
         * Par
         */
        vmw_priv->fb_info = info;
        par = info->par;
        par->vmw_priv = vmw_priv;
        par->depth = fb_depth;
        par->bpp = fb_bpp;
        par->vmalloc = NULL;
        par->max_width = fb_width;
        par->max_height = fb_height;

        /*
         * Create buffers and alloc memory
         */
        par->vmalloc = vmalloc(fb_size);
        if (unlikely(par->vmalloc == NULL)) {
                ret = -ENOMEM;
                goto err_free;
        }

        ret = vmw_fb_create_bo(vmw_priv, fb_size, &par->vmw_bo);
        if (unlikely(ret != 0))
                goto err_free;

        ret = ttm_bo_kmap(&par->vmw_bo->base,
                          0,
                          par->vmw_bo->base.num_pages,
                          &par->map);
        if (unlikely(ret != 0))
                goto err_unref;
        par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
        par->bo_size = fb_size;

        /*
         * Fixed and var
         */
        strcpy(info->fix.id, "svgadrmfb");
        info->fix.type = FB_TYPE_PACKED_PIXELS;
        info->fix.visual = FB_VISUAL_TRUECOLOR;
        info->fix.type_aux = 0;
        info->fix.xpanstep = 1; /* doing it in hw */
        info->fix.ypanstep = 1; /* doing it in hw */
        info->fix.ywrapstep = 0;
        info->fix.accel = FB_ACCEL_NONE;
        info->fix.line_length = fb_pitch;

        info->fix.smem_start = 0;
        info->fix.smem_len = fb_size;

        info->fix.mmio_start = 0;
        info->fix.mmio_len = 0;

        info->pseudo_palette = par->pseudo_palette;
        info->screen_base = par->vmalloc;
        info->screen_size = fb_size;

        info->flags = FBINFO_DEFAULT;
        info->fbops = &vmw_fb_ops;

        /* 24 depth per default */
        info->var.red.offset = 16;
        info->var.green.offset = 8;
        info->var.blue.offset = 0;
        info->var.red.length = 8;
        info->var.green.length = 8;
        info->var.blue.length = 8;
        info->var.transp.offset = 0;
        info->var.transp.length = 0;

        info->var.xres_virtual = fb_width;
        info->var.yres_virtual = fb_height;
        info->var.bits_per_pixel = par->bpp;
        info->var.xoffset = 0;
        info->var.yoffset = 0;
        info->var.activate = FB_ACTIVATE_NOW;
        info->var.height = -1;
        info->var.width = -1;

        info->var.xres = initial_width;
        info->var.yres = initial_height;

#if 0
        info->pixmap.size = 64*1024;
        info->pixmap.buf_align = 8;
        info->pixmap.access_align = 32;
        info->pixmap.flags = FB_PIXMAP_SYSTEM;
        info->pixmap.scan_align = 1;
#else
        info->pixmap.size = 0;
        info->pixmap.buf_align = 8;
        info->pixmap.access_align = 32;
        info->pixmap.flags = FB_PIXMAP_SYSTEM;
        info->pixmap.scan_align = 1;
#endif

        info->apertures = alloc_apertures(1);
        if (!info->apertures) {
                ret = -ENOMEM;
                goto err_aper;
        }
        info->apertures->ranges[0].base = vmw_priv->vram_start;
        info->apertures->ranges[0].size = vmw_priv->vram_size;

        /*
         * Dirty & Deferred IO
         */
        par->dirty.x1 = par->dirty.x2 = 0;
        par->dirty.y1 = par->dirty.y2 = 0;
        par->dirty.active = true;
        spin_lock_init(&par->dirty.lock);
        info->fbdefio = &vmw_defio;
        fb_deferred_io_init(info);

        ret = register_framebuffer(info);
        if (unlikely(ret != 0))
                goto err_defio;

        return 0;

err_defio:
        fb_deferred_io_cleanup(info);
err_aper:
        ttm_bo_kunmap(&par->map);
err_unref:
        ttm_bo_unref((struct ttm_buffer_object **)&par->vmw_bo);
err_free:
        vfree(par->vmalloc);
        framebuffer_release(info);
        vmw_priv->fb_info = NULL;
        return ret;
}
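
/*
 * Tear down the fbdev device and release the shadow buffer, the kmap
 * and the buffer object reference.
 */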
int vmw_fb_close(struct vmw_private *vmw_priv)
{
        struct fb_info *info;
        struct vmw_fb_par *par;
        struct ttm_buffer_object *bo;

        if (!vmw_priv->fb_info)
                return 0;

        info = vmw_priv->fb_info;
        par = info->par;
        bo = &par->vmw_bo->base;
        par->vmw_bo = NULL;

        /* ??? order */
        fb_deferred_io_cleanup(info);
        unregister_framebuffer(info);

        ttm_bo_kunmap(&par->map);
        ttm_bo_unref(&bo);

        vfree(par->vmalloc);
        framebuffer_release(info);

        return 0;
}
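
/*
 * Move the framebuffer buffer object out of VRAM into system memory,
 * e.g. so that VRAM can be handed over when fbdev is switched off.
 */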
int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv,
                         struct vmw_dma_buffer *vmw_bo)
{
        struct ttm_buffer_object *bo = &vmw_bo->base;
        int ret = 0;

        ret = ttm_bo_reserve(bo, false, false, false, 0);
        if (unlikely(ret != 0))
                return ret;

        ret = ttm_bo_validate(bo, &vmw_sys_placement, false, false, false);
        ttm_bo_unreserve(bo);

        return ret;
}
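
/*
 * Pin the framebuffer buffer object at offset 0 of VRAM. If it
 * currently sits in VRAM overlapping the target range without
 * starting at 0, it is first bounced to system memory so the final
 * validate can place it at the start.
 */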
int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
                                struct vmw_dma_buffer *vmw_bo)
{
        struct ttm_buffer_object *bo = &vmw_bo->base;
        struct ttm_placement ne_placement = vmw_vram_ne_placement;
        int ret = 0;

        ne_placement.lpfn = bo->num_pages;

        /* interruptible? */
        ret = ttm_write_lock(&vmw_priv->active_master->lock, false);
        if (unlikely(ret != 0))
                return ret;

        ret = ttm_bo_reserve(bo, false, false, false, 0);
        if (unlikely(ret != 0))
                goto err_unlock;

        if (bo->mem.mem_type == TTM_PL_VRAM &&
            bo->mem.start < bo->num_pages &&
            bo->mem.start > 0)
                (void) ttm_bo_validate(bo, &vmw_sys_placement, false,
                                       false, false);

        ret = ttm_bo_validate(bo, &ne_placement, false, false, false);

        /* Could probably bug on */
        WARN_ON(bo->offset != 0);

        ttm_bo_unreserve(bo);
err_unlock:
        ttm_write_unlock(&vmw_priv->active_master->lock);

        return ret;
}
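
/*
 * Called when KMS takes over the display: disable dirty flushing,
 * wait for pending work, unmap the buffer object and evict it from
 * VRAM.
 */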
int vmw_fb_off(struct vmw_private *vmw_priv)
{
        struct fb_info *info;
        struct vmw_fb_par *par;
        unsigned long flags;

        if (!vmw_priv->fb_info)
                return -EINVAL;

        info = vmw_priv->fb_info;
        par = info->par;

        spin_lock_irqsave(&par->dirty.lock, flags);
        par->dirty.active = false;
        spin_unlock_irqrestore(&par->dirty.lock, flags);

        flush_scheduled_work();

        par->bo_ptr = NULL;
        ttm_bo_kunmap(&par->map);

        vmw_dmabuf_from_vram(vmw_priv, par->vmw_bo);

        return 0;
}
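
/*
 * Called when fbdev takes the display back: stop all overlays, pin
 * the buffer object at the start of VRAM again, remap it, re-enable
 * dirty flushing and force a full-screen update.
 */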
int vmw_fb_on(struct vmw_private *vmw_priv)
{
        struct fb_info *info;
        struct vmw_fb_par *par;
        unsigned long flags;
        bool dummy;
        int ret;

        if (!vmw_priv->fb_info)
                return -EINVAL;

        info = vmw_priv->fb_info;
        par = info->par;

        /* we are already active */
        if (par->bo_ptr != NULL)
                return 0;

        /* Make sure that all overlays are stopped when we take over */
        vmw_overlay_stop_all(vmw_priv);

        ret = vmw_dmabuf_to_start_of_vram(vmw_priv, par->vmw_bo);
        if (unlikely(ret != 0)) {
                DRM_ERROR("could not move buffer to start of VRAM\n");
                goto err_no_buffer;
        }

        ret = ttm_bo_kmap(&par->vmw_bo->base,
                          0,
                          par->vmw_bo->base.num_pages,
                          &par->map);
        BUG_ON(ret != 0);
        par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &dummy);

        spin_lock_irqsave(&par->dirty.lock, flags);
        par->dirty.active = true;
        spin_unlock_irqrestore(&par->dirty.lock, flags);

err_no_buffer:
        vmw_fb_set_par(info);

        vmw_fb_dirty_mark(par, 0, 0, info->var.xres, info->var.yres);

        /* If there already was stuff dirty we won't
         * schedule a new work, so let's do it now */
        schedule_delayed_work(&info->deferred_work, 0);

        return 0;
}