vmwgfx_fb.c
/**************************************************************************
 *
 * Copyright © 2007 David Airlie
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "drmP.h"
#include "vmwgfx_drv.h"

#include "ttm/ttm_placement.h"

#define VMW_DIRTY_DELAY (HZ / 30)

struct vmw_fb_par {
	struct vmw_private *vmw_priv;

	void *vmalloc;

	struct vmw_dma_buffer *vmw_bo;
	struct ttm_bo_kmap_obj map;

	u32 pseudo_palette[17];

	unsigned depth;
	unsigned bpp;

	unsigned max_width;
	unsigned max_height;

	void *bo_ptr;
	unsigned bo_size;
	bool bo_iowrite;

	struct {
		spinlock_t lock;
		bool active;
		unsigned x1;
		unsigned y1;
		unsigned x2;
		unsigned y2;
	} dirty;
};
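
/*
 * The console framebuffer is always truecolor; the array above is only
 * the 16-entry pseudo palette that fbcon uses for text drawing, which is
 * why regnos above 15 are rejected below.
 */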
static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
			    unsigned blue, unsigned transp,
			    struct fb_info *info)
{
	struct vmw_fb_par *par = info->par;
	u32 *pal = par->pseudo_palette;

	if (regno > 15) {
		DRM_ERROR("Bad regno %u.\n", regno);
		return 1;
	}

	switch (par->depth) {
	case 24:
	case 32:
		pal[regno] = ((red & 0xff00) << 8) |
			      (green & 0xff00) |
			     ((blue & 0xff00) >> 8);
		break;
	default:
		DRM_ERROR("Bad depth %u, bpp %u.\n", par->depth, par->bpp);
		return 1;
	}

	return 0;
}
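
/*
 * Validate a requested mode. Only 32 bits per pixel is accepted; the
 * effective depth (24 or 32) is inferred from the alpha channel length.
 */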
static int vmw_fb_check_var(struct fb_var_screeninfo *var,
			    struct fb_info *info)
{
	int depth = var->bits_per_pixel;
	struct vmw_fb_par *par = info->par;
	struct vmw_private *vmw_priv = par->vmw_priv;

	switch (var->bits_per_pixel) {
	case 32:
		depth = (var->transp.length > 0) ? 32 : 24;
		break;
	default:
		DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
		return -EINVAL;
	}

	switch (depth) {
	case 24:
		var->red.offset = 16;
		var->green.offset = 8;
		var->blue.offset = 0;
		var->red.length = 8;
		var->green.length = 8;
		var->blue.length = 8;
		var->transp.length = 0;
		var->transp.offset = 0;
		break;
	case 32:
		var->red.offset = 16;
		var->green.offset = 8;
		var->blue.offset = 0;
		var->red.length = 8;
		var->green.length = 8;
		var->blue.length = 8;
		var->transp.length = 8;
		var->transp.offset = 24;
		break;
	default:
		DRM_ERROR("Bad depth %u.\n", depth);
		return -EINVAL;
	}

	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
	    (var->xoffset != 0 || var->yoffset != 0)) {
		DRM_ERROR("Cannot handle panning without display topology.\n");
		return -EINVAL;
	}

	if ((var->xoffset + var->xres) > par->max_width ||
	    (var->yoffset + var->yres) > par->max_height) {
		DRM_ERROR("Requested geometry does not fit in the framebuffer.\n");
		return -EINVAL;
	}

	if (!vmw_kms_validate_mode_vram(vmw_priv,
					info->fix.line_length,
					var->yoffset + var->yres)) {
		DRM_ERROR("Requested geometry does not fit in VRAM.\n");
		return -EINVAL;
	}

	return 0;
}
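
/*
 * Program the SVGA device registers for the current mode. When the device
 * supports display topology, a single primary display is (re)defined to
 * match the requested geometry and panning offset.
 */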
static int vmw_fb_set_par(struct fb_info *info)
{
	struct vmw_fb_par *par = info->par;
	struct vmw_private *vmw_priv = par->vmw_priv;
	int ret;

	ret = vmw_kms_write_svga(vmw_priv, info->var.xres, info->var.yres,
				 info->fix.line_length,
				 par->bpp, par->depth);
	if (ret)
		return ret;

	if (vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) {
		/* TODO check if pitch and offset changes */
		vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, info->var.xoffset);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, info->var.yoffset);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, info->var.xres);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, info->var.yres);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
	}

	/* This warning is really helpful: if the framebuffer offset is not
	 * zero, the user can probably not see anything on the screen.
	 */
	WARN_ON(vmw_read(vmw_priv, SVGA_REG_FB_OFFSET) != 0);

	return 0;
}
static int vmw_fb_pan_display(struct fb_var_screeninfo *var,
			      struct fb_info *info)
{
	return 0;
}

static int vmw_fb_blank(int blank, struct fb_info *info)
{
	return 0;
}

/*
 * Dirty code
 */
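
/*
 * Copy dirty scanlines from the system-memory shadow (par->vmalloc) into
 * the VRAM-backed buffer object, then ask the device to refresh the dirty
 * rectangle with an SVGA_CMD_UPDATE FIFO command. Note that the copy loop
 * below walks from the first dirty scanline all the way to the end of the
 * shadow buffer, not just over the dirty rows.
 */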
static void vmw_fb_dirty_flush(struct vmw_fb_par *par)
{
	struct vmw_private *vmw_priv = par->vmw_priv;
	struct fb_info *info = vmw_priv->fb_info;
	int stride = (info->fix.line_length / 4);
	int *src = (int *)info->screen_base;
	__le32 __iomem *vram_mem = par->bo_ptr;
	unsigned long flags;
	unsigned x, y, w, h;
	int i, k;
	struct {
		uint32_t header;
		SVGAFifoCmdUpdate body;
	} *cmd;

	if (vmw_priv->suspended)
		return;

	spin_lock_irqsave(&par->dirty.lock, flags);
	if (!par->dirty.active) {
		spin_unlock_irqrestore(&par->dirty.lock, flags);
		return;
	}
	x = par->dirty.x1;
	y = par->dirty.y1;
	w = min(par->dirty.x2, info->var.xres) - x;
	h = min(par->dirty.y2, info->var.yres) - y;
	par->dirty.x1 = par->dirty.x2 = 0;
	par->dirty.y1 = par->dirty.y2 = 0;
	spin_unlock_irqrestore(&par->dirty.lock, flags);

	for (i = y * stride; i < info->fix.smem_len / 4; i += stride) {
		for (k = i + x; k < i + x + w && k < info->fix.smem_len / 4; k++)
			iowrite32(src[k], vram_mem + k);
	}

#if 0
	DRM_INFO("%s, (%u, %u) (%ux%u)\n", __func__, x, y, w, h);
#endif

	cmd = vmw_fifo_reserve(vmw_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		return;
	}

	cmd->header = cpu_to_le32(SVGA_CMD_UPDATE);
	cmd->body.x = cpu_to_le32(x);
	cmd->body.y = cpu_to_le32(y);
	cmd->body.width = cpu_to_le32(w);
	cmd->body.height = cpu_to_le32(h);
	vmw_fifo_commit(vmw_priv, sizeof(*cmd));
}
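
/*
 * Grow the dirty bounding box to include the given rectangle. The first
 * mark after a flush (i.e. an empty box) also schedules the deferred work
 * that will eventually flush it.
 */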
static void vmw_fb_dirty_mark(struct vmw_fb_par *par,
			      unsigned x1, unsigned y1,
			      unsigned width, unsigned height)
{
	struct fb_info *info = par->vmw_priv->fb_info;
	unsigned long flags;
	unsigned x2 = x1 + width;
	unsigned y2 = y1 + height;

	spin_lock_irqsave(&par->dirty.lock, flags);
	if (par->dirty.x1 == par->dirty.x2) {
		par->dirty.x1 = x1;
		par->dirty.y1 = y1;
		par->dirty.x2 = x2;
		par->dirty.y2 = y2;
		/* If we are active, start the dirty work;
		 * we share this work with the defio system.
		 */
		if (par->dirty.active)
			schedule_delayed_work(&info->deferred_work, VMW_DIRTY_DELAY);
	} else {
		if (x1 < par->dirty.x1)
			par->dirty.x1 = x1;
		if (y1 < par->dirty.y1)
			par->dirty.y1 = y1;
		if (x2 > par->dirty.x2)
			par->dirty.x2 = x2;
		if (y2 > par->dirty.y2)
			par->dirty.y2 = y2;
	}
	spin_unlock_irqrestore(&par->dirty.lock, flags);
}
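
/*
 * Deferred-I/O callback: writes through an mmap arrive here as a list of
 * touched pages. Convert the page span into a full-width range of
 * scanlines, mark it dirty and flush immediately.
 */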
static void vmw_deferred_io(struct fb_info *info,
			    struct list_head *pagelist)
{
	struct vmw_fb_par *par = info->par;
	unsigned long start, end, min, max;
	unsigned long flags;
	struct page *page;
	int y1, y2;

	min = ULONG_MAX;
	max = 0;
	list_for_each_entry(page, pagelist, lru) {
		start = page->index << PAGE_SHIFT;
		end = start + PAGE_SIZE - 1;
		min = min(min, start);
		max = max(max, end);
	}

	if (min < max) {
		y1 = min / info->fix.line_length;
		y2 = (max / info->fix.line_length) + 1;

		spin_lock_irqsave(&par->dirty.lock, flags);
		par->dirty.x1 = 0;
		par->dirty.y1 = y1;
		par->dirty.x2 = info->var.xres;
		par->dirty.y2 = y2;
		spin_unlock_irqrestore(&par->dirty.lock, flags);
	}

	vmw_fb_dirty_flush(par);
}

struct fb_deferred_io vmw_defio = {
	.delay		= VMW_DIRTY_DELAY,
	.deferred_io	= vmw_deferred_io,
};
/*
 * Draw code
 */
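
/*
 * The drawing ops wrap the generic cfb_* helpers, which render into the
 * system-memory shadow, and then mark the touched region dirty so it gets
 * copied out to VRAM.
 */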
static void vmw_fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
	cfb_fillrect(info, rect);
	vmw_fb_dirty_mark(info->par, rect->dx, rect->dy,
			  rect->width, rect->height);
}

static void vmw_fb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
	cfb_copyarea(info, region);
	vmw_fb_dirty_mark(info->par, region->dx, region->dy,
			  region->width, region->height);
}

static void vmw_fb_imageblit(struct fb_info *info, const struct fb_image *image)
{
	cfb_imageblit(info, image);
	vmw_fb_dirty_mark(info->par, image->dx, image->dy,
			  image->width, image->height);
}
/*
 * Bring up code
 */

static struct fb_ops vmw_fb_ops = {
	.owner = THIS_MODULE,
	.fb_check_var = vmw_fb_check_var,
	.fb_set_par = vmw_fb_set_par,
	.fb_setcolreg = vmw_fb_setcolreg,
	.fb_fillrect = vmw_fb_fillrect,
	.fb_copyarea = vmw_fb_copyarea,
	.fb_imageblit = vmw_fb_imageblit,
	.fb_pan_display = vmw_fb_pan_display,
	.fb_blank = vmw_fb_blank,
};
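
/*
 * Allocate a buffer object for the framebuffer, pinned (no-evict
 * placement) within the first "size" bytes of VRAM so that it can be
 * scanned out directly.
 */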
static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
			    size_t size, struct vmw_dma_buffer **out)
{
	struct vmw_dma_buffer *vmw_bo;
	struct ttm_placement ne_placement = vmw_vram_ne_placement;
	int ret;

	ne_placement.lpfn = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	/* interruptible? */
	ret = ttm_write_lock(&vmw_priv->fbdev_master.lock, false);
	if (unlikely(ret != 0))
		return ret;

	vmw_bo = kmalloc(sizeof(*vmw_bo), GFP_KERNEL);
	if (!vmw_bo) {
		ret = -ENOMEM;
		goto err_unlock;
	}

	ret = vmw_dmabuf_init(vmw_priv, vmw_bo, size,
			      &ne_placement,
			      false,
			      &vmw_dmabuf_bo_free);
	if (unlikely(ret != 0))
		goto err_unlock; /* init frees the buffer on failure */

	*out = vmw_bo;

	ttm_write_unlock(&vmw_priv->fbdev_master.lock);

	return 0;

err_unlock:
	ttm_write_unlock(&vmw_priv->fbdev_master.lock);
	return ret;
}
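
/*
 * Create and register the fbdev: a vmalloc'ed shadow buffer backed by a
 * pinned VRAM buffer object, kept in sync by the dirty tracking and
 * deferred I/O machinery above.
 */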
int vmw_fb_init(struct vmw_private *vmw_priv)
{
	struct device *device = &vmw_priv->dev->pdev->dev;
	struct vmw_fb_par *par;
	struct fb_info *info;
	unsigned initial_width, initial_height;
	unsigned fb_width, fb_height;
	unsigned fb_bpp, fb_depth, fb_offset, fb_pitch, fb_size;
	int ret;

	/* XXX These shouldn't be hardcoded. */
	initial_width = 800;
	initial_height = 600;

	fb_bpp = 32;
	fb_depth = 24;

	/* XXX Nor should these be. */
	fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
	fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);

	initial_width = min(fb_width, initial_width);
	initial_height = min(fb_height, initial_height);

	fb_pitch = fb_width * fb_bpp / 8;
	fb_size = fb_pitch * fb_height;
	fb_offset = vmw_read(vmw_priv, SVGA_REG_FB_OFFSET);

	info = framebuffer_alloc(sizeof(*par), device);
	if (!info)
		return -ENOMEM;

	/*
	 * Par
	 */
	vmw_priv->fb_info = info;
	par = info->par;
	par->vmw_priv = vmw_priv;
	par->depth = fb_depth;
	par->bpp = fb_bpp;
	par->vmalloc = NULL;
	par->max_width = fb_width;
	par->max_height = fb_height;

	/*
	 * Create buffers and alloc memory
	 */
	par->vmalloc = vmalloc(fb_size);
	if (unlikely(par->vmalloc == NULL)) {
		ret = -ENOMEM;
		goto err_free;
	}

	ret = vmw_fb_create_bo(vmw_priv, fb_size, &par->vmw_bo);
	if (unlikely(ret != 0))
		goto err_free;

	ret = ttm_bo_kmap(&par->vmw_bo->base,
			  0,
			  par->vmw_bo->base.num_pages,
			  &par->map);
	if (unlikely(ret != 0))
		goto err_unref;
	par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
	par->bo_size = fb_size;

	/*
	 * Fixed and var
	 */
	strcpy(info->fix.id, "svgadrmfb");
	info->fix.type = FB_TYPE_PACKED_PIXELS;
	info->fix.visual = FB_VISUAL_TRUECOLOR;
	info->fix.type_aux = 0;
	info->fix.xpanstep = 1; /* doing it in hw */
	info->fix.ypanstep = 1; /* doing it in hw */
	info->fix.ywrapstep = 0;
	info->fix.accel = FB_ACCEL_NONE;
	info->fix.line_length = fb_pitch;

	info->fix.smem_start = 0;
	info->fix.smem_len = fb_size;

	info->pseudo_palette = par->pseudo_palette;
	info->screen_base = par->vmalloc;
	info->screen_size = fb_size;

	info->flags = FBINFO_DEFAULT;
	info->fbops = &vmw_fb_ops;

	/* 24 bit depth per default */
	info->var.red.offset = 16;
	info->var.green.offset = 8;
	info->var.blue.offset = 0;
	info->var.red.length = 8;
	info->var.green.length = 8;
	info->var.blue.length = 8;
	info->var.transp.offset = 0;
	info->var.transp.length = 0;

	info->var.xres_virtual = fb_width;
	info->var.yres_virtual = fb_height;
	info->var.bits_per_pixel = par->bpp;
	info->var.xoffset = 0;
	info->var.yoffset = 0;
	info->var.activate = FB_ACTIVATE_NOW;
	info->var.height = -1;
	info->var.width = -1;

	info->var.xres = initial_width;
	info->var.yres = initial_height;

#if 0
	info->pixmap.size = 64 * 1024;
	info->pixmap.buf_align = 8;
	info->pixmap.access_align = 32;
	info->pixmap.flags = FB_PIXMAP_SYSTEM;
	info->pixmap.scan_align = 1;
#else
	info->pixmap.size = 0;
	info->pixmap.buf_align = 8;
	info->pixmap.access_align = 32;
	info->pixmap.flags = FB_PIXMAP_SYSTEM;
	info->pixmap.scan_align = 1;
#endif

	info->apertures = alloc_apertures(1);
	if (!info->apertures) {
		ret = -ENOMEM;
		goto err_aper;
	}
	info->apertures->ranges[0].base = vmw_priv->vram_start;
	info->apertures->ranges[0].size = vmw_priv->vram_size;

	/*
	 * Dirty & Deferred IO
	 */
	par->dirty.x1 = par->dirty.x2 = 0;
	par->dirty.y1 = par->dirty.y2 = 0;
	par->dirty.active = true;
	spin_lock_init(&par->dirty.lock);
	info->fbdefio = &vmw_defio;
	fb_deferred_io_init(info);

	ret = register_framebuffer(info);
	if (unlikely(ret != 0))
		goto err_defio;

	return 0;

err_defio:
	fb_deferred_io_cleanup(info);
err_aper:
	ttm_bo_kunmap(&par->map);
err_unref:
	ttm_bo_unref((struct ttm_buffer_object **)&par->vmw_bo);
err_free:
	vfree(par->vmalloc);
	framebuffer_release(info);
	vmw_priv->fb_info = NULL;
	return ret;
}
int vmw_fb_close(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;
	struct ttm_buffer_object *bo;

	if (!vmw_priv->fb_info)
		return 0;

	info = vmw_priv->fb_info;
	par = info->par;
	bo = &par->vmw_bo->base;
	par->vmw_bo = NULL;

	/* ??? order */
	fb_deferred_io_cleanup(info);
	unregister_framebuffer(info);

	ttm_bo_kunmap(&par->map);
	ttm_bo_unref(&bo);

	vfree(par->vmalloc);
	framebuffer_release(info);

	return 0;
}
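
/* Move the framebuffer buffer object out of VRAM into system memory. */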
int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv,
			 struct vmw_dma_buffer *vmw_bo)
{
	struct ttm_buffer_object *bo = &vmw_bo->base;
	int ret = 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_validate(bo, &vmw_sys_placement, false, false, false);
	ttm_bo_unreserve(bo);

	return ret;
}
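
/*
 * Pin the buffer object at the very start of VRAM (offset 0), first
 * evicting it to system memory if it currently sits in VRAM overlapping,
 * but not at, the target range.
 */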
int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
				struct vmw_dma_buffer *vmw_bo)
{
	struct ttm_buffer_object *bo = &vmw_bo->base;
	struct ttm_placement ne_placement = vmw_vram_ne_placement;
	int ret = 0;

	ne_placement.lpfn = bo->num_pages;

	/* interruptible? */
	ret = ttm_write_lock(&vmw_priv->active_master->lock, false);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (unlikely(ret != 0))
		goto err_unlock;

	if (bo->mem.mem_type == TTM_PL_VRAM &&
	    bo->mem.start < bo->num_pages &&
	    bo->mem.start > 0)
		(void) ttm_bo_validate(bo, &vmw_sys_placement, false,
				       false, false);

	ret = ttm_bo_validate(bo, &ne_placement, false, false, false);

	/* Could probably BUG_ON here instead */
	WARN_ON(bo->offset != 0);

	ttm_bo_unreserve(bo);
err_unlock:
	ttm_write_unlock(&vmw_priv->active_master->lock);

	return ret;
}
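
/*
 * Stop fbdev scanout: disable dirty tracking, drain pending deferred work
 * and move the buffer object out of VRAM, presumably so the VRAM can be
 * reused for other scanout buffers.
 */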
int vmw_fb_off(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;
	unsigned long flags;

	if (!vmw_priv->fb_info)
		return -EINVAL;

	info = vmw_priv->fb_info;
	par = info->par;

	spin_lock_irqsave(&par->dirty.lock, flags);
	par->dirty.active = false;
	spin_unlock_irqrestore(&par->dirty.lock, flags);

	flush_delayed_work_sync(&info->deferred_work);

	par->bo_ptr = NULL;
	ttm_bo_kunmap(&par->map);

	vmw_dmabuf_from_vram(vmw_priv, par->vmw_bo);

	return 0;
}
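
/*
 * Resume fbdev scanout: pin the buffer object back at the start of VRAM,
 * remap it, re-enable dirty tracking and force a full-screen refresh.
 */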
int vmw_fb_on(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;
	unsigned long flags;
	bool dummy;
	int ret;

	if (!vmw_priv->fb_info)
		return -EINVAL;

	info = vmw_priv->fb_info;
	par = info->par;

	/* we are already active */
	if (par->bo_ptr != NULL)
		return 0;

	/* Make sure that all overlays are stopped when we take over */
	vmw_overlay_stop_all(vmw_priv);

	ret = vmw_dmabuf_to_start_of_vram(vmw_priv, par->vmw_bo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("could not move buffer to start of VRAM\n");
		goto err_no_buffer;
	}

	ret = ttm_bo_kmap(&par->vmw_bo->base,
			  0,
			  par->vmw_bo->base.num_pages,
			  &par->map);
	BUG_ON(ret != 0);
	par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &dummy);

	spin_lock_irqsave(&par->dirty.lock, flags);
	par->dirty.active = true;
	spin_unlock_irqrestore(&par->dirty.lock, flags);

err_no_buffer:
	vmw_fb_set_par(info);

	vmw_fb_dirty_mark(par, 0, 0, info->var.xres, info->var.yres);

	/* If something was already dirty, no new work will be scheduled,
	 * so kick the deferred work now.
	 */
	schedule_delayed_work(&info->deferred_work, 0);

	return 0;
}