qxl_fb.c

/*
 * Copyright © 2013 Red Hat
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *     David Airlie
 */
#include <linux/module.h>
#include <linux/fb.h>

#include "drmP.h"
#include "drm/drm.h"
#include "drm/drm_crtc.h"
#include "drm/drm_crtc_helper.h"
#include "qxl_drv.h"
#include "qxl_object.h"
#include "drm_fb_helper.h"

#define QXL_DIRTY_DELAY (HZ / 30)

#define QXL_FB_OP_FILLRECT 1
#define QXL_FB_OP_COPYAREA 2
#define QXL_FB_OP_IMAGEBLIT 3

struct qxl_fb_op {
	struct list_head head;
	int op_type;
	union {
		struct fb_fillrect fr;
		struct fb_copyarea ca;
		struct fb_image ib;
	} op;
	void *img_data;
};

struct qxl_fbdev {
	struct drm_fb_helper helper;
	struct qxl_framebuffer qfb;
	struct list_head fbdev_list;
	struct qxl_device *qdev;

	spinlock_t delayed_ops_lock;
	struct list_head delayed_ops;
	void *shadow;
	int size;

	/* dirty memory logging */
	struct {
		spinlock_t lock;
		bool active;
		unsigned x1;
		unsigned y1;
		unsigned x2;
		unsigned y2;
	} dirty;
};
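
/*
 * Fill a qxl_fb_image from fbdev state: take the visual from fb_info and,
 * for TRUECOLOR/DIRECTCOLOR, copy the pseudo palette; with no fb_info,
 * fall back to a visual derived from the image depth.
 */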
static void qxl_fb_image_init(struct qxl_fb_image *qxl_fb_image,
			      struct qxl_device *qdev, struct fb_info *info,
			      const struct fb_image *image)
{
	qxl_fb_image->qdev = qdev;
	if (info) {
		qxl_fb_image->visual = info->fix.visual;
		if (qxl_fb_image->visual == FB_VISUAL_TRUECOLOR ||
		    qxl_fb_image->visual == FB_VISUAL_DIRECTCOLOR)
			memcpy(&qxl_fb_image->pseudo_palette,
			       info->pseudo_palette,
			       sizeof(qxl_fb_image->pseudo_palette));
	} else {
		/* fallback */
		if (image->depth == 1)
			qxl_fb_image->visual = FB_VISUAL_MONO10;
		else
			qxl_fb_image->visual = FB_VISUAL_DIRECTCOLOR;
	}
	if (image) {
		memcpy(&qxl_fb_image->fb_image, image,
		       sizeof(qxl_fb_image->fb_image));
	}
}
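
/*
 * Flush the accumulated dirty rectangle: blit the affected region of the
 * shadow buffer to the device as a single opaque fb image, then clear the
 * dirty extents.
 */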
static void qxl_fb_dirty_flush(struct fb_info *info)
{
	struct qxl_fbdev *qfbdev = info->par;
	struct qxl_device *qdev = qfbdev->qdev;
	struct qxl_fb_image qxl_fb_image;
	struct fb_image *image = &qxl_fb_image.fb_image;
	u32 x1, x2, y1, y2;
	/* TODO: hard coding 32 bpp */
	int stride = qfbdev->qfb.base.pitches[0] * 4;

	x1 = qfbdev->dirty.x1;
	x2 = qfbdev->dirty.x2;
	y1 = qfbdev->dirty.y1;
	y2 = qfbdev->dirty.y2;

	/*
	 * we are using a shadow draw buffer, at qdev->surface0_shadow
	 */
	qxl_io_log(qdev, "dirty x[%d, %d], y[%d, %d]", x1, x2, y1, y2);
	image->dx = x1;
	image->dy = y1;
	image->width = x2 - x1;
	image->height = y2 - y1;
	image->fg_color = 0xffffffff; /* unused, just to avoid uninitialized
					 warnings */
	image->bg_color = 0;
	image->depth = 32; /* TODO: take from somewhere? */
	image->cmap.start = 0;
	image->cmap.len = 0;
	image->cmap.red = NULL;
	image->cmap.green = NULL;
	image->cmap.blue = NULL;
	image->cmap.transp = NULL;

	image->data = qfbdev->shadow + (x1 * 4) + (stride * y1);

	qxl_fb_image_init(&qxl_fb_image, qdev, info, NULL);
	qxl_draw_opaque_fb(&qxl_fb_image, stride);
	qfbdev->dirty.x1 = 0;
	qfbdev->dirty.x2 = 0;
	qfbdev->dirty.y1 = 0;
	qfbdev->dirty.y2 = 0;
}
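
/*
 * Deferred-io callback: translate the list of touched pages into a range of
 * dirty scanlines, record it in qfbdev->dirty and flush it right away.
 */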
static void qxl_deferred_io(struct fb_info *info,
			    struct list_head *pagelist)
{
	struct qxl_fbdev *qfbdev = info->par;
	unsigned long start, end, min, max;
	struct page *page;
	int y1, y2;

	min = ULONG_MAX;
	max = 0;
	list_for_each_entry(page, pagelist, lru) {
		start = page->index << PAGE_SHIFT;
		end = start + PAGE_SIZE - 1;
		min = min(min, start);
		max = max(max, end);
	}

	if (min < max) {
		y1 = min / info->fix.line_length;
		y2 = (max / info->fix.line_length) + 1;

		/* TODO: add spin lock? */
		/* spin_lock_irqsave(&qfbdev->dirty.lock, flags); */
		qfbdev->dirty.x1 = 0;
		qfbdev->dirty.y1 = y1;
		qfbdev->dirty.x2 = info->var.xres;
		qfbdev->dirty.y2 = y2;
		/* spin_unlock_irqrestore(&qfbdev->dirty.lock, flags); */
	}

	qxl_fb_dirty_flush(info);
}

static struct fb_deferred_io qxl_defio = {
	.delay		= QXL_DIRTY_DELAY,
	.deferred_io	= qxl_deferred_io,
};
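
/*
 * The qxl_fb_delayed_*() helpers record fbcon operations that arrive in
 * atomic context on qfbdev->delayed_ops; qxl_fb_work() replays them later
 * from process context. An allocation failure silently drops the operation.
 */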
static void qxl_fb_delayed_fillrect(struct qxl_fbdev *qfbdev,
				    const struct fb_fillrect *fb_rect)
{
	struct qxl_fb_op *op;
	unsigned long flags;

	op = kmalloc(sizeof(struct qxl_fb_op), GFP_ATOMIC | __GFP_NOWARN);
	if (!op)
		return;
	op->op.fr = *fb_rect;
	op->img_data = NULL;
	op->op_type = QXL_FB_OP_FILLRECT;

	spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags);
	list_add_tail(&op->head, &qfbdev->delayed_ops);
	spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags);
}

static void qxl_fb_delayed_copyarea(struct qxl_fbdev *qfbdev,
				    const struct fb_copyarea *fb_copy)
{
	struct qxl_fb_op *op;
	unsigned long flags;

	op = kmalloc(sizeof(struct qxl_fb_op), GFP_ATOMIC | __GFP_NOWARN);
	if (!op)
		return;
	op->op.ca = *fb_copy;
	op->img_data = NULL;
	op->op_type = QXL_FB_OP_COPYAREA;

	spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags);
	list_add_tail(&op->head, &qfbdev->delayed_ops);
	spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags);
}

static void qxl_fb_delayed_imageblit(struct qxl_fbdev *qfbdev,
				     const struct fb_image *fb_image)
{
	struct qxl_fb_op *op;
	unsigned long flags;
	uint32_t size = fb_image->width * fb_image->height *
			(fb_image->depth >= 8 ? fb_image->depth / 8 : 1);

	op = kmalloc(sizeof(struct qxl_fb_op) + size, GFP_ATOMIC | __GFP_NOWARN);
	if (!op)
		return;
	op->op.ib = *fb_image;
	op->img_data = (void *)(op + 1);
	op->op_type = QXL_FB_OP_IMAGEBLIT;

	memcpy(op->img_data, fb_image->data, size);
	op->op.ib.data = op->img_data;

	spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags);
	list_add_tail(&op->head, &qfbdev->delayed_ops);
	spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags);
}
static void qxl_fb_fillrect_internal(struct fb_info *info,
				     const struct fb_fillrect *fb_rect)
{
	struct qxl_fbdev *qfbdev = info->par;
	struct qxl_device *qdev = qfbdev->qdev;
	struct qxl_rect rect;
	uint32_t color;
	int x = fb_rect->dx;
	int y = fb_rect->dy;
	int width = fb_rect->width;
	int height = fb_rect->height;
	uint16_t rop;
	struct qxl_draw_fill qxl_draw_fill_rec;

	if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
	    info->fix.visual == FB_VISUAL_DIRECTCOLOR)
		color = ((u32 *) (info->pseudo_palette))[fb_rect->color];
	else
		color = fb_rect->color;
	rect.left = x;
	rect.right = x + width;
	rect.top = y;
	rect.bottom = y + height;
	switch (fb_rect->rop) {
	case ROP_XOR:
		rop = SPICE_ROPD_OP_XOR;
		break;
	case ROP_COPY:
		rop = SPICE_ROPD_OP_PUT;
		break;
	default:
		pr_err("qxl_fb_fillrect(): unknown rop, "
		       "defaulting to SPICE_ROPD_OP_PUT\n");
		rop = SPICE_ROPD_OP_PUT;
	}

	qxl_draw_fill_rec.qdev = qdev;
	qxl_draw_fill_rec.rect = rect;
	qxl_draw_fill_rec.color = color;
	qxl_draw_fill_rec.rop = rop;

	qxl_draw_fill(&qxl_draw_fill_rec);
}
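
/*
 * fbcon entry points: when sleeping is not allowed, queue the operation and
 * kick the worker; otherwise flush any queued work first (to keep ordering)
 * and draw directly.
 */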
static void qxl_fb_fillrect(struct fb_info *info,
			    const struct fb_fillrect *fb_rect)
{
	struct qxl_fbdev *qfbdev = info->par;
	struct qxl_device *qdev = qfbdev->qdev;

	if (!drm_can_sleep()) {
		qxl_fb_delayed_fillrect(qfbdev, fb_rect);
		schedule_work(&qdev->fb_work);
		return;
	}
	/* make sure any previous work is done */
	flush_work(&qdev->fb_work);
	qxl_fb_fillrect_internal(info, fb_rect);
}

static void qxl_fb_copyarea_internal(struct fb_info *info,
				     const struct fb_copyarea *region)
{
	struct qxl_fbdev *qfbdev = info->par;

	qxl_draw_copyarea(qfbdev->qdev,
			  region->width, region->height,
			  region->sx, region->sy,
			  region->dx, region->dy);
}

static void qxl_fb_copyarea(struct fb_info *info,
			    const struct fb_copyarea *region)
{
	struct qxl_fbdev *qfbdev = info->par;
	struct qxl_device *qdev = qfbdev->qdev;

	if (!drm_can_sleep()) {
		qxl_fb_delayed_copyarea(qfbdev, region);
		schedule_work(&qdev->fb_work);
		return;
	}
	/* make sure any previous work is done */
	flush_work(&qdev->fb_work);
	qxl_fb_copyarea_internal(info, region);
}

static void qxl_fb_imageblit_safe(struct qxl_fb_image *qxl_fb_image)
{
	qxl_draw_opaque_fb(qxl_fb_image, 0);
}

static void qxl_fb_imageblit_internal(struct fb_info *info,
				      const struct fb_image *image)
{
	struct qxl_fbdev *qfbdev = info->par;
	struct qxl_fb_image qxl_fb_image;

	/* ensure proper order of rendering operations - TODO: must do this
	 * for everything. */
	qxl_fb_image_init(&qxl_fb_image, qfbdev->qdev, info, image);
	qxl_fb_imageblit_safe(&qxl_fb_image);
}

static void qxl_fb_imageblit(struct fb_info *info,
			     const struct fb_image *image)
{
	struct qxl_fbdev *qfbdev = info->par;
	struct qxl_device *qdev = qfbdev->qdev;

	if (!drm_can_sleep()) {
		qxl_fb_delayed_imageblit(qfbdev, image);
		schedule_work(&qdev->fb_work);
		return;
	}
	/* make sure any previous work is done */
	flush_work(&qdev->fb_work);
	qxl_fb_imageblit_internal(info, image);
}
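
/* Worker that drains the delayed-op list and replays each queued operation. */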
static void qxl_fb_work(struct work_struct *work)
{
	struct qxl_device *qdev = container_of(work, struct qxl_device, fb_work);
	unsigned long flags;
	struct qxl_fb_op *entry, *tmp;
	struct qxl_fbdev *qfbdev = qdev->mode_info.qfbdev;

	/* since the irq context just adds entries to the end of the
	   list dropping the lock should be fine, as entry isn't modified
	   in the operation code */
	spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags);
	list_for_each_entry_safe(entry, tmp, &qfbdev->delayed_ops, head) {
		spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags);
		switch (entry->op_type) {
		case QXL_FB_OP_FILLRECT:
			qxl_fb_fillrect_internal(qfbdev->helper.fbdev, &entry->op.fr);
			break;
		case QXL_FB_OP_COPYAREA:
			qxl_fb_copyarea_internal(qfbdev->helper.fbdev, &entry->op.ca);
			break;
		case QXL_FB_OP_IMAGEBLIT:
			qxl_fb_imageblit_internal(qfbdev->helper.fbdev, &entry->op.ib);
			break;
		}
		spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags);
		list_del(&entry->head);
		kfree(entry);
	}
	spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags);
}

int qxl_fb_init(struct qxl_device *qdev)
{
	INIT_WORK(&qdev->fb_work, qxl_fb_work);
	return 0;
}

static struct fb_ops qxlfb_ops = {
	.owner = THIS_MODULE,
	.fb_check_var = drm_fb_helper_check_var,
	.fb_set_par = drm_fb_helper_set_par, /* TODO: copy vmwgfx */
	.fb_fillrect = qxl_fb_fillrect,
	.fb_copyarea = qxl_fb_copyarea,
	.fb_imageblit = qxl_fb_imageblit,
	.fb_pan_display = drm_fb_helper_pan_display,
	.fb_blank = drm_fb_helper_blank,
	.fb_setcmap = drm_fb_helper_setcmap,
	.fb_debug_enter = drm_fb_helper_debug_enter,
	.fb_debug_leave = drm_fb_helper_debug_leave,
};

static void qxlfb_destroy_pinned_object(struct drm_gem_object *gobj)
{
	struct qxl_bo *qbo = gem_to_qxl_bo(gobj);
	int ret;

	ret = qxl_bo_reserve(qbo, false);
	if (likely(ret == 0)) {
		qxl_bo_kunmap(qbo);
		qxl_bo_unpin(qbo);
		qxl_bo_unreserve(qbo);
	}
	drm_gem_object_unreference_unlocked(gobj);
}

int qxl_get_handle_for_primary_fb(struct qxl_device *qdev,
				  struct drm_file *file_priv,
				  uint32_t *handle)
{
	int r;
	struct drm_gem_object *gobj = qdev->fbdev_qfb->obj;

	BUG_ON(!gobj);
	/* drm_gem_handle_create adds a reference - good */
	r = drm_gem_handle_create(file_priv, gobj, handle);
	if (r)
		return r;
	return 0;
}
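
/*
 * Allocate, pin and kmap a GEM object in the surface domain to back the
 * fbdev framebuffer.
 */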
static int qxlfb_create_pinned_object(struct qxl_fbdev *qfbdev,
				      struct drm_mode_fb_cmd2 *mode_cmd,
				      struct drm_gem_object **gobj_p)
{
	struct qxl_device *qdev = qfbdev->qdev;
	struct drm_gem_object *gobj = NULL;
	struct qxl_bo *qbo = NULL;
	int ret;
	int aligned_size, size;
	int height = mode_cmd->height;
	int bpp;
	int depth;

	drm_fb_get_bpp_depth(mode_cmd->pixel_format, &bpp, &depth);

	size = mode_cmd->pitches[0] * height;
	aligned_size = ALIGN(size, PAGE_SIZE);
	/* TODO: unallocate and reallocate surface0 for real. Hack to just
	 * have a large enough surface0 for 1024x768 Xorg 32bpp mode */
	ret = qxl_gem_object_create(qdev, aligned_size, 0,
				    QXL_GEM_DOMAIN_SURFACE,
				    false, /* is discardable */
				    false, /* is kernel (false means device) */
				    NULL,
				    &gobj);
	if (ret) {
		pr_err("failed to allocate framebuffer (%d)\n",
		       aligned_size);
		return -ENOMEM;
	}
	qbo = gem_to_qxl_bo(gobj);

	qbo->surf.width = mode_cmd->width;
	qbo->surf.height = mode_cmd->height;
	qbo->surf.stride = mode_cmd->pitches[0];
	qbo->surf.format = SPICE_SURFACE_FMT_32_xRGB;
	ret = qxl_bo_reserve(qbo, false);
	if (unlikely(ret != 0))
		goto out_unref;
	ret = qxl_bo_pin(qbo, QXL_GEM_DOMAIN_SURFACE, NULL);
	if (ret) {
		qxl_bo_unreserve(qbo);
		goto out_unref;
	}
	ret = qxl_bo_kmap(qbo, NULL);
	qxl_bo_unreserve(qbo); /* unreserve, will be mmaped */
	if (ret)
		goto out_unref;

	*gobj_p = gobj;
	return 0;
out_unref:
	qxlfb_destroy_pinned_object(gobj);
	*gobj_p = NULL;
	return ret;
}
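
/*
 * Create the fbdev framebuffer: pinned backing object, vmalloc'ed shadow
 * buffer, fb_info setup and deferred-io registration.
 */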
static int qxlfb_create(struct qxl_fbdev *qfbdev,
			struct drm_fb_helper_surface_size *sizes)
{
	struct qxl_device *qdev = qfbdev->qdev;
	struct fb_info *info;
	struct drm_framebuffer *fb = NULL;
	struct drm_mode_fb_cmd2 mode_cmd;
	struct drm_gem_object *gobj = NULL;
	struct qxl_bo *qbo = NULL;
	struct device *device = &qdev->pdev->dev;
	int ret;
	int size;
	int bpp = sizes->surface_bpp;
	int depth = sizes->surface_depth;
	void *shadow;

	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;

	mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((bpp + 1) / 8), 64);
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);

	ret = qxlfb_create_pinned_object(qfbdev, &mode_cmd, &gobj);
	if (ret)
		return ret;
	qbo = gem_to_qxl_bo(gobj);
	QXL_INFO(qdev, "%s: %dx%d %d\n", __func__, mode_cmd.width,
		 mode_cmd.height, mode_cmd.pitches[0]);

	shadow = vmalloc(mode_cmd.pitches[0] * mode_cmd.height);
	/* TODO: what's the usual response to memory allocation errors? */
	BUG_ON(!shadow);
	QXL_INFO(qdev,
		 "surface0 at gpu offset %lld, mmap_offset %lld (virt %p, shadow %p)\n",
		 qxl_bo_gpu_offset(qbo),
		 qxl_bo_mmap_offset(qbo),
		 qbo->kptr,
		 shadow);
	size = mode_cmd.pitches[0] * mode_cmd.height;

	info = framebuffer_alloc(0, device);
	if (info == NULL) {
		ret = -ENOMEM;
		goto out_unref;
	}

	info->par = qfbdev;

	qxl_framebuffer_init(qdev->ddev, &qfbdev->qfb, &mode_cmd, gobj);

	fb = &qfbdev->qfb.base;

	/* setup helper with fb data */
	qfbdev->helper.fb = fb;
	qfbdev->helper.fbdev = info;
	qfbdev->shadow = shadow;
	strcpy(info->fix.id, "qxldrmfb");

	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);

	info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA | FBINFO_HWACCEL_FILLRECT;
	info->fbops = &qxlfb_ops;

	/*
	 * TODO: using gobj->size in various places in this function. Not sure
	 * what the difference between the different sizes is.
	 */
	info->fix.smem_start = qdev->vram_base; /* TODO - correct? */
	info->fix.smem_len = gobj->size;
	info->screen_base = qfbdev->shadow;
	info->screen_size = gobj->size;

	drm_fb_helper_fill_var(info, &qfbdev->helper, sizes->fb_width,
			       sizes->fb_height);

	/* setup aperture base/size for vesafb takeover */
	info->apertures = alloc_apertures(1);
	if (!info->apertures) {
		ret = -ENOMEM;
		goto out_unref;
	}
	info->apertures->ranges[0].base = qdev->ddev->mode_config.fb_base;
	info->apertures->ranges[0].size = qdev->vram_size;

	info->fix.mmio_start = 0;
	info->fix.mmio_len = 0;

	if (info->screen_base == NULL) {
		ret = -ENOSPC;
		goto out_unref;
	}

	ret = fb_alloc_cmap(&info->cmap, 256, 0);
	if (ret) {
		ret = -ENOMEM;
		goto out_unref;
	}

	info->fbdefio = &qxl_defio;
	fb_deferred_io_init(info);

	qdev->fbdev_info = info;
	qdev->fbdev_qfb = &qfbdev->qfb;
	DRM_INFO("fb mappable at 0x%lX, size %lu\n", info->fix.smem_start, (unsigned long)info->screen_size);
	DRM_INFO("fb: depth %d, pitch %d, width %d, height %d\n", fb->depth, fb->pitches[0], fb->width, fb->height);
	return 0;

out_unref:
	if (qbo) {
		ret = qxl_bo_reserve(qbo, false);
		if (likely(ret == 0)) {
			qxl_bo_kunmap(qbo);
			qxl_bo_unpin(qbo);
			qxl_bo_unreserve(qbo);
		}
	}
	if (fb && ret) {
		drm_gem_object_unreference(gobj);
		drm_framebuffer_cleanup(fb);
		kfree(fb);
	}
	drm_gem_object_unreference(gobj);
	return ret;
}

static int qxl_fb_find_or_create_single(
		struct drm_fb_helper *helper,
		struct drm_fb_helper_surface_size *sizes)
{
	struct qxl_fbdev *qfbdev = (struct qxl_fbdev *)helper;
	int new_fb = 0;
	int ret;

	if (!helper->fb) {
		ret = qxlfb_create(qfbdev, sizes);
		if (ret)
			return ret;
		new_fb = 1;
	}
	return new_fb;
}

static int qxl_fbdev_destroy(struct drm_device *dev, struct qxl_fbdev *qfbdev)
{
	struct fb_info *info;
	struct qxl_framebuffer *qfb = &qfbdev->qfb;

	if (qfbdev->helper.fbdev) {
		info = qfbdev->helper.fbdev;

		unregister_framebuffer(info);
		framebuffer_release(info);
	}
	if (qfb->obj) {
		qxlfb_destroy_pinned_object(qfb->obj);
		qfb->obj = NULL;
	}
	drm_fb_helper_fini(&qfbdev->helper);
	vfree(qfbdev->shadow);
	drm_framebuffer_cleanup(&qfb->base);

	return 0;
}

static struct drm_fb_helper_funcs qxl_fb_helper_funcs = {
	.fb_probe = qxl_fb_find_or_create_single,
};

int qxl_fbdev_init(struct qxl_device *qdev)
{
	struct qxl_fbdev *qfbdev;
	int bpp_sel = 32; /* TODO: parameter from somewhere? */
	int ret;

	qfbdev = kzalloc(sizeof(struct qxl_fbdev), GFP_KERNEL);
	if (!qfbdev)
		return -ENOMEM;

	qfbdev->qdev = qdev;
	qdev->mode_info.qfbdev = qfbdev;
	qfbdev->helper.funcs = &qxl_fb_helper_funcs;

	spin_lock_init(&qfbdev->delayed_ops_lock);
	INIT_LIST_HEAD(&qfbdev->delayed_ops);
	ret = drm_fb_helper_init(qdev->ddev, &qfbdev->helper,
				 qxl_num_crtc /* num_crtc - QXL supports just 1 */,
				 QXLFB_CONN_LIMIT);
	if (ret) {
		kfree(qfbdev);
		return ret;
	}

	drm_fb_helper_single_add_all_connectors(&qfbdev->helper);
	drm_fb_helper_initial_config(&qfbdev->helper, bpp_sel);
	return 0;
}

void qxl_fbdev_fini(struct qxl_device *qdev)
{
	if (!qdev->mode_info.qfbdev)
		return;

	qxl_fbdev_destroy(qdev->ddev, qdev->mode_info.qfbdev);
	kfree(qdev->mode_info.qfbdev);
	qdev->mode_info.qfbdev = NULL;
}

void qxl_fbdev_set_suspend(struct qxl_device *qdev, int state)
{
	fb_set_suspend(qdev->mode_info.qfbdev->helper.fbdev, state);
}

bool qxl_fbdev_qobj_is_fb(struct qxl_device *qdev, struct qxl_bo *qobj)
{
	if (qobj == gem_to_qxl_bo(qdev->mode_info.qfbdev->qfb.obj))
		return true;
	return false;
}