@@ -29,16 +29,52 @@ static void mga_dirty_update(struct mga_fbdev *mfbdev,
 	int bpp = (mfbdev->mfb.base.bits_per_pixel + 7)/8;
 	int ret;
 	bool unmap = false;
+	bool store_for_later = false;
+	int x2, y2;
+	unsigned long flags;
 
 	obj = mfbdev->mfb.obj;
 	bo = gem_to_mga_bo(obj);
 
+	/*
+	 * try and reserve the BO, if we fail with busy
+	 * then the BO is being moved and we should
+	 * store up the damage until later.
+	 */
 	ret = mgag200_bo_reserve(bo, true);
 	if (ret) {
-		DRM_ERROR("failed to reserve fb bo\n");
+		if (ret != -EBUSY)
+			return;
+
+		store_for_later = true;
+	}
+
+	x2 = x + width - 1;
+	y2 = y + height - 1;
+	spin_lock_irqsave(&mfbdev->dirty_lock, flags);
+
+	if (mfbdev->y1 < y)
+		y = mfbdev->y1;
+	if (mfbdev->y2 > y2)
+		y2 = mfbdev->y2;
+	if (mfbdev->x1 < x)
+		x = mfbdev->x1;
+	if (mfbdev->x2 > x2)
+		x2 = mfbdev->x2;
+
+	if (store_for_later) {
+		mfbdev->x1 = x;
+		mfbdev->x2 = x2;
+		mfbdev->y1 = y;
+		mfbdev->y2 = y2;
+		spin_unlock_irqrestore(&mfbdev->dirty_lock, flags);
 		return;
 	}
 
+	mfbdev->x1 = mfbdev->y1 = INT_MAX;
+	mfbdev->x2 = mfbdev->y2 = 0;
+	spin_unlock_irqrestore(&mfbdev->dirty_lock, flags);
+
 	if (!bo->kmap.virtual) {
 		ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
 		if (ret) {
@@ -48,10 +84,10 @@ static void mga_dirty_update(struct mga_fbdev *mfbdev,
 		}
 		unmap = true;
 	}
-	for (i = y; i < y + height; i++) {
+	for (i = y; i <= y2; i++) {
 		/* assume equal stride for now */
 		src_offset = dst_offset = i * mfbdev->mfb.base.pitches[0] + (x * bpp);
-		memcpy_toio(bo->kmap.virtual + src_offset, mfbdev->sysram + src_offset, width * bpp);
+		memcpy_toio(bo->kmap.virtual + src_offset, mfbdev->sysram + src_offset, (x2 - x + 1) * bpp);
 
 	}
 	if (unmap)
@@ -252,6 +288,7 @@ int mgag200_fbdev_init(struct mga_device *mdev)
 
 	mdev->mfbdev = mfbdev;
 	mfbdev->helper.funcs = &mga_fb_helper_funcs;
+	spin_lock_init(&mfbdev->dirty_lock);
 
 	ret = drm_fb_helper_init(mdev->dev, &mfbdev->helper,
 				 mdev->num_crtc, MGAG200FB_CONN_LIMIT);