@@ -74,6 +74,7 @@ static int fb_deferred_io_mkwrite(struct vm_area_struct *vma,
 {
 	struct fb_info *info = vma->vm_private_data;
 	struct fb_deferred_io *fbdefio = info->fbdefio;
+	struct page *cur;
 
 	/* this is a callback we get when userspace first tries to
 	write to the page. we schedule a workqueue. that workqueue
@@ -83,7 +84,24 @@ static int fb_deferred_io_mkwrite(struct vm_area_struct *vma,
 
 	/* protect against the workqueue changing the page list */
 	mutex_lock(&fbdefio->lock);
-	list_add(&page->lru, &fbdefio->pagelist);
+
+	/* we loop through the pagelist before adding in order
+	to keep the pagelist sorted */
+	list_for_each_entry(cur, &fbdefio->pagelist, lru) {
+		/* this check is to catch the case where a new
+		process could start writing to the same page
+		through a new pte. this new access can cause the
+		mkwrite even when the original process's pte is marked
+		writable */
+		if (unlikely(cur == page))
+			goto page_already_added;
+		else if (cur->index > page->index)
+			break;
+	}
+
+	list_add_tail(&page->lru, &cur->lru);
+
+page_already_added:
 	mutex_unlock(&fbdefio->lock);
 
 	/* come back after delay to process the deferred IO */
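
For reference, the following is a minimal userspace sketch (not part of the patch) of the sorted-insert pattern the hunk above implements: walk a doubly-linked list kept ordered by index, bail out if the entry is already linked, and otherwise insert it just before the first entry with a larger index, which is the effect of list_add_tail() when handed another entry's lru as the "head" argument. The struct node type, its field names, and the helper functions are illustrative stand-ins, not kernel API.

/*
 * Minimal userspace sketch of the sorted-insert pattern above.
 * 'struct node' and the helpers are illustrative stand-ins for
 * struct page, list_for_each_entry() and list_add_tail().
 */
#include <stdio.h>

struct node {
	unsigned long index;		/* stand-in for page->index */
	struct node *prev, *next;
};

/* circular list with a dedicated head, like an empty list_head */
static void list_init(struct node *head)
{
	head->prev = head->next = head;
}

/*
 * Insert 'entry' immediately before 'pos'; this mirrors what
 * list_add_tail(&page->lru, &cur->lru) does in the hunk above.
 */
static void insert_before(struct node *entry, struct node *pos)
{
	entry->prev = pos->prev;
	entry->next = pos;
	pos->prev->next = entry;
	pos->prev = entry;
}

/* add 'page' keeping the list sorted; skip it if already present */
static void sorted_add(struct node *head, struct node *page)
{
	struct node *cur;

	for (cur = head->next; cur != head; cur = cur->next) {
		if (cur == page)
			return;			/* already on the list */
		if (cur->index > page->index)
			break;			/* insert before this one */
	}
	/* falling off the loop leaves cur == head, so this appends */
	insert_before(page, cur);
}

int main(void)
{
	struct node head, a = { .index = 3 }, b = { .index = 1 }, c = { .index = 2 };
	struct node *cur;

	list_init(&head);
	sorted_add(&head, &a);
	sorted_add(&head, &b);
	sorted_add(&head, &c);
	sorted_add(&head, &c);		/* duplicate add is a no-op */

	for (cur = head.next; cur != &head; cur = cur->next)
		printf("%lu\n", cur->index);	/* prints 1, 2, 3 */
	return 0;
}

The duplicate check in the loop matters for the same reason the patch comment gives: a second pte mapping the same page can trigger another mkwrite before the workqueue has flushed the list, and adding the page twice would corrupt it.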