fb_defio.c

/*
 *  linux/drivers/video/fb_defio.c
 *
 *  Copyright (C) 2006 Jaya Kumar
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of this archive
 * for more details.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/fb.h>
#include <linux/list.h>

/* to support deferred IO */
#include <linux/rmap.h>
#include <linux/pagemap.h>

/* this is to find and return the vmalloc-ed fb pages */
static int fb_deferred_io_fault(struct vm_area_struct *vma,
				struct vm_fault *vmf)
{
	unsigned long offset;
	struct page *page;
	struct fb_info *info = vma->vm_private_data;
	/* info->screen_base is virtual memory */
	void *screen_base = (void __force *) info->screen_base;

	offset = vmf->pgoff << PAGE_SHIFT;
	if (offset >= info->fix.smem_len)
		return VM_FAULT_SIGBUS;

	page = vmalloc_to_page(screen_base + offset);
	if (!page)
		return VM_FAULT_SIGBUS;

	get_page(page);

	if (vma->vm_file)
		page->mapping = vma->vm_file->f_mapping;
	else
		printk(KERN_ERR "no mapping available\n");

	BUG_ON(!page->mapping);
	page->index = vmf->pgoff;

	vmf->page = page;
	return 0;
}

int fb_deferred_io_fsync(struct file *file, struct dentry *dentry, int datasync)
{
	struct fb_info *info = file->private_data;

	/* Kill off the delayed work */
	cancel_rearming_delayed_work(&info->deferred_work);

	/* Run it immediately */
	return schedule_delayed_work(&info->deferred_work, 0);
}
EXPORT_SYMBOL_GPL(fb_deferred_io_fsync);

/* vm_ops->page_mkwrite handler */
static int fb_deferred_io_mkwrite(struct vm_area_struct *vma,
				  struct page *page)
{
	struct fb_info *info = vma->vm_private_data;
	struct fb_deferred_io *fbdefio = info->fbdefio;

	/* this is a callback we get when userspace first tries to
	write to the page. we schedule a workqueue. that workqueue
	will eventually mkclean the touched pages and execute the
	deferred framebuffer IO. then if userspace touches a page
	again, we repeat the same scheme */

	/* protect against the workqueue changing the page list */
	mutex_lock(&fbdefio->lock);
	list_add(&page->lru, &fbdefio->pagelist);
	mutex_unlock(&fbdefio->lock);

	/* come back after delay to process the deferred IO */
	schedule_delayed_work(&info->deferred_work, fbdefio->delay);
	return 0;
}

static struct vm_operations_struct fb_deferred_io_vm_ops = {
	.fault		= fb_deferred_io_fault,
	.page_mkwrite	= fb_deferred_io_mkwrite,
};

static int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	vma->vm_ops = &fb_deferred_io_vm_ops;
	vma->vm_flags |= (VM_IO | VM_RESERVED | VM_DONTEXPAND);
	vma->vm_private_data = info;
	return 0;
}

/* workqueue callback */
static void fb_deferred_io_work(struct work_struct *work)
{
	struct fb_info *info = container_of(work, struct fb_info,
						deferred_work.work);
	struct list_head *node, *next;
	struct page *cur;
	struct fb_deferred_io *fbdefio = info->fbdefio;

	/* here we mkclean the pages, then do all deferred IO */
	mutex_lock(&fbdefio->lock);
	list_for_each_entry(cur, &fbdefio->pagelist, lru) {
		lock_page(cur);
		page_mkclean(cur);
		unlock_page(cur);
	}

	/* driver's callback with pagelist */
	fbdefio->deferred_io(info, &fbdefio->pagelist);

	/* clear the list */
	list_for_each_safe(node, next, &fbdefio->pagelist) {
		list_del(node);
	}
	mutex_unlock(&fbdefio->lock);
}

void fb_deferred_io_init(struct fb_info *info)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;

	BUG_ON(!fbdefio);
	mutex_init(&fbdefio->lock);
	info->fbops->fb_mmap = fb_deferred_io_mmap;
	INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
	INIT_LIST_HEAD(&fbdefio->pagelist);
	if (fbdefio->delay == 0) /* set a default of 1 s */
		fbdefio->delay = HZ;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_init);

void fb_deferred_io_cleanup(struct fb_info *info)
{
	void *screen_base = (void __force *) info->screen_base;
	struct fb_deferred_io *fbdefio = info->fbdefio;
	struct page *page;
	int i;

	BUG_ON(!fbdefio);
	cancel_delayed_work(&info->deferred_work);
	flush_scheduled_work();

	/* clear out the mapping that we setup */
	for (i = 0; i < info->fix.smem_len; i += PAGE_SIZE) {
		page = vmalloc_to_page(screen_base + i);
		page->mapping = NULL;
	}
}
EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);

MODULE_LICENSE("GPL");
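
/*
 * Usage sketch (not part of this file): a minimal example of how a driver
 * might hook into the deferred IO machinery exported above. It only relies
 * on what this file shows: a struct fb_deferred_io with a .delay and a
 * .deferred_io callback, fb_deferred_io_init() before registration, and
 * fb_deferred_io_cleanup() on teardown. The example_fb_* names and the
 * example_fb_dpy_update_page() helper are hypothetical, for illustration
 * only; a real driver supplies its own device push routine.
 */
#if 0
static void example_fb_dpy_deferred_io(struct fb_info *info,
					struct list_head *pagelist)
{
	struct page *cur;

	/* pages were chained onto the list via page->lru by mkwrite above;
	   page->index is the pgoff set in the fault handler */
	list_for_each_entry(cur, pagelist, lru)
		example_fb_dpy_update_page(info, cur->index << PAGE_SHIFT);
}

static struct fb_deferred_io example_fb_defio = {
	.delay		= HZ,		/* batch writes for about one second */
	.deferred_io	= example_fb_dpy_deferred_io,
};

static int example_fb_probe(struct fb_info *info)
{
	/* wire up deferred IO before registering the framebuffer;
	   fb_deferred_io_init() also installs fb_deferred_io_mmap */
	info->fbdefio = &example_fb_defio;
	fb_deferred_io_init(info);
	return register_framebuffer(info);
}

static void example_fb_remove(struct fb_info *info)
{
	unregister_framebuffer(info);
	/* flushes pending work and clears the page->mapping pointers */
	fb_deferred_io_cleanup(info);
}
#endif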