/*
 *	linux/mm/filemap_xip.c
 *
 * Copyright (C) 2005 IBM Corporation
 * Author: Carsten Otte <cotte@de.ibm.com>
 *
 * derived from linux/mm/filemap.c - Copyright (C) Linus Torvalds
 *
 */

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/uio.h>
#include <linux/rmap.h>
#include <linux/sched.h>
#include <asm/tlbflush.h>

/*
 * We do use our own empty page to avoid interference with other users
 * of ZERO_PAGE(), such as /dev/zero
 */
static struct page *__xip_sparse_page;

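/*
 * The sparse page is allocated at most once and never freed.  The
 * unlocked check in xip_sparse_page() is an optimistic fast path;
 * the spinlock only resolves the race between two concurrent
 * first-time allocations.
 */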
static struct page *xip_sparse_page(void)
{
	if (!__xip_sparse_page) {
		struct page *page = alloc_page(GFP_HIGHUSER | __GFP_ZERO);

		if (page) {
			static DEFINE_SPINLOCK(xip_alloc_lock);
			spin_lock(&xip_alloc_lock);
			if (!__xip_sparse_page)
				__xip_sparse_page = page;
			else
				__free_page(page);
			spin_unlock(&xip_alloc_lock);
		}
	}
	return __xip_sparse_page;
}

/*
 * This is a file read routine for execute in place files, and uses
 * the mapping->a_ops->get_xip_page() function for the actual low-level
 * stuff.
 *
 * Note the struct file* is not used at all.  It may be NULL.
 */
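/*
 * Unlike do_generic_mapping_read(), nothing here goes through the
 * page cache: data is copied straight out of the pages returned by
 * get_xip_page().
 */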
static void
do_xip_mapping_read(struct address_space *mapping,
		    struct file_ra_state *_ra,
		    struct file *filp,
		    loff_t *ppos,
		    read_descriptor_t *desc,
		    read_actor_t actor)
{
	struct inode *inode = mapping->host;
	pgoff_t index, end_index;
	unsigned long offset;
	loff_t isize;

	BUG_ON(!mapping->a_ops->get_xip_page);

	index = *ppos >> PAGE_CACHE_SHIFT;
	offset = *ppos & ~PAGE_CACHE_MASK;

	isize = i_size_read(inode);
	if (!isize)
		goto out;

	end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
	for (;;) {
		struct page *page;
		unsigned long nr, ret;

		/* nr is the maximum number of bytes to copy from this page */
		nr = PAGE_CACHE_SIZE;
		if (index >= end_index) {
			if (index > end_index)
				goto out;
			nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
			if (nr <= offset) {
				goto out;
			}
		}
		nr = nr - offset;
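		/*
		 * get_xip_page() addresses data in 512-byte sector units,
		 * so the page index is scaled by PAGE_SIZE/512 here and at
		 * the other call sites below; the last argument selects
		 * whether a missing block should be allocated (0 = no).
		 */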
		page = mapping->a_ops->get_xip_page(mapping,
			index*(PAGE_SIZE/512), 0);
		if (!page)
			goto no_xip_page;
		if (unlikely(IS_ERR(page))) {
			if (PTR_ERR(page) == -ENODATA) {
				/* sparse */
				page = ZERO_PAGE(0);
			} else {
				desc->error = PTR_ERR(page);
				goto out;
			}
		}

		/* If users can be writing to this page using arbitrary
		 * virtual addresses, take care about potential aliasing
		 * before reading the page on the kernel side.
		 */
		if (mapping_writably_mapped(mapping))
			flush_dcache_page(page);

		/*
		 * Ok, we have the page, so now we can copy it to user space...
		 *
		 * The actor routine returns how many bytes were actually used..
		 * NOTE! This may not be the same as how much of a user buffer
		 * we filled up (we may be padding etc), so we can only update
		 * "pos" here (the actor routine has to update the user buffer
		 * pointers and the remaining count).
		 */
		ret = actor(desc, page, offset, nr);
		offset += ret;
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;

		if (ret == nr && desc->count)
			continue;
		goto out;

no_xip_page:
		/* Did not get the page.  Report it */
		desc->error = -EIO;
		goto out;
	}

out:
	*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
	if (filp)
		file_accessed(filp);
}

ssize_t
xip_file_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
{
	read_descriptor_t desc;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	desc.written = 0;
	desc.arg.buf = buf;
	desc.count = len;
	desc.error = 0;

	do_xip_mapping_read(filp->f_mapping, &filp->f_ra, filp,
			    ppos, &desc, file_read_actor);

	if (desc.written)
		return desc.written;
	else
		return desc.error;
}
EXPORT_SYMBOL_GPL(xip_file_read);

/*
 * __xip_unmap is invoked from xip_file_fault and
 * __xip_file_write
 *
 * This function walks all vmas of the address_space and unmaps the
 * __xip_sparse_page when found at pgoff.
 */
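/*
 * Both callers do this when a hole is replaced by a freshly allocated
 * block: any mapping of the shared zero page at that offset is stale
 * and must be shot down so that the next fault installs the new data.
 */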
static void
__xip_unmap(struct address_space *mapping,
	    unsigned long pgoff)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	struct prio_tree_iter iter;
	unsigned long address;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	struct page *page;

	page = __xip_sparse_page;
	if (!page)
		return;

	spin_lock(&mapping->i_mmap_lock);
	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		mm = vma->vm_mm;
		address = vma->vm_start +
			((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
		BUG_ON(address < vma->vm_start || address >= vma->vm_end);
		pte = page_check_address(page, mm, address, &ptl);
		if (pte) {
			/* Nuke the page table entry. */
			flush_cache_page(vma, address, pte_pfn(*pte));
			pteval = ptep_clear_flush(vma, address, pte);
			page_remove_rmap(page, vma);
			dec_mm_counter(mm, file_rss);
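			/*
			 * The sparse zero page is only ever mapped
			 * read-only (writable faults allocate a real
			 * block instead), so it can never be dirty.
			 */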
			BUG_ON(pte_dirty(pteval));
			pte_unmap_unlock(pte, ptl);
			page_cache_release(page);
		}
	}
	spin_unlock(&mapping->i_mmap_lock);
}

/*
 * xip_file_fault() is invoked via the vma operations vector for a
 * mapped memory region to read in file data during a page fault.
 *
 * This function is derived from filemap_fault, but used for execute in place
 */
static int xip_file_fault(struct vm_area_struct *area, struct vm_fault *vmf)
{
	struct file *file = area->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct page *page;
	pgoff_t size;

	/* XXX: are VM_FAULT_ codes OK? */

	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (vmf->pgoff >= size)
		return VM_FAULT_SIGBUS;

	page = mapping->a_ops->get_xip_page(mapping,
					vmf->pgoff*(PAGE_SIZE/512), 0);
	if (!IS_ERR(page))
		goto out;
	if (PTR_ERR(page) != -ENODATA)
		return VM_FAULT_OOM;

	/* sparse block */
	if ((area->vm_flags & (VM_WRITE | VM_MAYWRITE)) &&
	    (area->vm_flags & (VM_SHARED | VM_MAYSHARE)) &&
	    (!(mapping->host->i_sb->s_flags & MS_RDONLY))) {
		/* maybe shared writable, allocate new block */
		page = mapping->a_ops->get_xip_page(mapping,
					vmf->pgoff*(PAGE_SIZE/512), 1);
		if (IS_ERR(page))
			return VM_FAULT_SIGBUS;
		/* unmap page at pgoff from all other vmas */
		__xip_unmap(mapping, vmf->pgoff);
	} else {
		/* not shared and writable, use xip_sparse_page() */
		page = xip_sparse_page();
		if (!page)
			return VM_FAULT_OOM;
	}

out:
	page_cache_get(page);
	vmf->page = page;
	return 0;
}

static struct vm_operations_struct xip_file_vm_ops = {
	.fault	= xip_file_fault,
};

int xip_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	BUG_ON(!file->f_mapping->a_ops->get_xip_page);

	file_accessed(file);
	vma->vm_ops = &xip_file_vm_ops;
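	/* the ->fault handler is also usable for nonlinear mappings */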
	vma->vm_flags |= VM_CAN_NONLINEAR;
	return 0;
}
EXPORT_SYMBOL_GPL(xip_file_mmap);

static ssize_t
__xip_file_write(struct file *filp, const char __user *buf,
		 size_t count, loff_t pos, loff_t *ppos)
{
	struct address_space *mapping = filp->f_mapping;
	const struct address_space_operations *a_ops = mapping->a_ops;
	struct inode *inode = mapping->host;
	long status = 0;
	struct page *page;
	size_t bytes;
	ssize_t written = 0;

	BUG_ON(!mapping->a_ops->get_xip_page);

	do {
		unsigned long index;
		unsigned long offset;
		size_t copied;
		char *kaddr;

		offset = (pos & (PAGE_CACHE_SIZE - 1)); /* Within page */
		index = pos >> PAGE_CACHE_SHIFT;
		bytes = PAGE_CACHE_SIZE - offset;
		if (bytes > count)
			bytes = count;

		page = a_ops->get_xip_page(mapping,
					   index*(PAGE_SIZE/512), 0);
		if (IS_ERR(page) && (PTR_ERR(page) == -ENODATA)) {
			/* sparse block: allocate a new block */
			page = a_ops->get_xip_page(mapping,
						   index*(PAGE_SIZE/512), 1);
			if (!IS_ERR(page))
				/* unmap page at pgoff from all other vmas */
				__xip_unmap(mapping, index);
		}
		if (IS_ERR(page)) {
			status = PTR_ERR(page);
			break;
		}

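		/*
		 * Prefault the user buffer: the copy below runs under
		 * kmap_atomic() and cannot service a page fault, so
		 * faulting the source pages in first makes a short copy,
		 * and hence -EFAULT, unlikely.
		 */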
		fault_in_pages_readable(buf, bytes);
		kaddr = kmap_atomic(page, KM_USER0);
		copied = bytes -
			__copy_from_user_inatomic_nocache(kaddr + offset, buf, bytes);
		kunmap_atomic(kaddr, KM_USER0);
		flush_dcache_page(page);

		if (likely(copied > 0)) {
			status = copied;

			if (status >= 0) {
				written += status;
				count -= status;
				pos += status;
				buf += status;
			}
		}
		if (unlikely(copied != bytes))
			if (status >= 0)
				status = -EFAULT;
		if (status < 0)
			break;
	} while (count);
	*ppos = pos;
	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold i_mutex.
	 */
	if (pos > inode->i_size) {
		i_size_write(inode, pos);
		mark_inode_dirty(inode);
	}

	return written ? written : status;
}

ssize_t
xip_file_write(struct file *filp, const char __user *buf, size_t len,
	       loff_t *ppos)
{
	struct address_space *mapping = filp->f_mapping;
	struct inode *inode = mapping->host;
	size_t count;
	loff_t pos;
	ssize_t ret;

	mutex_lock(&inode->i_mutex);

	if (!access_ok(VERIFY_READ, buf, len)) {
		ret = -EFAULT;
		goto out_up;
	}

	pos = *ppos;
	count = len;

	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = mapping->backing_dev_info;

	ret = generic_write_checks(filp, &pos, &count, S_ISBLK(inode->i_mode));
	if (ret)
		goto out_backing;
	if (count == 0)
		goto out_backing;

	ret = remove_suid(filp->f_path.dentry);
	if (ret)
		goto out_backing;

	file_update_time(filp);

	ret = __xip_file_write(filp, buf, count, pos, ppos);

out_backing:
	current->backing_dev_info = NULL;
out_up:
	mutex_unlock(&inode->i_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(xip_file_write);

/*
 * truncate a page used for execute in place
 * functionality is analogous to block_truncate_page but does use
 * get_xip_page to get the page instead of the page cache
 */
int
xip_truncate_page(struct address_space *mapping, loff_t from)
{
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE - 1);
	unsigned blocksize;
	unsigned length;
	struct page *page;

	BUG_ON(!mapping->a_ops->get_xip_page);

	blocksize = 1 << mapping->host->i_blkbits;
	length = offset & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!length)
		return 0;

	length = blocksize - length;

	page = mapping->a_ops->get_xip_page(mapping,
					    index*(PAGE_SIZE/512), 0);
	if (!page)
		return -ENOMEM;
	if (unlikely(IS_ERR(page))) {
		if (PTR_ERR(page) == -ENODATA)
			/* Hole? No need to truncate */
			return 0;
		else
			return PTR_ERR(page);
	}
	zero_user(page, offset, length);
	return 0;
}
EXPORT_SYMBOL_GPL(xip_truncate_page);
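
/*
 * Usage sketch (not part of this file): a filesystem whose
 * address_space_operations provide get_xip_page() would typically
 * route regular-file I/O through the helpers above, roughly the way
 * ext2 does when mounted with -o xip.  The struct name below is an
 * illustrative placeholder; the xip_* entries are the exports from
 * this file.
 *
 *	const struct file_operations example_xip_file_operations = {
 *		.llseek		= generic_file_llseek,
 *		.read		= xip_file_read,
 *		.write		= xip_file_write,
 *		.mmap		= xip_file_mmap,
 *		.open		= generic_file_open,
 *	};
 *
 * xip_truncate_page() is then called from the filesystem's truncate
 * path to zero the tail of a partially truncated block, in place of
 * block_truncate_page().
 */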