/*
 * linux/mm/filemap_xip.c
 *
 * Copyright (C) 2005 IBM Corporation
 * Author: Carsten Otte <cotte@de.ibm.com>
 *
 * derived from linux/mm/filemap.c - Copyright (C) Linus Torvalds
 *
 */

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/uio.h>
#include <linux/rmap.h>
#include <asm/tlbflush.h>
#include "filemap.h"
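
/*
 * The exports below are meant to be wired into the file_operations of
 * a filesystem that implements the get_xip_page address space
 * operation.  A minimal sketch (the struct name is hypothetical; which
 * hooks a filesystem actually fills in is up to it):
 *
 *      static struct file_operations example_xip_file_operations = {
 *              .llseek         = generic_file_llseek,
 *              .aio_read       = xip_file_aio_read,
 *              .aio_write      = xip_file_aio_write,
 *              .readv          = xip_file_readv,
 *              .writev         = xip_file_writev,
 *              .sendfile       = xip_file_sendfile,
 *              .mmap           = xip_file_mmap,
 *      };
 */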

/*
 * This is a file read routine for execute in place files, and uses
 * the mapping->a_ops->get_xip_page() function for the actual low-level
 * stuff.
 *
 * Note the struct file* is not used at all. It may be NULL.
 */
static void
do_xip_mapping_read(struct address_space *mapping,
                    struct file_ra_state *_ra,
                    struct file *filp,
                    loff_t *ppos,
                    read_descriptor_t *desc,
                    read_actor_t actor)
{
        struct inode *inode = mapping->host;
        unsigned long index, end_index, offset;
        loff_t isize;

        BUG_ON(!mapping->a_ops->get_xip_page);

        index = *ppos >> PAGE_CACHE_SHIFT;
        offset = *ppos & ~PAGE_CACHE_MASK;

        isize = i_size_read(inode);
        if (!isize)
                goto out;

        end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
        for (;;) {
                struct page *page;
                unsigned long nr, ret;

                /* nr is the maximum number of bytes to copy from this page */
                nr = PAGE_CACHE_SIZE;
                if (index >= end_index) {
                        if (index > end_index)
                                goto out;
                        nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
                        if (nr <= offset)
                                goto out;
                }
                nr = nr - offset;

                page = mapping->a_ops->get_xip_page(mapping,
                        index*(PAGE_SIZE/512), 0);
                if (!page)
                        goto no_xip_page;
                if (unlikely(IS_ERR(page))) {
                        if (PTR_ERR(page) == -ENODATA) {
                                /* sparse */
                                page = virt_to_page(empty_zero_page);
                        } else {
                                desc->error = PTR_ERR(page);
                                goto out;
                        }
                } else
                        BUG_ON(!PageUptodate(page));

                /* If users can be writing to this page using arbitrary
                 * virtual addresses, take care about potential aliasing
                 * before reading the page on the kernel side.
                 */
                if (mapping_writably_mapped(mapping))
                        flush_dcache_page(page);

                /*
                 * Ok, we have the page, and it's up-to-date, so
                 * now we can copy it to user space...
                 *
                 * The actor routine returns how many bytes were actually used..
                 * NOTE! This may not be the same as how much of a user buffer
                 * we filled up (we may be padding etc), so we can only update
                 * "pos" here (the actor routine has to update the user buffer
                 * pointers and the remaining count).
                 */
                ret = actor(desc, page, offset, nr);
                offset += ret;
                index += offset >> PAGE_CACHE_SHIFT;
                offset &= ~PAGE_CACHE_MASK;

                if (ret == nr && desc->count)
                        continue;
                goto out;

no_xip_page:
                /* Did not get the page. Report it */
                desc->error = -EIO;
                goto out;
        }

out:
        *ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
        if (filp)
                file_accessed(filp);
}

/*
 * This is the "read()" routine for all filesystems
 * that use the get_xip_page address space operation.
 */
static ssize_t
__xip_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
                    unsigned long nr_segs, loff_t *ppos)
{
        struct file *filp = iocb->ki_filp;
        ssize_t retval;
        unsigned long seg;
        size_t count;

        count = 0;
        for (seg = 0; seg < nr_segs; seg++) {
                const struct iovec *iv = &iov[seg];

                /*
                 * If any segment has a negative length, or the cumulative
                 * length ever wraps negative then return -EINVAL.
                 */
                count += iv->iov_len;
                if (unlikely((ssize_t)(count|iv->iov_len) < 0))
                        return -EINVAL;
                if (access_ok(VERIFY_WRITE, iv->iov_base, iv->iov_len))
                        continue;
                if (seg == 0)
                        return -EFAULT;
                nr_segs = seg;
                count -= iv->iov_len;   /* This segment is no good */
                break;
        }

        retval = 0;
        if (count) {
                for (seg = 0; seg < nr_segs; seg++) {
                        read_descriptor_t desc;

                        desc.written = 0;
                        desc.arg.buf = iov[seg].iov_base;
                        desc.count = iov[seg].iov_len;
                        if (desc.count == 0)
                                continue;
                        desc.error = 0;
                        do_xip_mapping_read(filp->f_mapping, &filp->f_ra, filp,
                                            ppos, &desc, file_read_actor);
                        retval += desc.written;
                        if (!retval) {
                                retval = desc.error;
                                break;
                        }
                }
        }
        return retval;
}
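
/*
 * Single-segment aio read entry point: wraps the user buffer in a
 * local iovec and reads at iocb->ki_pos.  Completes synchronously.
 */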
ssize_t
xip_file_aio_read(struct kiocb *iocb, char __user *buf, size_t count,
                  loff_t pos)
{
        struct iovec local_iov = { .iov_base = buf, .iov_len = count };

        BUG_ON(iocb->ki_pos != pos);
        return __xip_file_aio_read(iocb, &local_iov, 1, &iocb->ki_pos);
}
EXPORT_SYMBOL_GPL(xip_file_aio_read);
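
/*
 * Synchronous vectored read: builds a sync kiocb for the file and
 * hands the full iovec to __xip_file_aio_read().
 */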
ssize_t
xip_file_readv(struct file *filp, const struct iovec *iov,
               unsigned long nr_segs, loff_t *ppos)
{
        struct kiocb kiocb;

        init_sync_kiocb(&kiocb, filp);
        return __xip_file_aio_read(&kiocb, iov, nr_segs, ppos);
}
EXPORT_SYMBOL_GPL(xip_file_readv);
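
/*
 * sendfile for XIP files: feeds up to count bytes starting at *ppos
 * through the caller-supplied actor via do_xip_mapping_read(); returns
 * the number of bytes written, or the error if nothing was written.
 */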
ssize_t
xip_file_sendfile(struct file *in_file, loff_t *ppos,
                  size_t count, read_actor_t actor, void *target)
{
        read_descriptor_t desc;

        if (!count)
                return 0;

        desc.written = 0;
        desc.count = count;
        desc.arg.data = target;
        desc.error = 0;

        do_xip_mapping_read(in_file->f_mapping, &in_file->f_ra, in_file,
                            ppos, &desc, actor);
        if (desc.written)
                return desc.written;
        return desc.error;
}
EXPORT_SYMBOL_GPL(xip_file_sendfile);

/*
 * __xip_unmap is invoked from xip_file_nopage and
 * do_xip_file_write
 *
 * This function walks all vmas of the address_space and unmaps the
 * empty_zero_page when found at pgoff. Should it go in rmap.c?
 */
static void
__xip_unmap(struct address_space *mapping, unsigned long pgoff)
{
        struct vm_area_struct *vma;
        struct mm_struct *mm;
        struct prio_tree_iter iter;
        unsigned long address;
        pte_t *pte;
        pte_t pteval;

        spin_lock(&mapping->i_mmap_lock);
        vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
                mm = vma->vm_mm;
                address = vma->vm_start +
                        ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
                BUG_ON(address < vma->vm_start || address >= vma->vm_end);
                /*
                 * We need the page_table_lock to protect us from page faults,
                 * munmap, fork, etc...
                 */
                pte = page_check_address(virt_to_page(empty_zero_page), mm,
                                         address);
                if (!IS_ERR(pte)) {
                        /* Nuke the page table entry. */
                        flush_cache_page(vma, address, pte_pfn(*pte));
                        pteval = ptep_clear_flush(vma, address, pte);
                        BUG_ON(pte_dirty(pteval));
                        pte_unmap(pte);
                        spin_unlock(&mm->page_table_lock);
                }
        }
        spin_unlock(&mapping->i_mmap_lock);
}

/*
 * xip_file_nopage() is invoked via the vma operations vector for a
 * mapped memory region to read in file data during a page fault.
 *
 * This function is derived from filemap_nopage, but used for execute in place
 */
static struct page *
xip_file_nopage(struct vm_area_struct *area,
                unsigned long address,
                int *type)
{
        struct file *file = area->vm_file;
        struct address_space *mapping = file->f_mapping;
        struct inode *inode = mapping->host;
        struct page *page;
        unsigned long size, pgoff, endoff;

        pgoff = ((address - area->vm_start) >> PAGE_CACHE_SHIFT)
                + area->vm_pgoff;
        endoff = ((area->vm_end - area->vm_start) >> PAGE_CACHE_SHIFT)
                + area->vm_pgoff;

        size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
        if (pgoff >= size)
                return NULL;

        page = mapping->a_ops->get_xip_page(mapping, pgoff*(PAGE_SIZE/512), 0);
        if (!IS_ERR(page)) {
                BUG_ON(!PageUptodate(page));
                return page;
        }
        if (PTR_ERR(page) != -ENODATA)
                return NULL;

        /* sparse block */
        if ((area->vm_flags & (VM_WRITE | VM_MAYWRITE)) &&
            (area->vm_flags & (VM_SHARED | VM_MAYSHARE)) &&
            (!(mapping->host->i_sb->s_flags & MS_RDONLY))) {
                /* maybe shared writable, allocate new block */
                page = mapping->a_ops->get_xip_page(mapping,
                        pgoff*(PAGE_SIZE/512), 1);
                if (IS_ERR(page))
                        return NULL;
                BUG_ON(!PageUptodate(page));
                /* unmap page at pgoff from all other vmas */
                __xip_unmap(mapping, pgoff);
        } else {
                /* not shared-writable: map the zero page instead */
                page = virt_to_page(empty_zero_page);
        }

        return page;
}

static struct vm_operations_struct xip_file_vm_ops = {
        .nopage         = xip_file_nopage,
};
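
/*
 * mmap for XIP files: install xip_file_vm_ops so that every fault is
 * served straight from the backing store by xip_file_nopage() above;
 * there is no page cache or readahead involved.  The mapping must
 * provide get_xip_page (BUG otherwise).
 */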
int xip_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        BUG_ON(!file->f_mapping->a_ops->get_xip_page);

        file_accessed(file);
        vma->vm_ops = &xip_file_vm_ops;
        return 0;
}
EXPORT_SYMBOL_GPL(xip_file_mmap);
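
/*
 * Core write loop, structured like the generic buffered write path:
 * copy user data page by page straight into the pages handed out by
 * get_xip_page().  A write into a hole allocates a new block
 * (create=1) and then unmaps the zero page at that offset from all
 * other vmas, so later faults see the real block.
 */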
static ssize_t
do_xip_file_write(struct kiocb *iocb, const struct iovec *iov,
                  unsigned long nr_segs, loff_t pos, loff_t *ppos,
                  size_t count)
{
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
        struct address_space_operations *a_ops = mapping->a_ops;
        struct inode *inode = mapping->host;
        long status = 0;
        struct page *page;
        size_t bytes;
        const struct iovec *cur_iov = iov;      /* current iovec */
        size_t iov_base = 0;                    /* offset in the current iovec */
        char __user *buf;
        ssize_t written = 0;

        BUG_ON(!mapping->a_ops->get_xip_page);

        buf = iov->iov_base;
        do {
                unsigned long index;
                unsigned long offset;
                size_t copied;

                offset = (pos & (PAGE_CACHE_SIZE - 1)); /* Within page */
                index = pos >> PAGE_CACHE_SHIFT;
                bytes = PAGE_CACHE_SIZE - offset;
                if (bytes > count)
                        bytes = count;

                /*
                 * Bring in the user page that we will copy from _first_.
                 * Otherwise there's a nasty deadlock on copying from the
                 * same page as we're writing to, without it being marked
                 * up-to-date.
                 */
                fault_in_pages_readable(buf, bytes);

                page = a_ops->get_xip_page(mapping,
                        index*(PAGE_SIZE/512), 0);
                if (IS_ERR(page) && (PTR_ERR(page) == -ENODATA)) {
                        /* writing into a hole: allocate a new block */
                        page = a_ops->get_xip_page(mapping,
                                index*(PAGE_SIZE/512), 1);
                        if (!IS_ERR(page))
                                /* unmap page at pgoff from all other vmas */
                                __xip_unmap(mapping, index);
                }
                if (IS_ERR(page)) {
                        status = PTR_ERR(page);
                        break;
                }
                BUG_ON(!PageUptodate(page));

                if (likely(nr_segs == 1))
                        copied = filemap_copy_from_user(page, offset,
                                                        buf, bytes);
                else
                        copied = filemap_copy_from_user_iovec(page, offset,
                                                cur_iov, iov_base, bytes);
                flush_dcache_page(page);
                if (likely(copied > 0)) {
                        status = copied;

                        if (status >= 0) {
                                written += status;
                                count -= status;
                                pos += status;
                                buf += status;
                                if (unlikely(nr_segs > 1))
                                        filemap_set_next_iovec(&cur_iov,
                                                &iov_base, status);
                        }
                }
                if (unlikely(copied != bytes))
                        if (status >= 0)
                                status = -EFAULT;
                if (status < 0)
                        break;
        } while (count);
        *ppos = pos;

        /*
         * No need to use i_size_read() here, the i_size
         * cannot change under us because we hold i_sem.
         */
        if (pos > inode->i_size) {
                i_size_write(inode, pos);
                mark_inode_dirty(inode);
        }

        return written ? written : status;
}
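
/*
 * Validate the iovec and file limits, strip setuid/setgid bits, update
 * the timestamps, then hand off to do_xip_file_write().  The caller
 * must hold i_sem (or otherwise serialize writers).
 */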
static ssize_t
xip_file_aio_write_nolock(struct kiocb *iocb, const struct iovec *iov,
                          unsigned long nr_segs, loff_t *ppos)
{
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
        size_t ocount;          /* original count */
        size_t count;           /* after file limit checks */
        struct inode *inode = mapping->host;
        unsigned long seg;
        loff_t pos;
        ssize_t written;
        ssize_t err;

        ocount = 0;
        for (seg = 0; seg < nr_segs; seg++) {
                const struct iovec *iv = &iov[seg];

                /*
                 * If any segment has a negative length, or the cumulative
                 * length ever wraps negative then return -EINVAL.
                 */
                ocount += iv->iov_len;
                if (unlikely((ssize_t)(ocount|iv->iov_len) < 0))
                        return -EINVAL;
                if (access_ok(VERIFY_READ, iv->iov_base, iv->iov_len))
                        continue;
                if (seg == 0)
                        return -EFAULT;
                nr_segs = seg;
                ocount -= iv->iov_len;  /* This segment is no good */
                break;
        }

        count = ocount;
        pos = *ppos;

        vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

        written = 0;

        err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
        if (err)
                goto out;

        if (count == 0)
                goto out;

        err = remove_suid(file->f_dentry);
        if (err)
                goto out;

        inode_update_time(inode, 1);

        /* use execute in place to copy directly to disk */
        written = do_xip_file_write(iocb, iov, nr_segs, pos, ppos, count);
out:
        return written ? written : err;
}
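
/*
 * Synchronous vectored write without i_sem: wraps the iovec in a sync
 * kiocb.  Used by xip_file_writev() below.
 */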
static ssize_t
__xip_file_write_nolock(struct file *file, const struct iovec *iov,
                        unsigned long nr_segs, loff_t *ppos)
{
        struct kiocb kiocb;

        init_sync_kiocb(&kiocb, file);
        return xip_file_aio_write_nolock(&kiocb, iov, nr_segs, ppos);
}
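
/*
 * Single-segment aio write entry point: takes i_sem, wraps the user
 * buffer in a local iovec and writes at iocb->ki_pos.  Completes
 * synchronously.
 */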
ssize_t
xip_file_aio_write(struct kiocb *iocb, const char __user *buf,
                   size_t count, loff_t pos)
{
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
        struct inode *inode = mapping->host;
        ssize_t ret;
        struct iovec local_iov = { .iov_base = (void __user *)buf,
                                   .iov_len = count };

        BUG_ON(iocb->ki_pos != pos);

        down(&inode->i_sem);
        ret = xip_file_aio_write_nolock(iocb, &local_iov, 1, &iocb->ki_pos);
        up(&inode->i_sem);
        return ret;
}
EXPORT_SYMBOL_GPL(xip_file_aio_write);
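
/*
 * Vectored write entry point: takes i_sem and runs the full iovec
 * through the synchronous nolock path.
 */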
ssize_t xip_file_writev(struct file *file, const struct iovec *iov,
                        unsigned long nr_segs, loff_t *ppos)
{
        struct address_space *mapping = file->f_mapping;
        struct inode *inode = mapping->host;
        ssize_t ret;

        down(&inode->i_sem);
        ret = __xip_file_write_nolock(file, iov, nr_segs, ppos);
        up(&inode->i_sem);
        return ret;
}
EXPORT_SYMBOL_GPL(xip_file_writev);

/*
 * truncate a page used for execute in place
 * functionality is analogous to block_truncate_page(), but uses
 * get_xip_page() to obtain the page instead of the page cache
 */
int
xip_truncate_page(struct address_space *mapping, loff_t from)
{
        pgoff_t index = from >> PAGE_CACHE_SHIFT;
        unsigned offset = from & (PAGE_CACHE_SIZE-1);
        unsigned blocksize;
        unsigned length;
        struct page *page;
        void *kaddr;
        int err;

        BUG_ON(!mapping->a_ops->get_xip_page);

        blocksize = 1 << mapping->host->i_blkbits;
        length = offset & (blocksize - 1);

        /* Block boundary? Nothing to do */
        if (!length)
                return 0;

        length = blocksize - length;

        page = mapping->a_ops->get_xip_page(mapping,
                index*(PAGE_SIZE/512), 0);
        err = -ENOMEM;
        if (!page)
                goto out;
        if (unlikely(IS_ERR(page))) {
                if (PTR_ERR(page) == -ENODATA) {
                        /* Hole? No need to truncate */
                        return 0;
                } else {
                        err = PTR_ERR(page);
                        goto out;
                }
        } else
                BUG_ON(!PageUptodate(page));

        kaddr = kmap_atomic(page, KM_USER0);
        memset(kaddr + offset, 0, length);
        kunmap_atomic(kaddr, KM_USER0);

        flush_dcache_page(page);
        err = 0;
out:
        return err;
}
EXPORT_SYMBOL_GPL(xip_truncate_page);
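
/*
 * Note on the get_xip_page contract, as inferred from the call sites
 * above: the second argument is a block offset expressed in 512-byte
 * sectors (index*(PAGE_SIZE/512)) and the third selects whether a
 * missing block should be allocated.  The operation returns the
 * backing page (which must be Uptodate), ERR_PTR(-ENODATA) for a hole
 * when create==0, another ERR_PTR value on failure, or NULL, which the
 * read path reports as -EIO.
 */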