mmap.c

/**
 * eCryptfs: Linux filesystem encryption layer
 * This is where eCryptfs coordinates the symmetric encryption and
 * decryption of the file data as it passes between the lower
 * encrypted file and the upper decrypted file.
 *
 * Copyright (C) 1997-2003 Erez Zadok
 * Copyright (C) 2001-2003 Stony Brook University
 * Copyright (C) 2004-2007 International Business Machines Corp.
 *   Author(s): Michael A. Halcrow <mahalcro@us.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
 * 02111-1307, USA.
 */

#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/page-flags.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include "ecryptfs_kernel.h"

struct kmem_cache *ecryptfs_lower_page_cache;

/**
 * ecryptfs_get1page
 *
 * Get one page from cache or lower f/s, return error otherwise.
 *
 * Returns unlocked and up-to-date page (if ok), with increased
 * refcnt.
 */
static struct page *ecryptfs_get1page(struct file *file, int index)
{
	struct dentry *dentry;
	struct inode *inode;
	struct address_space *mapping;

	dentry = file->f_path.dentry;
	inode = dentry->d_inode;
	mapping = inode->i_mapping;
	return read_mapping_page(mapping, index, (void *)file);
}

static
int write_zeros(struct file *file, pgoff_t index, int start, int num_zeros);

/**
 * ecryptfs_fill_zeros
 * @file: The ecryptfs file
 * @new_length: The new length of the data in the underlying file;
 *              everything between the prior end of the file and the
 *              new end of the file will be filled with zeros.
 *              new_length must be greater than the current length
 *
 * Function for handling lseek-ing past the end of the file.
 *
 * This function does not support shrinking, only growing a file.
 *
 * Returns zero on success; non-zero otherwise.
 */
int ecryptfs_fill_zeros(struct file *file, loff_t new_length)
{
	int rc = 0;
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	pgoff_t old_end_page_index = 0;
	pgoff_t index = old_end_page_index;
	int old_end_pos_in_page = -1;
	pgoff_t new_end_page_index;
	int new_end_pos_in_page;
	loff_t cur_length = i_size_read(inode);

	if (cur_length != 0) {
		index = old_end_page_index =
			((cur_length - 1) >> PAGE_CACHE_SHIFT);
		old_end_pos_in_page = ((cur_length - 1) & ~PAGE_CACHE_MASK);
	}
	new_end_page_index = ((new_length - 1) >> PAGE_CACHE_SHIFT);
	new_end_pos_in_page = ((new_length - 1) & ~PAGE_CACHE_MASK);
	ecryptfs_printk(KERN_DEBUG, "old_end_page_index = [0x%.16x]; "
			"old_end_pos_in_page = [%d]; "
			"new_end_page_index = [0x%.16x]; "
			"new_end_pos_in_page = [%d]\n",
			old_end_page_index, old_end_pos_in_page,
			new_end_page_index, new_end_pos_in_page);
	if (old_end_page_index == new_end_page_index) {
		/* Start and end are in the same page; we just need to
		 * set a portion of the existing page to zeros */
		rc = write_zeros(file, index, (old_end_pos_in_page + 1),
				 (new_end_pos_in_page - old_end_pos_in_page));
		if (rc)
			ecryptfs_printk(KERN_ERR, "write_zeros(file=[%p], "
					"index=[0x%.16x], "
					"old_end_pos_in_page=[%d], "
					"(PAGE_CACHE_SIZE - new_end_pos_in_page"
					"=[%d]"
					")=[%d]) returned [%d]\n", file, index,
					old_end_pos_in_page,
					new_end_pos_in_page,
					(PAGE_CACHE_SIZE - new_end_pos_in_page),
					rc);
		goto out;
	}
	/* Fill the remainder of the previous last page with zeros */
	rc = write_zeros(file, index, (old_end_pos_in_page + 1),
			 ((PAGE_CACHE_SIZE - 1) - old_end_pos_in_page));
	if (rc) {
		ecryptfs_printk(KERN_ERR, "write_zeros(file=[%p], "
				"index=[0x%.16x], old_end_pos_in_page=[%d], "
				"(PAGE_CACHE_SIZE - old_end_pos_in_page)=[%d]) "
				"returned [%d]\n", file, index,
				old_end_pos_in_page,
				(PAGE_CACHE_SIZE - old_end_pos_in_page), rc);
		goto out;
	}
	index++;
	while (index < new_end_page_index) {
		/* Fill all intermediate pages with zeros */
		rc = write_zeros(file, index, 0, PAGE_CACHE_SIZE);
		if (rc) {
			ecryptfs_printk(KERN_ERR, "write_zeros(file=[%p], "
					"index=[0x%.16x], "
					"old_end_pos_in_page=[%d], "
					"(PAGE_CACHE_SIZE - new_end_pos_in_page"
					"=[%d]"
					")=[%d]) returned [%d]\n", file, index,
					old_end_pos_in_page,
					new_end_pos_in_page,
					(PAGE_CACHE_SIZE - new_end_pos_in_page),
					rc);
			goto out;
		}
		index++;
	}
	/* Fill the portion at the beginning of the last new page with
	 * zeros */
	rc = write_zeros(file, index, 0, (new_end_pos_in_page + 1));
	if (rc) {
		ecryptfs_printk(KERN_ERR, "write_zeros(file="
				"[%p], index=[0x%.16x], 0, "
				"new_end_pos_in_page=[%d]) "
				"returned [%d]\n", file, index,
				new_end_pos_in_page, rc);
		goto out;
	}
out:
	return rc;
}
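/*
 * Worked example for ecryptfs_fill_zeros() above (illustrative only;
 * assumes PAGE_CACHE_SIZE == 4096): growing a file from 5000 bytes to
 * 13000 bytes yields old_end_page_index = 1, old_end_pos_in_page = 903,
 * new_end_page_index = 3, and new_end_pos_in_page = 711.  The function
 * therefore zeros bytes 904-4095 of page 1, all of page 2, and bytes
 * 0-711 of page 3.
 */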
/**
 * ecryptfs_writepage
 * @page: Page that is locked before this call is made
 *
 * Returns zero on success; non-zero otherwise
 */
static int ecryptfs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct ecryptfs_page_crypt_context ctx;
	int rc;

	ctx.page = page;
	ctx.mode = ECRYPTFS_WRITEPAGE_MODE;
	ctx.param.wbc = wbc;
	rc = ecryptfs_encrypt_page(&ctx);
	if (rc) {
		ecryptfs_printk(KERN_WARNING, "Error encrypting "
				"page (upper index [0x%.16x])\n", page->index);
		ClearPageUptodate(page);
		goto out;
	}
	SetPageUptodate(page);
	unlock_page(page);
out:
	return rc;
}

/**
 * Reads the data from the lower file at index lower_page_index and
 * copies that data into page.
 *
 * @param page Page to fill
 * @param lower_page_index Index of the page in the lower file to get
 */
int ecryptfs_do_readpage(struct file *file, struct page *page,
			 pgoff_t lower_page_index)
{
	int rc;
	struct dentry *dentry;
	struct file *lower_file;
	struct dentry *lower_dentry;
	struct inode *inode;
	struct inode *lower_inode;
	char *page_data;
	struct page *lower_page = NULL;
	char *lower_page_data;
	const struct address_space_operations *lower_a_ops;

	dentry = file->f_path.dentry;
	lower_file = ecryptfs_file_to_lower(file);
	lower_dentry = ecryptfs_dentry_to_lower(dentry);
	inode = dentry->d_inode;
	lower_inode = ecryptfs_inode_to_lower(inode);
	lower_a_ops = lower_inode->i_mapping->a_ops;
	lower_page = read_cache_page(lower_inode->i_mapping, lower_page_index,
				     (filler_t *)lower_a_ops->readpage,
				     (void *)lower_file);
	if (IS_ERR(lower_page)) {
		rc = PTR_ERR(lower_page);
		lower_page = NULL;
		ecryptfs_printk(KERN_ERR, "Error reading from page cache\n");
		goto out;
	}
	page_data = kmap_atomic(page, KM_USER0);
	lower_page_data = kmap_atomic(lower_page, KM_USER1);
	memcpy(page_data, lower_page_data, PAGE_CACHE_SIZE);
	kunmap_atomic(lower_page_data, KM_USER1);
	kunmap_atomic(page_data, KM_USER0);
	flush_dcache_page(page);
	rc = 0;
out:
	if (likely(lower_page))
		page_cache_release(lower_page);
	if (rc == 0)
		SetPageUptodate(page);
	else
		ClearPageUptodate(page);
	return rc;
}

/**
 * Header Extent:
 *   Octets 0-7:        Unencrypted file size (big-endian)
 *   Octets 8-15:       eCryptfs special marker
 *   Octets 16-19:      Flags
 *    Octet 16:         File format version number (between 0 and 255)
 *    Octets 17-18:     Reserved
 *    Octet 19:         Bit 1 (lsb): Reserved
 *                      Bit 2: Encrypted?
 *                      Bits 3-8: Reserved
 *   Octets 20-23:      Header extent size (big-endian)
 *   Octets 24-25:      Number of header extents at front of file
 *                      (big-endian)
 *   Octet  26:         Begin RFC 2440 authentication token packet set
 */
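/*
 * Illustrative sketch only (not used anywhere in this code, which reads
 * and writes these fields at raw byte offsets): the header extent layout
 * described above, expressed as a packed struct for readability.
 */
#if 0
struct ecryptfs_header_extent_sketch {
	__be64 file_size;		/* Octets 0-7: unencrypted size */
	u8 marker[8];			/* Octets 8-15: special marker */
	u8 version;			/* Octet 16: format version */
	u8 reserved[2];			/* Octets 17-18: reserved */
	u8 flags;			/* Octet 19: bit 2 = encrypted */
	__be32 header_extent_size;	/* Octets 20-23 */
	__be16 num_header_extents_at_front;	/* Octets 24-25 */
	u8 auth_tok_packets[];		/* Octet 26 on: RFC 2440 packets */
} __attribute__((packed));
#endif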
static void set_header_info(char *page_virt,
			    struct ecryptfs_crypt_stat *crypt_stat)
{
	size_t written;
	int save_num_header_extents_at_front =
		crypt_stat->num_header_extents_at_front;

	crypt_stat->num_header_extents_at_front = 1;
	ecryptfs_write_header_metadata(page_virt + 20, crypt_stat, &written);
	crypt_stat->num_header_extents_at_front =
		save_num_header_extents_at_front;
}

/**
 * ecryptfs_readpage
 * @file: This is an ecryptfs file
 * @page: ecryptfs associated page to stick the read data into
 *
 * Read in a page, decrypting if necessary.
 *
 * Returns zero on success; non-zero on error.
 */
static int ecryptfs_readpage(struct file *file, struct page *page)
{
	int rc = 0;
	struct ecryptfs_crypt_stat *crypt_stat;

	BUG_ON(!(file && file->f_path.dentry && file->f_path.dentry->d_inode));
	crypt_stat = &ecryptfs_inode_to_private(file->f_path.dentry->d_inode)
			->crypt_stat;
	if (!crypt_stat
	    || !(crypt_stat->flags & ECRYPTFS_ENCRYPTED)
	    || (crypt_stat->flags & ECRYPTFS_NEW_FILE)) {
		ecryptfs_printk(KERN_DEBUG,
				"Passing through unencrypted page\n");
		rc = ecryptfs_do_readpage(file, page, page->index);
		if (rc) {
			ecryptfs_printk(KERN_ERR, "Error reading page; rc = "
					"[%d]\n", rc);
			goto out;
		}
	} else if (crypt_stat->flags & ECRYPTFS_VIEW_AS_ENCRYPTED) {
		if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR) {
			int num_pages_in_header_region =
				(crypt_stat->header_extent_size
				 / PAGE_CACHE_SIZE);

			if (page->index < num_pages_in_header_region) {
				char *page_virt;

				page_virt = kmap_atomic(page, KM_USER0);
				memset(page_virt, 0, PAGE_CACHE_SIZE);
				if (page->index == 0) {
					rc = ecryptfs_read_xattr_region(
						page_virt, file->f_path.dentry);
					set_header_info(page_virt, crypt_stat);
				}
				kunmap_atomic(page_virt, KM_USER0);
				flush_dcache_page(page);
				if (rc) {
					printk(KERN_ERR "Error reading xattr "
					       "region\n");
					goto out;
				}
			} else {
				rc = ecryptfs_do_readpage(
					file, page,
					(page->index
					 - num_pages_in_header_region));
				if (rc) {
					printk(KERN_ERR "Error reading page; "
					       "rc = [%d]\n", rc);
					goto out;
				}
			}
		} else {
			rc = ecryptfs_do_readpage(file, page, page->index);
			if (rc) {
				printk(KERN_ERR "Error reading page; rc = "
				       "[%d]\n", rc);
				goto out;
			}
		}
	} else {
		rc = ecryptfs_decrypt_page(file, page);
		if (rc) {
			ecryptfs_printk(KERN_ERR, "Error decrypting page; "
					"rc = [%d]\n", rc);
			goto out;
		}
	}
	SetPageUptodate(page);
out:
	if (rc)
		ClearPageUptodate(page);
	ecryptfs_printk(KERN_DEBUG, "Unlocking page with index = [0x%.16x]\n",
			page->index);
	unlock_page(page);
	return rc;
}
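/*
 * Illustration of the index arithmetic in ecryptfs_readpage() above
 * (assumption for the example: PAGE_CACHE_SIZE == 4096 and
 * header_extent_size == 8192, i.e. two header pages): when the
 * encrypted view is requested with the metadata stored in an xattr,
 * pages 0 and 1 are synthesized in memory (page 0 additionally receives
 * the xattr-backed metadata via set_header_info()), while a request for
 * page 2 is satisfied from lower page index 0, i.e. page->index minus
 * num_pages_in_header_region.
 */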
/**
 * Called with lower inode mutex held.
 */
static int fill_zeros_to_end_of_page(struct page *page, unsigned int to)
{
	struct inode *inode = page->mapping->host;
	int end_byte_in_page;
	char *page_virt;

	if ((i_size_read(inode) / PAGE_CACHE_SIZE) != page->index)
		goto out;
	end_byte_in_page = i_size_read(inode) % PAGE_CACHE_SIZE;
	if (to > end_byte_in_page)
		end_byte_in_page = to;
	page_virt = kmap_atomic(page, KM_USER0);
	memset((page_virt + end_byte_in_page), 0,
	       (PAGE_CACHE_SIZE - end_byte_in_page));
	kunmap_atomic(page_virt, KM_USER0);
	flush_dcache_page(page);
out:
	return 0;
}

static int ecryptfs_prepare_write(struct file *file, struct page *page,
				  unsigned from, unsigned to)
{
	int rc = 0;

	if (from == 0 && to == PAGE_CACHE_SIZE)
		goto out;	/* If we are writing a full page, it will be
				   up to date. */
	if (!PageUptodate(page))
		rc = ecryptfs_do_readpage(file, page, page->index);
out:
	return rc;
}

int ecryptfs_writepage_and_release_lower_page(struct page *lower_page,
					      struct inode *lower_inode,
					      struct writeback_control *wbc)
{
	int rc = 0;

	rc = lower_inode->i_mapping->a_ops->writepage(lower_page, wbc);
	if (rc) {
		ecryptfs_printk(KERN_ERR, "Error calling lower writepage(); "
				"rc = [%d]\n", rc);
		goto out;
	}
	lower_inode->i_mtime = lower_inode->i_ctime = CURRENT_TIME;
	page_cache_release(lower_page);
out:
	return rc;
}

static
void ecryptfs_release_lower_page(struct page *lower_page, int page_locked)
{
	if (page_locked)
		unlock_page(lower_page);
	page_cache_release(lower_page);
}
/**
 * ecryptfs_write_inode_size_to_header
 *
 * Writes the lower file size to the first 8 bytes of the header.
 *
 * Returns zero on success; non-zero on error.
 */
static int ecryptfs_write_inode_size_to_header(struct file *lower_file,
					       struct inode *lower_inode,
					       struct inode *inode)
{
	int rc = 0;
	struct page *header_page;
	char *header_virt;
	const struct address_space_operations *lower_a_ops;
	u64 file_size;

retry:
	header_page = grab_cache_page(lower_inode->i_mapping, 0);
	if (!header_page) {
		ecryptfs_printk(KERN_ERR, "grab_cache_page for "
				"lower_page_index 0 failed\n");
		rc = -EINVAL;
		goto out;
	}
	lower_a_ops = lower_inode->i_mapping->a_ops;
	rc = lower_a_ops->prepare_write(lower_file, header_page, 0, 8);
	if (rc) {
		if (rc == AOP_TRUNCATED_PAGE) {
			ecryptfs_release_lower_page(header_page, 0);
			goto retry;
		} else
			ecryptfs_release_lower_page(header_page, 1);
		goto out;
	}
	file_size = (u64)i_size_read(inode);
	ecryptfs_printk(KERN_DEBUG, "Writing size: [0x%.16x]\n", file_size);
	file_size = cpu_to_be64(file_size);
	header_virt = kmap_atomic(header_page, KM_USER0);
	memcpy(header_virt, &file_size, sizeof(u64));
	kunmap_atomic(header_virt, KM_USER0);
	flush_dcache_page(header_page);
	rc = lower_a_ops->commit_write(lower_file, header_page, 0, 8);
	if (rc < 0)
		ecryptfs_printk(KERN_ERR, "Error committing header page "
				"write\n");
	if (rc == AOP_TRUNCATED_PAGE) {
		ecryptfs_release_lower_page(header_page, 0);
		goto retry;
	} else
		ecryptfs_release_lower_page(header_page, 1);
	lower_inode->i_mtime = lower_inode->i_ctime = CURRENT_TIME;
	mark_inode_dirty_sync(inode);
out:
	return rc;
}
static int ecryptfs_write_inode_size_to_xattr(struct inode *lower_inode,
					      struct inode *inode,
					      struct dentry *ecryptfs_dentry,
					      int lower_i_mutex_held)
{
	ssize_t size;
	void *xattr_virt;
	struct dentry *lower_dentry;
	u64 file_size;
	int rc;

	xattr_virt = kmem_cache_alloc(ecryptfs_xattr_cache, GFP_KERNEL);
	if (!xattr_virt) {
		printk(KERN_ERR "Out of memory whilst attempting to write "
		       "inode size to xattr\n");
		rc = -ENOMEM;
		goto out;
	}
	lower_dentry = ecryptfs_dentry_to_lower(ecryptfs_dentry);
	if (!lower_dentry->d_inode->i_op->getxattr ||
	    !lower_dentry->d_inode->i_op->setxattr) {
		printk(KERN_WARNING
		       "No support for setting xattr in lower filesystem\n");
		rc = -ENOSYS;
		kmem_cache_free(ecryptfs_xattr_cache, xattr_virt);
		goto out;
	}
	if (!lower_i_mutex_held)
		mutex_lock(&lower_dentry->d_inode->i_mutex);
	size = lower_dentry->d_inode->i_op->getxattr(lower_dentry,
						     ECRYPTFS_XATTR_NAME,
						     xattr_virt,
						     PAGE_CACHE_SIZE);
	if (!lower_i_mutex_held)
		mutex_unlock(&lower_dentry->d_inode->i_mutex);
	if (size < 0)
		size = 8;
	file_size = (u64)i_size_read(inode);
	file_size = cpu_to_be64(file_size);
	memcpy(xattr_virt, &file_size, sizeof(u64));
	if (!lower_i_mutex_held)
		mutex_lock(&lower_dentry->d_inode->i_mutex);
	rc = lower_dentry->d_inode->i_op->setxattr(lower_dentry,
						   ECRYPTFS_XATTR_NAME,
						   xattr_virt, size, 0);
	if (!lower_i_mutex_held)
		mutex_unlock(&lower_dentry->d_inode->i_mutex);
	if (rc)
		printk(KERN_ERR "Error whilst attempting to write inode size "
		       "to lower file xattr; rc = [%d]\n", rc);
	kmem_cache_free(ecryptfs_xattr_cache, xattr_virt);
out:
	return rc;
}

int
ecryptfs_write_inode_size_to_metadata(struct file *lower_file,
				      struct inode *lower_inode,
				      struct inode *inode,
				      struct dentry *ecryptfs_dentry,
				      int lower_i_mutex_held)
{
	struct ecryptfs_crypt_stat *crypt_stat;

	crypt_stat = &ecryptfs_inode_to_private(inode)->crypt_stat;
	if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR)
		return ecryptfs_write_inode_size_to_xattr(lower_inode, inode,
							  ecryptfs_dentry,
							  lower_i_mutex_held);
	else
		return ecryptfs_write_inode_size_to_header(lower_file,
							   lower_inode,
							   inode);
}
int ecryptfs_get_lower_page(struct page **lower_page, struct inode *lower_inode,
			    struct file *lower_file,
			    unsigned long lower_page_index, int byte_offset,
			    int region_bytes)
{
	int rc = 0;

retry:
	*lower_page = grab_cache_page(lower_inode->i_mapping, lower_page_index);
	if (!(*lower_page)) {
		rc = -EINVAL;
		ecryptfs_printk(KERN_ERR, "Error attempting to grab "
				"lower page with index [0x%.16x]\n",
				lower_page_index);
		goto out;
	}
	rc = lower_inode->i_mapping->a_ops->prepare_write(lower_file,
							  (*lower_page),
							  byte_offset,
							  region_bytes);
	if (rc) {
		if (rc == AOP_TRUNCATED_PAGE) {
			ecryptfs_release_lower_page(*lower_page, 0);
			goto retry;
		} else {
			ecryptfs_printk(KERN_ERR, "prepare_write for "
					"lower_page_index = [0x%.16x] failed; "
					"rc = [%d]\n", lower_page_index, rc);
			ecryptfs_release_lower_page(*lower_page, 1);
			(*lower_page) = NULL;
		}
	}
out:
	return rc;
}

/**
 * ecryptfs_commit_lower_page
 *
 * Returns zero on success; non-zero on error
 */
int
ecryptfs_commit_lower_page(struct page *lower_page, struct inode *lower_inode,
			   struct file *lower_file, int byte_offset,
			   int region_size)
{
	int page_locked = 1;
	int rc = 0;

	rc = lower_inode->i_mapping->a_ops->commit_write(
		lower_file, lower_page, byte_offset, region_size);
	if (rc == AOP_TRUNCATED_PAGE)
		page_locked = 0;
	if (rc < 0) {
		ecryptfs_printk(KERN_ERR,
				"Error committing write; rc = [%d]\n", rc);
	} else
		rc = 0;
	ecryptfs_release_lower_page(lower_page, page_locked);
	return rc;
}

/**
 * ecryptfs_copy_page_to_lower
 *
 * Used for plaintext pass-through; no page index interpolation
 * required.
 */
int ecryptfs_copy_page_to_lower(struct page *page, struct inode *lower_inode,
				struct file *lower_file)
{
	int rc = 0;
	struct page *lower_page;

	rc = ecryptfs_get_lower_page(&lower_page, lower_inode, lower_file,
				     page->index, 0, PAGE_CACHE_SIZE);
	if (rc) {
		ecryptfs_printk(KERN_ERR, "Error attempting to get page "
				"at index [0x%.16x]\n", page->index);
		goto out;
	}
	/* TODO: aops */
	memcpy((char *)page_address(lower_page), page_address(page),
	       PAGE_CACHE_SIZE);
	rc = ecryptfs_commit_lower_page(lower_page, lower_inode, lower_file,
					0, PAGE_CACHE_SIZE);
	if (rc)
		ecryptfs_printk(KERN_ERR, "Error attempting to commit page "
				"at index [0x%.16x]\n", page->index);
out:
	return rc;
}
struct kmem_cache *ecryptfs_xattr_cache;

/**
 * ecryptfs_commit_write
 * @file: The eCryptfs file object
 * @page: The eCryptfs page
 * @from: Ignored (we rotate the page IV on each write)
 * @to: Ignored
 *
 * This is where we encrypt the data and pass the encrypted data to
 * the lower filesystem.  In OpenPGP-compatible mode, we operate on
 * entire underlying packets.
 */
static int ecryptfs_commit_write(struct file *file, struct page *page,
				 unsigned from, unsigned to)
{
	struct ecryptfs_page_crypt_context ctx;
	loff_t pos;
	struct inode *inode;
	struct inode *lower_inode;
	struct file *lower_file;
	struct ecryptfs_crypt_stat *crypt_stat;
	int rc;

	inode = page->mapping->host;
	lower_inode = ecryptfs_inode_to_lower(inode);
	lower_file = ecryptfs_file_to_lower(file);
	mutex_lock(&lower_inode->i_mutex);
	crypt_stat = &ecryptfs_inode_to_private(file->f_path.dentry->d_inode)
			->crypt_stat;
	if (crypt_stat->flags & ECRYPTFS_NEW_FILE) {
		ecryptfs_printk(KERN_DEBUG, "ECRYPTFS_NEW_FILE flag set in "
				"crypt_stat at memory location [%p]\n",
				crypt_stat);
		crypt_stat->flags &= ~(ECRYPTFS_NEW_FILE);
	} else
		ecryptfs_printk(KERN_DEBUG, "Not a new file\n");
	ecryptfs_printk(KERN_DEBUG, "Calling fill_zeros_to_end_of_page"
			"(page w/ index = [0x%.16x], to = [%d])\n",
			page->index, to);
	rc = fill_zeros_to_end_of_page(page, to);
	if (rc) {
		ecryptfs_printk(KERN_WARNING, "Error attempting to fill "
				"zeros in page with index = [0x%.16x]\n",
				page->index);
		goto out;
	}
	ctx.page = page;
	ctx.mode = ECRYPTFS_PREPARE_COMMIT_MODE;
	ctx.param.lower_file = lower_file;
	rc = ecryptfs_encrypt_page(&ctx);
	if (rc) {
		ecryptfs_printk(KERN_WARNING, "Error encrypting page (upper "
				"index [0x%.16x])\n", page->index);
		goto out;
	}
	inode->i_blocks = lower_inode->i_blocks;
	pos = (page->index << PAGE_CACHE_SHIFT) + to;
	if (pos > i_size_read(inode)) {
		i_size_write(inode, pos);
		ecryptfs_printk(KERN_DEBUG, "Expanded file size to "
				"[0x%.16x]\n", i_size_read(inode));
	}
	rc = ecryptfs_write_inode_size_to_metadata(lower_file, lower_inode,
						   inode, file->f_dentry,
						   ECRYPTFS_LOWER_I_MUTEX_HELD);
	if (rc)
		printk(KERN_ERR "Error writing inode size to metadata; "
		       "rc = [%d]\n", rc);
	lower_inode->i_mtime = lower_inode->i_ctime = CURRENT_TIME;
	mark_inode_dirty_sync(inode);
out:
	if (rc < 0)
		ClearPageUptodate(page);
	else
		SetPageUptodate(page);
	mutex_unlock(&lower_inode->i_mutex);
	return rc;
}
/**
 * write_zeros
 * @file: The ecryptfs file
 * @index: The index in which we are writing
 * @start: The position after the last block of data
 * @num_zeros: The number of zeros to write
 *
 * Write a specified number of zeros to a page.
 *
 * (start + num_zeros) must be less than or equal to PAGE_CACHE_SIZE
 */
static
int write_zeros(struct file *file, pgoff_t index, int start, int num_zeros)
{
	int rc = 0;
	struct page *tmp_page;
	char *tmp_page_virt;

	tmp_page = ecryptfs_get1page(file, index);
	if (IS_ERR(tmp_page)) {
		ecryptfs_printk(KERN_ERR, "Error getting page at index "
				"[0x%.16x]\n", index);
		rc = PTR_ERR(tmp_page);
		goto out;
	}
	rc = ecryptfs_prepare_write(file, tmp_page, start, start + num_zeros);
	if (rc) {
		ecryptfs_printk(KERN_ERR, "Error preparing to write zeros "
				"to remainder of page at index [0x%.16x]\n",
				index);
		page_cache_release(tmp_page);
		goto out;
	}
	tmp_page_virt = kmap_atomic(tmp_page, KM_USER0);
	memset(((char *)tmp_page_virt + start), 0, num_zeros);
	kunmap_atomic(tmp_page_virt, KM_USER0);
	flush_dcache_page(tmp_page);
	rc = ecryptfs_commit_write(file, tmp_page, start, start + num_zeros);
	if (rc < 0) {
		ecryptfs_printk(KERN_ERR, "Error attempting to write zeros "
				"to remainder of page at index [0x%.16x]\n",
				index);
		page_cache_release(tmp_page);
		goto out;
	}
	rc = 0;
	page_cache_release(tmp_page);
out:
	return rc;
}
static sector_t ecryptfs_bmap(struct address_space *mapping, sector_t block)
{
	int rc = 0;
	struct inode *inode;
	struct inode *lower_inode;

	inode = (struct inode *)mapping->host;
	lower_inode = ecryptfs_inode_to_lower(inode);
	if (lower_inode->i_mapping->a_ops->bmap)
		rc = lower_inode->i_mapping->a_ops->bmap(lower_inode->i_mapping,
							 block);
	return rc;
}

static void ecryptfs_sync_page(struct page *page)
{
	struct inode *inode;
	struct inode *lower_inode;
	struct page *lower_page;

	inode = page->mapping->host;
	lower_inode = ecryptfs_inode_to_lower(inode);
	/* NOTE: Recently swapped with grab_cache_page(), since
	 * sync_page() just makes sure that pending I/O gets done. */
	lower_page = find_lock_page(lower_inode->i_mapping, page->index);
	if (!lower_page) {
		ecryptfs_printk(KERN_DEBUG, "find_lock_page failed\n");
		return;
	}
	lower_page->mapping->a_ops->sync_page(lower_page);
	ecryptfs_printk(KERN_DEBUG, "Unlocking page with index = [0x%.16x]\n",
			lower_page->index);
	unlock_page(lower_page);
	page_cache_release(lower_page);
}

struct address_space_operations ecryptfs_aops = {
	.writepage = ecryptfs_writepage,
	.readpage = ecryptfs_readpage,
	.prepare_write = ecryptfs_prepare_write,
	.commit_write = ecryptfs_commit_write,
	.bmap = ecryptfs_bmap,
	.sync_page = ecryptfs_sync_page,
};