sync.c

/*
 * High-level sync()-related operations
 */

#include <linux/kernel.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include <linux/syscalls.h>
#include <linux/linkage.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include "internal.h"

#define VALID_FLAGS (SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE| \
			SYNC_FILE_RANGE_WAIT_AFTER)

/*
 * Do the filesystem syncing work. For simple filesystems sync_inodes_sb(sb, 0)
 * just dirties buffers with inodes so we have to submit IO for these buffers
 * via __sync_blockdev(). This also speeds up the wait == 1 case since in that
 * case write_inode() functions do sync_dirty_buffer() and thus effectively
 * write one block at a time.
 */
static int __sync_filesystem(struct super_block *sb, int wait)
{
	/* Avoid doing the quota syncing and cache pruning twice */
	if (!wait)
		writeout_quota_sb(sb, -1);
	else
		sync_quota_sb(sb, -1);
	sync_inodes_sb(sb, wait);
	lock_super(sb);
	if (sb->s_dirt && sb->s_op->write_super)
		sb->s_op->write_super(sb);
	unlock_super(sb);
	if (sb->s_op->sync_fs)
		sb->s_op->sync_fs(sb, wait);
	return __sync_blockdev(sb->s_bdev, wait);
}

/*
 * Write out and wait upon all dirty data associated with this
 * superblock.  Filesystem data as well as the underlying block
 * device.  Takes the superblock lock.
 */
int sync_filesystem(struct super_block *sb)
{
	int ret;

	/*
	 * We need to be protected against the filesystem going from
	 * r/o to r/w or vice versa.
	 */
	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	/*
	 * No point in syncing out anything if the filesystem is read-only.
	 */
	if (sb->s_flags & MS_RDONLY)
		return 0;

	ret = __sync_filesystem(sb, 0);
	if (ret < 0)
		return ret;
	return __sync_filesystem(sb, 1);
}
EXPORT_SYMBOL_GPL(sync_filesystem);
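
/*
 * Example (illustrative sketch, not part of the original file): a caller
 * must hold sb->s_umount to satisfy the WARN_ON() above, so a hypothetical
 * helper that syncs one superblock might look like this:
 *
 *	static int example_sync_one_sb(struct super_block *sb)
 *	{
 *		int ret;
 *
 *		down_read(&sb->s_umount);
 *		ret = sync_filesystem(sb);
 *		up_read(&sb->s_umount);
 *		return ret;
 *	}
 */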

/*
 * Sync all the data for all the filesystems (called by sys_sync() and
 * emergency sync)
 *
 * This operation is careful to avoid the livelock which could easily happen
 * if two or more filesystems are being continuously dirtied.  s_need_sync
 * is used only here.  We set it against all filesystems and then clear it as
 * we sync them.  So redirtied filesystems are skipped.
 *
 * But if process A is currently running sync_filesystems and then process B
 * calls sync_filesystems as well, process B will set all the s_need_sync
 * flags again, which will cause process A to resync everything.  Fix that with
 * a local mutex.
 */
static void sync_filesystems(int wait)
{
	struct super_block *sb;
	static DEFINE_MUTEX(mutex);

	mutex_lock(&mutex);		/* Could be down_interruptible */
	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list)
		sb->s_need_sync = 1;

restart:
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (!sb->s_need_sync)
			continue;
		sb->s_need_sync = 0;
		sb->s_count++;
		spin_unlock(&sb_lock);

		down_read(&sb->s_umount);
		if (!(sb->s_flags & MS_RDONLY) && sb->s_root)
			__sync_filesystem(sb, wait);
		up_read(&sb->s_umount);

		/* restart only when sb is no longer on the list */
		spin_lock(&sb_lock);
		if (__put_super_and_need_restart(sb))
			goto restart;
	}
	spin_unlock(&sb_lock);
	mutex_unlock(&mutex);
}

SYSCALL_DEFINE0(sync)
{
	sync_filesystems(0);
	sync_filesystems(1);
	if (unlikely(laptop_mode))
		laptop_sync_completion();
	return 0;
}
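
/*
 * Example (illustrative sketch, not part of the original file): the two-pass
 * behaviour above (start writeback, then wait for it) is what a plain
 * userspace sync(2) call ends up doing:
 *
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		sync();
 *		return 0;
 *	}
 */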

static void do_sync_work(struct work_struct *work)
{
	/*
	 * Sync twice to reduce the possibility we skipped some inodes / pages
	 * because they were temporarily locked
	 */
	sync_filesystems(0);
	sync_filesystems(0);
	printk("Emergency Sync complete\n");
	kfree(work);
}

void emergency_sync(void)
{
	struct work_struct *work;

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (work) {
		INIT_WORK(work, do_sync_work);
		schedule_work(work);
	}
}

/*
 * Generic function to fsync a file.
 *
 * filp may be NULL if called via the msync of a vma.
 */
int file_fsync(struct file *filp, struct dentry *dentry, int datasync)
{
	struct inode *inode = dentry->d_inode;
	struct super_block *sb;
	int ret, err;

	/* sync the inode to buffers */
	ret = write_inode_now(inode, 0);

	/* sync the superblock to buffers */
	sb = inode->i_sb;
	lock_super(sb);
	if (sb->s_dirt && sb->s_op->write_super)
		sb->s_op->write_super(sb);
	unlock_super(sb);

	/* .. finally sync the buffers to disk */
	err = sync_blockdev(sb->s_bdev);
	if (!ret)
		ret = err;
	return ret;
}
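
/*
 * Example (illustrative sketch, not part of the original file): a simple
 * filesystem that keeps its metadata in buffer heads could plug file_fsync()
 * straight into its file_operations as the ->fsync method; the "examplefs"
 * name below is hypothetical:
 *
 *	static const struct file_operations examplefs_file_operations = {
 *		.read		= do_sync_read,
 *		.write		= do_sync_write,
 *		.aio_read	= generic_file_aio_read,
 *		.aio_write	= generic_file_aio_write,
 *		.mmap		= generic_file_mmap,
 *		.fsync		= file_fsync,
 *	};
 */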

/**
 * vfs_fsync - perform a fsync or fdatasync on a file
 * @file:		file to sync
 * @dentry:		dentry of @file
 * @datasync:		only perform a fdatasync operation
 *
 * Write back data and metadata for @file to disk.  If @datasync is
 * set only metadata needed to access modified file data is written.
 *
 * In case this function is called from nfsd @file may be %NULL and
 * only @dentry is set.  This can only happen when the filesystem
 * implements the export_operations API.
 */
int vfs_fsync(struct file *file, struct dentry *dentry, int datasync)
{
	const struct file_operations *fop;
	struct address_space *mapping;
	int err, ret;

	/*
	 * Get mapping and operations from the file in case we have
	 * a file, or get the default values for them in case we
	 * don't have a struct file available.  Damn nfsd..
	 */
	if (file) {
		mapping = file->f_mapping;
		fop = file->f_op;
	} else {
		mapping = dentry->d_inode->i_mapping;
		fop = dentry->d_inode->i_fop;
	}

	if (!fop || !fop->fsync) {
		ret = -EINVAL;
		goto out;
	}

	ret = filemap_fdatawrite(mapping);

	/*
	 * We need to protect against concurrent writers, which could cause
	 * livelocks in fsync_buffers_list().
	 */
	mutex_lock(&mapping->host->i_mutex);
	err = fop->fsync(file, dentry, datasync);
	if (!ret)
		ret = err;
	mutex_unlock(&mapping->host->i_mutex);
	err = filemap_fdatawait(mapping);
	if (!ret)
		ret = err;
out:
	return ret;
}
EXPORT_SYMBOL(vfs_fsync);

static int do_fsync(unsigned int fd, int datasync)
{
	struct file *file;
	int ret = -EBADF;

	file = fget(fd);
	if (file) {
		ret = vfs_fsync(file, file->f_path.dentry, datasync);
		fput(file);
	}
	return ret;
}

SYSCALL_DEFINE1(fsync, unsigned int, fd)
{
	return do_fsync(fd, 0);
}

SYSCALL_DEFINE1(fdatasync, unsigned int, fd)
{
	return do_fsync(fd, 1);
}
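
/*
 * Example (illustrative sketch, not part of the original file): from
 * userspace the two system calls above differ only in the datasync
 * argument passed to do_fsync().  After appending to a log file one
 * might write:
 *
 *	if (write(fd, buf, len) == (ssize_t)len)
 *		err = fdatasync(fd);
 *
 * fdatasync() skips metadata such as timestamps that is not needed to read
 * the data back, while fsync() flushes that metadata as well.
 */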

/*
 * sys_sync_file_range() permits finely controlled syncing over a segment of
 * a file in the range offset .. (offset+nbytes-1) inclusive.  If nbytes is
 * zero then sys_sync_file_range() will operate from offset out to EOF.
 *
 * The flag bits are:
 *
 * SYNC_FILE_RANGE_WAIT_BEFORE: wait upon writeout of all pages in the range
 * before performing the write.
 *
 * SYNC_FILE_RANGE_WRITE: initiate writeout of all those dirty pages in the
 * range which are not presently under writeback.  Note that this may block for
 * significant periods due to exhaustion of disk request structures.
 *
 * SYNC_FILE_RANGE_WAIT_AFTER: wait upon writeout of all pages in the range
 * after performing the write.
 *
 * Useful combinations of the flag bits are:
 *
 * SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE: ensures that all pages
 * in the range which were dirty on entry to sys_sync_file_range() are placed
 * under writeout.  This is a start-write-for-data-integrity operation.
 *
 * SYNC_FILE_RANGE_WRITE: start writeout of all dirty pages in the range which
 * are not presently under writeout.  This is an asynchronous flush-to-disk
 * operation.  Not suitable for data integrity operations.
 *
 * SYNC_FILE_RANGE_WAIT_BEFORE (or SYNC_FILE_RANGE_WAIT_AFTER): wait for
 * completion of writeout of all pages in the range.  This will be used after
 * an earlier SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE operation to
 * wait for that operation to complete and to return the result.
 *
 * SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE|SYNC_FILE_RANGE_WAIT_AFTER:
 * a traditional sync() operation.  This is a write-for-data-integrity operation
 * which will ensure that all pages in the range which were dirty on entry to
 * sys_sync_file_range() are committed to disk.
 *
 * SYNC_FILE_RANGE_WAIT_BEFORE and SYNC_FILE_RANGE_WAIT_AFTER will detect any
 * I/O errors or ENOSPC conditions and will return those to the caller, after
 * clearing the EIO and ENOSPC flags in the address_space.
 *
 * It should be noted that none of these operations write out the file's
 * metadata.  So unless the application is strictly performing overwrites of
 * already-instantiated disk blocks, there are no guarantees here that the data
 * will be available after a crash.
 */
SYSCALL_DEFINE(sync_file_range)(int fd, loff_t offset, loff_t nbytes,
				unsigned int flags)
{
	int ret;
	struct file *file;
	loff_t endbyte;			/* inclusive */
	int fput_needed;
	umode_t i_mode;

	ret = -EINVAL;
	if (flags & ~VALID_FLAGS)
		goto out;

	endbyte = offset + nbytes;

	if ((s64)offset < 0)
		goto out;
	if ((s64)endbyte < 0)
		goto out;
	if (endbyte < offset)
		goto out;

	if (sizeof(pgoff_t) == 4) {
		if (offset >= (0x100000000ULL << PAGE_CACHE_SHIFT)) {
			/*
			 * The range starts outside a 32 bit machine's
			 * pagecache addressing capabilities.  Let it "succeed"
			 */
			ret = 0;
			goto out;
		}
		if (endbyte >= (0x100000000ULL << PAGE_CACHE_SHIFT)) {
			/*
			 * Out to EOF
			 */
			nbytes = 0;
		}
	}

	if (nbytes == 0)
		endbyte = LLONG_MAX;
	else
		endbyte--;		/* inclusive */

	ret = -EBADF;
	file = fget_light(fd, &fput_needed);
	if (!file)
		goto out;

	i_mode = file->f_path.dentry->d_inode->i_mode;
	ret = -ESPIPE;
	if (!S_ISREG(i_mode) && !S_ISBLK(i_mode) && !S_ISDIR(i_mode) &&
			!S_ISLNK(i_mode))
		goto out_put;

	ret = do_sync_mapping_range(file->f_mapping, offset, endbyte, flags);
out_put:
	fput_light(file, fput_needed);
out:
	return ret;
}
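
/*
 * Example (illustrative sketch, not part of the original file): a userspace
 * writer that only overwrites already-allocated blocks can use the flag
 * combination documented above to push one range to disk without an fsync()
 * of the whole file:
 *
 *	unsigned int f = SYNC_FILE_RANGE_WAIT_BEFORE | SYNC_FILE_RANGE_WRITE |
 *			 SYNC_FILE_RANGE_WAIT_AFTER;
 *
 *	if (sync_file_range(fd, offset, nbytes, f) < 0)
 *		perror("sync_file_range");
 *
 * Remember that no file metadata is written, so this is only a data-integrity
 * operation for blocks that are already instantiated on disk.
 */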

#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
asmlinkage long SyS_sync_file_range(long fd, loff_t offset, loff_t nbytes,
				    long flags)
{
	return SYSC_sync_file_range((int) fd, offset, nbytes,
				    (unsigned int) flags);
}
SYSCALL_ALIAS(sys_sync_file_range, SyS_sync_file_range);
#endif

/* It would be nice if people remember that not all the world's an i386
   when they introduce new system calls */
SYSCALL_DEFINE(sync_file_range2)(int fd, unsigned int flags,
				 loff_t offset, loff_t nbytes)
{
	return sys_sync_file_range(fd, offset, nbytes, flags);
}
#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
asmlinkage long SyS_sync_file_range2(long fd, long flags,
				     loff_t offset, loff_t nbytes)
{
	return SYSC_sync_file_range2((int) fd, (unsigned int) flags,
				     offset, nbytes);
}
SYSCALL_ALIAS(sys_sync_file_range2, SyS_sync_file_range2);
#endif

/*
 * `endbyte' is inclusive
 */
int do_sync_mapping_range(struct address_space *mapping, loff_t offset,
			  loff_t endbyte, unsigned int flags)
{
	int ret;

	if (!mapping) {
		ret = -EINVAL;
		goto out;
	}

	ret = 0;
	if (flags & SYNC_FILE_RANGE_WAIT_BEFORE) {
		ret = wait_on_page_writeback_range(mapping,
					offset >> PAGE_CACHE_SHIFT,
					endbyte >> PAGE_CACHE_SHIFT);
		if (ret < 0)
			goto out;
	}

	if (flags & SYNC_FILE_RANGE_WRITE) {
		ret = __filemap_fdatawrite_range(mapping, offset, endbyte,
						 WB_SYNC_ALL);
		if (ret < 0)
			goto out;
	}

	if (flags & SYNC_FILE_RANGE_WAIT_AFTER) {
		ret = wait_on_page_writeback_range(mapping,
					offset >> PAGE_CACHE_SHIFT,
					endbyte >> PAGE_CACHE_SHIFT);
	}
out:
	return ret;
}
EXPORT_SYMBOL_GPL(do_sync_mapping_range);
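
/*
 * Example (illustrative sketch, not part of the original file): an in-kernel
 * caller that wants a synchronous flush of a whole mapping can combine all
 * three flags, mirroring what a full-range sync_file_range() would do:
 *
 *	int err = do_sync_mapping_range(inode->i_mapping, 0, LLONG_MAX,
 *					SYNC_FILE_RANGE_WAIT_BEFORE |
 *					SYNC_FILE_RANGE_WRITE |
 *					SYNC_FILE_RANGE_WAIT_AFTER);
 */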