/* fs/sync.c */
  1. /*
  2. * High-level sync()-related operations
  3. */
  4. #include <linux/kernel.h>
  5. #include <linux/file.h>
  6. #include <linux/fs.h>
  7. #include <linux/module.h>
  8. #include <linux/sched.h>
  9. #include <linux/writeback.h>
  10. #include <linux/syscalls.h>
  11. #include <linux/linkage.h>
  12. #include <linux/pagemap.h>
  13. #include <linux/quotaops.h>
  14. #include <linux/buffer_head.h>
  15. #include "internal.h"
  16. #define VALID_FLAGS (SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE| \
  17. SYNC_FILE_RANGE_WAIT_AFTER)
  18. /*
  19. * Do the filesystem syncing work. For simple filesystems sync_inodes_sb(sb, 0)
  20. * just dirties buffers with inodes so we have to submit IO for these buffers
  21. * via __sync_blockdev(). This also speeds up the wait == 1 case since in that
  22. * case write_inode() functions do sync_dirty_buffer() and thus effectively
  23. * write one block at a time.
  24. */
  25. static int __sync_filesystem(struct super_block *sb, int wait)
  26. {
  27. sync_quota_sb(sb, -1);
  28. sync_inodes_sb(sb, wait);
  29. lock_super(sb);
  30. if (sb->s_dirt && sb->s_op->write_super)
  31. sb->s_op->write_super(sb);
  32. unlock_super(sb);
  33. if (sb->s_op->sync_fs)
  34. sb->s_op->sync_fs(sb, wait);
  35. return __sync_blockdev(sb->s_bdev, wait);
  36. }
  37. /*
  38. * Write out and wait upon all dirty data associated with this
  39. * superblock. Filesystem data as well as the underlying block
  40. * device. Takes the superblock lock.
  41. */
  42. int sync_filesystem(struct super_block *sb)
  43. {
  44. int ret;
  45. ret = __sync_filesystem(sb, 0);
  46. if (ret < 0)
  47. return ret;
  48. return __sync_filesystem(sb, 1);
  49. }
  50. EXPORT_SYMBOL_GPL(sync_filesystem);
  51. /*
  52. * Sync all the data for all the filesystems (called by sys_sync() and
  53. * emergency sync)
  54. *
  55. * This operation is careful to avoid the livelock which could easily happen
  56. * if two or more filesystems are being continuously dirtied. s_need_sync
  57. * is used only here. We set it against all filesystems and then clear it as
  58. * we sync them. So redirtied filesystems are skipped.
  59. *
  60. * But if process A is currently running sync_filesystems and then process B
  61. * calls sync_filesystems as well, process B will set all the s_need_sync
  62. * flags again, which will cause process A to resync everything. Fix that with
  63. * a local mutex.
  64. */
  65. static void sync_filesystems(int wait)
  66. {
  67. struct super_block *sb;
  68. static DEFINE_MUTEX(mutex);
  69. mutex_lock(&mutex); /* Could be down_interruptible */
  70. spin_lock(&sb_lock);
  71. list_for_each_entry(sb, &super_blocks, s_list) {
  72. if (sb->s_flags & MS_RDONLY)
  73. continue;
  74. sb->s_need_sync = 1;
  75. }
  76. restart:
  77. list_for_each_entry(sb, &super_blocks, s_list) {
  78. if (!sb->s_need_sync)
  79. continue;
  80. sb->s_need_sync = 0;
  81. if (sb->s_flags & MS_RDONLY)
  82. continue; /* hm. Was remounted r/o meanwhile */
  83. sb->s_count++;
  84. spin_unlock(&sb_lock);
  85. down_read(&sb->s_umount);
  86. if (sb->s_root)
  87. __sync_filesystem(sb, wait);
  88. up_read(&sb->s_umount);
  89. /* restart only when sb is no longer on the list */
  90. spin_lock(&sb_lock);
  91. if (__put_super_and_need_restart(sb))
  92. goto restart;
  93. }
  94. spin_unlock(&sb_lock);
  95. mutex_unlock(&mutex);
  96. }
  97. SYSCALL_DEFINE0(sync)
  98. {
  99. sync_filesystems(0);
  100. sync_filesystems(1);
  101. if (unlikely(laptop_mode))
  102. laptop_sync_completion();
  103. return 0;
  104. }
  105. static void do_sync_work(struct work_struct *work)
  106. {
  107. /*
  108. * Sync twice to reduce the possibility we skipped some inodes / pages
  109. * because they were temporarily locked
  110. */
  111. sync_filesystems(0);
  112. sync_filesystems(0);
  113. printk("Emergency Sync complete\n");
  114. kfree(work);
  115. }
  116. void emergency_sync(void)
  117. {
  118. struct work_struct *work;
  119. work = kmalloc(sizeof(*work), GFP_ATOMIC);
  120. if (work) {
  121. INIT_WORK(work, do_sync_work);
  122. schedule_work(work);
  123. }
  124. }
  125. /*
  126. * Generic function to fsync a file.
  127. *
  128. * filp may be NULL if called via the msync of a vma.
  129. */
  130. int file_fsync(struct file *filp, struct dentry *dentry, int datasync)
  131. {
  132. struct inode * inode = dentry->d_inode;
  133. struct super_block * sb;
  134. int ret, err;
  135. /* sync the inode to buffers */
  136. ret = write_inode_now(inode, 0);
  137. /* sync the superblock to buffers */
  138. sb = inode->i_sb;
  139. lock_super(sb);
  140. if (sb->s_dirt && sb->s_op->write_super)
  141. sb->s_op->write_super(sb);
  142. unlock_super(sb);
  143. /* .. finally sync the buffers to disk */
  144. err = sync_blockdev(sb->s_bdev);
  145. if (!ret)
  146. ret = err;
  147. return ret;
  148. }
  149. /**
  150. * vfs_fsync - perform a fsync or fdatasync on a file
  151. * @file: file to sync
  152. * @dentry: dentry of @file
  153. * @data: only perform a fdatasync operation
  154. *
  155. * Write back data and metadata for @file to disk. If @datasync is
  156. * set only metadata needed to access modified file data is written.
  157. *
  158. * In case this function is called from nfsd @file may be %NULL and
  159. * only @dentry is set. This can only happen when the filesystem
  160. * implements the export_operations API.
  161. */
  162. int vfs_fsync(struct file *file, struct dentry *dentry, int datasync)
  163. {
  164. const struct file_operations *fop;
  165. struct address_space *mapping;
  166. int err, ret;
  167. /*
  168. * Get mapping and operations from the file in case we have
  169. * as file, or get the default values for them in case we
  170. * don't have a struct file available. Damn nfsd..
  171. */
  172. if (file) {
  173. mapping = file->f_mapping;
  174. fop = file->f_op;
  175. } else {
  176. mapping = dentry->d_inode->i_mapping;
  177. fop = dentry->d_inode->i_fop;
  178. }
  179. if (!fop || !fop->fsync) {
  180. ret = -EINVAL;
  181. goto out;
  182. }
  183. ret = filemap_fdatawrite(mapping);
  184. /*
  185. * We need to protect against concurrent writers, which could cause
  186. * livelocks in fsync_buffers_list().
  187. */
  188. mutex_lock(&mapping->host->i_mutex);
  189. err = fop->fsync(file, dentry, datasync);
  190. if (!ret)
  191. ret = err;
  192. mutex_unlock(&mapping->host->i_mutex);
  193. err = filemap_fdatawait(mapping);
  194. if (!ret)
  195. ret = err;
  196. out:
  197. return ret;
  198. }
  199. EXPORT_SYMBOL(vfs_fsync);
  200. static int do_fsync(unsigned int fd, int datasync)
  201. {
  202. struct file *file;
  203. int ret = -EBADF;
  204. file = fget(fd);
  205. if (file) {
  206. ret = vfs_fsync(file, file->f_path.dentry, datasync);
  207. fput(file);
  208. }
  209. return ret;
  210. }
  211. SYSCALL_DEFINE1(fsync, unsigned int, fd)
  212. {
  213. return do_fsync(fd, 0);
  214. }
  215. SYSCALL_DEFINE1(fdatasync, unsigned int, fd)
  216. {
  217. return do_fsync(fd, 1);
  218. }
  219. /*
  220. * sys_sync_file_range() permits finely controlled syncing over a segment of
  221. * a file in the range offset .. (offset+nbytes-1) inclusive. If nbytes is
  222. * zero then sys_sync_file_range() will operate from offset out to EOF.
  223. *
  224. * The flag bits are:
  225. *
  226. * SYNC_FILE_RANGE_WAIT_BEFORE: wait upon writeout of all pages in the range
  227. * before performing the write.
  228. *
  229. * SYNC_FILE_RANGE_WRITE: initiate writeout of all those dirty pages in the
  230. * range which are not presently under writeback. Note that this may block for
  231. * significant periods due to exhaustion of disk request structures.
  232. *
  233. * SYNC_FILE_RANGE_WAIT_AFTER: wait upon writeout of all pages in the range
  234. * after performing the write.
  235. *
  236. * Useful combinations of the flag bits are:
  237. *
  238. * SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE: ensures that all pages
  239. * in the range which were dirty on entry to sys_sync_file_range() are placed
  240. * under writeout. This is a start-write-for-data-integrity operation.
  241. *
  242. * SYNC_FILE_RANGE_WRITE: start writeout of all dirty pages in the range which
  243. * are not presently under writeout. This is an asynchronous flush-to-disk
  244. * operation. Not suitable for data integrity operations.
  245. *
  246. * SYNC_FILE_RANGE_WAIT_BEFORE (or SYNC_FILE_RANGE_WAIT_AFTER): wait for
  247. * completion of writeout of all pages in the range. This will be used after an
  248. * earlier SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE operation to wait
  249. * for that operation to complete and to return the result.
  250. *
  251. * SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE|SYNC_FILE_RANGE_WAIT_AFTER:
  252. * a traditional sync() operation. This is a write-for-data-integrity operation
  253. * which will ensure that all pages in the range which were dirty on entry to
  254. * sys_sync_file_range() are committed to disk.
  255. *
  256. *
  257. * SYNC_FILE_RANGE_WAIT_BEFORE and SYNC_FILE_RANGE_WAIT_AFTER will detect any
  258. * I/O errors or ENOSPC conditions and will return those to the caller, after
  259. * clearing the EIO and ENOSPC flags in the address_space.
  260. *
  261. * It should be noted that none of these operations write out the file's
  262. * metadata. So unless the application is strictly performing overwrites of
  263. * already-instantiated disk blocks, there are no guarantees here that the data
  264. * will be available after a crash.
  265. */
  266. SYSCALL_DEFINE(sync_file_range)(int fd, loff_t offset, loff_t nbytes,
  267. unsigned int flags)
  268. {
  269. int ret;
  270. struct file *file;
  271. loff_t endbyte; /* inclusive */
  272. int fput_needed;
  273. umode_t i_mode;
  274. ret = -EINVAL;
  275. if (flags & ~VALID_FLAGS)
  276. goto out;
  277. endbyte = offset + nbytes;
  278. if ((s64)offset < 0)
  279. goto out;
  280. if ((s64)endbyte < 0)
  281. goto out;
  282. if (endbyte < offset)
  283. goto out;
  284. if (sizeof(pgoff_t) == 4) {
  285. if (offset >= (0x100000000ULL << PAGE_CACHE_SHIFT)) {
  286. /*
  287. * The range starts outside a 32 bit machine's
  288. * pagecache addressing capabilities. Let it "succeed"
  289. */
  290. ret = 0;
  291. goto out;
  292. }
  293. if (endbyte >= (0x100000000ULL << PAGE_CACHE_SHIFT)) {
  294. /*
  295. * Out to EOF
  296. */
  297. nbytes = 0;
  298. }
  299. }
  300. if (nbytes == 0)
  301. endbyte = LLONG_MAX;
  302. else
  303. endbyte--; /* inclusive */
  304. ret = -EBADF;
  305. file = fget_light(fd, &fput_needed);
  306. if (!file)
  307. goto out;
  308. i_mode = file->f_path.dentry->d_inode->i_mode;
  309. ret = -ESPIPE;
  310. if (!S_ISREG(i_mode) && !S_ISBLK(i_mode) && !S_ISDIR(i_mode) &&
  311. !S_ISLNK(i_mode))
  312. goto out_put;
  313. ret = do_sync_mapping_range(file->f_mapping, offset, endbyte, flags);
  314. out_put:
  315. fput_light(file, fput_needed);
  316. out:
  317. return ret;
  318. }
  319. #ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
  320. asmlinkage long SyS_sync_file_range(long fd, loff_t offset, loff_t nbytes,
  321. long flags)
  322. {
  323. return SYSC_sync_file_range((int) fd, offset, nbytes,
  324. (unsigned int) flags);
  325. }
  326. SYSCALL_ALIAS(sys_sync_file_range, SyS_sync_file_range);
  327. #endif
  328. /* It would be nice if people remember that not all the world's an i386
  329. when they introduce new system calls */
  330. SYSCALL_DEFINE(sync_file_range2)(int fd, unsigned int flags,
  331. loff_t offset, loff_t nbytes)
  332. {
  333. return sys_sync_file_range(fd, offset, nbytes, flags);
  334. }
  335. #ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
  336. asmlinkage long SyS_sync_file_range2(long fd, long flags,
  337. loff_t offset, loff_t nbytes)
  338. {
  339. return SYSC_sync_file_range2((int) fd, (unsigned int) flags,
  340. offset, nbytes);
  341. }
  342. SYSCALL_ALIAS(sys_sync_file_range2, SyS_sync_file_range2);
  343. #endif
  344. /*
  345. * `endbyte' is inclusive
  346. */
  347. int do_sync_mapping_range(struct address_space *mapping, loff_t offset,
  348. loff_t endbyte, unsigned int flags)
  349. {
  350. int ret;
  351. if (!mapping) {
  352. ret = -EINVAL;
  353. goto out;
  354. }
  355. ret = 0;
  356. if (flags & SYNC_FILE_RANGE_WAIT_BEFORE) {
  357. ret = wait_on_page_writeback_range(mapping,
  358. offset >> PAGE_CACHE_SHIFT,
  359. endbyte >> PAGE_CACHE_SHIFT);
  360. if (ret < 0)
  361. goto out;
  362. }
  363. if (flags & SYNC_FILE_RANGE_WRITE) {
  364. ret = __filemap_fdatawrite_range(mapping, offset, endbyte,
  365. WB_SYNC_ALL);
  366. if (ret < 0)
  367. goto out;
  368. }
  369. if (flags & SYNC_FILE_RANGE_WAIT_AFTER) {
  370. ret = wait_on_page_writeback_range(mapping,
  371. offset >> PAGE_CACHE_SHIFT,
  372. endbyte >> PAGE_CACHE_SHIFT);
  373. }
  374. out:
  375. return ret;
  376. }
  377. EXPORT_SYMBOL_GPL(do_sync_mapping_range);