sync.c

/*
 * High-level sync()-related operations
 */

#include <linux/kernel.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/namei.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include <linux/syscalls.h>
#include <linux/linkage.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/backing-dev.h>
#include "internal.h"

#define VALID_FLAGS (SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE| \
                        SYNC_FILE_RANGE_WAIT_AFTER)

/*
 * Do the filesystem syncing work. For simple filesystems
 * writeback_inodes_sb(sb) just dirties buffers with inodes so we have to
 * submit IO for these buffers via __sync_blockdev(). This also speeds up the
 * wait == 1 case since in that case write_inode() functions do
 * sync_dirty_buffer() and thus effectively write one block at a time.
 */
static int __sync_filesystem(struct super_block *sb, int wait)
{
        /*
         * This should be safe, as we require bdi backing to actually
         * write out data in the first place
         */
        if (sb->s_bdi == &noop_backing_dev_info)
                return 0;

        if (sb->s_qcop && sb->s_qcop->quota_sync)
                sb->s_qcop->quota_sync(sb, -1, wait);

        if (wait)
                sync_inodes_sb(sb);
        else
                writeback_inodes_sb(sb, WB_REASON_SYNC);

        if (sb->s_op->sync_fs)
                sb->s_op->sync_fs(sb, wait);
        return __sync_blockdev(sb->s_bdev, wait);
}

/*
 * Write out and wait upon all dirty data associated with this
 * superblock. Filesystem data as well as the underlying block
 * device. The caller must hold the superblock's s_umount semaphore.
 */
int sync_filesystem(struct super_block *sb)
{
        int ret;

        /*
         * We need to be protected against the filesystem going from
         * r/o to r/w or vice versa.
         */
        WARN_ON(!rwsem_is_locked(&sb->s_umount));

        /*
         * No point in syncing out anything if the filesystem is read-only.
         */
        if (sb->s_flags & MS_RDONLY)
                return 0;

        ret = __sync_filesystem(sb, 0);
        if (ret < 0)
                return ret;
        return __sync_filesystem(sb, 1);
}
EXPORT_SYMBOL_GPL(sync_filesystem);
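
/*
 * A minimal caller sketch (illustration only, not part of this file's
 * API): sync_filesystem() expects s_umount to be held, which is what the
 * WARN_ON() above checks. The syncfs() syscall below follows exactly
 * this pattern:
 *
 *      down_read(&sb->s_umount);
 *      ret = sync_filesystem(sb);
 *      up_read(&sb->s_umount);
 */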

static void sync_one_sb(struct super_block *sb, void *arg)
{
        if (!(sb->s_flags & MS_RDONLY))
                __sync_filesystem(sb, *(int *)arg);
}

/*
 * Sync all the data for all the filesystems (called by sys_sync() and
 * emergency sync)
 */
static void sync_filesystems(int wait)
{
        iterate_supers(sync_one_sb, &wait);
}

/*
 * Sync everything. Start out by waking the flusher threads, because
 * they write back all queues in parallel.
 */
static void do_sync(void)
{
        wakeup_flusher_threads(0, WB_REASON_SYNC);
        sync_filesystems(0);
        sync_filesystems(1);
        if (unlikely(laptop_mode))
                laptop_sync_completion();
        return;
}

static DEFINE_MUTEX(sync_mutex);        /* One do_sync() at a time. */
static unsigned long sync_seq;          /* Many sync()s from one do_sync(). */
                                        /*  Overflow harmless, extra wait. */

/*
 * Only allow one task to do sync() at a time, and further allow
 * concurrent sync() calls to be satisfied by a single do_sync()
 * invocation.
 */
SYSCALL_DEFINE0(sync)
{
        unsigned long snap;
        unsigned long snap_done;

        snap = ACCESS_ONCE(sync_seq);
        smp_mb();  /* Prevent above from bleeding into critical section. */
        mutex_lock(&sync_mutex);
        snap_done = sync_seq;

        /*
         * If the value in snap is odd, we need to wait for the current
         * do_sync() to complete, then wait for the next one, in other
         * words, we need the value of snap_done to be three larger than
         * the value of snap. On the other hand, if the value in snap is
         * even, we only have to wait for the next request to complete,
         * in other words, we need the value of snap_done to be only two
         * greater than the value of snap. The "(snap + 3) & ~0x1" computes
         * this for us (thank you, Linus!).
         */
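        /*
         * A worked example of that arithmetic (illustration only): if
         * snap == 4 (even, no do_sync() in flight at snapshot time),
         * (4 + 3) & ~0x1 == 6, so one complete do_sync() after our
         * snapshot (+2) suffices. If snap == 5 (odd, a do_sync() was in
         * flight), (5 + 3) & ~0x1 == 8, so the in-flight do_sync() must
         * finish (+1) and a full new one must run (+2), i.e. snap + 3.
         */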
        if (ULONG_CMP_GE(snap_done, (snap + 3) & ~0x1)) {
                /*
                 * A full do_sync() executed between our two fetches from
                 * sync_seq, so our work is done!
                 */
                smp_mb(); /* Order test with caller's subsequent code. */
                mutex_unlock(&sync_mutex);
                return 0;
        }

        /* Record the start of do_sync(). */
        ACCESS_ONCE(sync_seq)++;
        WARN_ON_ONCE((sync_seq & 0x1) != 1);
        smp_mb();  /* Keep prior increment out of do_sync(). */

        do_sync();

        /* Record the end of do_sync(). */
        smp_mb();  /* Keep subsequent increment out of do_sync(). */
        ACCESS_ONCE(sync_seq)++;
        WARN_ON_ONCE((sync_seq & 0x1) != 0);
        mutex_unlock(&sync_mutex);
        return 0;
}

static void do_sync_work(struct work_struct *work)
{
        /*
         * Sync twice to reduce the possibility we skipped some inodes / pages
         * because they were temporarily locked
         */
        sync_filesystems(0);
        sync_filesystems(0);
        printk("Emergency Sync complete\n");
        kfree(work);
}

void emergency_sync(void)
{
        struct work_struct *work;

        work = kmalloc(sizeof(*work), GFP_ATOMIC);
        if (work) {
                INIT_WORK(work, do_sync_work);
                schedule_work(work);
        }
}

/*
 * sync a single super
 */
SYSCALL_DEFINE1(syncfs, int, fd)
{
        struct file *file;
        struct super_block *sb;
        int ret;
        int fput_needed;

        file = fget_light(fd, &fput_needed);
        if (!file)
                return -EBADF;
        sb = file->f_dentry->d_sb;

        down_read(&sb->s_umount);
        ret = sync_filesystem(sb);
        up_read(&sb->s_umount);

        fput_light(file, fput_needed);
        return ret;
}

/**
 * vfs_fsync_range - helper to sync a range of data & metadata to disk
 * @file:     file to sync
 * @start:    offset in bytes of the beginning of data range to sync
 * @end:      offset in bytes of the end of data range (inclusive)
 * @datasync: perform only datasync
 *
 * Write back data in range @start..@end and metadata for @file to disk. If
 * @datasync is set only metadata needed to access modified file data is
 * written.
 */
int vfs_fsync_range(struct file *file, loff_t start, loff_t end, int datasync)
{
        if (!file->f_op || !file->f_op->fsync)
                return -EINVAL;
        return file->f_op->fsync(file, start, end, datasync);
}
EXPORT_SYMBOL(vfs_fsync_range);

/**
 * vfs_fsync - perform a fsync or fdatasync on a file
 * @file:     file to sync
 * @datasync: only perform a fdatasync operation
 *
 * Write back data and metadata for @file to disk. If @datasync is
 * set only metadata needed to access modified file data is written.
 */
int vfs_fsync(struct file *file, int datasync)
{
        return vfs_fsync_range(file, 0, LLONG_MAX, datasync);
}
EXPORT_SYMBOL(vfs_fsync);

static int do_fsync(unsigned int fd, int datasync)
{
        struct file *file;
        int ret = -EBADF;

        file = fget(fd);
        if (file) {
                ret = vfs_fsync(file, datasync);
                fput(file);
        }
        return ret;
}

SYSCALL_DEFINE1(fsync, unsigned int, fd)
{
        return do_fsync(fd, 0);
}

SYSCALL_DEFINE1(fdatasync, unsigned int, fd)
{
        return do_fsync(fd, 1);
}

/**
 * generic_write_sync - perform syncing after a write if file / inode is sync
 * @file:  file to which the write happened
 * @pos:   offset where the write started
 * @count: length of the write
 *
 * This is just a simple wrapper around our general syncing function.
 */
int generic_write_sync(struct file *file, loff_t pos, loff_t count)
{
        if (!(file->f_flags & O_DSYNC) && !IS_SYNC(file->f_mapping->host))
                return 0;
        return vfs_fsync_range(file, pos, pos + count - 1,
                               (file->f_flags & __O_SYNC) ? 0 : 1);
}
EXPORT_SYMBOL(generic_write_sync);
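
/*
 * A caller sketch (illustration only, names are hypothetical): a write
 * path that has just completed a write of @written bytes at @pos would
 * typically do something like
 *
 *      if (written > 0) {
 *              ssize_t err = generic_write_sync(file, pos, written);
 *              if (err < 0)
 *                      written = err;
 *      }
 *
 * so that O_SYNC/O_DSYNC semantics are honoured only when requested.
 */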

/*
 * sys_sync_file_range() permits finely controlled syncing over a segment of
 * a file in the range offset .. (offset+nbytes-1) inclusive. If nbytes is
 * zero then sys_sync_file_range() will operate from offset out to EOF.
 *
 * The flag bits are:
 *
 * SYNC_FILE_RANGE_WAIT_BEFORE: wait upon writeout of all pages in the range
 * before performing the write.
 *
 * SYNC_FILE_RANGE_WRITE: initiate writeout of all those dirty pages in the
 * range which are not presently under writeback. Note that this may block for
 * significant periods due to exhaustion of disk request structures.
 *
 * SYNC_FILE_RANGE_WAIT_AFTER: wait upon writeout of all pages in the range
 * after performing the write.
 *
 * Useful combinations of the flag bits are:
 *
 * SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE: ensures that all pages
 * in the range which were dirty on entry to sys_sync_file_range() are placed
 * under writeout. This is a start-write-for-data-integrity operation.
 *
 * SYNC_FILE_RANGE_WRITE: start writeout of all dirty pages in the range which
 * are not presently under writeout. This is an asynchronous flush-to-disk
 * operation. Not suitable for data integrity operations.
 *
 * SYNC_FILE_RANGE_WAIT_BEFORE (or SYNC_FILE_RANGE_WAIT_AFTER): wait for
 * completion of writeout of all pages in the range. This will be used after an
 * earlier SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE operation to wait
 * for that operation to complete and to return the result.
 *
 * SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE|SYNC_FILE_RANGE_WAIT_AFTER:
 * a traditional sync() operation. This is a write-for-data-integrity operation
 * which will ensure that all pages in the range which were dirty on entry to
 * sys_sync_file_range() are committed to disk.
 *
 * SYNC_FILE_RANGE_WAIT_BEFORE and SYNC_FILE_RANGE_WAIT_AFTER will detect any
 * I/O errors or ENOSPC conditions and will return those to the caller, after
 * clearing the EIO and ENOSPC flags in the address_space.
 *
 * It should be noted that none of these operations write out the file's
 * metadata. So unless the application is strictly performing overwrites of
 * already-instantiated disk blocks, there are no guarantees here that the data
 * will be available after a crash.
 */
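/*
 * A userspace sketch of the two-step pattern described above
 * (illustration only, assuming the libc sync_file_range() wrapper):
 *
 *      // start writeout of a range that was just written
 *      sync_file_range(fd, off, len, SYNC_FILE_RANGE_WRITE);
 *      // ... do other work ...
 *      // later, wait for that writeout (and anything redirtied) to finish
 *      sync_file_range(fd, off, len, SYNC_FILE_RANGE_WAIT_BEFORE |
 *                      SYNC_FILE_RANGE_WRITE | SYNC_FILE_RANGE_WAIT_AFTER);
 *
 * Note the caveat above: file metadata is not written, so this only
 * helps data integrity for overwrites of already-allocated blocks.
 */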
SYSCALL_DEFINE(sync_file_range)(int fd, loff_t offset, loff_t nbytes,
                                unsigned int flags)
{
        int ret;
        struct file *file;
        struct address_space *mapping;
        loff_t endbyte;                 /* inclusive */
        int fput_needed;
        umode_t i_mode;

        ret = -EINVAL;
        if (flags & ~VALID_FLAGS)
                goto out;

        endbyte = offset + nbytes;

        if ((s64)offset < 0)
                goto out;
        if ((s64)endbyte < 0)
                goto out;
        if (endbyte < offset)
                goto out;

        if (sizeof(pgoff_t) == 4) {
                if (offset >= (0x100000000ULL << PAGE_CACHE_SHIFT)) {
                        /*
                         * The range starts outside a 32 bit machine's
                         * pagecache addressing capabilities. Let it "succeed"
                         */
                        ret = 0;
                        goto out;
                }
                if (endbyte >= (0x100000000ULL << PAGE_CACHE_SHIFT)) {
                        /*
                         * Out to EOF
                         */
                        nbytes = 0;
                }
        }

        if (nbytes == 0)
                endbyte = LLONG_MAX;
        else
                endbyte--;              /* inclusive */

        ret = -EBADF;
        file = fget_light(fd, &fput_needed);
        if (!file)
                goto out;

        i_mode = file->f_path.dentry->d_inode->i_mode;
        ret = -ESPIPE;
        if (!S_ISREG(i_mode) && !S_ISBLK(i_mode) && !S_ISDIR(i_mode) &&
                        !S_ISLNK(i_mode))
                goto out_put;

        mapping = file->f_mapping;
        if (!mapping) {
                ret = -EINVAL;
                goto out_put;
        }

        ret = 0;
        if (flags & SYNC_FILE_RANGE_WAIT_BEFORE) {
                ret = filemap_fdatawait_range(mapping, offset, endbyte);
                if (ret < 0)
                        goto out_put;
        }

        if (flags & SYNC_FILE_RANGE_WRITE) {
                ret = __filemap_fdatawrite_range(mapping, offset, endbyte,
                                                 WB_SYNC_NONE);
                if (ret < 0)
                        goto out_put;
        }

        if (flags & SYNC_FILE_RANGE_WAIT_AFTER)
                ret = filemap_fdatawait_range(mapping, offset, endbyte);

out_put:
        fput_light(file, fput_needed);
out:
        return ret;
}

#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
asmlinkage long SyS_sync_file_range(long fd, loff_t offset, loff_t nbytes,
                                    long flags)
{
        return SYSC_sync_file_range((int) fd, offset, nbytes,
                                    (unsigned int) flags);
}
SYSCALL_ALIAS(sys_sync_file_range, SyS_sync_file_range);
#endif

/* It would be nice if people remember that not all the world's an i386
   when they introduce new system calls */
SYSCALL_DEFINE(sync_file_range2)(int fd, unsigned int flags,
                                 loff_t offset, loff_t nbytes)
{
        return sys_sync_file_range(fd, offset, nbytes, flags);
}

#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
asmlinkage long SyS_sync_file_range2(long fd, long flags,
                                     loff_t offset, loff_t nbytes)
{
        return SYSC_sync_file_range2((int) fd, (unsigned int) flags,
                                     offset, nbytes);
}
SYSCALL_ALIAS(sys_sync_file_range2, SyS_sync_file_range2);
#endif