/*
 * mm/truncate.c - code for taking down pages from address_spaces
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 10Sep2002	Andrew Morton
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/backing-dev.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/export.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/buffer_head.h>	/* grr. try_to_release_page,
				   do_invalidatepage */
#include <linux/cleancache.h>
#include <linux/rmap.h>
#include "internal.h"

/**
 * do_invalidatepage - invalidate part or all of a page
 * @page: the page which is affected
 * @offset: the index of the truncation point
 *
 * do_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * do_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point, because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void do_invalidatepage(struct page *page, unsigned long offset)
{
	void (*invalidatepage)(struct page *, unsigned long);

	invalidatepage = page->mapping->a_ops->invalidatepage;
#ifdef CONFIG_BLOCK
	if (!invalidatepage)
		invalidatepage = block_invalidatepage;
#endif
	if (invalidatepage)
		(*invalidatepage)(page, offset);
}

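/*
 * Usage sketch (not part of this file): a block-backed filesystem can
 * either leave ->invalidatepage NULL, in which case do_invalidatepage()
 * falls back to block_invalidatepage() under CONFIG_BLOCK, or wire it up
 * explicitly in its address_space_operations. "myfs" is hypothetical:
 *
 *	static const struct address_space_operations myfs_aops = {
 *		.readpage	= myfs_readpage,
 *		.writepage	= myfs_writepage,
 *		.invalidatepage	= block_invalidatepage,
 *	};
 */
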
static inline void truncate_partial_page(struct page *page, unsigned partial)
{
	zero_user_segment(page, partial, PAGE_CACHE_SIZE);
	cleancache_invalidate_page(page->mapping, page);
	if (page_has_private(page))
		do_invalidatepage(page, partial);
}

/*
 * This cancels just the dirty bit on the kernel page itself, it
 * does NOT actually remove dirty bits on any mmap's that may be
 * around. It also leaves the page tagged dirty, so any sync
 * activity will still find it on the dirty lists, and in particular,
 * clear_page_dirty_for_io() will still look at the dirty bits in
 * the VM.
 *
 * Doing this should *normally* only ever be done when a page
 * is truncated, and is not actually mapped anywhere at all. However,
 * fs/buffer.c does this when it notices that somebody has cleaned
 * out all the buffers on a page without actually doing it through
 * the VM. Can you say "ext3 is horribly ugly"? Thought you could.
 */
void cancel_dirty_page(struct page *page, unsigned int account_size)
{
	if (TestClearPageDirty(page)) {
		struct address_space *mapping = page->mapping;
		if (mapping && mapping_cap_account_dirty(mapping)) {
			dec_zone_page_state(page, NR_FILE_DIRTY);
			dec_bdi_stat(mapping->backing_dev_info,
					BDI_RECLAIMABLE);
			if (account_size)
				task_io_account_cancelled_write(account_size);
		}
	}
}
EXPORT_SYMBOL(cancel_dirty_page);

/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes orphaned. It will be left on the LRU and may even be mapped into
 * user pagetables if we're racing with filemap_fault().
 *
 * We need to bail out if page->mapping is no longer equal to the original
 * mapping. This happens a) when the VM reclaimed the page while we waited on
 * its lock, b) when a concurrent invalidate_mapping_pages got there first and
 * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
 */
static int
truncate_complete_page(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return -EIO;

	if (page_has_private(page))
		do_invalidatepage(page, 0);

	cancel_dirty_page(page, PAGE_CACHE_SIZE);

	clear_page_mlock(page);
	ClearPageMappedToDisk(page);
	delete_from_page_cache(page);
	return 0;
}

/*
 * This is for invalidate_mapping_pages(). That function can be called at
 * any time, and is not supposed to throw away dirty pages. But pages can
 * be marked dirty at any time too, so use remove_mapping which safely
 * discards clean, unused pages.
 *
 * Returns non-zero if the page was successfully invalidated.
 */
static int
invalidate_complete_page(struct address_space *mapping, struct page *page)
{
	int ret;

	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, 0))
		return 0;

	clear_page_mlock(page);
	ret = remove_mapping(mapping, page);

	return ret;
}

int truncate_inode_page(struct address_space *mapping, struct page *page)
{
	if (page_mapped(page)) {
		unmap_mapping_range(mapping,
				   (loff_t)page->index << PAGE_CACHE_SHIFT,
				   PAGE_CACHE_SIZE, 0);
	}
	return truncate_complete_page(mapping, page);
}

/*
 * Used to get rid of pages on hardware memory corruption.
 */
int generic_error_remove_page(struct address_space *mapping, struct page *page)
{
	if (!mapping)
		return -EINVAL;
	/*
	 * Only punch for normal data pages for now.
	 * Handling other types like directories would need more auditing.
	 */
	if (!S_ISREG(mapping->host->i_mode))
		return -EIO;
	return truncate_inode_page(mapping, page);
}
EXPORT_SYMBOL(generic_error_remove_page);

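/*
 * Usage sketch (not part of this file): filesystems opt in to hwpoison
 * recovery by pointing ->error_remove_page at this helper; the memory
 * failure code then calls it to drop the corrupted page. "myfs" is
 * hypothetical:
 *
 *	static const struct address_space_operations myfs_aops = {
 *		.error_remove_page	= generic_error_remove_page,
 *	};
 */
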
/*
 * Safely invalidate one page from its pagecache mapping.
 * It only drops clean, unused pages. The page must be locked.
 *
 * Returns 1 if the page is successfully invalidated, otherwise 0.
 */
int invalidate_inode_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	if (!mapping)
		return 0;
	if (PageDirty(page) || PageWriteback(page))
		return 0;
	if (page_mapped(page))
		return 0;
	return invalidate_complete_page(mapping, page);
}

/**
 * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 * @lend: offset to which to truncate
 *
 * Truncate the page cache, removing the pages that are between the
 * specified offsets (and zeroing out the partial page if lstart is not
 * page aligned).
 *
 * Truncate takes two passes - the first pass is nonblocking. It will not
 * block on page locks and it will not block on writeback. The second pass
 * will wait. This is to prevent as much IO as possible in the affected region.
 * The first pass will remove most pages, so the search cost of the second pass
 * is low.
 *
 * We pass down the cache-hot hint to the page freeing code. Even if the
 * mapping is large, it is probably the case that the final pages are the most
 * recently touched, and freeing happens in ascending file offset order.
 */
void truncate_inode_pages_range(struct address_space *mapping,
				loff_t lstart, loff_t lend)
{
	const pgoff_t start = (lstart + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
	const unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
	struct pagevec pvec;
	pgoff_t index;
	pgoff_t end;
	int i;

	cleancache_invalidate_inode(mapping);
	if (mapping->nrpages == 0)
		return;

	BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1));
	end = (lend >> PAGE_CACHE_SHIFT);

	pagevec_init(&pvec, 0);
	index = start;
	while (index <= end && pagevec_lookup(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = page->index;
			if (index > end)
				break;

			if (!trylock_page(page))
				continue;
			WARN_ON(page->index != index);
			if (PageWriteback(page)) {
				unlock_page(page);
				continue;
			}
			truncate_inode_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		cond_resched();
		index++;
	}

	if (partial) {
		struct page *page = find_lock_page(mapping, start - 1);
		if (page) {
			wait_on_page_writeback(page);
			truncate_partial_page(page, partial);
			unlock_page(page);
			page_cache_release(page);
		}
	}

	index = start;
	for ( ; ; ) {
		cond_resched();
		if (!pagevec_lookup(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
			if (index == start)
				break;
			index = start;
			continue;
		}
		if (index == start && pvec.pages[0]->index > end) {
			pagevec_release(&pvec);
			break;
		}
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = page->index;
			if (index > end)
				break;

			lock_page(page);
			WARN_ON(page->index != index);
			wait_on_page_writeback(page);
			truncate_inode_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		index++;
	}
	cleancache_invalidate_inode(mapping);
}
EXPORT_SYMBOL(truncate_inode_pages_range);

/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Called under (and serialised by) inode->i_mutex.
 *
 * Note: When this function returns, there can be a page in the process of
 * deletion (inside __delete_from_page_cache()) in the specified range. Thus
 * mapping->nrpages can be non-zero when this function returns even after
 * truncation of the whole mapping.
 */
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
	truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
}
EXPORT_SYMBOL(truncate_inode_pages);

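/*
 * Usage sketch (not part of this file): the canonical caller is a
 * filesystem's ->evict_inode(), which must empty the pagecache before
 * the in-core inode is destroyed. "myfs" is hypothetical:
 *
 *	static void myfs_evict_inode(struct inode *inode)
 *	{
 *		truncate_inode_pages(&inode->i_data, 0);
 *		...free on-disk blocks, then clear the in-core inode...
 *	}
 */
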
/**
 * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
 * @mapping: the address_space which holds the pages to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function only removes the unlocked pages; if you want to
 * remove all the pages of one inode, you must call truncate_inode_pages.
 *
 * invalidate_mapping_pages() will not block on IO activity. It will not
 * invalidate pages which are dirty, locked, under writeback or mapped into
 * pagetables.
 */
unsigned long invalidate_mapping_pages(struct address_space *mapping,
		pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t index = start;
	unsigned long ret;
	unsigned long count = 0;
	int i;

	/*
	 * Note: this function may get called on a shmem/tmpfs mapping:
	 * pagevec_lookup() might then return 0 prematurely (because it
	 * got a gangful of swap entries); but it's hardly worth worrying
	 * about - it can rarely have anything to free from such a mapping
	 * (most pages are dirty), and already skips over any difficulties.
	 */

	pagevec_init(&pvec, 0);
	while (index <= end && pagevec_lookup(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = page->index;
			if (index > end)
				break;

			if (!trylock_page(page))
				continue;
			WARN_ON(page->index != index);
			ret = invalidate_inode_page(page);
			unlock_page(page);
			/*
			 * Invalidation is a hint that the page is no longer
			 * of interest; try to speed up its reclaim.
			 */
			if (!ret)
				deactivate_page(page);
			count += ret;
		}
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		cond_resched();
		index++;
	}
	return count;
}
EXPORT_SYMBOL(invalidate_mapping_pages);

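/*
 * Usage sketch (not part of this file): callers treat this as a
 * best-effort "drop what you easily can" operation, for example to shed
 * the clean cached pages of a whole mapping (as the drop_caches code
 * does):
 *
 *	invalidate_mapping_pages(inode->i_mapping, 0, -1);
 *
 * The return value only says how many pages were dropped; pages that
 * were dirty, locked, under writeback or mapped are silently skipped.
 */
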
/*
 * This is like invalidate_complete_page(), except it ignores the page's
 * refcount. We do this because invalidate_inode_pages2() needs stronger
 * invalidation guarantees, and cannot afford to leave pages behind because
 * shrink_page_list() has a temp ref on them, or because they're transiently
 * sitting in the lru_cache_add() pagevecs.
 */
static int
invalidate_complete_page2(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
		return 0;

	clear_page_mlock(page);

	spin_lock_irq(&mapping->tree_lock);
	if (PageDirty(page))
		goto failed;

	BUG_ON(page_has_private(page));
	__delete_from_page_cache(page);
	spin_unlock_irq(&mapping->tree_lock);
	mem_cgroup_uncharge_cache_page(page);

	if (mapping->a_ops->freepage)
		mapping->a_ops->freepage(page);

	page_cache_release(page);	/* pagecache ref */
	return 1;
failed:
	spin_unlock_irq(&mapping->tree_lock);
	return 0;
}

static int do_launder_page(struct address_space *mapping, struct page *page)
{
	if (!PageDirty(page))
		return 0;
	if (page->mapping != mapping || mapping->a_ops->launder_page == NULL)
		return 0;
	return mapping->a_ops->launder_page(page);
}

/**
 * invalidate_inode_pages2_range - remove range of pages from an address_space
 * @mapping: the address_space
 * @start: the page offset 'from' which to invalidate
 * @end: the page offset 'to' which to invalidate (inclusive)
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2_range(struct address_space *mapping,
				  pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t index;
	int i;
	int ret = 0;
	int ret2 = 0;
	int did_range_unmap = 0;

	cleancache_invalidate_inode(mapping);
	pagevec_init(&pvec, 0);
	index = start;
	while (index <= end && pagevec_lookup(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = page->index;
			if (index > end)
				break;

			lock_page(page);
			WARN_ON(page->index != index);
			if (page->mapping != mapping) {
				unlock_page(page);
				continue;
			}
			wait_on_page_writeback(page);
			if (page_mapped(page)) {
				if (!did_range_unmap) {
					/*
					 * Zap the rest of the file in one hit.
					 */
					unmap_mapping_range(mapping,
					   (loff_t)index << PAGE_CACHE_SHIFT,
					   (loff_t)(1 + end - index)
							<< PAGE_CACHE_SHIFT,
					    0);
					did_range_unmap = 1;
				} else {
					/*
					 * Just zap this page
					 */
					unmap_mapping_range(mapping,
					   (loff_t)index << PAGE_CACHE_SHIFT,
					   PAGE_CACHE_SIZE, 0);
				}
			}
			BUG_ON(page_mapped(page));
			ret2 = do_launder_page(mapping, page);
			if (ret2 == 0) {
				if (!invalidate_complete_page2(mapping, page))
					ret2 = -EBUSY;
			}
			if (ret2 < 0)
				ret = ret2;
			unlock_page(page);
		}
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		cond_resched();
		index++;
	}
	cleancache_invalidate_inode(mapping);
	return ret;
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);

/**
 * invalidate_inode_pages2 - remove all pages from an address_space
 * @mapping: the address_space
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2(struct address_space *mapping)
{
	return invalidate_inode_pages2_range(mapping, 0, -1);
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2);

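/*
 * Usage sketch (not part of this file): unlike invalidate_mapping_pages(),
 * callers here need the pages to really be gone (e.g. a network filesystem
 * revalidating stale cached data), so the return value must be checked:
 *
 *	int err = invalidate_inode_pages2(inode->i_mapping);
 *	if (err)
 *		...some cached pages could not be invalidated (-EBUSY)...
 */
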
/**
 * truncate_pagecache - unmap and remove pagecache that has been truncated
 * @inode: inode
 * @oldsize: old file size
 * @newsize: new file size
 *
 * inode's new i_size must already be written before truncate_pagecache
 * is called.
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (eg. deallocates
 * blocks). This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */
void truncate_pagecache(struct inode *inode, loff_t oldsize, loff_t newsize)
{
	struct address_space *mapping = inode->i_mapping;
	loff_t holebegin = round_up(newsize, PAGE_SIZE);

	/*
	 * unmap_mapping_range is called twice, first simply for
	 * efficiency so that truncate_inode_pages does fewer
	 * single-page unmaps. However after this first call, and
	 * before truncate_inode_pages finishes, it is possible for
	 * private pages to be COWed, which remain after
	 * truncate_inode_pages finishes, hence the second
	 * unmap_mapping_range call must be made for correctness.
	 */
	unmap_mapping_range(mapping, holebegin, 0, 1);
	truncate_inode_pages(mapping, newsize);
	unmap_mapping_range(mapping, holebegin, 0, 1);
}
EXPORT_SYMBOL(truncate_pagecache);

/**
 * truncate_setsize - update inode and pagecache for a new file size
 * @inode: inode
 * @newsize: new file size
 *
 * truncate_setsize updates i_size and performs pagecache truncation (if
 * necessary) to @newsize. It will typically be called from the filesystem's
 * setattr function when ATTR_SIZE is passed in.
 *
 * Must be called with inode_mutex held and before all filesystem specific
 * block truncation has been performed.
 */
void truncate_setsize(struct inode *inode, loff_t newsize)
{
	loff_t oldsize = inode->i_size;

	i_size_write(inode, newsize);
	if (newsize > oldsize)
		pagecache_isize_extended(inode, oldsize, newsize);
	truncate_pagecache(inode, oldsize, newsize);
}
EXPORT_SYMBOL(truncate_setsize);

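/*
 * Usage sketch (not part of this file): a filesystem's ->setattr()
 * typically pairs truncate_setsize() with its own on-disk block
 * truncation. "myfs" and myfs_truncate_blocks() are hypothetical:
 *
 *	static int myfs_setattr(struct dentry *dentry, struct iattr *attr)
 *	{
 *		struct inode *inode = dentry->d_inode;
 *		int error = inode_change_ok(inode, attr);
 *		if (error)
 *			return error;
 *		if ((attr->ia_valid & ATTR_SIZE) &&
 *		    attr->ia_size != i_size_read(inode)) {
 *			truncate_setsize(inode, attr->ia_size);
 *			myfs_truncate_blocks(inode, attr->ia_size);
 *		}
 *		setattr_copy(inode, attr);
 *		mark_inode_dirty(inode);
 *		return 0;
 *	}
 */
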
/**
 * pagecache_isize_extended - update pagecache after extension of i_size
 * @inode: inode for which i_size was extended
 * @from: original inode size
 * @to: new inode size
 *
 * Handle extension of inode size either caused by extending truncate or by
 * write starting after current i_size. We mark the page straddling current
 * i_size RO so that page_mkwrite() is called on the nearest write access to
 * the page. This way filesystem can be sure that page_mkwrite() is called on
 * the page before user writes to the page via mmap after the i_size has been
 * changed.
 *
 * The function must be called after i_size is updated so that page fault
 * coming after we unlock the page will already see the new i_size.
 * The function must be called while we still hold i_mutex - this not only
 * makes sure i_size is stable but also that userspace cannot observe new
 * i_size value before we are prepared to store mmap writes at new inode size.
 */
void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to)
{
	int bsize = 1 << inode->i_blkbits;
	loff_t rounded_from;
	struct page *page;
	pgoff_t index;

	WARN_ON(to > inode->i_size);

	if (from >= to || bsize == PAGE_CACHE_SIZE)
		return;
	/* Page straddling @from will not have any hole block created? */
	rounded_from = round_up(from, bsize);
	if (to <= rounded_from || !(rounded_from & (PAGE_CACHE_SIZE - 1)))
		return;

	index = from >> PAGE_CACHE_SHIFT;
	page = find_lock_page(inode->i_mapping, index);
	/* Page not cached? Nothing to do */
	if (!page)
		return;
	/*
	 * See clear_page_dirty_for_io() for details why set_page_dirty()
	 * is needed.
	 */
	if (page_mkclean(page))
		set_page_dirty(page);
	unlock_page(page);
	page_cache_release(page);
}
EXPORT_SYMBOL(pagecache_isize_extended);

/**
 * vmtruncate - unmap mappings "freed" by truncate() syscall
 * @inode: inode of the file used
 * @newsize: file offset to start truncating
 *
 * This function is deprecated and truncate_setsize or truncate_pagecache
 * should be used instead, together with filesystem specific block truncation.
 */
int vmtruncate(struct inode *inode, loff_t newsize)
{
	int error;

	error = inode_newsize_ok(inode, newsize);
	if (error)
		return error;

	truncate_setsize(inode, newsize);
	if (inode->i_op->truncate)
		inode->i_op->truncate(inode);
	return 0;
}
EXPORT_SYMBOL(vmtruncate);

/**
 * truncate_pagecache_range - unmap and remove pagecache that is hole-punched
 * @inode: inode
 * @lstart: offset of beginning of hole
 * @lend: offset of last byte of hole
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (eg. deallocates
 * blocks). This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */
void truncate_pagecache_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	struct address_space *mapping = inode->i_mapping;
	loff_t unmap_start = round_up(lstart, PAGE_SIZE);
	loff_t unmap_end = round_down(1 + lend, PAGE_SIZE) - 1;
	/*
	 * This rounding is currently just for example: unmap_mapping_range
	 * expands its hole outwards, whereas we want it to contract the hole
	 * inwards. However, existing callers of truncate_pagecache_range are
	 * doing their own page rounding first; and truncate_inode_pages_range
	 * currently BUGs if lend is not pagealigned-1 (it handles partial
	 * page at start of hole, but not partial page at end of hole). Note
	 * unmap_mapping_range allows holelen 0 for all, and we allow lend -1.
	 */

	/*
	 * Unlike in truncate_pagecache, unmap_mapping_range is called only
	 * once (before truncating pagecache), and without "even_cows" flag:
	 * hole-punching should not remove private COWed pages from the hole.
	 */
	if ((u64)unmap_end > (u64)unmap_start)
		unmap_mapping_range(mapping, unmap_start,
				    1 + unmap_end - unmap_start, 0);
	truncate_inode_pages_range(mapping, lstart, lend);
}
EXPORT_SYMBOL(truncate_pagecache_range);

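/*
 * Usage sketch (not part of this file): a hole-punching ->fallocate()
 * implementation removes the affected pagecache before deallocating
 * blocks. "myfs" and myfs_punch_blocks() are hypothetical:
 *
 *	static long myfs_fallocate(struct file *file, int mode,
 *				   loff_t offset, loff_t len)
 *	{
 *		struct inode *inode = file->f_mapping->host;
 *
 *		if (mode & FALLOC_FL_PUNCH_HOLE) {
 *			truncate_pagecache_range(inode, offset,
 *						 offset + len - 1);
 *			return myfs_punch_blocks(inode, offset, len);
 *		}
 *		return -EOPNOTSUPP;
 *	}
 */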