/* Cache page management and data I/O routines
 *
 * Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#define FSCACHE_DEBUG_LEVEL PAGE
#include <linux/module.h>
#include <linux/fscache-cache.h>
#include <linux/buffer_head.h>
#include <linux/pagevec.h>
#include <linux/slab.h>
#include "internal.h"

/*
 * check to see if a page is being written to the cache
 */
bool __fscache_check_page_write(struct fscache_cookie *cookie, struct page *page)
{
        void *val;

        rcu_read_lock();
        val = radix_tree_lookup(&cookie->stores, page->index);
        rcu_read_unlock();

        return val != NULL;
}
EXPORT_SYMBOL(__fscache_check_page_write);

/*
 * wait for a page to finish being written to the cache
 */
void __fscache_wait_on_page_write(struct fscache_cookie *cookie, struct page *page)
{
        wait_queue_head_t *wq = bit_waitqueue(&cookie->flags, 0);

        wait_event(*wq, !__fscache_check_page_write(cookie, page));
}
EXPORT_SYMBOL(__fscache_wait_on_page_write);

/*
 * decide whether a page can be released, possibly by cancelling a store to it
 * - we're allowed to sleep if __GFP_WAIT is flagged
 */
bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
                                  struct page *page,
                                  gfp_t gfp)
{
        struct page *xpage;
        void *val;

        _enter("%p,%p,%x", cookie, page, gfp);

        rcu_read_lock();
        val = radix_tree_lookup(&cookie->stores, page->index);
        if (!val) {
                rcu_read_unlock();
                fscache_stat(&fscache_n_store_vmscan_not_storing);
                __fscache_uncache_page(cookie, page);
                return true;
        }

        /* see if the page is actually undergoing storage - if so we can't get
         * rid of it till the cache has finished with it */
        if (radix_tree_tag_get(&cookie->stores, page->index,
                               FSCACHE_COOKIE_STORING_TAG)) {
                rcu_read_unlock();
                goto page_busy;
        }

        /* the page is pending storage, so we attempt to cancel the store and
         * discard the store request so that the page can be reclaimed */
        spin_lock(&cookie->stores_lock);
        rcu_read_unlock();

        if (radix_tree_tag_get(&cookie->stores, page->index,
                               FSCACHE_COOKIE_STORING_TAG)) {
                /* the page started to undergo storage whilst we were looking,
                 * so now we can only wait or return */
                spin_unlock(&cookie->stores_lock);
                goto page_busy;
        }

        xpage = radix_tree_delete(&cookie->stores, page->index);
        spin_unlock(&cookie->stores_lock);

        if (xpage) {
                fscache_stat(&fscache_n_store_vmscan_cancelled);
                fscache_stat(&fscache_n_store_radix_deletes);
                ASSERTCMP(xpage, ==, page);
        } else {
                fscache_stat(&fscache_n_store_vmscan_gone);
        }

        wake_up_bit(&cookie->flags, 0);
        if (xpage)
                page_cache_release(xpage);
        __fscache_uncache_page(cookie, page);
        return true;

page_busy:
        /* we might want to wait here, but that could deadlock the allocator as
         * the work threads writing to the cache may all end up sleeping
         * on memory allocation */
        fscache_stat(&fscache_n_store_vmscan_busy);
        return false;
}
EXPORT_SYMBOL(__fscache_maybe_release_page);
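
/*
 * Illustrative sketch (not part of the original file, hence excluded from
 * the build): a netfs ->releasepage() would normally reach the function
 * above via the fscache_maybe_release_page() wrapper in linux/fscache.h.
 * The cookie-lookup helper my_netfs_i_cookie() is hypothetical.
 */
#if 0
static int my_netfs_release_page(struct page *page, gfp_t gfp)
{
        struct fscache_cookie *cookie = my_netfs_i_cookie(page->mapping->host);

        /* refuse to let the VM have the page while the cache is still
         * storing it; a merely-pending store may be cancelled to permit
         * reclaim */
        if (PageFsCache(page) &&
            !fscache_maybe_release_page(cookie, page, gfp))
                return 0;
        return 1;
}
#endif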

/*
 * note that a page has finished being written to the cache
 */
static void fscache_end_page_write(struct fscache_object *object,
                                   struct page *page)
{
        struct fscache_cookie *cookie;
        struct page *xpage = NULL;

        spin_lock(&object->lock);
        cookie = object->cookie;
        if (cookie) {
                /* delete the page from the tree if it is now no longer
                 * pending */
                spin_lock(&cookie->stores_lock);
                radix_tree_tag_clear(&cookie->stores, page->index,
                                     FSCACHE_COOKIE_STORING_TAG);
                if (!radix_tree_tag_get(&cookie->stores, page->index,
                                        FSCACHE_COOKIE_PENDING_TAG)) {
                        fscache_stat(&fscache_n_store_radix_deletes);
                        xpage = radix_tree_delete(&cookie->stores, page->index);
                }
                spin_unlock(&cookie->stores_lock);
                wake_up_bit(&cookie->flags, 0);
        }
        spin_unlock(&object->lock);
        if (xpage)
                page_cache_release(xpage);
}

/*
 * actually apply the changed attributes to a cache object
 */
static void fscache_attr_changed_op(struct fscache_operation *op)
{
        struct fscache_object *object = op->object;
        int ret;

        _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);

        fscache_stat(&fscache_n_attr_changed_calls);

        if (fscache_object_is_active(object)) {
                fscache_stat(&fscache_n_cop_attr_changed);
                ret = object->cache->ops->attr_changed(object);
                fscache_stat_d(&fscache_n_cop_attr_changed);
                if (ret < 0)
                        fscache_abort_object(object);
        }

        fscache_op_complete(op);
        _leave("");
}

/*
 * notification that the attributes on an object have changed
 */
int __fscache_attr_changed(struct fscache_cookie *cookie)
{
        struct fscache_operation *op;
        struct fscache_object *object;

        _enter("%p", cookie);

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);

        fscache_stat(&fscache_n_attr_changed);

        op = kzalloc(sizeof(*op), GFP_KERNEL);
        if (!op) {
                fscache_stat(&fscache_n_attr_changed_nomem);
                _leave(" = -ENOMEM");
                return -ENOMEM;
        }

        fscache_operation_init(op, fscache_attr_changed_op, NULL);
        op->flags = FSCACHE_OP_ASYNC | (1 << FSCACHE_OP_EXCLUSIVE);

        spin_lock(&cookie->lock);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);

        if (fscache_submit_exclusive_op(object, op) < 0)
                goto nobufs;
        spin_unlock(&cookie->lock);
        fscache_stat(&fscache_n_attr_changed_ok);
        fscache_put_operation(op);
        _leave(" = 0");
        return 0;

nobufs:
        spin_unlock(&cookie->lock);
        kfree(op);
        fscache_stat(&fscache_n_attr_changed_nobufs);
        _leave(" = %d", -ENOBUFS);
        return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_attr_changed);
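
/*
 * Illustrative sketch (not part of the original file, hence excluded from
 * the build): a netfs would typically invoke the function above through
 * the fscache_attr_changed() wrapper after an attribute change such as a
 * truncate, so the cache can resize the backing object.  The helper
 * my_netfs_i_cookie() is hypothetical.
 */
#if 0
static int my_netfs_setattr_update_cache(struct inode *inode)
{
        /* tell the cache that i_size (or another attribute) has changed */
        return fscache_attr_changed(my_netfs_i_cookie(inode));
}
#endif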

/*
 * release a retrieval op reference
 */
static void fscache_release_retrieval_op(struct fscache_operation *_op)
{
        struct fscache_retrieval *op =
                container_of(_op, struct fscache_retrieval, op);

        _enter("{OP%x}", op->op.debug_id);

        ASSERTCMP(op->n_pages, ==, 0);

        fscache_hist(fscache_retrieval_histogram, op->start_time);
        if (op->context)
                fscache_put_context(op->op.object->cookie, op->context);

        _leave("");
}

/*
 * allocate a retrieval op
 */
static struct fscache_retrieval *fscache_alloc_retrieval(
        struct address_space *mapping,
        fscache_rw_complete_t end_io_func,
        void *context)
{
        struct fscache_retrieval *op;

        /* allocate a retrieval operation and attempt to submit it */
        op = kzalloc(sizeof(*op), GFP_NOIO);
        if (!op) {
                fscache_stat(&fscache_n_retrievals_nomem);
                return NULL;
        }

        fscache_operation_init(&op->op, NULL, fscache_release_retrieval_op);
        op->op.flags = FSCACHE_OP_MYTHREAD | (1 << FSCACHE_OP_WAITING);
        op->mapping = mapping;
        op->end_io_func = end_io_func;
        op->context = context;
        op->start_time = jiffies;
        INIT_LIST_HEAD(&op->to_do);
        return op;
}

/*
 * wait for a deferred lookup to complete
 */
static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
{
        unsigned long jif;

        _enter("");

        if (!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags)) {
                _leave(" = 0 [imm]");
                return 0;
        }

        fscache_stat(&fscache_n_retrievals_wait);

        jif = jiffies;
        if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
                        fscache_wait_bit_interruptible,
                        TASK_INTERRUPTIBLE) != 0) {
                fscache_stat(&fscache_n_retrievals_intr);
                _leave(" = -ERESTARTSYS");
                return -ERESTARTSYS;
        }

        ASSERT(!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags));

        smp_rmb();
        fscache_hist(fscache_retrieval_delay_histogram, jif);
        _leave(" = 0 [dly]");
        return 0;
}

/*
 * wait for an object to become active (or dead)
 */
static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
                                                 struct fscache_retrieval *op,
                                                 atomic_t *stat_op_waits,
                                                 atomic_t *stat_object_dead)
{
        int ret;

        if (!test_bit(FSCACHE_OP_WAITING, &op->op.flags))
                goto check_if_dead;

        _debug(">>> WT");
        fscache_stat(stat_op_waits);
        if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
                        fscache_wait_bit_interruptible,
                        TASK_INTERRUPTIBLE) < 0) {
                ret = fscache_cancel_op(&op->op);
                if (ret == 0)
                        return -ERESTARTSYS;

                /* it's been removed from the pending queue by another party,
                 * so we should get to run shortly */
                wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
                            fscache_wait_bit, TASK_UNINTERRUPTIBLE);
        }
        _debug("<<< GO");

check_if_dead:
        if (op->op.state == FSCACHE_OP_ST_CANCELLED) {
                fscache_stat(stat_object_dead);
                _leave(" = -ENOBUFS [cancelled]");
                return -ENOBUFS;
        }
        if (unlikely(fscache_object_is_dead(object))) {
                fscache_stat(stat_object_dead);
                return -ENOBUFS;
        }
        return 0;
}

/*
 * read a page from the cache or allocate a block in which to store it
 * - we return:
 *   -ENOMEM      - out of memory, nothing done
 *   -ERESTARTSYS - interrupted
 *   -ENOBUFS     - no backing object available in which to cache the block
 *   -ENODATA     - no data available in the backing object for this block
 *   0            - dispatched a read - it'll call end_io_func() when finished
 */
int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
                                 struct page *page,
                                 fscache_rw_complete_t end_io_func,
                                 void *context,
                                 gfp_t gfp)
{
        struct fscache_retrieval *op;
        struct fscache_object *object;
        int ret;

        _enter("%p,%p,,,", cookie, page);

        fscache_stat(&fscache_n_retrievals);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs;

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
        ASSERTCMP(page, !=, NULL);

        if (fscache_wait_for_deferred_lookup(cookie) < 0)
                return -ERESTARTSYS;

        op = fscache_alloc_retrieval(page->mapping, end_io_func, context);
        if (!op) {
                _leave(" = -ENOMEM");
                return -ENOMEM;
        }
        op->n_pages = 1;

        spin_lock(&cookie->lock);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs_unlock;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);

        ASSERTCMP(object->state, >, FSCACHE_OBJECT_LOOKING_UP);

        atomic_inc(&object->n_reads);
        __set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);

        if (fscache_submit_op(object, &op->op) < 0)
                goto nobufs_unlock_dec;
        spin_unlock(&cookie->lock);

        fscache_stat(&fscache_n_retrieval_ops);

        /* pin the netfs read context in case we need to do the actual netfs
         * read because we've encountered a cache read failure */
        fscache_get_context(object->cookie, op->context);

        /* we wait for the operation to become active, and then process it
         * *here*, in this thread, and not in the thread pool */
        ret = fscache_wait_for_retrieval_activation(
                object, op,
                __fscache_stat(&fscache_n_retrieval_op_waits),
                __fscache_stat(&fscache_n_retrievals_object_dead));
        if (ret < 0)
                goto error;

        /* ask the cache to honour the operation */
        if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
                fscache_stat(&fscache_n_cop_allocate_page);
                ret = object->cache->ops->allocate_page(op, page, gfp);
                fscache_stat_d(&fscache_n_cop_allocate_page);
                if (ret == 0)
                        ret = -ENODATA;
        } else {
                fscache_stat(&fscache_n_cop_read_or_alloc_page);
                ret = object->cache->ops->read_or_alloc_page(op, page, gfp);
                fscache_stat_d(&fscache_n_cop_read_or_alloc_page);
        }

error:
        if (ret == -ENOMEM)
                fscache_stat(&fscache_n_retrievals_nomem);
        else if (ret == -ERESTARTSYS)
                fscache_stat(&fscache_n_retrievals_intr);
        else if (ret == -ENODATA)
                fscache_stat(&fscache_n_retrievals_nodata);
        else if (ret < 0)
                fscache_stat(&fscache_n_retrievals_nobufs);
        else
                fscache_stat(&fscache_n_retrievals_ok);

        fscache_put_retrieval(op);
        _leave(" = %d", ret);
        return ret;

nobufs_unlock_dec:
        atomic_dec(&object->n_reads);
nobufs_unlock:
        spin_unlock(&cookie->lock);
        kfree(op);
nobufs:
        fscache_stat(&fscache_n_retrievals_nobufs);
        _leave(" = -ENOBUFS");
        return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_read_or_alloc_page);
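
/*
 * Illustrative sketch (not part of the original file, hence excluded from
 * the build): a netfs ->readpage() can try the cache first via the
 * fscache_read_or_alloc_page() wrapper and fall back to the server on
 * -ENODATA or -ENOBUFS, as described in the netfs API documentation.
 * my_netfs_i_cookie() and my_netfs_read_from_server() are hypothetical.
 */
#if 0
static void my_netfs_read_complete(struct page *page, void *context, int error)
{
        /* called by the cache backend when the dispatched read finishes */
        if (!error)
                SetPageUptodate(page);
        unlock_page(page);
}

static int my_netfs_readpage(struct file *file, struct page *page)
{
        struct fscache_cookie *cookie = my_netfs_i_cookie(page->mapping->host);
        int ret;

        ret = fscache_read_or_alloc_page(cookie, page,
                                         my_netfs_read_complete, NULL,
                                         GFP_KERNEL);
        switch (ret) {
        case 0:                 /* read dispatched to the cache */
                return 0;
        case -ENODATA:          /* block allocated, but no data yet - fetch
                                 * it from the server, then it may be written
                                 * back to the cache */
        case -ENOBUFS:          /* no cache available */
                return my_netfs_read_from_server(file, page);
        default:
                unlock_page(page);
                return ret;
        }
}
#endif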

/*
 * read a list of pages from the cache or allocate blocks in which to store
 * them
 * - we return:
 *   -ENOMEM      - out of memory, some pages may be being read
 *   -ERESTARTSYS - interrupted, some pages may be being read
 *   -ENOBUFS     - no backing object or space available in which to cache any
 *                  pages not being read
 *   -ENODATA     - no data available in the backing object for some or all of
 *                  the pages
 *   0            - dispatched a read on all pages
 *
 * end_io_func() will be called for each page read from the cache as it
 * finishes being read
 *
 * any pages for which a read is dispatched will be removed from pages and
 * nr_pages
 */
int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
                                  struct address_space *mapping,
                                  struct list_head *pages,
                                  unsigned *nr_pages,
                                  fscache_rw_complete_t end_io_func,
                                  void *context,
                                  gfp_t gfp)
{
        struct fscache_retrieval *op;
        struct fscache_object *object;
        int ret;

        _enter("%p,,%d,,,", cookie, *nr_pages);

        fscache_stat(&fscache_n_retrievals);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs;

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
        ASSERTCMP(*nr_pages, >, 0);
        ASSERT(!list_empty(pages));

        if (fscache_wait_for_deferred_lookup(cookie) < 0)
                return -ERESTARTSYS;

        op = fscache_alloc_retrieval(mapping, end_io_func, context);
        if (!op)
                return -ENOMEM;
        op->n_pages = *nr_pages;

        spin_lock(&cookie->lock);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs_unlock;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);

        atomic_inc(&object->n_reads);
        __set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);

        if (fscache_submit_op(object, &op->op) < 0)
                goto nobufs_unlock_dec;
        spin_unlock(&cookie->lock);

        fscache_stat(&fscache_n_retrieval_ops);

        /* pin the netfs read context in case we need to do the actual netfs
         * read because we've encountered a cache read failure */
        fscache_get_context(object->cookie, op->context);

        /* we wait for the operation to become active, and then process it
         * *here*, in this thread, and not in the thread pool */
        ret = fscache_wait_for_retrieval_activation(
                object, op,
                __fscache_stat(&fscache_n_retrieval_op_waits),
                __fscache_stat(&fscache_n_retrievals_object_dead));
        if (ret < 0)
                goto error;

        /* ask the cache to honour the operation */
        if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
                fscache_stat(&fscache_n_cop_allocate_pages);
                ret = object->cache->ops->allocate_pages(
                        op, pages, nr_pages, gfp);
                fscache_stat_d(&fscache_n_cop_allocate_pages);
        } else {
                fscache_stat(&fscache_n_cop_read_or_alloc_pages);
                ret = object->cache->ops->read_or_alloc_pages(
                        op, pages, nr_pages, gfp);
                fscache_stat_d(&fscache_n_cop_read_or_alloc_pages);
        }

error:
        if (ret == -ENOMEM)
                fscache_stat(&fscache_n_retrievals_nomem);
        else if (ret == -ERESTARTSYS)
                fscache_stat(&fscache_n_retrievals_intr);
        else if (ret == -ENODATA)
                fscache_stat(&fscache_n_retrievals_nodata);
        else if (ret < 0)
                fscache_stat(&fscache_n_retrievals_nobufs);
        else
                fscache_stat(&fscache_n_retrievals_ok);

        fscache_put_retrieval(op);
        _leave(" = %d", ret);
        return ret;

nobufs_unlock_dec:
        atomic_dec(&object->n_reads);
nobufs_unlock:
        spin_unlock(&cookie->lock);
        kfree(op);
nobufs:
        fscache_stat(&fscache_n_retrievals_nobufs);
        _leave(" = -ENOBUFS");
        return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_read_or_alloc_pages);
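
/*
 * Illustrative sketch (not part of the original file, hence excluded from
 * the build): with the fscache_read_or_alloc_pages() wrapper, any pages
 * the cache takes on are removed from @pages and deducted from @nr_pages,
 * so the netfs only fetches the remainder from the server.  This reuses
 * the hypothetical my_netfs_read_complete() callback from the previous
 * sketch; my_netfs_i_cookie() and my_netfs_readpages_from_server() are
 * also hypothetical.
 */
#if 0
static int my_netfs_readpages(struct file *file, struct address_space *mapping,
                              struct list_head *pages, unsigned nr_pages)
{
        struct fscache_cookie *cookie = my_netfs_i_cookie(mapping->host);
        int ret;

        ret = fscache_read_or_alloc_pages(cookie, mapping, pages, &nr_pages,
                                          my_netfs_read_complete, NULL,
                                          mapping_gfp_mask(mapping));
        if (ret == 0)
                return 0;       /* all pages dispatched to the cache */

        /* any pages still on the list must be read from the server */
        return my_netfs_readpages_from_server(file, mapping, pages, nr_pages);
}
#endif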

/*
 * allocate a block in the cache on which to store a page
 * - we return:
 *   -ENOMEM      - out of memory, nothing done
 *   -ERESTARTSYS - interrupted
 *   -ENOBUFS     - no backing object available in which to cache the block
 *   0            - block allocated
 */
int __fscache_alloc_page(struct fscache_cookie *cookie,
                         struct page *page,
                         gfp_t gfp)
{
        struct fscache_retrieval *op;
        struct fscache_object *object;
        int ret;

        _enter("%p,%p,,,", cookie, page);

        fscache_stat(&fscache_n_allocs);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs;

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
        ASSERTCMP(page, !=, NULL);

        if (fscache_wait_for_deferred_lookup(cookie) < 0)
                return -ERESTARTSYS;

        op = fscache_alloc_retrieval(page->mapping, NULL, NULL);
        if (!op)
                return -ENOMEM;
        op->n_pages = 1;

        spin_lock(&cookie->lock);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs_unlock;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);

        if (fscache_submit_op(object, &op->op) < 0)
                goto nobufs_unlock;
        spin_unlock(&cookie->lock);

        fscache_stat(&fscache_n_alloc_ops);

        ret = fscache_wait_for_retrieval_activation(
                object, op,
                __fscache_stat(&fscache_n_alloc_op_waits),
                __fscache_stat(&fscache_n_allocs_object_dead));
        if (ret < 0)
                goto error;

        /* ask the cache to honour the operation */
        fscache_stat(&fscache_n_cop_allocate_page);
        ret = object->cache->ops->allocate_page(op, page, gfp);
        fscache_stat_d(&fscache_n_cop_allocate_page);

error:
        if (ret == -ERESTARTSYS)
                fscache_stat(&fscache_n_allocs_intr);
        else if (ret < 0)
                fscache_stat(&fscache_n_allocs_nobufs);
        else
                fscache_stat(&fscache_n_allocs_ok);

        fscache_put_retrieval(op);
        _leave(" = %d", ret);
        return ret;

nobufs_unlock:
        spin_unlock(&cookie->lock);
        kfree(op);
nobufs:
        fscache_stat(&fscache_n_allocs_nobufs);
        _leave(" = -ENOBUFS");
        return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_alloc_page);

/*
 * release a write op reference
 */
static void fscache_release_write_op(struct fscache_operation *_op)
{
        _enter("{OP%x}", _op->debug_id);
}

/*
 * perform the background storage of a page into the cache
 */
static void fscache_write_op(struct fscache_operation *_op)
{
        struct fscache_storage *op =
                container_of(_op, struct fscache_storage, op);
        struct fscache_object *object = op->op.object;
        struct fscache_cookie *cookie;
        struct page *page;
        unsigned n;
        void *results[1];
        int ret;

        _enter("{OP%x,%d}", op->op.debug_id, atomic_read(&op->op.usage));

        spin_lock(&object->lock);
        cookie = object->cookie;

        if (!fscache_object_is_active(object) || !cookie) {
                spin_unlock(&object->lock);
                _leave("");
                return;
        }

        spin_lock(&cookie->stores_lock);

        fscache_stat(&fscache_n_store_calls);

        /* find a page to store */
        page = NULL;
        n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0, 1,
                                       FSCACHE_COOKIE_PENDING_TAG);
        if (n != 1)
                goto superseded;
        page = results[0];
        _debug("gang %d [%lx]", n, page->index);
        if (page->index >= op->store_limit) {
                fscache_stat(&fscache_n_store_pages_over_limit);
                goto superseded;
        }

        radix_tree_tag_set(&cookie->stores, page->index,
                           FSCACHE_COOKIE_STORING_TAG);
        radix_tree_tag_clear(&cookie->stores, page->index,
                             FSCACHE_COOKIE_PENDING_TAG);

        spin_unlock(&cookie->stores_lock);
        spin_unlock(&object->lock);

        fscache_stat(&fscache_n_store_pages);
        fscache_stat(&fscache_n_cop_write_page);
        ret = object->cache->ops->write_page(op, page);
        fscache_stat_d(&fscache_n_cop_write_page);
        fscache_end_page_write(object, page);
        if (ret < 0) {
                fscache_abort_object(object);
                fscache_op_complete(&op->op);
        } else {
                fscache_enqueue_operation(&op->op);
        }

        _leave("");
        return;

superseded:
        /* this writer is going away and there aren't any more things to
         * write */
        _debug("cease");
        spin_unlock(&cookie->stores_lock);
        clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
        spin_unlock(&object->lock);
        fscache_op_complete(&op->op);
        _leave("");
}

/*
 * request a page be stored in the cache
 * - returns:
 *   -ENOMEM    - out of memory, nothing done
 *   -ENOBUFS   - no backing object available in which to cache the page
 *   0          - dispatched a write - the page will be stored in the
 *                background
 *
 * if the cookie still has a backing object at this point, that object can be
 * in one of a few states with respect to storage processing:
 *
 *  (1) negative lookup, object not yet created (FSCACHE_COOKIE_CREATING is
 *      set)
 *
 *      (a) no writes yet (set FSCACHE_COOKIE_PENDING_FILL and queue deferred
 *          fill op)
 *
 *      (b) writes deferred till post-creation (mark page for writing and
 *          return immediately)
 *
 *  (2) negative lookup, object created, initial fill being made from netfs
 *      (FSCACHE_COOKIE_INITIAL_FILL is set)
 *
 *      (a) fill point not yet reached this page (mark page for writing and
 *          return)
 *
 *      (b) fill point passed this page (queue op to store this page)
 *
 *  (3) object extant (queue op to store this page)
 *
 * any other state is invalid
 */
int __fscache_write_page(struct fscache_cookie *cookie,
                         struct page *page,
                         gfp_t gfp)
{
        struct fscache_storage *op;
        struct fscache_object *object;
        int ret;

        _enter("%p,%x,", cookie, (u32) page->flags);

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
        ASSERT(PageFsCache(page));

        fscache_stat(&fscache_n_stores);

        op = kzalloc(sizeof(*op), GFP_NOIO);
        if (!op)
                goto nomem;

        fscache_operation_init(&op->op, fscache_write_op,
                               fscache_release_write_op);
        op->op.flags = FSCACHE_OP_ASYNC | (1 << FSCACHE_OP_WAITING);

        ret = radix_tree_maybe_preload(gfp & ~__GFP_HIGHMEM);
        if (ret < 0)
                goto nomem_free;

        ret = -ENOBUFS;
        spin_lock(&cookie->lock);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);
        if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
                goto nobufs;

        /* add the page to the pending-storage radix tree on the backing
         * object */
        spin_lock(&object->lock);
        spin_lock(&cookie->stores_lock);

        _debug("store limit %llx", (unsigned long long) object->store_limit);

        ret = radix_tree_insert(&cookie->stores, page->index, page);
        if (ret < 0) {
                if (ret == -EEXIST)
                        goto already_queued;
                _debug("insert failed %d", ret);
                goto nobufs_unlock_obj;
        }

        radix_tree_tag_set(&cookie->stores, page->index,
                           FSCACHE_COOKIE_PENDING_TAG);
        page_cache_get(page);

        /* we only want one writer at a time, but we do need to queue new
         * writers after exclusive ops */
        if (test_and_set_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags))
                goto already_pending;

        spin_unlock(&cookie->stores_lock);
        spin_unlock(&object->lock);

        op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
        op->store_limit = object->store_limit;

        if (fscache_submit_op(object, &op->op) < 0)
                goto submit_failed;

        spin_unlock(&cookie->lock);
        radix_tree_preload_end();
        fscache_stat(&fscache_n_store_ops);
        fscache_stat(&fscache_n_stores_ok);

        /* the work queue now carries its own ref on the object */
        fscache_put_operation(&op->op);
        _leave(" = 0");
        return 0;

already_queued:
        fscache_stat(&fscache_n_stores_again);
already_pending:
        spin_unlock(&cookie->stores_lock);
        spin_unlock(&object->lock);
        spin_unlock(&cookie->lock);
        radix_tree_preload_end();
        kfree(op);
        fscache_stat(&fscache_n_stores_ok);
        _leave(" = 0");
        return 0;

submit_failed:
        spin_lock(&cookie->stores_lock);
        radix_tree_delete(&cookie->stores, page->index);
        spin_unlock(&cookie->stores_lock);
        page_cache_release(page);
        ret = -ENOBUFS;
        goto nobufs;

nobufs_unlock_obj:
        spin_unlock(&cookie->stores_lock);
        spin_unlock(&object->lock);
nobufs:
        spin_unlock(&cookie->lock);
        radix_tree_preload_end();
        kfree(op);
        fscache_stat(&fscache_n_stores_nobufs);
        _leave(" = -ENOBUFS");
        return -ENOBUFS;

nomem_free:
        kfree(op);
nomem:
        fscache_stat(&fscache_n_stores_oom);
        _leave(" = -ENOMEM");
        return -ENOMEM;
}
EXPORT_SYMBOL(__fscache_write_page);
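
/*
 * Illustrative sketch (not part of the original file, hence excluded from
 * the build): once a netfs has a page of data (e.g. freshly read from the
 * server) and the page carries the PG_fscache mark, it can hand a copy to
 * the cache through the fscache_write_page() wrapper; the netfs API
 * documentation has the caller uncache the page if the store can't be
 * queued.  my_netfs_i_cookie() is hypothetical.
 */
#if 0
static void my_netfs_readpage_to_cache(struct inode *inode, struct page *page)
{
        struct fscache_cookie *cookie = my_netfs_i_cookie(inode);

        /* only pages the cache marked (e.g. via a -ENODATA read) may be
         * written back to it */
        if (PageFsCache(page) &&
            fscache_write_page(cookie, page, GFP_KERNEL) != 0)
                fscache_uncache_page(cookie, page);
}
#endif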

/*
 * remove a page from the cache
 */
void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
{
        struct fscache_object *object;

        _enter(",%p", page);

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
        ASSERTCMP(page, !=, NULL);

        fscache_stat(&fscache_n_uncaches);

        /* cache withdrawal may beat us to it */
        if (!PageFsCache(page))
                goto done;

        /* get the object */
        spin_lock(&cookie->lock);

        if (hlist_empty(&cookie->backing_objects)) {
                ClearPageFsCache(page);
                goto done_unlock;
        }

        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);

        /* there might now be stuff on disk we could read */
        clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);

        /* only invoke the cache backend if we managed to mark the page
         * uncached here; this deals with synchronisation vs withdrawal */
        if (TestClearPageFsCache(page) &&
            object->cache->ops->uncache_page) {
                /* the cache backend releases the cookie lock */
                fscache_stat(&fscache_n_cop_uncache_page);
                object->cache->ops->uncache_page(object, page);
                fscache_stat_d(&fscache_n_cop_uncache_page);
                goto done;
        }

done_unlock:
        spin_unlock(&cookie->lock);
done:
        _leave("");
}
EXPORT_SYMBOL(__fscache_uncache_page);
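
/*
 * Illustrative sketch (not part of the original file, hence excluded from
 * the build): roughly what a netfs truncation/invalidation path would do
 * with a marked page - wait out any store in progress, then drop the
 * cache's interest via the wrappers for the functions above.
 * my_netfs_i_cookie() is hypothetical.
 */
#if 0
static void my_netfs_forget_page(struct inode *inode, struct page *page)
{
        struct fscache_cookie *cookie = my_netfs_i_cookie(inode);

        if (PageFsCache(page)) {
                /* don't yank the page out from under a pending store */
                fscache_wait_on_page_write(cookie, page);
                fscache_uncache_page(cookie, page);
        }
}
#endif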

/**
 * fscache_mark_pages_cached - Mark pages as being cached
 * @op: The retrieval op pages are being marked for
 * @pagevec: The pages to be marked
 *
 * Mark a bunch of netfs pages as being cached.  After this is called,
 * the netfs must call fscache_uncache_page() to remove the mark.
 */
void fscache_mark_pages_cached(struct fscache_retrieval *op,
                               struct pagevec *pagevec)
{
        struct fscache_cookie *cookie = op->op.object->cookie;
        unsigned long loop;

#ifdef CONFIG_FSCACHE_STATS
        atomic_add(pagevec->nr, &fscache_n_marks);
#endif

        for (loop = 0; loop < pagevec->nr; loop++) {
                struct page *page = pagevec->pages[loop];

                _debug("- mark %p{%lx}", page, page->index);
                if (TestSetPageFsCache(page)) {
                        static bool once_only;
                        if (!once_only) {
                                once_only = true;
                                printk(KERN_WARNING "FS-Cache:"
                                       " Cookie type %s marked page %lx"
                                       " multiple times\n",
                                       cookie->def->name, page->index);
                        }
                }
        }

        if (cookie->def->mark_pages_cached)
                cookie->def->mark_pages_cached(cookie->netfs_data,
                                               op->mapping, pagevec);
        pagevec_reinit(pagevec);
}
EXPORT_SYMBOL(fscache_mark_pages_cached);

/*
 * Uncache all the pages in an inode that are marked PG_fscache, assuming them
 * to be associated with the given cookie.
 */
void __fscache_uncache_all_inode_pages(struct fscache_cookie *cookie,
                                       struct inode *inode)
{
        struct address_space *mapping = inode->i_mapping;
        struct pagevec pvec;
        pgoff_t next;
        int i;

        _enter("%p,%p", cookie, inode);

        if (!mapping || mapping->nrpages == 0) {
                _leave(" [no pages]");
                return;
        }

        pagevec_init(&pvec, 0);
        next = 0;
        do {
                if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE))
                        break;
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];
                        next = page->index;
                        if (PageFsCache(page)) {
                                __fscache_wait_on_page_write(cookie, page);
                                __fscache_uncache_page(cookie, page);
                        }
                }
                pagevec_release(&pvec);
                cond_resched();
        } while (++next);

        _leave("");
}
EXPORT_SYMBOL(__fscache_uncache_all_inode_pages);
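
/*
 * Illustrative sketch (not part of the original file, hence excluded from
 * the build): a netfs might call the fscache_uncache_all_inode_pages()
 * wrapper from its ->evict_inode() to strip residual PG_fscache marks
 * before relinquishing the cookie.  my_netfs_i_cookie() is hypothetical.
 */
#if 0
static void my_netfs_evict_inode(struct inode *inode)
{
        struct fscache_cookie *cookie = my_netfs_i_cookie(inode);

        truncate_inode_pages(&inode->i_data, 0);
        clear_inode(inode);

        /* wait out pending stores, drop the page marks, then let go of the
         * cache object (0 = keep the cached data, don't retire it) */
        fscache_uncache_all_inode_pages(cookie, inode);
        fscache_relinquish_cookie(cookie, 0);
}
#endif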