/*
 * linux/fs/nfs/read.c
 *
 * Block I/O for NFS
 *
 * Partial copy of Linus' read cache modifications to fs/nfs/file.c
 * modified for async RPC by okir@monad.swb.de
 */

#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>

#include "pnfs.h"
#include "nfs4_fs.h"
#include "internal.h"
#include "iostat.h"
#include "fscache.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

static const struct nfs_pageio_ops nfs_pageio_read_ops;
static const struct rpc_call_ops nfs_read_partial_ops;
static const struct rpc_call_ops nfs_read_full_ops;

static struct kmem_cache *nfs_rdata_cachep;

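/*
 * Allocate a zeroed nfs_read_data for a read covering @pagecount pages.
 * Small requests use the embedded page_array; larger ones get a
 * separately allocated page vector.  Returns NULL on allocation failure.
 */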
struct nfs_read_data *nfs_readdata_alloc(unsigned int pagecount)
{
        struct nfs_read_data *p;

        p = kmem_cache_zalloc(nfs_rdata_cachep, GFP_KERNEL);
        if (p) {
                INIT_LIST_HEAD(&p->pages);
                p->npages = pagecount;
                if (pagecount <= ARRAY_SIZE(p->page_array))
                        p->pagevec = p->page_array;
                else {
                        p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_KERNEL);
                        if (!p->pagevec) {
                                kmem_cache_free(nfs_rdata_cachep, p);
                                p = NULL;
                        }
                }
        }
        return p;
}

void nfs_readdata_free(struct nfs_read_data *p)
{
        if (p && (p->pagevec != &p->page_array[0]))
                kfree(p->pagevec);
        kmem_cache_free(nfs_rdata_cachep, p);
}

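/*
 * Drop the open context reference taken when the request was set up,
 * then free the read data itself.
 */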
void nfs_readdata_release(struct nfs_read_data *rdata)
{
        put_nfs_open_context(rdata->args.context);
        nfs_readdata_free(rdata);
}

static
int nfs_return_empty_page(struct page *page)
{
        zero_user(page, 0, PAGE_CACHE_SIZE);
        SetPageUptodate(page);
        unlock_page(page);
        return 0;
}

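/*
 * The server hit EOF before filling the whole request: zero the part of
 * the pages that was never transferred so that no uninitialised data is
 * left behind in the page cache.
 */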
static void nfs_readpage_truncate_uninitialised_page(struct nfs_read_data *data)
{
        unsigned int remainder = data->args.count - data->res.count;
        unsigned int base = data->args.pgbase + data->res.count;
        unsigned int pglen;
        struct page **pages;

        if (data->res.eof == 0 || remainder == 0)
                return;
        /*
         * Note: "remainder" can never be negative, since we check for
         * this in the XDR code.
         */
        pages = &data->args.pages[base >> PAGE_CACHE_SHIFT];
        base &= ~PAGE_CACHE_MASK;
        pglen = PAGE_CACHE_SIZE - base;
        for (;;) {
                if (remainder <= pglen) {
                        zero_user(*pages, base, remainder);
                        break;
                }
                zero_user(*pages, base, pglen);
                pages++;
                remainder -= pglen;
                pglen = PAGE_CACHE_SIZE;
                base = 0;
        }
}

void nfs_pageio_init_read_mds(struct nfs_pageio_descriptor *pgio,
                struct inode *inode)
{
        nfs_pageio_init(pgio, inode, &nfs_pageio_read_ops,
                        NFS_SERVER(inode)->rsize, 0);
}

void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio)
{
        pgio->pg_ops = &nfs_pageio_read_ops;
        pgio->pg_bsize = NFS_SERVER(pgio->pg_inode)->rsize;
}
EXPORT_SYMBOL_GPL(nfs_pageio_reset_read_mds);

static void nfs_pageio_init_read(struct nfs_pageio_descriptor *pgio,
                struct inode *inode)
{
        if (!pnfs_pageio_init_read(pgio, inode))
                nfs_pageio_init_read_mds(pgio, inode);
}

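/*
 * Queue an asynchronous read of a single page: build one nfs_page
 * request, zero the tail of the page beyond the valid file length, and
 * push it through a (possibly pNFS-aware) pageio descriptor.
 */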
int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
                struct page *page)
{
        struct nfs_page *new;
        unsigned int len;
        struct nfs_pageio_descriptor pgio;

        len = nfs_page_length(page);
        if (len == 0)
                return nfs_return_empty_page(page);
        new = nfs_create_request(ctx, inode, page, 0, len);
        if (IS_ERR(new)) {
                unlock_page(page);
                return PTR_ERR(new);
        }
        if (len < PAGE_CACHE_SIZE)
                zero_user_segment(page, len, PAGE_CACHE_SIZE);

        nfs_pageio_init_read(&pgio, inode);
        nfs_pageio_add_request(&pgio, new);
        nfs_pageio_complete(&pgio);
        return 0;
}

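/*
 * A request has completed: if the page ended up uptodate, hand it to
 * fscache, then unlock the page and drop the request reference.
 */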
static void nfs_readpage_release(struct nfs_page *req)
{
        struct inode *d_inode = req->wb_context->dentry->d_inode;

        if (PageUptodate(req->wb_page))
                nfs_readpage_to_fscache(d_inode, req->wb_page, 0);

        unlock_page(req->wb_page);

        dprintk("NFS: read done (%s/%Ld %d@%Ld)\n",
                        req->wb_context->dentry->d_inode->i_sb->s_id,
                        (long long)NFS_FILEID(req->wb_context->dentry->d_inode),
                        req->wb_bytes,
                        (long long)req_offset(req));
        nfs_release_request(req);
}

int nfs_initiate_read(struct nfs_read_data *data, struct rpc_clnt *clnt,
                const struct rpc_call_ops *call_ops)
{
        struct inode *inode = data->inode;
        int swap_flags = IS_SWAPFILE(inode) ? NFS_RPC_SWAPFLAGS : 0;
        struct rpc_task *task;
        struct rpc_message msg = {
                .rpc_argp = &data->args,
                .rpc_resp = &data->res,
                .rpc_cred = data->cred,
        };
        struct rpc_task_setup task_setup_data = {
                .task = &data->task,
                .rpc_client = clnt,
                .rpc_message = &msg,
                .callback_ops = call_ops,
                .callback_data = data,
                .workqueue = nfsiod_workqueue,
                .flags = RPC_TASK_ASYNC | swap_flags,
        };

        /* Set up the initial task struct. */
        NFS_PROTO(inode)->read_setup(data, &msg);

        dprintk("NFS: %5u initiated read call (req %s/%lld, %u bytes @ "
                        "offset %llu)\n",
                        data->task.tk_pid,
                        inode->i_sb->s_id,
                        (long long)NFS_FILEID(inode),
                        data->args.count,
                        (unsigned long long)data->args.offset);

        task = rpc_run_task(&task_setup_data);
        if (IS_ERR(task))
                return PTR_ERR(task);
        rpc_put_task(task);
        return 0;
}
EXPORT_SYMBOL_GPL(nfs_initiate_read);

/*
 * Set up the NFS read request struct
 */
static void nfs_read_rpcsetup(struct nfs_page *req, struct nfs_read_data *data,
                unsigned int count, unsigned int offset)
{
        struct inode *inode = req->wb_context->dentry->d_inode;

        data->req = req;
        data->inode = inode;
        data->cred = req->wb_context->cred;

        data->args.fh = NFS_FH(inode);
        data->args.offset = req_offset(req) + offset;
        data->args.pgbase = req->wb_pgbase + offset;
        data->args.pages = data->pagevec;
        data->args.count = count;
        data->args.context = get_nfs_open_context(req->wb_context);
        data->args.lock_context = req->wb_lock_context;

        data->res.fattr = &data->fattr;
        data->res.count = count;
        data->res.eof = 0;
        nfs_fattr_init(&data->fattr);
}

static int nfs_do_read(struct nfs_read_data *data,
                const struct rpc_call_ops *call_ops)
{
        struct inode *inode = data->args.context->dentry->d_inode;

        return nfs_initiate_read(data, NFS_CLIENT(inode), call_ops);
}

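/*
 * Issue every nfs_read_data queued on @head as its own RPC.  The first
 * error encountered is remembered and returned, but the remaining
 * requests are still sent.
 */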
static int
nfs_do_multiple_reads(struct list_head *head,
                const struct rpc_call_ops *call_ops)
{
        struct nfs_read_data *data;
        int ret = 0;

        while (!list_empty(head)) {
                int ret2;

                data = list_entry(head->next, struct nfs_read_data, list);
                list_del_init(&data->list);

                ret2 = nfs_do_read(data, call_ops);
                if (ret == 0)
                        ret = ret2;
        }
        return ret;
}

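/*
 * Error path: release every queued request so the corresponding pages
 * are unlocked and the requests freed.
 */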
static void
nfs_async_read_error(struct list_head *head)
{
        struct nfs_page *req;

        while (!list_empty(head)) {
                req = nfs_list_entry(head->next);
                nfs_list_remove_request(req);
                nfs_readpage_release(req);
        }
}

/*
 * Generate multiple requests to fill a single page.
 *
 * We optimize to reduce the number of read operations on the wire.  If we
 * detect that we're reading a page, or an area of a page, that is past the
 * end of file, we do not generate NFS read operations but just clear the
 * parts of the page that would have come back zero from the server anyway.
 *
 * We rely on the cached value of i_size to make this determination; another
 * client can fill pages on the server past our cached end-of-file, but we
 * won't see the new data until our attribute cache is updated.  This is more
 * or less conventional NFS client behavior.
 */
static int nfs_pagein_multi(struct nfs_pageio_descriptor *desc, struct list_head *res)
{
        struct nfs_page *req = nfs_list_entry(desc->pg_list.next);
        struct page *page = req->wb_page;
        struct nfs_read_data *data;
        size_t rsize = desc->pg_bsize, nbytes;
        unsigned int offset;
        int requests = 0;
        int ret = 0;

        nfs_list_remove_request(req);

        offset = 0;
        nbytes = desc->pg_count;
        do {
                size_t len = min(nbytes, rsize);

                data = nfs_readdata_alloc(1);
                if (!data)
                        goto out_bad;
                data->pagevec[0] = page;
                nfs_read_rpcsetup(req, data, len, offset);
                list_add(&data->list, res);
                requests++;
                nbytes -= len;
                offset += len;
        } while (nbytes != 0);
        atomic_set(&req->wb_complete, requests);
        desc->pg_rpc_callops = &nfs_read_partial_ops;
        return ret;
out_bad:
        while (!list_empty(res)) {
                data = list_entry(res->next, struct nfs_read_data, list);
                list_del(&data->list);
                nfs_readdata_release(data);
        }
        nfs_readpage_release(req);
        return -ENOMEM;
}

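/*
 * Coalesce every request in the descriptor into a single nfs_read_data,
 * i.e. one READ RPC covering desc->pg_count bytes.
 */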
static int nfs_pagein_one(struct nfs_pageio_descriptor *desc, struct list_head *res)
{
        struct nfs_page *req;
        struct page **pages;
        struct nfs_read_data *data;
        struct list_head *head = &desc->pg_list;
        int ret = 0;

        data = nfs_readdata_alloc(nfs_page_array_len(desc->pg_base,
                                                     desc->pg_count));
        if (!data) {
                nfs_async_read_error(head);
                ret = -ENOMEM;
                goto out;
        }

        pages = data->pagevec;
        while (!list_empty(head)) {
                req = nfs_list_entry(head->next);
                nfs_list_remove_request(req);
                nfs_list_add_request(req, &data->pages);
                *pages++ = req->wb_page;
        }
        req = nfs_list_entry(data->pages.next);

        nfs_read_rpcsetup(req, data, desc->pg_count, 0);
        list_add(&data->list, res);
        desc->pg_rpc_callops = &nfs_read_full_ops;
out:
        return ret;
}

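/*
 * Decide how to turn the descriptor into RPCs: if the server's rsize is
 * smaller than a page, the page must be split across several READs;
 * otherwise a single READ can cover the whole descriptor.
 */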
int nfs_generic_pagein(struct nfs_pageio_descriptor *desc, struct list_head *head)
{
        if (desc->pg_bsize < PAGE_CACHE_SIZE)
                return nfs_pagein_multi(desc, head);
        return nfs_pagein_one(desc, head);
}

static int nfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc)
{
        LIST_HEAD(head);
        int ret;

        ret = nfs_generic_pagein(desc, &head);
        if (ret == 0)
                ret = nfs_do_multiple_reads(&head, desc->pg_rpc_callops);
        return ret;
}

static const struct nfs_pageio_ops nfs_pageio_read_ops = {
        .pg_test = nfs_generic_pg_test,
        .pg_doio = nfs_generic_pg_readpages,
};

/*
 * This is the callback from RPC telling us whether a reply was
 * received or some error occurred (timeout or socket shutdown).
 */
int nfs_readpage_result(struct rpc_task *task, struct nfs_read_data *data)
{
        int status;

        dprintk("NFS: %s: %5u, (status %d)\n", __func__, task->tk_pid,
                        task->tk_status);

        status = NFS_PROTO(data->inode)->read_done(task, data);
        if (status != 0)
                return status;

        nfs_add_stats(data->inode, NFSIOS_SERVERREADBYTES, data->res.count);

        if (task->tk_status == -ESTALE) {
                set_bit(NFS_INO_STALE, &NFS_I(data->inode)->flags);
                nfs_mark_for_revalidate(data->inode);
        }
        return 0;
}

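/*
 * The server returned a short read that is not at EOF: account for it,
 * advance the request past the bytes we did receive, and restart the
 * RPC to fetch the remainder.
 */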
static void nfs_readpage_retry(struct rpc_task *task, struct nfs_read_data *data)
{
        struct nfs_readargs *argp = &data->args;
        struct nfs_readres *resp = &data->res;

        if (resp->eof || resp->count == argp->count)
                return;

        /* This is a short read! */
        nfs_inc_stats(data->inode, NFSIOS_SHORTREAD);
        /* Has the server at least made some progress? */
        if (resp->count == 0)
                return;

        /* Yes, so retry the read at the end of the data */
        data->mds_offset += resp->count;
        argp->offset += resp->count;
        argp->pgbase += resp->count;
        argp->count -= resp->count;
        rpc_restart_call_prepare(task);
}

/*
 * Handle a read reply that fills part of a page.
 */
static void nfs_readpage_result_partial(struct rpc_task *task, void *calldata)
{
        struct nfs_read_data *data = calldata;

        if (nfs_readpage_result(task, data) != 0)
                return;
        if (task->tk_status < 0)
                return;

        nfs_readpage_truncate_uninitialised_page(data);
        nfs_readpage_retry(task, data);
}

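/*
 * One slice of a multi-RPC page has finished.  Record a failure in the
 * request flags; when the last slice completes, mark the page uptodate
 * (unless some slice failed) and release the request.
 */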
static void nfs_readpage_release_partial(void *calldata)
{
        struct nfs_read_data *data = calldata;
        struct nfs_page *req = data->req;
        struct page *page = req->wb_page;
        int status = data->task.tk_status;

        if (status < 0)
                set_bit(PG_PARTIAL_READ_FAILED, &req->wb_flags);

        if (atomic_dec_and_test(&req->wb_complete)) {
                if (!test_bit(PG_PARTIAL_READ_FAILED, &req->wb_flags))
                        SetPageUptodate(page);
                nfs_readpage_release(req);
        }
        nfs_readdata_release(calldata);
}

void nfs_read_prepare(struct rpc_task *task, void *calldata)
{
        struct nfs_read_data *data = calldata;

        NFS_PROTO(data->inode)->read_rpc_prepare(task, data);
}

static const struct rpc_call_ops nfs_read_partial_ops = {
        .rpc_call_prepare = nfs_read_prepare,
        .rpc_call_done = nfs_readpage_result_partial,
        .rpc_release = nfs_readpage_release_partial,
};

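/*
 * Mark every page that the server filled completely as uptodate.  A
 * trailing, partially filled page is only marked when the rest of it is
 * known to be valid, i.e. at EOF or when the whole request was satisfied.
 */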
static void nfs_readpage_set_pages_uptodate(struct nfs_read_data *data)
{
        unsigned int count = data->res.count;
        unsigned int base = data->args.pgbase;
        struct page **pages;

        if (data->res.eof)
                count = data->args.count;
        if (unlikely(count == 0))
                return;
        pages = &data->args.pages[base >> PAGE_CACHE_SHIFT];
        base &= ~PAGE_CACHE_MASK;
        count += base;
        for (; count >= PAGE_CACHE_SIZE; count -= PAGE_CACHE_SIZE, pages++)
                SetPageUptodate(*pages);
        if (count == 0)
                return;
        /* Was this a short read? */
        if (data->res.eof || data->res.count == data->args.count)
                SetPageUptodate(*pages);
}

/*
 * This is the callback from RPC telling us whether a reply was
 * received or some error occurred (timeout or socket shutdown).
 */
static void nfs_readpage_result_full(struct rpc_task *task, void *calldata)
{
        struct nfs_read_data *data = calldata;

        if (nfs_readpage_result(task, data) != 0)
                return;
        if (task->tk_status < 0)
                return;
        /*
         * Note: nfs_readpage_retry may change the values of
         * data->args. In the multi-page case, we therefore need
         * to ensure that we call nfs_readpage_set_pages_uptodate()
         * first.
         */
        nfs_readpage_truncate_uninitialised_page(data);
        nfs_readpage_set_pages_uptodate(data);
        nfs_readpage_retry(task, data);
}

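/*
 * A single-RPC read covering several requests has finished: release
 * each request (which unlocks its page) and free the read data.
 */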
static void nfs_readpage_release_full(void *calldata)
{
        struct nfs_read_data *data = calldata;

        while (!list_empty(&data->pages)) {
                struct nfs_page *req = nfs_list_entry(data->pages.next);

                nfs_list_remove_request(req);
                nfs_readpage_release(req);
        }
        nfs_readdata_release(calldata);
}

static const struct rpc_call_ops nfs_read_full_ops = {
        .rpc_call_prepare = nfs_read_prepare,
        .rpc_call_done = nfs_readpage_result_full,
        .rpc_release = nfs_readpage_release_full,
};

/*
 * Read a page over NFS.
 * We read the page synchronously in the following case:
 *  - The error flag is set for this page. This happens only when a
 *    previous async read operation failed.
 */
int nfs_readpage(struct file *file, struct page *page)
{
        struct nfs_open_context *ctx;
        struct inode *inode = page->mapping->host;
        int error;

        dprintk("NFS: nfs_readpage (%p %ld@%lu)\n",
                page, PAGE_CACHE_SIZE, page->index);
        nfs_inc_stats(inode, NFSIOS_VFSREADPAGE);
        nfs_add_stats(inode, NFSIOS_READPAGES, 1);

        /*
         * Try to flush any pending writes to the file..
         *
         * NOTE! Because we own the page lock, there cannot
         * be any new pending writes generated at this point
         * for this page (other pages can be written to).
         */
        error = nfs_wb_page(inode, page);
        if (error)
                goto out_unlock;
        if (PageUptodate(page))
                goto out_unlock;

        error = -ESTALE;
        if (NFS_STALE(inode))
                goto out_unlock;

        if (file == NULL) {
                error = -EBADF;
                ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
                if (ctx == NULL)
                        goto out_unlock;
        } else
                ctx = get_nfs_open_context(nfs_file_open_context(file));

        if (!IS_SYNC(inode)) {
                error = nfs_readpage_from_fscache(ctx, inode, page);
                if (error == 0)
                        goto out;
        }

        error = nfs_readpage_async(ctx, inode, page);

out:
        put_nfs_open_context(ctx);
        return error;
out_unlock:
        unlock_page(page);
        return error;
}

struct nfs_readdesc {
        struct nfs_pageio_descriptor *pgio;
        struct nfs_open_context *ctx;
};

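/*
 * Per-page callback for read_cache_pages(): build an nfs_page request
 * for @page, zero its tail beyond the valid file length, and add it to
 * the caller's pageio descriptor.  The page is unlocked on failure.
 */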
static int
readpage_async_filler(void *data, struct page *page)
{
        struct nfs_readdesc *desc = (struct nfs_readdesc *)data;
        struct inode *inode = page->mapping->host;
        struct nfs_page *new;
        unsigned int len;
        int error;

        len = nfs_page_length(page);
        if (len == 0)
                return nfs_return_empty_page(page);

        new = nfs_create_request(desc->ctx, inode, page, 0, len);
        if (IS_ERR(new))
                goto out_error;

        if (len < PAGE_CACHE_SIZE)
                zero_user_segment(page, len, PAGE_CACHE_SIZE);
        if (!nfs_pageio_add_request(desc->pgio, new)) {
                error = desc->pgio->pg_error;
                goto out_unlock;
        }
        return 0;
out_error:
        error = PTR_ERR(new);
out_unlock:
        unlock_page(page);
        return error;
}

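/*
 * readpages entry point for NFS: try to satisfy the pages from fscache
 * first, then batch whatever remains into READ requests via the pageio
 * descriptor.
 */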
int nfs_readpages(struct file *filp, struct address_space *mapping,
                struct list_head *pages, unsigned nr_pages)
{
        struct nfs_pageio_descriptor pgio;
        struct nfs_readdesc desc = {
                .pgio = &pgio,
        };
        struct inode *inode = mapping->host;
        unsigned long npages;
        int ret = -ESTALE;

        dprintk("NFS: nfs_readpages (%s/%Ld %d)\n",
                        inode->i_sb->s_id,
                        (long long)NFS_FILEID(inode),
                        nr_pages);
        nfs_inc_stats(inode, NFSIOS_VFSREADPAGES);

        if (NFS_STALE(inode))
                goto out;

        if (filp == NULL) {
                desc.ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
                if (desc.ctx == NULL)
                        return -EBADF;
        } else
                desc.ctx = get_nfs_open_context(nfs_file_open_context(filp));

        /* attempt to read as many of the pages as possible from the cache
         * - this returns -ENOBUFS immediately if the cookie is negative
         */
        ret = nfs_readpages_from_fscache(desc.ctx, inode, mapping,
                                         pages, &nr_pages);
        if (ret == 0)
                goto read_complete; /* all pages were read */

        nfs_pageio_init_read(&pgio, inode);

        ret = read_cache_pages(mapping, pages, readpage_async_filler, &desc);

        nfs_pageio_complete(&pgio);
        npages = (pgio.pg_bytes_written + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
        nfs_add_stats(inode, NFSIOS_READPAGES, npages);
read_complete:
        put_nfs_open_context(desc.ctx);
out:
        return ret;
}

int __init nfs_init_readpagecache(void)
{
        nfs_rdata_cachep = kmem_cache_create("nfs_read_data",
                                             sizeof(struct nfs_read_data),
                                             0, SLAB_HWCACHE_ALIGN,
                                             NULL);
        if (nfs_rdata_cachep == NULL)
                return -ENOMEM;

        return 0;
}

void nfs_destroy_readpagecache(void)
{
        kmem_cache_destroy(nfs_rdata_cachep);
}