/*
 * linux/fs/nfs/read.c
 *
 * Block I/O for NFS
 *
 * Partial copy of Linus' read cache modifications to fs/nfs/file.c
 * modified for async RPC by okir@monad.swb.de
 */
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>

#include "nfs4_fs.h"
#include "internal.h"
#include "iostat.h"
#include "fscache.h"
#include "pnfs.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE
static const struct nfs_pgio_completion_ops nfs_async_read_completion_ops;
static const struct nfs_rw_ops nfs_rw_read_ops;

static struct kmem_cache *nfs_rdata_cachep;
static struct nfs_pgio_header *nfs_readhdr_alloc(void)
{
	return kmem_cache_zalloc(nfs_rdata_cachep, GFP_KERNEL);
}
static void nfs_readhdr_free(struct nfs_pgio_header *rhdr)
{
	kmem_cache_free(nfs_rdata_cachep, rhdr);
}
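
/*
 * Zero-fill the whole page, mark it up to date and unlock it.
 * Used when a read would transfer no data, e.g. when the request
 * lies entirely beyond the end of file.
 */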
static
int nfs_return_empty_page(struct page *page)
{
	zero_user(page, 0, PAGE_SIZE);
	SetPageUptodate(page);
	unlock_page(page);
	return 0;
}
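
/*
 * Initialize a pageio descriptor for reads. Unless force_mds is set,
 * a pNFS layout driver may supply its own pg_read_ops; otherwise the
 * descriptor uses the generic RW ops and goes through the MDS.
 */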
void nfs_pageio_init_read(struct nfs_pageio_descriptor *pgio,
			  struct inode *inode, bool force_mds,
			  const struct nfs_pgio_completion_ops *compl_ops)
{
	struct nfs_server *server = NFS_SERVER(inode);
	const struct nfs_pageio_ops *pg_ops = &nfs_pgio_rw_ops;

#ifdef CONFIG_NFS_V4_1
	if (server->pnfs_curr_ld && !force_mds)
		pg_ops = server->pnfs_curr_ld->pg_read_ops;
#endif
	nfs_pageio_init(pgio, inode, pg_ops, compl_ops, &nfs_rw_read_ops,
			server->rsize, 0);
}
EXPORT_SYMBOL_GPL(nfs_pageio_init_read);
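
/*
 * Fall back to issuing reads through the MDS: let the layout driver
 * clean up, restore the generic RW pageio ops, and reset the mirror's
 * block size to the server's rsize.
 */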
void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio)
{
	struct nfs_pgio_mirror *mirror;

	if (pgio->pg_ops && pgio->pg_ops->pg_cleanup)
		pgio->pg_ops->pg_cleanup(pgio);

	pgio->pg_ops = &nfs_pgio_rw_ops;

	/* read path should never have more than one mirror */
	WARN_ON_ONCE(pgio->pg_mirror_count != 1);

	mirror = &pgio->pg_mirrors[0];
	mirror->pg_bsize = NFS_SERVER(pgio->pg_inode)->rsize;
}
EXPORT_SYMBOL_GPL(nfs_pageio_reset_read_mds);
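
/*
 * Release a completed read request. Once every request in the page
 * group has reached PG_UNLOCKPAGE, push the page to fscache if it is
 * up to date and unlock it.
 */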
static void nfs_readpage_release(struct nfs_page *req)
{
	struct inode *inode = d_inode(req->wb_context->dentry);

	dprintk("NFS: read done (%s/%llu %d@%lld)\n", inode->i_sb->s_id,
		(unsigned long long)NFS_FILEID(inode), req->wb_bytes,
		(long long)req_offset(req));

	if (nfs_page_group_sync_on_bit(req, PG_UNLOCKPAGE)) {
		if (PageUptodate(req->wb_page))
			nfs_readpage_to_fscache(inode, req->wb_page, 0);

		unlock_page(req->wb_page);
	}
	nfs_release_request(req);
}
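
/*
 * Read a single page asynchronously: build an nfs_page request for the
 * valid part of the page, zero the remainder, and send it off through
 * a freshly initialized pageio descriptor.
 */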
int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
		       struct page *page)
{
	struct nfs_page	*new;
	unsigned int len;
	struct nfs_pageio_descriptor pgio;
	struct nfs_pgio_mirror *pgm;

	len = nfs_page_length(page);
	if (len == 0)
		return nfs_return_empty_page(page);
	new = nfs_create_request(ctx, page, NULL, 0, len);
	if (IS_ERR(new)) {
		unlock_page(page);
		return PTR_ERR(new);
	}
	if (len < PAGE_SIZE)
		zero_user_segment(page, len, PAGE_SIZE);

	nfs_pageio_init_read(&pgio, inode, false,
			     &nfs_async_read_completion_ops);
	if (!nfs_pageio_add_request(&pgio, new)) {
		nfs_list_remove_request(new);
		nfs_readpage_release(new);
	}
	nfs_pageio_complete(&pgio);

	/* It doesn't make sense to do mirrored reads! */
	WARN_ON_ONCE(pgio.pg_mirror_count != 1);

	pgm = &pgio.pg_mirrors[0];
	NFS_I(inode)->read_io += pgm->pg_bytes_written;

	return pgio.pg_error < 0 ? pgio.pg_error : 0;
}
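
/*
 * Mark the page up to date once every request in its page group has
 * set PG_UPTODATE.
 */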
static void nfs_page_group_set_uptodate(struct nfs_page *req)
{
	if (nfs_page_group_sync_on_bit(req, PG_UPTODATE))
		SetPageUptodate(req->wb_page);
}
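
/*
 * Completion callback for an async read: walk the requests attached
 * to the header, zero out any ranges the server did not return data
 * for, mark the good ranges up to date, and release each request.
 */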
static void nfs_read_completion(struct nfs_pgio_header *hdr)
{
	unsigned long bytes = 0;

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
		goto out;
	while (!list_empty(&hdr->pages)) {
		struct nfs_page *req = nfs_list_entry(hdr->pages.next);
		struct page *page = req->wb_page;
		unsigned long start = req->wb_pgbase;
		unsigned long end = req->wb_pgbase + req->wb_bytes;

		if (test_bit(NFS_IOHDR_EOF, &hdr->flags)) {
			/* note: regions of the page not covered by a
			 * request are zeroed in nfs_readpage_async /
			 * readpage_async_filler */
			if (bytes > hdr->good_bytes) {
				/* nothing in this request was good, so zero
				 * the full extent of the request */
				zero_user_segment(page, start, end);

			} else if (hdr->good_bytes - bytes < req->wb_bytes) {
				/* part of this request has good bytes, but
				 * not all. zero the bad bytes */
				start += hdr->good_bytes - bytes;
				WARN_ON(start < req->wb_pgbase);
				zero_user_segment(page, start, end);
			}
		}
		bytes += req->wb_bytes;
		if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
			if (bytes <= hdr->good_bytes)
				nfs_page_group_set_uptodate(req);
		} else
			nfs_page_group_set_uptodate(req);
		nfs_list_remove_request(req);
		nfs_readpage_release(req);
	}
out:
	hdr->release(hdr);
}
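
/*
 * Set up the RPC message for a read. Reads from a swapfile get the
 * NFS_RPC_SWAPFLAGS task flags so the RPC layer treats them as
 * swap-out I/O.
 */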
static void nfs_initiate_read(struct nfs_pgio_header *hdr,
			      struct rpc_message *msg,
			      const struct nfs_rpc_ops *rpc_ops,
			      struct rpc_task_setup *task_setup_data, int how)
{
	struct inode *inode = hdr->inode;
	int swap_flags = IS_SWAPFILE(inode) ? NFS_RPC_SWAPFLAGS : 0;

	task_setup_data->flags |= swap_flags;
	rpc_ops->read_setup(hdr, msg);
}
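
/*
 * Error cleanup: drop every request still on the list without
 * transferring any data.
 */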
static void
nfs_async_read_error(struct list_head *head)
{
	struct nfs_page	*req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_readpage_release(req);
	}
}

static const struct nfs_pgio_completion_ops nfs_async_read_completion_ops = {
	.error_cleanup = nfs_async_read_error,
	.completion = nfs_read_completion,
};
/*
 * This is the callback from RPC telling us whether a reply was
 * received or some error occurred (timeout or socket shutdown).
 */
static int nfs_readpage_done(struct rpc_task *task,
			     struct nfs_pgio_header *hdr,
			     struct inode *inode)
{
	int status = NFS_PROTO(inode)->read_done(task, hdr);
	if (status != 0)
		return status;

	nfs_add_stats(inode, NFSIOS_SERVERREADBYTES, hdr->res.count);

	if (task->tk_status == -ESTALE) {
		set_bit(NFS_INO_STALE, &NFS_I(inode)->flags);
		nfs_mark_for_revalidate(inode);
	}
	return 0;
}
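
/*
 * Handle a short read: if the server returned no data at all, fail the
 * I/O with -EIO; for non rpc-based layout drivers, fall back to the
 * MDS; otherwise advance the arguments past the bytes already received
 * and restart the RPC call.
 */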
static void nfs_readpage_retry(struct rpc_task *task,
			       struct nfs_pgio_header *hdr)
{
	struct nfs_pgio_args *argp = &hdr->args;
	struct nfs_pgio_res  *resp = &hdr->res;

	/* This is a short read! */
	nfs_inc_stats(hdr->inode, NFSIOS_SHORTREAD);
	/* Has the server at least made some progress? */
	if (resp->count == 0) {
		nfs_set_pgio_error(hdr, -EIO, argp->offset);
		return;
	}

	/* For non rpc-based layout drivers, retry-through-MDS */
	if (!task->tk_ops) {
		hdr->pnfs_error = -EAGAIN;
		return;
	}

	/* Yes, so retry the read at the end of the hdr */
	hdr->mds_offset += resp->count;
	argp->offset += resp->count;
	argp->pgbase += resp->count;
	argp->count -= resp->count;
	rpc_restart_call_prepare(task);
}
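
/*
 * Process the result of a read RPC: on EOF, clamp good_bytes to the
 * data actually returned; on a short read without EOF, retry.
 */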
static void nfs_readpage_result(struct rpc_task *task,
				struct nfs_pgio_header *hdr)
{
	if (hdr->res.eof) {
		loff_t bound;

		bound = hdr->args.offset + hdr->res.count;
		spin_lock(&hdr->lock);
		if (bound < hdr->io_start + hdr->good_bytes) {
			set_bit(NFS_IOHDR_EOF, &hdr->flags);
			clear_bit(NFS_IOHDR_ERROR, &hdr->flags);
			hdr->good_bytes = bound - hdr->io_start;
		}
		spin_unlock(&hdr->lock);
	} else if (hdr->res.count < hdr->args.count)
		nfs_readpage_retry(task, hdr);
}
/*
 * Read a page over NFS.
 * We read the page synchronously in the following case:
 *  -	The error flag is set for this page. This happens only when a
 *	previous async read operation failed.
 */
int nfs_readpage(struct file *file, struct page *page)
{
	struct nfs_open_context *ctx;
	struct inode *inode = page_file_mapping(page)->host;
	int		error;

	dprintk("NFS: nfs_readpage (%p %ld@%lu)\n",
		page, PAGE_SIZE, page_index(page));
	nfs_inc_stats(inode, NFSIOS_VFSREADPAGE);
	nfs_add_stats(inode, NFSIOS_READPAGES, 1);

	/*
	 * Try to flush any pending writes to the file..
	 *
	 * NOTE! Because we own the page lock, there cannot
	 * be any new pending writes generated at this point
	 * for this page (other pages can be written to).
	 */
	error = nfs_wb_page(inode, page);
	if (error)
		goto out_unlock;
	if (PageUptodate(page))
		goto out_unlock;

	error = -ESTALE;
	if (NFS_STALE(inode))
		goto out_unlock;

	if (file == NULL) {
		error = -EBADF;
		ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
		if (ctx == NULL)
			goto out_unlock;
	} else
		ctx = get_nfs_open_context(nfs_file_open_context(file));

	if (!IS_SYNC(inode)) {
		error = nfs_readpage_from_fscache(ctx, inode, page);
		if (error == 0)
			goto out;
	}

	error = nfs_readpage_async(ctx, inode, page);

out:
	put_nfs_open_context(ctx);
	return error;
out_unlock:
	unlock_page(page);
	return error;
}
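
/*
 * Context passed to readpage_async_filler: the shared pageio
 * descriptor plus the open context to issue requests against.
 */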
struct nfs_readdesc {
	struct nfs_pageio_descriptor *pgio;
	struct nfs_open_context *ctx;
};
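
/*
 * Per-page callback for read_cache_pages(): build a request for each
 * page and feed it to the pageio descriptor, zeroing the tail of a
 * partially valid page.
 */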
static int
readpage_async_filler(void *data, struct page *page)
{
	struct nfs_readdesc *desc = (struct nfs_readdesc *)data;
	struct nfs_page *new;
	unsigned int len;
	int error;

	len = nfs_page_length(page);
	if (len == 0)
		return nfs_return_empty_page(page);

	new = nfs_create_request(desc->ctx, page, NULL, 0, len);
	if (IS_ERR(new))
		goto out_error;

	if (len < PAGE_SIZE)
		zero_user_segment(page, len, PAGE_SIZE);
	if (!nfs_pageio_add_request(desc->pgio, new)) {
		nfs_list_remove_request(new);
		nfs_readpage_release(new);
		error = desc->pgio->pg_error;
		goto out;
	}
	return 0;
out_error:
	error = PTR_ERR(new);
	unlock_page(page);
out:
	return error;
}
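
/*
 * Readahead entry point: try to satisfy the page list from fscache
 * first, then send whatever remains to the server via the pageio
 * descriptor, which coalesces pages into rsize-bounded RPCs.
 */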
int nfs_readpages(struct file *filp, struct address_space *mapping,
		  struct list_head *pages, unsigned nr_pages)
{
	struct nfs_pageio_descriptor pgio;
	struct nfs_pgio_mirror *pgm;
	struct nfs_readdesc desc = {
		.pgio = &pgio,
	};
	struct inode *inode = mapping->host;
	unsigned long npages;
	int ret = -ESTALE;

	dprintk("NFS: nfs_readpages (%s/%Lu %d)\n",
		inode->i_sb->s_id,
		(unsigned long long)NFS_FILEID(inode),
		nr_pages);
	nfs_inc_stats(inode, NFSIOS_VFSREADPAGES);

	if (NFS_STALE(inode))
		goto out;

	if (filp == NULL) {
		desc.ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
		if (desc.ctx == NULL)
			return -EBADF;
	} else
		desc.ctx = get_nfs_open_context(nfs_file_open_context(filp));

	/* attempt to read as many of the pages as possible from the cache
	 * - this returns -ENOBUFS immediately if the cookie is negative
	 */
	ret = nfs_readpages_from_fscache(desc.ctx, inode, mapping,
					 pages, &nr_pages);
	if (ret == 0)
		goto read_complete; /* all pages were read */

	nfs_pageio_init_read(&pgio, inode, false,
			     &nfs_async_read_completion_ops);

	ret = read_cache_pages(mapping, pages, readpage_async_filler, &desc);
	nfs_pageio_complete(&pgio);

	/* It doesn't make sense to do mirrored reads! */
	WARN_ON_ONCE(pgio.pg_mirror_count != 1);

	pgm = &pgio.pg_mirrors[0];
	NFS_I(inode)->read_io += pgm->pg_bytes_written;
	npages = (pgm->pg_bytes_written + PAGE_SIZE - 1) >> PAGE_SHIFT;
	nfs_add_stats(inode, NFSIOS_READPAGES, npages);
read_complete:
	put_nfs_open_context(desc.ctx);
out:
	return ret;
}
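
/*
 * Module init/exit: create and destroy the slab cache backing read
 * pgio headers.
 */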
int __init nfs_init_readpagecache(void)
{
	nfs_rdata_cachep = kmem_cache_create("nfs_read_data",
					     sizeof(struct nfs_pgio_header),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (nfs_rdata_cachep == NULL)
		return -ENOMEM;

	return 0;
}
void nfs_destroy_readpagecache(void)
{
	kmem_cache_destroy(nfs_rdata_cachep);
}
static const struct nfs_rw_ops nfs_rw_read_ops = {
	.rw_mode		= FMODE_READ,
	.rw_alloc_header	= nfs_readhdr_alloc,
	.rw_free_header		= nfs_readhdr_free,
	.rw_done		= nfs_readpage_done,
	.rw_result		= nfs_readpage_result,
	.rw_initiate		= nfs_initiate_read,
};