pnfs.c

/*
 * pNFS functions to call and manage layout drivers.
 *
 * Copyright (c) 2002 [year of first publication]
 * The Regents of the University of Michigan
 * All Rights Reserved
 *
 * Dean Hildebrand <dhildebz@umich.edu>
 *
 * Permission is granted to use, copy, create derivative works, and
 * redistribute this software and such derivative works for any purpose,
 * so long as the name of the University of Michigan is not used in
 * any advertising or publicity pertaining to the use or distribution
 * of this software without specific, written prior authorization. If
 * the above copyright notice or any other identification of the
 * University of Michigan is included in any copy of any portion of
 * this software, then the disclaimer below must also be included.
 *
 * This software is provided as is, without representation or warranty
 * of any kind either express or implied, including without limitation
 * the implied warranties of merchantability, fitness for a particular
 * purpose, or noninfringement. The Regents of the University of
 * Michigan shall not be liable for any damages, including special,
 * indirect, incidental, or consequential damages, with respect to any
 * claim arising out of or in connection with the use of the software,
 * even if it has been or is hereafter advised of the possibility of
 * such damages.
 */

#include <linux/nfs_fs.h>
#include "internal.h"
#include "pnfs.h"
#include "iostat.h"

#define NFSDBG_FACILITY		NFSDBG_PNFS

/* Locking:
 *
 * pnfs_spinlock:
 *      protects pnfs_modules_tbl.
 */
static DEFINE_SPINLOCK(pnfs_spinlock);

/*
 * pnfs_modules_tbl holds all pnfs modules
 */
static LIST_HEAD(pnfs_modules_tbl);

/* Return the registered pnfs layout driver module matching given id */
static struct pnfs_layoutdriver_type *
find_pnfs_driver_locked(u32 id)
{
	struct pnfs_layoutdriver_type *local;

	list_for_each_entry(local, &pnfs_modules_tbl, pnfs_tblid)
		if (local->id == id)
			goto out;
	local = NULL;
out:
	dprintk("%s: Searching for id %u, found %p\n", __func__, id, local);
	return local;
}

static struct pnfs_layoutdriver_type *
find_pnfs_driver(u32 id)
{
	struct pnfs_layoutdriver_type *local;

	spin_lock(&pnfs_spinlock);
	local = find_pnfs_driver_locked(id);
	spin_unlock(&pnfs_spinlock);
	return local;
}

void
unset_pnfs_layoutdriver(struct nfs_server *nfss)
{
	if (nfss->pnfs_curr_ld)
		module_put(nfss->pnfs_curr_ld->owner);
	nfss->pnfs_curr_ld = NULL;
}

/*
 * Try to set the server's pnfs module to the pnfs layout type specified by id.
 * Currently only one pNFS layout driver per filesystem is supported.
 *
 * @id layout type. Zero (illegal layout type) indicates pNFS not in use.
 */
void
set_pnfs_layoutdriver(struct nfs_server *server, u32 id)
{
	struct pnfs_layoutdriver_type *ld_type = NULL;

	if (id == 0)
		goto out_no_driver;
	if (!(server->nfs_client->cl_exchange_flags &
		 (EXCHGID4_FLAG_USE_NON_PNFS | EXCHGID4_FLAG_USE_PNFS_MDS))) {
		printk(KERN_ERR "%s: id %u cl_exchange_flags 0x%x\n", __func__,
		       id, server->nfs_client->cl_exchange_flags);
		goto out_no_driver;
	}
	ld_type = find_pnfs_driver(id);
	if (!ld_type) {
		request_module("%s-%u", LAYOUT_NFSV4_1_MODULE_PREFIX, id);
		ld_type = find_pnfs_driver(id);
		if (!ld_type) {
			dprintk("%s: No pNFS module found for %u.\n",
				__func__, id);
			goto out_no_driver;
		}
	}
	if (!try_module_get(ld_type->owner)) {
		dprintk("%s: Could not grab reference on module\n", __func__);
		goto out_no_driver;
	}
	server->pnfs_curr_ld = ld_type;

	dprintk("%s: pNFS module for %u set\n", __func__, id);
	return;

out_no_driver:
	dprintk("%s: Using NFSv4 I/O\n", __func__);
	server->pnfs_curr_ld = NULL;
}

int
pnfs_register_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
	int status = -EINVAL;
	struct pnfs_layoutdriver_type *tmp;

	if (ld_type->id == 0) {
		printk(KERN_ERR "%s id 0 is reserved\n", __func__);
		return status;
	}
	if (!ld_type->alloc_lseg || !ld_type->free_lseg) {
		printk(KERN_ERR "%s Layout driver must provide "
		       "alloc_lseg and free_lseg.\n", __func__);
		return status;
	}

	spin_lock(&pnfs_spinlock);
	tmp = find_pnfs_driver_locked(ld_type->id);
	if (!tmp) {
		list_add(&ld_type->pnfs_tblid, &pnfs_modules_tbl);
		status = 0;
		dprintk("%s Registering id:%u name:%s\n", __func__, ld_type->id,
			ld_type->name);
	} else {
		printk(KERN_ERR "%s Module with id %d already loaded!\n",
			__func__, ld_type->id);
	}
	spin_unlock(&pnfs_spinlock);

	return status;
}
EXPORT_SYMBOL_GPL(pnfs_register_layoutdriver);

void
pnfs_unregister_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
	dprintk("%s Deregistering id:%u\n", __func__, ld_type->id);
	spin_lock(&pnfs_spinlock);
	list_del(&ld_type->pnfs_tblid);
	spin_unlock(&pnfs_spinlock);
}
EXPORT_SYMBOL_GPL(pnfs_unregister_layoutdriver);
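
/*
 * Illustrative sketch (not taken from this file) of how a layout driver
 * module would typically use the registration interface above; the
 * "mylayout_*" names are hypothetical placeholders:
 *
 *	static struct pnfs_layoutdriver_type mylayout_type = {
 *		.id		= LAYOUT4_NFSV4_1_FILES,
 *		.name		= "LAYOUT_NFSV4_1_FILES",
 *		.owner		= THIS_MODULE,
 *		.alloc_lseg	= mylayout_alloc_lseg,
 *		.free_lseg	= mylayout_free_lseg,
 *	};
 *
 *	static int __init mylayout_init(void)
 *	{
 *		return pnfs_register_layoutdriver(&mylayout_type);
 *	}
 *
 *	static void __exit mylayout_exit(void)
 *	{
 *		pnfs_unregister_layoutdriver(&mylayout_type);
 *	}
 *
 * alloc_lseg and free_lseg are mandatory: pnfs_register_layoutdriver()
 * rejects a driver that omits either, and id 0 is reserved.
 */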

/*
 * pNFS client layout cache
 */

/* Need to hold i_lock if caller does not already hold reference */
void
get_layout_hdr(struct pnfs_layout_hdr *lo)
{
	atomic_inc(&lo->plh_refcount);
}

static struct pnfs_layout_hdr *
pnfs_alloc_layout_hdr(struct inode *ino, gfp_t gfp_flags)
{
	struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;
	return ld->alloc_layout_hdr ? ld->alloc_layout_hdr(ino, gfp_flags) :
		kzalloc(sizeof(struct pnfs_layout_hdr), gfp_flags);
}

static void
pnfs_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct pnfs_layoutdriver_type *ld = NFS_SERVER(lo->plh_inode)->pnfs_curr_ld;
	put_rpccred(lo->plh_lc_cred);
	return ld->alloc_layout_hdr ? ld->free_layout_hdr(lo) : kfree(lo);
}

static void
destroy_layout_hdr(struct pnfs_layout_hdr *lo)
{
	dprintk("%s: freeing layout cache %p\n", __func__, lo);
	BUG_ON(!list_empty(&lo->plh_layouts));
	NFS_I(lo->plh_inode)->layout = NULL;
	pnfs_free_layout_hdr(lo);
}

static void
put_layout_hdr_locked(struct pnfs_layout_hdr *lo)
{
	if (atomic_dec_and_test(&lo->plh_refcount))
		destroy_layout_hdr(lo);
}

void
put_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct inode *inode = lo->plh_inode;

	if (atomic_dec_and_lock(&lo->plh_refcount, &inode->i_lock)) {
		destroy_layout_hdr(lo);
		spin_unlock(&inode->i_lock);
	}
}

static void
init_lseg(struct pnfs_layout_hdr *lo, struct pnfs_layout_segment *lseg)
{
	INIT_LIST_HEAD(&lseg->pls_list);
	INIT_LIST_HEAD(&lseg->pls_lc_list);
	atomic_set(&lseg->pls_refcount, 1);
	smp_mb();
	set_bit(NFS_LSEG_VALID, &lseg->pls_flags);
	lseg->pls_layout = lo;
}

static void free_lseg(struct pnfs_layout_segment *lseg)
{
	struct inode *ino = lseg->pls_layout->plh_inode;

	NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
	/* Matched by get_layout_hdr in pnfs_insert_layout */
	put_layout_hdr(NFS_I(ino)->layout);
}

static void
put_lseg_common(struct pnfs_layout_segment *lseg)
{
	struct inode *inode = lseg->pls_layout->plh_inode;

	WARN_ON(test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
	list_del_init(&lseg->pls_list);
	if (list_empty(&lseg->pls_layout->plh_segs)) {
		set_bit(NFS_LAYOUT_DESTROYED, &lseg->pls_layout->plh_flags);
		/* Matched by initial refcount set in alloc_init_layout_hdr */
		put_layout_hdr_locked(lseg->pls_layout);
	}
	rpc_wake_up(&NFS_SERVER(inode)->roc_rpcwaitq);
}

void
put_lseg(struct pnfs_layout_segment *lseg)
{
	struct inode *inode;

	if (!lseg)
		return;

	dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg,
		atomic_read(&lseg->pls_refcount),
		test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
	inode = lseg->pls_layout->plh_inode;
	if (atomic_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) {
		LIST_HEAD(free_me);

		put_lseg_common(lseg);
		list_add(&lseg->pls_list, &free_me);
		spin_unlock(&inode->i_lock);
		pnfs_free_lseg_list(&free_me);
	}
}
EXPORT_SYMBOL_GPL(put_lseg);
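
/*
 * Reference-count life cycle of a layout segment, as implemented above:
 * init_lseg() starts an lseg at refcount 1 with NFS_LSEG_VALID set; that
 * initial reference represents its presence on lo->plh_segs.  When the
 * segment is invalidated (mark_lseg_invalid() below clears NFS_LSEG_VALID),
 * that list reference is dropped, and the final put_lseg() hands the lseg
 * to pnfs_free_lseg_list(), whose call to free_lseg() also releases the
 * layout header reference taken in pnfs_insert_layout().
 */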

static inline u64
end_offset(u64 start, u64 len)
{
	u64 end;

	end = start + len;
	return end >= start ? end : NFS4_MAX_UINT64;
}

/* last octet in a range */
static inline u64
last_byte_offset(u64 start, u64 len)
{
	u64 end;

	BUG_ON(!len);
	end = start + len;
	return end > start ? end - 1 : NFS4_MAX_UINT64;
}
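
/*
 * Worked example of the overflow clamping above: with start = 4096 and
 * len = NFS4_MAX_UINT64 ("to the end of the file"), start + len wraps
 * around to 4095, so end_offset() returns NFS4_MAX_UINT64 and
 * last_byte_offset() likewise reports the last representable byte.
 */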

/*
 * is l2 fully contained in l1?
 *   start1                             end1
 *   [----------------------------------)
 *           start2           end2
 *           [----------------)
 */
static inline int
lo_seg_contained(struct pnfs_layout_range *l1,
		 struct pnfs_layout_range *l2)
{
	u64 start1 = l1->offset;
	u64 end1 = end_offset(start1, l1->length);
	u64 start2 = l2->offset;
	u64 end2 = end_offset(start2, l2->length);

	return (start1 <= start2) && (end1 >= end2);
}

/*
 * do l1 and l2 intersect?
 *   start1                             end1
 *   [----------------------------------)
 *                              start2           end2
 *                              [----------------)
 */
static inline int
lo_seg_intersecting(struct pnfs_layout_range *l1,
		    struct pnfs_layout_range *l2)
{
	u64 start1 = l1->offset;
	u64 end1 = end_offset(start1, l1->length);
	u64 start2 = l2->offset;
	u64 end2 = end_offset(start2, l2->length);

	return (end1 == NFS4_MAX_UINT64 || end1 > start2) &&
	       (end2 == NFS4_MAX_UINT64 || end2 > start1);
}

static bool
should_free_lseg(struct pnfs_layout_range *lseg_range,
		 struct pnfs_layout_range *recall_range)
{
	return (recall_range->iomode == IOMODE_ANY ||
		lseg_range->iomode == recall_range->iomode) &&
	       lo_seg_intersecting(lseg_range, recall_range);
}

/* Returns 1 if lseg is removed from list, 0 otherwise */
static int mark_lseg_invalid(struct pnfs_layout_segment *lseg,
			     struct list_head *tmp_list)
{
	int rv = 0;

	if (test_and_clear_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
		/* Remove the reference keeping the lseg in the
		 * list.  It will now be removed when all
		 * outstanding io is finished.
		 */
		dprintk("%s: lseg %p ref %d\n", __func__, lseg,
			atomic_read(&lseg->pls_refcount));
		if (atomic_dec_and_test(&lseg->pls_refcount)) {
			put_lseg_common(lseg);
			list_add(&lseg->pls_list, tmp_list);
			rv = 1;
		}
	}
	return rv;
}

/* Returns count of number of matching invalid lsegs remaining in list
 * after call.
 */
int
mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
			    struct list_head *tmp_list,
			    struct pnfs_layout_range *recall_range)
{
	struct pnfs_layout_segment *lseg, *next;
	int invalid = 0, removed = 0;

	dprintk("%s:Begin lo %p\n", __func__, lo);

	if (list_empty(&lo->plh_segs)) {
		if (!test_and_set_bit(NFS_LAYOUT_DESTROYED, &lo->plh_flags))
			put_layout_hdr_locked(lo);
		return 0;
	}
	list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
		if (!recall_range ||
		    should_free_lseg(&lseg->pls_range, recall_range)) {
			dprintk("%s: freeing lseg %p iomode %d "
				"offset %llu length %llu\n", __func__,
				lseg, lseg->pls_range.iomode, lseg->pls_range.offset,
				lseg->pls_range.length);
			invalid++;
			removed += mark_lseg_invalid(lseg, tmp_list);
		}
	dprintk("%s:Return %i\n", __func__, invalid - removed);
	return invalid - removed;
}

/* note free_me must contain lsegs from a single layout_hdr */
void
pnfs_free_lseg_list(struct list_head *free_me)
{
	struct pnfs_layout_segment *lseg, *tmp;
	struct pnfs_layout_hdr *lo;

	if (list_empty(free_me))
		return;

	lo = list_first_entry(free_me, struct pnfs_layout_segment,
			      pls_list)->pls_layout;

	if (test_bit(NFS_LAYOUT_DESTROYED, &lo->plh_flags)) {
		struct nfs_client *clp;

		clp = NFS_SERVER(lo->plh_inode)->nfs_client;
		spin_lock(&clp->cl_lock);
		list_del_init(&lo->plh_layouts);
		spin_unlock(&clp->cl_lock);
	}
	list_for_each_entry_safe(lseg, tmp, free_me, pls_list) {
		list_del(&lseg->pls_list);
		free_lseg(lseg);
	}
}

void
pnfs_destroy_layout(struct nfs_inode *nfsi)
{
	struct pnfs_layout_hdr *lo;
	LIST_HEAD(tmp_list);

	spin_lock(&nfsi->vfs_inode.i_lock);
	lo = nfsi->layout;
	if (lo) {
		lo->plh_block_lgets++; /* permanently block new LAYOUTGETs */
		mark_matching_lsegs_invalid(lo, &tmp_list, NULL);
	}
	spin_unlock(&nfsi->vfs_inode.i_lock);
	pnfs_free_lseg_list(&tmp_list);
}

/*
 * Called by the state manager to remove all layouts established under an
 * expired lease.
 */
void
pnfs_destroy_all_layouts(struct nfs_client *clp)
{
	struct pnfs_layout_hdr *lo;
	LIST_HEAD(tmp_list);

	spin_lock(&clp->cl_lock);
	list_splice_init(&clp->cl_layouts, &tmp_list);
	spin_unlock(&clp->cl_lock);

	while (!list_empty(&tmp_list)) {
		lo = list_entry(tmp_list.next, struct pnfs_layout_hdr,
				plh_layouts);
		dprintk("%s freeing layout for inode %lu\n", __func__,
			lo->plh_inode->i_ino);
		list_del_init(&lo->plh_layouts);
		pnfs_destroy_layout(NFS_I(lo->plh_inode));
	}
}

/* update lo->plh_stateid with new if is more recent */
void
pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new,
			bool update_barrier)
{
	u32 oldseq, newseq;

	oldseq = be32_to_cpu(lo->plh_stateid.stateid.seqid);
	newseq = be32_to_cpu(new->stateid.seqid);
	if ((int)(newseq - oldseq) > 0) {
		memcpy(&lo->plh_stateid, &new->stateid, sizeof(new->stateid));
		if (update_barrier) {
			u32 new_barrier = be32_to_cpu(new->stateid.seqid);

			if ((int)(new_barrier - lo->plh_barrier))
				lo->plh_barrier = new_barrier;
		} else {
			/* Because of wraparound, we want to keep the barrier
			 * "close" to the current seqids.  It needs to be
			 * within 2**31 to count as "behind", so if it
			 * gets too near that limit, give us a little leeway
			 * and bring it to within 2**30.
			 * NOTE - and yes, this is all unsigned arithmetic.
			 */
			if (unlikely((newseq - lo->plh_barrier) > (3 << 29)))
				lo->plh_barrier = newseq - (1 << 30);
		}
	}
}
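
/*
 * Example of the unsigned barrier arithmetic above (hypothetical seqids):
 * if newseq = 0x10 after a wraparound while plh_barrier is still
 * 0x80000000, then (newseq - lo->plh_barrier) = 0x80000010 > (3 << 29),
 * so the barrier is pulled forward to 0x10 - (1 << 30) = 0xc0000010,
 * leaving it exactly 2**30 "behind" newseq in sequence-number terms.
 */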

/* lget is set to 1 if called from inside send_layoutget call chain */
static bool
pnfs_layoutgets_blocked(struct pnfs_layout_hdr *lo, nfs4_stateid *stateid,
			int lget)
{
	if ((stateid) &&
	    (int)(lo->plh_barrier - be32_to_cpu(stateid->stateid.seqid)) >= 0)
		return true;
	return lo->plh_block_lgets ||
		test_bit(NFS_LAYOUT_DESTROYED, &lo->plh_flags) ||
		test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags) ||
		(list_empty(&lo->plh_segs) &&
		 (atomic_read(&lo->plh_outstanding) > lget));
}

int
pnfs_choose_layoutget_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo,
			      struct nfs4_state *open_state)
{
	int status = 0;

	dprintk("--> %s\n", __func__);
	spin_lock(&lo->plh_inode->i_lock);
	if (pnfs_layoutgets_blocked(lo, NULL, 1)) {
		status = -EAGAIN;
	} else if (list_empty(&lo->plh_segs)) {
		int seq;

		do {
			seq = read_seqbegin(&open_state->seqlock);
			memcpy(dst->data, open_state->stateid.data,
			       sizeof(open_state->stateid.data));
		} while (read_seqretry(&open_state->seqlock, seq));
	} else
		memcpy(dst->data, lo->plh_stateid.data, sizeof(lo->plh_stateid.data));
	spin_unlock(&lo->plh_inode->i_lock);
	dprintk("<-- %s\n", __func__);
	return status;
}
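
/*
 * Note on the stateid choice above: while the layout still has no
 * segments, the LAYOUTGET presumably has to carry the open stateid
 * (read under open_state->seqlock so a concurrent update is not seen
 * torn); once segments exist, the cached layout stateid in plh_stateid
 * is used instead.
 */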

/*
 * Get layout from server.
 *    for now, assume that whole file layouts are requested.
 *    arg->offset: 0
 *    arg->length: all ones
 */
static struct pnfs_layout_segment *
send_layoutget(struct pnfs_layout_hdr *lo,
	       struct nfs_open_context *ctx,
	       struct pnfs_layout_range *range,
	       gfp_t gfp_flags)
{
	struct inode *ino = lo->plh_inode;
	struct nfs_server *server = NFS_SERVER(ino);
	struct nfs4_layoutget *lgp;
	struct pnfs_layout_segment *lseg = NULL;
	struct page **pages = NULL;
	int i;
	u32 max_resp_sz, max_pages;

	dprintk("--> %s\n", __func__);

	BUG_ON(ctx == NULL);
	lgp = kzalloc(sizeof(*lgp), gfp_flags);
	if (lgp == NULL)
		return NULL;

	/* allocate pages for xdr post processing */
	max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
	max_pages = max_resp_sz >> PAGE_SHIFT;

	pages = kzalloc(max_pages * sizeof(struct page *), gfp_flags);
	if (!pages)
		goto out_err_free;

	for (i = 0; i < max_pages; i++) {
		pages[i] = alloc_page(gfp_flags);
		if (!pages[i])
			goto out_err_free;
	}

	lgp->args.minlength = PAGE_CACHE_SIZE;
	if (lgp->args.minlength > range->length)
		lgp->args.minlength = range->length;
	lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE;
	lgp->args.range = *range;
	lgp->args.type = server->pnfs_curr_ld->id;
	lgp->args.inode = ino;
	lgp->args.ctx = get_nfs_open_context(ctx);
	lgp->args.layout.pages = pages;
	lgp->args.layout.pglen = max_pages * PAGE_SIZE;
	lgp->lsegpp = &lseg;
	lgp->gfp_flags = gfp_flags;

	/* Synchronously retrieve layout information from server and
	 * store in lseg.
	 */
	nfs4_proc_layoutget(lgp);
	if (!lseg) {
		/* remember that LAYOUTGET failed and suspend trying */
		set_bit(lo_fail_bit(range->iomode), &lo->plh_flags);
	}

	/* free xdr pages */
	for (i = 0; i < max_pages; i++)
		__free_page(pages[i]);
	kfree(pages);

	return lseg;

out_err_free:
	/* free any allocated xdr pages, lgp as it's not used */
	if (pages) {
		for (i = 0; i < max_pages; i++) {
			if (!pages[i])
				break;
			__free_page(pages[i]);
		}
		kfree(pages);
	}
	kfree(lgp);
	return NULL;
}

/* Initiates a LAYOUTRETURN(FILE) */
int
_pnfs_return_layout(struct inode *ino)
{
	struct pnfs_layout_hdr *lo = NULL;
	struct nfs_inode *nfsi = NFS_I(ino);
	LIST_HEAD(tmp_list);
	struct nfs4_layoutreturn *lrp;
	nfs4_stateid stateid;
	int status = 0;

	dprintk("--> %s\n", __func__);

	spin_lock(&ino->i_lock);
	lo = nfsi->layout;
	if (!lo) {
		spin_unlock(&ino->i_lock);
		dprintk("%s: no layout to return\n", __func__);
		return status;
	}
	stateid = nfsi->layout->plh_stateid;
	/* Reference matched in nfs4_layoutreturn_release */
	get_layout_hdr(lo);
	mark_matching_lsegs_invalid(lo, &tmp_list, NULL);
	lo->plh_block_lgets++;
	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&tmp_list);

	WARN_ON(test_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags));

	lrp = kzalloc(sizeof(*lrp), GFP_KERNEL);
	if (unlikely(lrp == NULL)) {
		status = -ENOMEM;
		set_bit(NFS_LAYOUT_RW_FAILED, &lo->plh_flags);
		set_bit(NFS_LAYOUT_RO_FAILED, &lo->plh_flags);
		put_layout_hdr(lo);
		goto out;
	}

	lrp->args.stateid = stateid;
	lrp->args.layout_type = NFS_SERVER(ino)->pnfs_curr_ld->id;
	lrp->args.inode = ino;
	lrp->clp = NFS_SERVER(ino)->nfs_client;

	status = nfs4_proc_layoutreturn(lrp);
out:
	dprintk("<-- %s status: %d\n", __func__, status);
	return status;
}

bool pnfs_roc(struct inode *ino)
{
	struct pnfs_layout_hdr *lo;
	struct pnfs_layout_segment *lseg, *tmp;
	LIST_HEAD(tmp_list);
	bool found = false;

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	if (!lo || !test_and_clear_bit(NFS_LAYOUT_ROC, &lo->plh_flags) ||
	    test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags))
		goto out_nolayout;
	list_for_each_entry_safe(lseg, tmp, &lo->plh_segs, pls_list)
		if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
			mark_lseg_invalid(lseg, &tmp_list);
			found = true;
		}
	if (!found)
		goto out_nolayout;
	lo->plh_block_lgets++;
	get_layout_hdr(lo); /* matched in pnfs_roc_release */
	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&tmp_list);
	return true;

out_nolayout:
	spin_unlock(&ino->i_lock);
	return false;
}

void pnfs_roc_release(struct inode *ino)
{
	struct pnfs_layout_hdr *lo;

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	lo->plh_block_lgets--;
	put_layout_hdr_locked(lo);
	spin_unlock(&ino->i_lock);
}

void pnfs_roc_set_barrier(struct inode *ino, u32 barrier)
{
	struct pnfs_layout_hdr *lo;

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	if ((int)(barrier - lo->plh_barrier) > 0)
		lo->plh_barrier = barrier;
	spin_unlock(&ino->i_lock);
}

bool pnfs_roc_drain(struct inode *ino, u32 *barrier)
{
	struct nfs_inode *nfsi = NFS_I(ino);
	struct pnfs_layout_segment *lseg;
	bool found = false;

	spin_lock(&ino->i_lock);
	list_for_each_entry(lseg, &nfsi->layout->plh_segs, pls_list)
		if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
			found = true;
			break;
		}
	if (!found) {
		struct pnfs_layout_hdr *lo = nfsi->layout;
		u32 current_seqid = be32_to_cpu(lo->plh_stateid.stateid.seqid);

		/* Since close does not return a layout stateid for use as
		 * a barrier, we choose the worst-case barrier.
		 */
		*barrier = current_seqid + atomic_read(&lo->plh_outstanding);
	}
	spin_unlock(&ino->i_lock);
	return found;
}
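
/*
 * The four pnfs_roc*() helpers above cooperate with the NFSv4.1 CLOSE
 * path (their callers live outside this file): pnfs_roc() invalidates
 * and frees the return-on-close segments and blocks new LAYOUTGETs,
 * pnfs_roc_release() undoes that once CLOSE completes,
 * pnfs_roc_set_barrier() records the seqid barrier chosen by the caller,
 * and pnfs_roc_drain() reports whether ROC segments are still in use and
 * supplies a worst-case barrier when none remain.
 */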

/*
 * Compare two layout segments for sorting into layout cache.
 * We want to preferentially return RW over RO layouts, so ensure those
 * are seen first.
 */
static s64
cmp_layout(struct pnfs_layout_range *l1,
	   struct pnfs_layout_range *l2)
{
	s64 d;

	/* high offset > low offset */
	d = l1->offset - l2->offset;
	if (d)
		return d;

	/* short length > long length */
	d = l2->length - l1->length;
	if (d)
		return d;

	/* read > read/write */
	return (int)(l1->iomode == IOMODE_READ) - (int)(l2->iomode == IOMODE_READ);
}
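
/*
 * Example of the resulting sort order: for two segments covering the same
 * offset and length, one IOMODE_RW and one IOMODE_READ, cmp_layout()
 * returns a positive value when l1 is the READ segment, so
 * pnfs_insert_layout() below places the RW segment first and lookups see
 * it before the read-only one.
 */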

static void
pnfs_insert_layout(struct pnfs_layout_hdr *lo,
		   struct pnfs_layout_segment *lseg)
{
	struct pnfs_layout_segment *lp;

	dprintk("%s:Begin\n", __func__);

	assert_spin_locked(&lo->plh_inode->i_lock);
	list_for_each_entry(lp, &lo->plh_segs, pls_list) {
		if (cmp_layout(&lseg->pls_range, &lp->pls_range) > 0)
			continue;
		list_add_tail(&lseg->pls_list, &lp->pls_list);
		dprintk("%s: inserted lseg %p "
			"iomode %d offset %llu length %llu before "
			"lp %p iomode %d offset %llu length %llu\n",
			__func__, lseg, lseg->pls_range.iomode,
			lseg->pls_range.offset, lseg->pls_range.length,
			lp, lp->pls_range.iomode, lp->pls_range.offset,
			lp->pls_range.length);
		goto out;
	}
	list_add_tail(&lseg->pls_list, &lo->plh_segs);
	dprintk("%s: inserted lseg %p "
		"iomode %d offset %llu length %llu at tail\n",
		__func__, lseg, lseg->pls_range.iomode,
		lseg->pls_range.offset, lseg->pls_range.length);
out:
	get_layout_hdr(lo);

	dprintk("%s:Return\n", __func__);
}

static struct pnfs_layout_hdr *
alloc_init_layout_hdr(struct inode *ino,
		      struct nfs_open_context *ctx,
		      gfp_t gfp_flags)
{
	struct pnfs_layout_hdr *lo;

	lo = pnfs_alloc_layout_hdr(ino, gfp_flags);
	if (!lo)
		return NULL;
	atomic_set(&lo->plh_refcount, 1);
	INIT_LIST_HEAD(&lo->plh_layouts);
	INIT_LIST_HEAD(&lo->plh_segs);
	INIT_LIST_HEAD(&lo->plh_bulk_recall);
	lo->plh_inode = ino;
	lo->plh_lc_cred = get_rpccred(ctx->state->owner->so_cred);
	return lo;
}

static struct pnfs_layout_hdr *
pnfs_find_alloc_layout(struct inode *ino,
		       struct nfs_open_context *ctx,
		       gfp_t gfp_flags)
{
	struct nfs_inode *nfsi = NFS_I(ino);
	struct pnfs_layout_hdr *new = NULL;

	dprintk("%s Begin ino=%p layout=%p\n", __func__, ino, nfsi->layout);

	assert_spin_locked(&ino->i_lock);
	if (nfsi->layout) {
		if (test_bit(NFS_LAYOUT_DESTROYED, &nfsi->layout->plh_flags))
			return NULL;
		else
			return nfsi->layout;
	}
	spin_unlock(&ino->i_lock);
	new = alloc_init_layout_hdr(ino, ctx, gfp_flags);
	spin_lock(&ino->i_lock);

	if (likely(nfsi->layout == NULL))	/* Won the race? */
		nfsi->layout = new;
	else
		pnfs_free_layout_hdr(new);
	return nfsi->layout;
}

/*
 * iomode matching rules:
 * iomode	lseg	match
 * -----	-----	-----
 * ANY		READ	true
 * ANY		RW	true
 * RW		READ	false
 * RW		RW	true
 * READ		READ	true
 * READ		RW	true
 */
static int
is_matching_lseg(struct pnfs_layout_range *ls_range,
		 struct pnfs_layout_range *range)
{
	struct pnfs_layout_range range1;

	if ((range->iomode == IOMODE_RW &&
	     ls_range->iomode != IOMODE_RW) ||
	    !lo_seg_intersecting(ls_range, range))
		return 0;

	/* range1 covers only the first byte in the range */
	range1 = *range;
	range1.length = 1;
	return lo_seg_contained(ls_range, &range1);
}

/*
 * lookup range in layout
 */
static struct pnfs_layout_segment *
pnfs_find_lseg(struct pnfs_layout_hdr *lo,
	       struct pnfs_layout_range *range)
{
	struct pnfs_layout_segment *lseg, *ret = NULL;

	dprintk("%s:Begin\n", __func__);

	assert_spin_locked(&lo->plh_inode->i_lock);
	list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
		if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags) &&
		    is_matching_lseg(&lseg->pls_range, range)) {
			ret = get_lseg(lseg);
			break;
		}
		if (lseg->pls_range.offset > range->offset)
			break;
	}

	dprintk("%s:Return lseg %p ref %d\n",
		__func__, ret, ret ? atomic_read(&ret->pls_refcount) : 0);
	return ret;
}
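
/*
 * Because pnfs_insert_layout() keeps plh_segs sorted by cmp_layout()
 * (ascending offset first), the lookup above can stop as soon as it
 * reaches a segment whose offset lies beyond the start of the requested
 * range; no later segment can contain that first byte.
 */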

/*
 * Layout segment is retrieved from the server if not cached.
 * The appropriate layout segment is referenced and returned to the caller.
 */
struct pnfs_layout_segment *
pnfs_update_layout(struct inode *ino,
		   struct nfs_open_context *ctx,
		   loff_t pos,
		   u64 count,
		   enum pnfs_iomode iomode,
		   gfp_t gfp_flags)
{
	struct pnfs_layout_range arg = {
		.iomode = iomode,
		.offset = pos,
		.length = count,
	};
	unsigned pg_offset;
	struct nfs_inode *nfsi = NFS_I(ino);
	struct nfs_client *clp = NFS_SERVER(ino)->nfs_client;
	struct pnfs_layout_hdr *lo;
	struct pnfs_layout_segment *lseg = NULL;
	bool first = false;

	if (!pnfs_enabled_sb(NFS_SERVER(ino)))
		return NULL;
	spin_lock(&ino->i_lock);
	lo = pnfs_find_alloc_layout(ino, ctx, gfp_flags);
	if (lo == NULL) {
		dprintk("%s ERROR: can't get pnfs_layout_hdr\n", __func__);
		goto out_unlock;
	}

	/* Do we even need to bother with this? */
	if (test_bit(NFS4CLNT_LAYOUTRECALL, &clp->cl_state) ||
	    test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
		dprintk("%s matches recall, use MDS\n", __func__);
		goto out_unlock;
	}

	/* if LAYOUTGET already failed once we don't try again */
	if (test_bit(lo_fail_bit(iomode), &nfsi->layout->plh_flags))
		goto out_unlock;

	/* Check to see if the layout for the given range already exists */
	lseg = pnfs_find_lseg(lo, &arg);
	if (lseg)
		goto out_unlock;

	if (pnfs_layoutgets_blocked(lo, NULL, 0))
		goto out_unlock;
	atomic_inc(&lo->plh_outstanding);

	get_layout_hdr(lo);
	if (list_empty(&lo->plh_segs))
		first = true;
	spin_unlock(&ino->i_lock);
	if (first) {
		/* The lo must be on the clp list if there is any
		 * chance of a CB_LAYOUTRECALL(FILE) coming in.
		 */
		spin_lock(&clp->cl_lock);
		BUG_ON(!list_empty(&lo->plh_layouts));
		list_add_tail(&lo->plh_layouts, &clp->cl_layouts);
		spin_unlock(&clp->cl_lock);
	}

	pg_offset = arg.offset & ~PAGE_CACHE_MASK;
	if (pg_offset) {
		arg.offset -= pg_offset;
		arg.length += pg_offset;
	}
	if (arg.length != NFS4_MAX_UINT64)
		arg.length = PAGE_CACHE_ALIGN(arg.length);

	lseg = send_layoutget(lo, ctx, &arg, gfp_flags);
	if (!lseg && first) {
		spin_lock(&clp->cl_lock);
		list_del_init(&lo->plh_layouts);
		spin_unlock(&clp->cl_lock);
	}
	atomic_dec(&lo->plh_outstanding);
	put_layout_hdr(lo);
out:
	dprintk("%s end, state 0x%lx lseg %p\n", __func__,
		nfsi->layout ? nfsi->layout->plh_flags : -1, lseg);
	return lseg;
out_unlock:
	spin_unlock(&ino->i_lock);
	goto out;
}
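
/*
 * Summary of the LAYOUTGET path implemented above: pnfs_update_layout()
 * first consults the cache via pnfs_find_lseg(); on a miss it issues
 * send_layoutget() -> nfs4_proc_layoutget(), and the server's reply is
 * fed back through pnfs_layout_process() below, which has the layout
 * driver allocate the new lseg, inserts it into the cache, and stores
 * the referenced segment through lgp->lsegpp.
 */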

int
pnfs_layout_process(struct nfs4_layoutget *lgp)
{
	struct pnfs_layout_hdr *lo = NFS_I(lgp->args.inode)->layout;
	struct nfs4_layoutget_res *res = &lgp->res;
	struct pnfs_layout_segment *lseg;
	struct inode *ino = lo->plh_inode;
	struct nfs_client *clp = NFS_SERVER(ino)->nfs_client;
	int status = 0;

	/* Inject layout blob into I/O device driver */
	lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res, lgp->gfp_flags);
	if (!lseg || IS_ERR(lseg)) {
		if (!lseg)
			status = -ENOMEM;
		else
			status = PTR_ERR(lseg);
		dprintk("%s: Could not allocate layout: error %d\n",
			__func__, status);
		goto out;
	}

	spin_lock(&ino->i_lock);
	if (test_bit(NFS4CLNT_LAYOUTRECALL, &clp->cl_state) ||
	    test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
		dprintk("%s forget reply due to recall\n", __func__);
		goto out_forget_reply;
	}

	if (pnfs_layoutgets_blocked(lo, &res->stateid, 1)) {
		dprintk("%s forget reply due to state\n", __func__);
		goto out_forget_reply;
	}
	init_lseg(lo, lseg);
	lseg->pls_range = res->range;
	*lgp->lsegpp = get_lseg(lseg);
	pnfs_insert_layout(lo, lseg);

	if (res->return_on_close) {
		set_bit(NFS_LSEG_ROC, &lseg->pls_flags);
		set_bit(NFS_LAYOUT_ROC, &lo->plh_flags);
	}

	/* Done processing layoutget. Set the layout stateid */
	pnfs_set_layout_stateid(lo, &res->stateid, false);
	spin_unlock(&ino->i_lock);
out:
	return status;

out_forget_reply:
	spin_unlock(&ino->i_lock);
	lseg->pls_layout = lo;
	NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
	goto out;
}

bool
pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
		     struct nfs_page *req)
{
	enum pnfs_iomode access_type;
	gfp_t gfp_flags;

	/* We assume that pg_ioflags == 0 iff we're reading a page */
	if (pgio->pg_ioflags == 0) {
		access_type = IOMODE_READ;
		gfp_flags = GFP_KERNEL;
	} else {
		access_type = IOMODE_RW;
		gfp_flags = GFP_NOFS;
	}

	if (pgio->pg_lseg == NULL) {
		if (pgio->pg_count != prev->wb_bytes)
			return true;
		/* This is the first coalesce call for a series of nfs_pages */
		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
						   prev->wb_context,
						   req_offset(prev),
						   pgio->pg_count,
						   access_type,
						   gfp_flags);
		if (pgio->pg_lseg == NULL)
			return true;
	}

	/*
	 * Test if a nfs_page is fully contained in the pnfs_layout_range.
	 * Note that this test makes several assumptions:
	 * - that the previous nfs_page in the struct nfs_pageio_descriptor
	 *   is known to lie within the range.
	 * - that the nfs_page being tested is known to be contiguous with
	 *   the previous nfs_page.
	 * - Layout ranges are page aligned, so we only have to test the
	 *   start offset of the request.
	 *
	 * Please also note that 'end_offset' is actually the offset of the
	 * first byte that lies outside the pnfs_layout_range. FIXME?
	 */
	return req_offset(req) < end_offset(pgio->pg_lseg->pls_range.offset,
					    pgio->pg_lseg->pls_range.length);
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_test);
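
/*
 * A layout driver is expected to call pnfs_generic_pg_test() from its own
 * pg_test hook and then layer any driver-specific coalescing limits (for
 * example stripe boundaries) on top of it; that usage lives outside this
 * file and is noted here only as an assumption.
 */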

/*
 * Called by non rpc-based layout drivers
 */
int
pnfs_ld_write_done(struct nfs_write_data *data)
{
	int status;

	if (!data->pnfs_error) {
		pnfs_set_layoutcommit(data);
		data->mds_ops->rpc_call_done(&data->task, data);
		data->mds_ops->rpc_release(data);
		return 0;
	}

	if (NFS_SERVER(data->inode)->pnfs_curr_ld->flags &
						PNFS_LAYOUTRET_ON_ERROR) {
		/* Don't layoutcommit on error; the server will need to
		 * perform file recovery.
		 */
		clear_bit(NFS_INO_LAYOUTCOMMIT, &NFS_I(data->inode)->flags);
		pnfs_return_layout(data->inode);
	}

	dprintk("%s: pnfs_error=%d, retry via MDS\n", __func__,
		data->pnfs_error);
	status = nfs_initiate_write(data, NFS_CLIENT(data->inode),
				    data->mds_ops, NFS_FILE_SYNC);
	return status ? : -EAGAIN;
}
EXPORT_SYMBOL_GPL(pnfs_ld_write_done);

enum pnfs_try_status
pnfs_try_to_write_data(struct nfs_write_data *wdata,
			const struct rpc_call_ops *call_ops, int how)
{
	struct inode *inode = wdata->inode;
	enum pnfs_try_status trypnfs;
	struct nfs_server *nfss = NFS_SERVER(inode);

	wdata->mds_ops = call_ops;

	dprintk("%s: Writing ino:%lu %u@%llu (how %d)\n", __func__,
		inode->i_ino, wdata->args.count, wdata->args.offset, how);

	trypnfs = nfss->pnfs_curr_ld->write_pagelist(wdata, how);
	if (trypnfs == PNFS_NOT_ATTEMPTED) {
		put_lseg(wdata->lseg);
		wdata->lseg = NULL;
	} else
		nfs_inc_stats(inode, NFSIOS_PNFS_WRITE);

	dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
	return trypnfs;
}

/*
 * Called by non rpc-based layout drivers
 */
int
pnfs_ld_read_done(struct nfs_read_data *data)
{
	int status;

	if (!data->pnfs_error) {
		__nfs4_read_done_cb(data);
		data->mds_ops->rpc_call_done(&data->task, data);
		data->mds_ops->rpc_release(data);
		return 0;
	}

	if (NFS_SERVER(data->inode)->pnfs_curr_ld->flags &
						PNFS_LAYOUTRET_ON_ERROR)
		pnfs_return_layout(data->inode);

	dprintk("%s: pnfs_error=%d, retry via MDS\n", __func__,
		data->pnfs_error);
	status = nfs_initiate_read(data, NFS_CLIENT(data->inode),
				   data->mds_ops);
	return status ? : -EAGAIN;
}
EXPORT_SYMBOL_GPL(pnfs_ld_read_done);

/*
 * Call the appropriate parallel I/O subsystem read function.
 */
enum pnfs_try_status
pnfs_try_to_read_data(struct nfs_read_data *rdata,
		       const struct rpc_call_ops *call_ops)
{
	struct inode *inode = rdata->inode;
	struct nfs_server *nfss = NFS_SERVER(inode);
	enum pnfs_try_status trypnfs;

	rdata->mds_ops = call_ops;

	dprintk("%s: Reading ino:%lu %u@%llu\n",
		__func__, inode->i_ino, rdata->args.count, rdata->args.offset);

	trypnfs = nfss->pnfs_curr_ld->read_pagelist(rdata);
	if (trypnfs == PNFS_NOT_ATTEMPTED) {
		put_lseg(rdata->lseg);
		rdata->lseg = NULL;
	} else {
		nfs_inc_stats(inode, NFSIOS_PNFS_READ);
	}
	dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
	return trypnfs;
}

/*
 * There can be multiple RW segments.
 */
static void pnfs_list_write_lseg(struct inode *inode, struct list_head *listp)
{
	struct pnfs_layout_segment *lseg;

	list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list) {
		if (lseg->pls_range.iomode == IOMODE_RW &&
		    test_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
			list_add(&lseg->pls_lc_list, listp);
	}
}

void
pnfs_set_layoutcommit(struct nfs_write_data *wdata)
{
	struct nfs_inode *nfsi = NFS_I(wdata->inode);
	loff_t end_pos = wdata->mds_offset + wdata->res.count;
	bool mark_as_dirty = false;

	spin_lock(&nfsi->vfs_inode.i_lock);
	if (!test_and_set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
		mark_as_dirty = true;
		dprintk("%s: Set layoutcommit for inode %lu ",
			__func__, wdata->inode->i_ino);
	}
	if (!test_and_set_bit(NFS_LSEG_LAYOUTCOMMIT, &wdata->lseg->pls_flags)) {
		/* references matched in nfs4_layoutcommit_release */
		get_lseg(wdata->lseg);
	}
	if (end_pos > nfsi->layout->plh_lwb)
		nfsi->layout->plh_lwb = end_pos;
	spin_unlock(&nfsi->vfs_inode.i_lock);
	dprintk("%s: lseg %p end_pos %llu\n",
		__func__, wdata->lseg, nfsi->layout->plh_lwb);

	/* if pnfs_layoutcommit_inode() runs between inode locks, the next one
	 * will be a noop because NFS_INO_LAYOUTCOMMIT will not be set */
	if (mark_as_dirty)
		mark_inode_dirty_sync(wdata->inode);
}
EXPORT_SYMBOL_GPL(pnfs_set_layoutcommit);

/*
 * For the LAYOUT4_NFSV4_1_FILES layout type, NFS_DATA_SYNC WRITEs and
 * NFS_UNSTABLE WRITEs with a COMMIT to data servers must store enough
 * data to disk to allow the server to recover the data if it crashes.
 * LAYOUTCOMMIT is only needed when the NFL4_UFLG_COMMIT_THRU_MDS flag
 * is off, and a COMMIT is sent to a data server, or
 * if WRITEs to a data server return NFS_DATA_SYNC.
 */
int
pnfs_layoutcommit_inode(struct inode *inode, bool sync)
{
	struct nfs4_layoutcommit_data *data;
	struct nfs_inode *nfsi = NFS_I(inode);
	loff_t end_pos;
	int status = 0;

	dprintk("--> %s inode %lu\n", __func__, inode->i_ino);

	if (!test_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
		return 0;

	/* Note kzalloc ensures data->res.seq_res.sr_slot == NULL */
	data = kzalloc(sizeof(*data), GFP_NOFS);
	if (!data) {
		mark_inode_dirty_sync(inode);
		status = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&data->lseg_list);
	spin_lock(&inode->i_lock);
	if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
		spin_unlock(&inode->i_lock);
		kfree(data);
		goto out;
	}

	pnfs_list_write_lseg(inode, &data->lseg_list);

	end_pos = nfsi->layout->plh_lwb;
	nfsi->layout->plh_lwb = 0;

	memcpy(&data->args.stateid.data, nfsi->layout->plh_stateid.data,
		sizeof(nfsi->layout->plh_stateid.data));
	spin_unlock(&inode->i_lock);

	data->args.inode = inode;
	data->cred = get_rpccred(nfsi->layout->plh_lc_cred);
	nfs_fattr_init(&data->fattr);
	data->args.bitmask = NFS_SERVER(inode)->cache_consistency_bitmask;
	data->res.fattr = &data->fattr;
	data->args.lastbytewritten = end_pos - 1;
	data->res.server = NFS_SERVER(inode);

	status = nfs4_proc_layoutcommit(data, sync);
out:
	dprintk("<-- %s status %d\n", __func__, status);
	return status;
}
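
/*
 * How the two halves above are expected to meet (their callers live
 * outside this file): pnfs_set_layoutcommit() records the last write byte
 * and marks the inode dirty, and the NFSv4 inode writeback path is then
 * assumed to call pnfs_layoutcommit_inode() to push the LAYOUTCOMMIT to
 * the server.
 */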