file.c

#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/namei.h>
#include <linux/writeback.h>

#include "super.h"
#include "mds_client.h"

/*
 * Ceph file operations
 *
 * Implement basic open/close functionality, and implement
 * read/write.
 *
 * We implement three modes of file I/O:
 *  - buffered uses the generic_file_aio_{read,write} helpers
 *
 *  - synchronous is used when there is multi-client read/write
 *    sharing, avoids the page cache, and synchronously waits for an
 *    ack from the OSD.
 *
 *  - direct io takes the variant of the sync path that references
 *    user pages directly.
 *
 * fsync() flushes and waits on dirty pages, but just queues metadata
 * for writeback: since the MDS can recover size and mtime there is no
 * need to wait for MDS acknowledgement.
 */

/*
 * Prepare an open request. Preallocate ceph_cap to avoid an
 * inopportune ENOMEM later.
 */
static struct ceph_mds_request *
prepare_open_request(struct super_block *sb, int flags, int create_mode)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int want_auth = USE_ANY_MDS;
	int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN;

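	/*
	 * Opens that may modify the inode (write, create, truncate) must
	 * go to the inode's authoritative MDS; read-only opens can be
	 * handled by any MDS holding a replica of the metadata.
	 */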
	if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC))
		want_auth = USE_AUTH_MDS;

	req = ceph_mdsc_create_request(mdsc, op, want_auth);
	if (IS_ERR(req))
		goto out;
	req->r_fmode = ceph_flags_to_mode(flags);
	req->r_args.open.flags = cpu_to_le32(flags);
	req->r_args.open.mode = cpu_to_le32(create_mode);
	req->r_args.open.preferred = cpu_to_le32(-1);
out:
	return req;
}

/*
 * initialize private struct file data.
 * if we fail, clean up by dropping fmode reference on the ceph_inode
 */
static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
{
	struct ceph_file_info *cf;
	int ret = 0;

	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
	case S_IFDIR:
		dout("init_file %p %p 0%o (regular)\n", inode, file,
		     inode->i_mode);
		cf = kmem_cache_alloc(ceph_file_cachep, GFP_NOFS | __GFP_ZERO);
		if (cf == NULL) {
			ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
			return -ENOMEM;
		}
		cf->fmode = fmode;
		cf->next_offset = 2;
		file->private_data = cf;
		BUG_ON(inode->i_fop->release != ceph_release);
		break;

	case S_IFLNK:
		dout("init_file %p %p 0%o (symlink)\n", inode, file,
		     inode->i_mode);
		ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
		break;

	default:
		dout("init_file %p %p 0%o (special)\n", inode, file,
		     inode->i_mode);
		/*
		 * we need to drop the open ref now, since we don't
		 * have .release set to ceph_release.
		 */
		ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
		BUG_ON(inode->i_fop->release == ceph_release);
		/* call the proper open fop */
		ret = inode->i_fop->open(inode, file);
	}
	return ret;
}

/*
 * If the filp already has private_data, that means the file was
 * already opened by intent during lookup, and we do nothing.
 *
 * If we already have the requisite capabilities, we can satisfy
 * the open request locally (no need to request new caps from the
 * MDS). We do, however, need to inform the MDS (asynchronously)
 * if our wanted caps set expands.
 */
int ceph_open(struct inode *inode, struct file *file)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct ceph_file_info *cf = file->private_data;
	struct inode *parent_inode = NULL;
	int err;
	int flags, fmode, wanted;

	if (cf) {
		dout("open file %p is already opened\n", file);
		return 0;
	}

	/* filter out O_CREAT|O_EXCL; vfs did that already. yuck. */
	flags = file->f_flags & ~(O_CREAT|O_EXCL);
	if (S_ISDIR(inode->i_mode))
		flags = O_DIRECTORY;  /* mds likes to know */

	dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode,
	     ceph_vinop(inode), file, flags, file->f_flags);
	fmode = ceph_flags_to_mode(flags);
	wanted = ceph_caps_for_mode(fmode);

	/* snapped files are read-only */
	if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE))
		return -EROFS;

	/* trivially open snapdir */
	if (ceph_snap(inode) == CEPH_SNAPDIR) {
		spin_lock(&ci->i_ceph_lock);
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&ci->i_ceph_lock);
		return ceph_init_file(inode, file, fmode);
	}

	/*
	 * No need to block if we have caps on the auth MDS (for
	 * write) or any MDS (for read). Update wanted set
	 * asynchronously.
	 */
	spin_lock(&ci->i_ceph_lock);
	if (__ceph_is_any_real_caps(ci) &&
	    (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) {
		int mds_wanted = __ceph_caps_mds_wanted(ci);
		int issued = __ceph_caps_issued(ci, NULL);

		dout("open %p fmode %d want %s issued %s using existing\n",
		     inode, fmode, ceph_cap_string(wanted),
		     ceph_cap_string(issued));
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&ci->i_ceph_lock);

		/* adjust wanted? */
		if ((issued & wanted) != wanted &&
		    (mds_wanted & wanted) != wanted &&
		    ceph_snap(inode) != CEPH_SNAPDIR)
			ceph_check_caps(ci, 0, NULL);

		return ceph_init_file(inode, file, fmode);
	} else if (ceph_snap(inode) != CEPH_NOSNAP &&
		   (ci->i_snap_caps & wanted) == wanted) {
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&ci->i_ceph_lock);
		return ceph_init_file(inode, file, fmode);
	}
	spin_unlock(&ci->i_ceph_lock);

	dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
	req = prepare_open_request(inode->i_sb, flags, 0);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_inode = inode;
	ihold(inode);
	req->r_num_caps = 1;
	if (flags & (O_CREAT|O_TRUNC))
		parent_inode = ceph_get_dentry_parent_inode(file->f_dentry);
	err = ceph_mdsc_do_request(mdsc, parent_inode, req);
	iput(parent_inode);
	if (!err)
		err = ceph_init_file(inode, file, req->r_fmode);
	ceph_mdsc_put_request(req);
	dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
out:
	return err;
}

/*
 * Do a lookup + open with a single request.
 *
 * If this succeeds, but some subsequent check in the vfs
 * may_open() fails, the struct *file gets cleaned up (i.e.
 * ceph_release gets called). So fear not!
 */
/*
 * flags
 *  path_lookup_open   -> LOOKUP_OPEN
 *  path_lookup_create -> LOOKUP_OPEN|LOOKUP_CREATE
 */
struct dentry *ceph_lookup_open(struct inode *dir, struct dentry *dentry,
				struct nameidata *nd, int mode,
				int locked_dir)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct file *file;
	struct ceph_mds_request *req;
	struct dentry *ret;
	int err;
	int flags = nd->intent.open.flags;

	dout("ceph_lookup_open dentry %p '%.*s' flags %d mode 0%o\n",
	     dentry, dentry->d_name.len, dentry->d_name.name, flags, mode);

	/* do the open */
	req = prepare_open_request(dir->i_sb, flags, mode);
	if (IS_ERR(req))
		return ERR_CAST(req);
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	if (flags & O_CREAT) {
		req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
		req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	}
	req->r_locked_dir = dir;           /* caller holds dir->i_mutex */
	err = ceph_mdsc_do_request(mdsc,
				   (flags & (O_CREAT|O_TRUNC)) ? dir : NULL,
				   req);
	err = ceph_handle_snapdir(req, dentry, err);
	if (err)
		goto out;
	if ((flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	if (err)
		goto out;
	file = lookup_instantiate_filp(nd, req->r_dentry, ceph_open);
	if (IS_ERR(file))
		err = PTR_ERR(file);
out:
	ret = ceph_finish_lookup(req, dentry, err);
	ceph_mdsc_put_request(req);
	dout("ceph_lookup_open result=%p\n", ret);
	return ret;
}

int ceph_release(struct inode *inode, struct file *file)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_file_info *cf = file->private_data;

	dout("release inode %p file %p\n", inode, file);
	ceph_put_fmode(ci, cf->fmode);
	if (cf->last_readdir)
		ceph_mdsc_put_request(cf->last_readdir);
	kfree(cf->last_name);
	kfree(cf->dir_info);
	dput(cf->dentry);
	kmem_cache_free(ceph_file_cachep, cf);

	/* wake up anyone waiting for caps on this inode */
	wake_up_all(&ci->i_cap_wq);
	return 0;
}

/*
 * Read a range of bytes striped over one or more objects. Iterate over
 * objects we stripe over. (That's not atomic, but good enough for now.)
 *
 * If we get a short result from the OSD, check against i_size; we need to
 * only return a short read to the caller if we hit EOF.
 */
static int striped_read(struct inode *inode,
			u64 off, u64 len,
			struct page **pages, int num_pages,
			int *checkeof, bool o_direct,
			unsigned long buf_align)
{
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_inode_info *ci = ceph_inode(inode);
	u64 pos, this_len;
	int io_align, page_align;
	int left, pages_left;
	int read;
	struct page **page_pos;
	int ret;
	bool hit_stripe, was_short;

	/*
	 * we may need to do multiple reads. not atomic, unfortunately.
	 */
	pos = off;
	left = len;
	page_pos = pages;
	pages_left = num_pages;
	read = 0;
	io_align = off & ~PAGE_MASK;

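	/*
	 * Alignment sketch for the O_DIRECT case below (assuming 4 KB
	 * pages): say off = 0x1100 and the user buffer sits at offset
	 * 0x300 within its page, so io_align = 0x100 and buf_align =
	 * 0x300.  On the first pass pos == off, and page_align =
	 * (0x1100 - 0x100 + 0x300) & 0xfff = 0x300, i.e. the data's
	 * offset inside the first user page.  For buffered reads the
	 * pages were allocated for 'off', so the offset into the
	 * current page is just pos & ~PAGE_MASK.
	 */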
more:
	if (o_direct)
		page_align = (pos - io_align + buf_align) & ~PAGE_MASK;
	else
		page_align = pos & ~PAGE_MASK;
	this_len = left;
	ret = ceph_osdc_readpages(&fsc->client->osdc, ceph_vino(inode),
				  &ci->i_layout, pos, &this_len,
				  ci->i_truncate_seq,
				  ci->i_truncate_size,
				  page_pos, pages_left, page_align);
	if (ret == -ENOENT)
		ret = 0;
	hit_stripe = this_len < left;
	was_short = ret >= 0 && ret < this_len;
	dout("striped_read %llu~%u (read %u) got %d%s%s\n", pos, left, read,
	     ret, hit_stripe ? " HITSTRIPE" : "", was_short ? " SHORT" : "");

	if (ret > 0) {
		int didpages = (page_align + ret) >> PAGE_CACHE_SHIFT;

		if (read < pos - off) {
			dout(" zero gap %llu to %llu\n", off + read, pos);
			ceph_zero_page_vector_range(page_align + read,
						    pos - off - read, pages);
		}
		pos += ret;
		read = pos - off;
		left -= ret;
		page_pos += didpages;
		pages_left -= didpages;

		/* hit stripe? */
		if (left && hit_stripe)
			goto more;
	}

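	/*
	 * A short reply that still lies inside i_size typically means the
	 * backing object is sparse or missing (a hole); those bytes must
	 * read back as zeros, so they are filled in below instead of being
	 * returned to the caller as a short read.
	 */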
	if (was_short) {
		/* did we bounce off eof? */
		if (pos + left > inode->i_size)
			*checkeof = 1;

		/* zero trailing bytes (inside i_size) */
		if (left > 0 && pos < inode->i_size) {
			if (pos + left > inode->i_size)
				left = inode->i_size - pos;

			dout("zero tail %d\n", left);
			ceph_zero_page_vector_range(page_align + read, left,
						    pages);
			read += left;
		}
	}

	if (ret >= 0)
		ret = read;
	dout("striped_read returns %d\n", ret);
	return ret;
}

/*
 * Completely synchronous read and write methods. Direct from __user
 * buffer to osd, or directly to user pages (if O_DIRECT).
 *
 * If the read spans object boundary, just do multiple reads.
 */
static ssize_t ceph_sync_read(struct file *file, char __user *data,
			      unsigned len, loff_t *poff, int *checkeof)
{
	struct inode *inode = file->f_dentry->d_inode;
	struct page **pages;
	u64 off = *poff;
	int num_pages, ret;

	dout("sync_read on file %p %llu~%u %s\n", file, off, len,
	     (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");

	if (file->f_flags & O_DIRECT) {
		num_pages = calc_pages_for((unsigned long)data, len);
		pages = ceph_get_direct_page_vector(data, num_pages, true);
	} else {
		num_pages = calc_pages_for(off, len);
		pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
	}
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	/*
	 * flush any page cache pages in this range. this
	 * will make concurrent normal and sync io slow,
	 * but it will at least behave sensibly when they are
	 * in sequence.
	 */
	ret = filemap_write_and_wait(inode->i_mapping);
	if (ret < 0)
		goto done;

	ret = striped_read(inode, off, len, pages, num_pages, checkeof,
			   file->f_flags & O_DIRECT,
			   (unsigned long)data & ~PAGE_MASK);

	if (ret >= 0 && (file->f_flags & O_DIRECT) == 0)
		ret = ceph_copy_page_vector_to_user(pages, data, off, ret);
	if (ret >= 0)
		*poff = off + ret;

done:
	if (file->f_flags & O_DIRECT)
		ceph_put_page_vector(pages, num_pages, true);
	else
		ceph_release_page_vector(pages, num_pages);
	dout("sync_read result %d\n", ret);
	return ret;
}

/*
 * Write commit callback, called if we requested both an ACK and
 * ONDISK commit reply from the OSD.
 */
static void sync_write_commit(struct ceph_osd_request *req,
			      struct ceph_msg *msg)
{
	struct ceph_inode_info *ci = ceph_inode(req->r_inode);

	dout("sync_write_commit %p tid %llu\n", req, req->r_tid);
	spin_lock(&ci->i_unsafe_lock);
	list_del_init(&req->r_unsafe_item);
	spin_unlock(&ci->i_unsafe_lock);
	ceph_put_cap_refs(ci, CEPH_CAP_FILE_WR);
}

/*
 * Synchronous write, straight from __user pointer or user pages (if
 * O_DIRECT).
 *
 * If write spans object boundary, just do multiple writes. (For a
 * correct atomic write, we should e.g. take write locks on all
 * objects, rollback on failure, etc.)
 */
static ssize_t ceph_sync_write(struct file *file, const char __user *data,
			       size_t left, loff_t *offset)
{
	struct inode *inode = file->f_dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_osd_request *req;
	struct page **pages;
	int num_pages;
	long long unsigned pos;
	u64 len;
	int written = 0;
	int flags;
	int do_sync = 0;
	int check_caps = 0;
	int page_align, io_align;
	unsigned long buf_align;
	int ret;
	struct timespec mtime = CURRENT_TIME;

	if (ceph_snap(file->f_dentry->d_inode) != CEPH_NOSNAP)
		return -EROFS;

	dout("sync_write on file %p %lld~%u %s\n", file, *offset,
	     (unsigned)left, (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");

	if (file->f_flags & O_APPEND)
		pos = i_size_read(inode);
	else
		pos = *offset;

	ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + left);
	if (ret < 0)
		return ret;

	ret = invalidate_inode_pages2_range(inode->i_mapping,
					    pos >> PAGE_CACHE_SHIFT,
					    (pos + left) >> PAGE_CACHE_SHIFT);
	if (ret < 0)
		dout("invalidate_inode_pages2_range returned %d\n", ret);

	flags = CEPH_OSD_FLAG_ORDERSNAP |
		CEPH_OSD_FLAG_ONDISK |
		CEPH_OSD_FLAG_WRITE;
	if ((file->f_flags & (O_SYNC|O_DIRECT)) == 0)
		flags |= CEPH_OSD_FLAG_ACK;
	else
		do_sync = 1;

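	/*
	 * With ACK requested (no O_SYNC/O_DIRECT), the wait further down
	 * returns once the OSD has the write in memory; the on-disk commit
	 * arrives later through req->r_safe_callback, which removes the
	 * request from the inode's unsafe-write list.  With only ONDISK
	 * set we wait for the commit itself before returning.
	 */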
	/*
	 * we may need to do multiple writes here if we span an object
	 * boundary. this isn't atomic, unfortunately. :(
	 */
more:
	io_align = pos & ~PAGE_MASK;
	buf_align = (unsigned long)data & ~PAGE_MASK;
	len = left;
	if (file->f_flags & O_DIRECT) {
		/* write from beginning of first page,
		   regardless of io alignment */
		page_align = (pos - io_align + buf_align) & ~PAGE_MASK;
		num_pages = calc_pages_for((unsigned long)data, len);
	} else {
		page_align = pos & ~PAGE_MASK;
		num_pages = calc_pages_for(pos, len);
	}
	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
				    ceph_vino(inode), pos, &len,
				    CEPH_OSD_OP_WRITE, flags,
				    ci->i_snap_realm->cached_context,
				    do_sync,
				    ci->i_truncate_seq, ci->i_truncate_size,
				    &mtime, false, 2, page_align);
	if (IS_ERR(req))
		return PTR_ERR(req);

	if (file->f_flags & O_DIRECT) {
		pages = ceph_get_direct_page_vector(data, num_pages, false);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto out;
		}

		/*
		 * throw out any page cache pages in this range. this
		 * may block.
		 */
		truncate_inode_pages_range(inode->i_mapping, pos,
					   (pos+len) | (PAGE_CACHE_SIZE-1));
	} else {
		pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto out;
		}
		ret = ceph_copy_user_to_page_vector(pages, data, pos, len);
		if (ret < 0) {
			ceph_release_page_vector(pages, num_pages);
			goto out;
		}

		if ((file->f_flags & O_SYNC) == 0) {
			/* get a second commit callback */
			req->r_safe_callback = sync_write_commit;
			req->r_own_pages = 1;
		}
	}
	req->r_pages = pages;
	req->r_num_pages = num_pages;
	req->r_inode = inode;

	ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
	if (!ret) {
		if (req->r_safe_callback) {
			/*
			 * Add to inode unsafe list only after we
			 * start_request so that a tid has been assigned.
			 */
			spin_lock(&ci->i_unsafe_lock);
			list_add_tail(&req->r_unsafe_item,
				      &ci->i_unsafe_writes);
			spin_unlock(&ci->i_unsafe_lock);
			ceph_get_cap_refs(ci, CEPH_CAP_FILE_WR);
		}

		ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
		if (ret < 0 && req->r_safe_callback) {
			spin_lock(&ci->i_unsafe_lock);
			list_del_init(&req->r_unsafe_item);
			spin_unlock(&ci->i_unsafe_lock);
			ceph_put_cap_refs(ci, CEPH_CAP_FILE_WR);
		}
	}

	if (file->f_flags & O_DIRECT)
		ceph_put_page_vector(pages, num_pages, false);
	else if (file->f_flags & O_SYNC)
		ceph_release_page_vector(pages, num_pages);

out:
	ceph_osdc_put_request(req);
	if (ret == 0) {
		pos += len;
		written += len;
		left -= len;
		data += len;	/* advance by what this pass wrote, not the total */
		if (left)
			goto more;

		ret = written;
		*offset = pos;
		if (pos > i_size_read(inode))
			check_caps = ceph_inode_set_size(inode, pos);
		if (check_caps)
			ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY,
					NULL);
	}
	return ret;
}

/*
 * Wrap generic_file_aio_read with checks for cap bits on the inode.
 * Atomically grab references, so that those bits are not released
 * back to the MDS mid-read.
 *
 * Hmm, the sync read case isn't actually async... should it be?
 */
static ssize_t ceph_aio_read(struct kiocb *iocb, const struct iovec *iov,
			     unsigned long nr_segs, loff_t pos)
{
	struct file *filp = iocb->ki_filp;
	struct ceph_file_info *fi = filp->private_data;
	loff_t *ppos = &iocb->ki_pos;
	size_t len = iov->iov_len;
	struct inode *inode = filp->f_dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	void __user *base = iov->iov_base;
	ssize_t ret;
	int want, got = 0;
	int checkeof = 0, read = 0;

	dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
	     inode, ceph_vinop(inode), pos, (unsigned)len, inode);
again:
	__ceph_do_pending_vmtruncate(inode);
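	/*
	 * Fc (FILE_CACHE) lets this client satisfy reads from the page
	 * cache; LAZYIO relaxes that consistency for files using lazy I/O.
	 * If neither cap is granted (e.g. other clients are writing), we
	 * fall through to the synchronous read path below.
	 */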
	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_CACHE;
	ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want, &got, -1);
	if (ret < 0)
		goto out;
	dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, (unsigned)len,
	     ceph_cap_string(got));

	if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
	    (iocb->ki_filp->f_flags & O_DIRECT) ||
	    (inode->i_sb->s_flags & MS_SYNCHRONOUS) ||
	    (fi->flags & CEPH_F_SYNC))
		/* hmm, this isn't really async... */
		ret = ceph_sync_read(filp, base, len, ppos, &checkeof);
	else
		ret = generic_file_aio_read(iocb, iov, nr_segs, pos);

out:
	dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
	     inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
	ceph_put_cap_refs(ci, got);

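	/*
	 * checkeof is set when the sync read came up short at what looked
	 * like EOF.  Refetch the size from the MDS; if the file is in fact
	 * larger (another client extended it), advance past what we already
	 * copied and go around again for the remainder.
	 */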
	if (checkeof && ret >= 0) {
		int statret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE);

		/* hit EOF or hole? */
		if (statret == 0 && *ppos < inode->i_size) {
			dout("aio_read sync_read hit hole, ppos %lld < size %lld, reading more\n",
			     *ppos, inode->i_size);
			read += ret;
			base += ret;
			len -= ret;
			checkeof = 0;
			goto again;
		}
	}
	if (ret >= 0)
		ret += read;

	return ret;
}

/*
 * Take cap references to avoid releasing caps to MDS mid-write.
 *
 * If we are synchronous, and write with an old snap context, the OSD
 * may return EOLDSNAPC. In that case, retry the write.. _after_
 * dropping our cap refs and allowing the pending snap to logically
 * complete _before_ this write occurs.
 *
 * If we are near ENOSPC, write synchronously.
 */
static ssize_t ceph_aio_write(struct kiocb *iocb, const struct iovec *iov,
			      unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file->f_dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_osd_client *osdc =
		&ceph_sb_to_client(inode->i_sb)->client->osdc;
	loff_t endoff = pos + iov->iov_len;
	int want, got = 0;
	int ret, err;

	if (ceph_snap(inode) != CEPH_NOSNAP)
		return -EROFS;

retry_snap:
	if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL))
		return -ENOSPC;
	__ceph_do_pending_vmtruncate(inode);
	dout("aio_write %p %llx.%llx %llu~%u getting caps. i_size %llu\n",
	     inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len,
	     inode->i_size);
	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_BUFFER;
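	/*
	 * Passing endoff lets the cap code check the write against the
	 * max_size the MDS has granted us and, if the write would go past
	 * it, block here while the MDS is asked to extend it.
	 */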
	ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, &got, endoff);
	if (ret < 0)
		goto out_put;

	dout("aio_write %p %llx.%llx %llu~%u got cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len,
	     ceph_cap_string(got));

	if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
	    (iocb->ki_filp->f_flags & O_DIRECT) ||
	    (inode->i_sb->s_flags & MS_SYNCHRONOUS) ||
	    (fi->flags & CEPH_F_SYNC)) {
		ret = ceph_sync_write(file, iov->iov_base, iov->iov_len,
				      &iocb->ki_pos);
	} else {
		/*
		 * buffered write; drop Fw early to avoid slow
		 * revocation if we get stuck on balance_dirty_pages
		 */
		int dirty;

		spin_lock(&ci->i_ceph_lock);
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR);
		spin_unlock(&ci->i_ceph_lock);
		ceph_put_cap_refs(ci, got);

		ret = generic_file_aio_write(iocb, iov, nr_segs, pos);
		if ((ret >= 0 || ret == -EIOCBQUEUED) &&
		    ((file->f_flags & O_SYNC) || IS_SYNC(file->f_mapping->host)
		     || ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_NEARFULL))) {
			err = vfs_fsync_range(file, pos, pos + ret - 1, 1);
			if (err < 0)
				ret = err;
		}

		if (dirty)
			__mark_inode_dirty(inode, dirty);
		goto out;
	}

	if (ret >= 0) {
		int dirty;

		spin_lock(&ci->i_ceph_lock);
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
	}

out_put:
	dout("aio_write %p %llx.%llx %llu~%u dropping cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len,
	     ceph_cap_string(got));
	ceph_put_cap_refs(ci, got);

out:
	if (ret == -EOLDSNAPC) {
		dout("aio_write %p %llx.%llx %llu~%u got EOLDSNAPC, retrying\n",
		     inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len);
		goto retry_snap;
	}

	return ret;
}

/*
 * llseek. be sure to verify file size on SEEK_END.
 */
static loff_t ceph_llseek(struct file *file, loff_t offset, int origin)
{
	struct inode *inode = file->f_mapping->host;
	int ret;

	mutex_lock(&inode->i_mutex);
	__ceph_do_pending_vmtruncate(inode);

	if (origin == SEEK_END || origin == SEEK_DATA || origin == SEEK_HOLE) {
		ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE);
		if (ret < 0) {
			offset = ret;
			goto out;
		}
	}

	switch (origin) {
	case SEEK_END:
		offset += inode->i_size;
		break;
	case SEEK_CUR:
		/*
		 * Here we special-case the lseek(fd, 0, SEEK_CUR)
		 * position-querying operation. Avoid rewriting the "same"
		 * f_pos value back to the file because a concurrent read(),
		 * write() or lseek() might have altered it
		 */
		if (offset == 0) {
			offset = file->f_pos;
			goto out;
		}
		offset += file->f_pos;
		break;
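	/*
	 * We do not track holes, so the whole range [0, i_size) is treated
	 * as data: SEEK_DATA succeeds at any offset inside the file and
	 * SEEK_HOLE always lands on the implicit hole at i_size.
	 */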
	case SEEK_DATA:
		if (offset >= inode->i_size) {
			offset = -ENXIO;	/* return the error, not the stale offset */
			goto out;
		}
		break;
	case SEEK_HOLE:
		if (offset >= inode->i_size) {
			offset = -ENXIO;	/* return the error, not the stale offset */
			goto out;
		}
		offset = inode->i_size;
		break;
	}

	if (offset < 0 || offset > inode->i_sb->s_maxbytes) {
		offset = -EINVAL;
		goto out;
	}

	/* Special lock needed here? */
	if (offset != file->f_pos) {
		file->f_pos = offset;
		file->f_version = 0;
	}

out:
	mutex_unlock(&inode->i_mutex);
	return offset;
}

const struct file_operations ceph_file_fops = {
	.open = ceph_open,
	.release = ceph_release,
	.llseek = ceph_llseek,
	.read = do_sync_read,
	.write = do_sync_write,
	.aio_read = ceph_aio_read,
	.aio_write = ceph_aio_write,
	.mmap = ceph_mmap,
	.fsync = ceph_fsync,
	.lock = ceph_lock,
	.flock = ceph_flock,
	.splice_read = generic_file_splice_read,
	.splice_write = generic_file_splice_write,
	.unlocked_ioctl = ceph_ioctl,
	.compat_ioctl = ceph_ioctl,
};