dir.c

#include <linux/ceph/ceph_debug.h>

#include <linux/spinlock.h>
#include <linux/fs_struct.h>
#include <linux/namei.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "super.h"
#include "mds_client.h"

/*
 * Directory operations: readdir, lookup, create, link, unlink,
 * rename, etc.
 */

/*
 * Ceph MDS operations are specified in terms of a base ino and
 * relative path.  Thus, the client can specify an operation on a
 * specific inode (e.g., a getattr due to fstat(2)), or as a path
 * relative to, say, the root directory.
 *
 * Normally, we limit ourselves to strict inode ops (no path component)
 * or dentry operations (a single path component relative to an ino).  The
 * exception to this is open_root_dentry(), which will open the mount
 * point by name.
 */
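
/*
 * Illustrative note (editorial, not upstream text): a dentry operation
 * such as unlink is expressed with the parent directory as the base
 * inode and the single dentry name as the relative path; see
 * ceph_unlink() below, where req->r_dentry supplies that one component.
 */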

const struct inode_operations ceph_dir_iops;
const struct file_operations ceph_dir_fops;
const struct dentry_operations ceph_dentry_ops;

/*
 * Initialize ceph dentry state.
 */
int ceph_init_dentry(struct dentry *dentry)
{
	struct ceph_dentry_info *di;

	if (dentry->d_fsdata)
		return 0;

	di = kmem_cache_alloc(ceph_dentry_cachep, GFP_NOFS | __GFP_ZERO);
	if (!di)
		return -ENOMEM;          /* oh well */

	spin_lock(&dentry->d_lock);
	if (dentry->d_fsdata) {
		/* lost a race */
		kmem_cache_free(ceph_dentry_cachep, di);
		goto out_unlock;
	}

	if (dentry->d_parent == NULL ||   /* nfs fh_to_dentry */
	    ceph_snap(dentry->d_parent->d_inode) == CEPH_NOSNAP)
		d_set_d_op(dentry, &ceph_dentry_ops);
	else if (ceph_snap(dentry->d_parent->d_inode) == CEPH_SNAPDIR)
		d_set_d_op(dentry, &ceph_snapdir_dentry_ops);
	else
		d_set_d_op(dentry, &ceph_snap_dentry_ops);

	di->dentry = dentry;
	di->lease_session = NULL;
	dentry->d_time = jiffies;
	/* avoid reordering d_fsdata setup so that the check above is safe */
	smp_mb();
	dentry->d_fsdata = di;
	ceph_dentry_lru_add(dentry);
out_unlock:
	spin_unlock(&dentry->d_lock);
	return 0;
}

struct inode *ceph_get_dentry_parent_inode(struct dentry *dentry)
{
	struct inode *inode = NULL;

	if (!dentry)
		return NULL;

	spin_lock(&dentry->d_lock);
	if (dentry->d_parent) {
		inode = dentry->d_parent->d_inode;
		ihold(inode);
	}
	spin_unlock(&dentry->d_lock);
	return inode;
}

/*
 * for readdir, we encode the directory frag and offset within that
 * frag into f_pos.
 */
static unsigned fpos_frag(loff_t p)
{
	return p >> 32;
}
static unsigned fpos_off(loff_t p)
{
	return p & 0xffffffff;
}
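
/*
 * For reference (editorial sketch, not part of the original file): the
 * inverse of the two helpers above is ceph_make_fpos(), declared in
 * super.h, which packs the frag into the high 32 bits and the
 * within-frag offset into the low 32 bits, roughly:
 *
 *	fpos = ((loff_t)frag << 32) | (loff_t)off;
 *
 * so f_pos values order first by frag and then by offset within the frag.
 */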

/*
 * When possible, we try to satisfy a readdir by peeking at the
 * dcache.  We make this work by carefully ordering dentries on
 * d_child when we initially get results back from the MDS, and
 * falling back to a "normal" sync readdir if any dentries in the dir
 * are dropped.
 *
 * D_COMPLETE indicates that we have all dentries in the dir.  It is
 * defined IFF we hold CEPH_CAP_FILE_SHARED (which will be revoked by
 * the MDS if/when the directory is modified).
 */
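/*
 * Editorial note (not upstream text): __dcache_readdir() below walks
 * parent->d_subdirs from the tail toward the head, using each
 * ceph_dentry_info's ->offset (assigned elsewhere when MDS readdir
 * results are prepopulated into the dcache) as the f_pos value for the
 * entry.  If D_COMPLETE is lost mid-walk it returns -EAGAIN and
 * ceph_readdir() falls back to a synchronous MDS readdir.
 */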
static int __dcache_readdir(struct file *filp,
			    void *dirent, filldir_t filldir)
{
	struct ceph_file_info *fi = filp->private_data;
	struct dentry *parent = filp->f_dentry;
	struct inode *dir = parent->d_inode;
	struct list_head *p;
	struct dentry *dentry, *last;
	struct ceph_dentry_info *di;
	int err = 0;

	/* claim ref on last dentry we returned */
	last = fi->dentry;
	fi->dentry = NULL;

	dout("__dcache_readdir %p at %llu (last %p)\n", dir, filp->f_pos,
	     last);

	spin_lock(&parent->d_lock);

	/* start at beginning? */
	if (filp->f_pos == 2 || last == NULL ||
	    filp->f_pos < ceph_dentry(last)->offset) {
		if (list_empty(&parent->d_subdirs))
			goto out_unlock;
		p = parent->d_subdirs.prev;
		dout(" initial p %p/%p\n", p->prev, p->next);
	} else {
		p = last->d_child.prev;
	}

more:
	dentry = list_entry(p, struct dentry, d_child);
	di = ceph_dentry(dentry);
	while (1) {
		dout(" p %p/%p %s d_subdirs %p/%p\n", p->prev, p->next,
		     d_unhashed(dentry) ? "!hashed" : "hashed",
		     parent->d_subdirs.prev, parent->d_subdirs.next);
		if (p == &parent->d_subdirs) {
			fi->flags |= CEPH_F_ATEND;
			goto out_unlock;
		}
		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
		if (!d_unhashed(dentry) && dentry->d_inode &&
		    ceph_snap(dentry->d_inode) != CEPH_SNAPDIR &&
		    ceph_ino(dentry->d_inode) != CEPH_INO_CEPH &&
		    filp->f_pos <= di->offset)
			break;
		dout(" skipping %p %.*s at %llu (%llu)%s%s\n", dentry,
		     dentry->d_name.len, dentry->d_name.name, di->offset,
		     filp->f_pos, d_unhashed(dentry) ? " unhashed" : "",
		     !dentry->d_inode ? " null" : "");
		spin_unlock(&dentry->d_lock);
		p = p->prev;
		dentry = list_entry(p, struct dentry, d_child);
		di = ceph_dentry(dentry);
	}

	dget_dlock(dentry);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&parent->d_lock);

	dout(" %llu (%llu) dentry %p %.*s %p\n", di->offset, filp->f_pos,
	     dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
	filp->f_pos = di->offset;
	err = filldir(dirent, dentry->d_name.name,
		      dentry->d_name.len, di->offset,
		      ceph_translate_ino(dentry->d_sb, dentry->d_inode->i_ino),
		      dentry->d_inode->i_mode >> 12);

	if (last) {
		if (err < 0) {
			/* remember our position */
			fi->dentry = last;
			fi->next_offset = di->offset;
		} else {
			dput(last);
		}
	}
	last = dentry;

	if (err < 0)
		goto out;

	filp->f_pos++;

	/* make sure a dentry wasn't dropped while we didn't have parent lock */
	if (!ceph_dir_test_complete(dir)) {
		dout(" lost D_COMPLETE on %p; falling back to mds\n", dir);
		err = -EAGAIN;
		goto out;
	}

	spin_lock(&parent->d_lock);
	p = p->prev;	/* advance to next dentry */
	goto more;

out_unlock:
	spin_unlock(&parent->d_lock);
out:
	if (last)
		dput(last);
	return err;
}

/*
 * make note of the last dentry we read, so we can
 * continue at the same lexicographical point,
 * regardless of what dir changes take place on the
 * server.
 */
static int note_last_dentry(struct ceph_file_info *fi, const char *name,
			    int len)
{
	kfree(fi->last_name);
	fi->last_name = kmalloc(len+1, GFP_NOFS);
	if (!fi->last_name)
		return -ENOMEM;
	memcpy(fi->last_name, name, len);
	fi->last_name[len] = 0;
	dout("note_last_dentry '%s'\n", fi->last_name);
	return 0;
}

static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
	struct ceph_file_info *fi = filp->private_data;
	struct inode *inode = filp->f_dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	unsigned frag = fpos_frag(filp->f_pos);
	int off = fpos_off(filp->f_pos);
	int err;
	u32 ftype;
	struct ceph_mds_reply_info_parsed *rinfo;
	const int max_entries = fsc->mount_options->max_readdir;
	const int max_bytes = fsc->mount_options->max_readdir_bytes;

	dout("readdir %p filp %p frag %u off %u\n", inode, filp, frag, off);
	if (fi->flags & CEPH_F_ATEND)
		return 0;

	/* always start with . and .. */
	if (filp->f_pos == 0) {
		/* note dir version at start of readdir so we can tell
		 * if any dentries get dropped */
		fi->dir_release_count = ci->i_release_count;

		dout("readdir off 0 -> '.'\n");
		if (filldir(dirent, ".", 1, ceph_make_fpos(0, 0),
			    ceph_translate_ino(inode->i_sb, inode->i_ino),
			    inode->i_mode >> 12) < 0)
			return 0;
		filp->f_pos = 1;
		off = 1;
	}
	if (filp->f_pos == 1) {
		ino_t ino = parent_ino(filp->f_dentry);
		dout("readdir off 1 -> '..'\n");
		if (filldir(dirent, "..", 2, ceph_make_fpos(0, 1),
			    ceph_translate_ino(inode->i_sb, ino),
			    inode->i_mode >> 12) < 0)
			return 0;
		filp->f_pos = 2;
		off = 2;
	}

	/* can we use the dcache? */
	spin_lock(&ci->i_ceph_lock);
	if ((filp->f_pos == 2 || fi->dentry) &&
	    !ceph_test_mount_opt(fsc, NOASYNCREADDIR) &&
	    ceph_snap(inode) != CEPH_SNAPDIR &&
	    ceph_dir_test_complete(inode) &&
	    __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) {
		spin_unlock(&ci->i_ceph_lock);
		err = __dcache_readdir(filp, dirent, filldir);
		if (err != -EAGAIN)
			return err;
	} else {
		spin_unlock(&ci->i_ceph_lock);
	}
	if (fi->dentry) {
		err = note_last_dentry(fi, fi->dentry->d_name.name,
				       fi->dentry->d_name.len);
		if (err)
			return err;
		dput(fi->dentry);
		fi->dentry = NULL;
	}

	/* proceed with a normal readdir */

more:
	/* do we have the correct frag content buffered? */
	if (fi->frag != frag || fi->last_readdir == NULL) {
		struct ceph_mds_request *req;
		int op = ceph_snap(inode) == CEPH_SNAPDIR ?
			CEPH_MDS_OP_LSSNAP : CEPH_MDS_OP_READDIR;

		/* discard old result, if any */
		if (fi->last_readdir) {
			ceph_mdsc_put_request(fi->last_readdir);
			fi->last_readdir = NULL;
		}

		/* requery frag tree, as the frag topology may have changed */
		frag = ceph_choose_frag(ceph_inode(inode), frag, NULL, NULL);

		dout("readdir fetching %llx.%llx frag %x offset '%s'\n",
		     ceph_vinop(inode), frag, fi->last_name);
		req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
		if (IS_ERR(req))
			return PTR_ERR(req);
		req->r_inode = inode;
		ihold(inode);
		req->r_dentry = dget(filp->f_dentry);
		/* hints to request -> mds selection code */
		req->r_direct_mode = USE_AUTH_MDS;
		req->r_direct_hash = ceph_frag_value(frag);
		req->r_direct_is_hash = true;
		req->r_path2 = kstrdup(fi->last_name, GFP_NOFS);
		req->r_readdir_offset = fi->next_offset;
		req->r_args.readdir.frag = cpu_to_le32(frag);
		req->r_args.readdir.max_entries = cpu_to_le32(max_entries);
		req->r_args.readdir.max_bytes = cpu_to_le32(max_bytes);
		req->r_num_caps = max_entries + 1;
		err = ceph_mdsc_do_request(mdsc, NULL, req);
		if (err < 0) {
			ceph_mdsc_put_request(req);
			return err;
		}
		dout("readdir got and parsed readdir result=%d"
		     " on frag %x, end=%d, complete=%d\n", err, frag,
		     (int)req->r_reply_info.dir_end,
		     (int)req->r_reply_info.dir_complete);

		if (!req->r_did_prepopulate) {
			dout("readdir !did_prepopulate");
			fi->dir_release_count--;    /* preclude D_COMPLETE */
		}

		/* note next offset and last dentry name */
		fi->offset = fi->next_offset;
		fi->last_readdir = req;

		if (req->r_reply_info.dir_end) {
			kfree(fi->last_name);
			fi->last_name = NULL;
			if (ceph_frag_is_rightmost(frag))
				fi->next_offset = 2;
			else
				fi->next_offset = 0;
		} else {
			rinfo = &req->r_reply_info;
			err = note_last_dentry(fi,
				       rinfo->dir_dname[rinfo->dir_nr-1],
				       rinfo->dir_dname_len[rinfo->dir_nr-1]);
			if (err)
				return err;
			fi->next_offset += rinfo->dir_nr;
		}
	}

	rinfo = &fi->last_readdir->r_reply_info;
	dout("readdir frag %x num %d off %d chunkoff %d\n", frag,
	     rinfo->dir_nr, off, fi->offset);
	while (off >= fi->offset && off - fi->offset < rinfo->dir_nr) {
		u64 pos = ceph_make_fpos(frag, off);
		struct ceph_mds_reply_inode *in =
			rinfo->dir_in[off - fi->offset].in;
		struct ceph_vino vino;
		ino_t ino;

		dout("readdir off %d (%d/%d) -> %lld '%.*s' %p\n",
		     off, off - fi->offset, rinfo->dir_nr, pos,
		     rinfo->dir_dname_len[off - fi->offset],
		     rinfo->dir_dname[off - fi->offset], in);
		BUG_ON(!in);
		ftype = le32_to_cpu(in->mode) >> 12;
		vino.ino = le64_to_cpu(in->ino);
		vino.snap = le64_to_cpu(in->snapid);
		ino = ceph_vino_to_ino(vino);
		if (filldir(dirent,
			    rinfo->dir_dname[off - fi->offset],
			    rinfo->dir_dname_len[off - fi->offset],
			    pos,
			    ceph_translate_ino(inode->i_sb, ino), ftype) < 0) {
			dout("filldir stopping us...\n");
			return 0;
		}
		off++;
		filp->f_pos = pos + 1;
	}

	if (fi->last_name) {
		ceph_mdsc_put_request(fi->last_readdir);
		fi->last_readdir = NULL;
		goto more;
	}

	/* more frags? */
	if (!ceph_frag_is_rightmost(frag)) {
		frag = ceph_frag_next(frag);
		off = 0;
		filp->f_pos = ceph_make_fpos(frag, off);
		dout("readdir next frag is %x\n", frag);
		goto more;
	}
	fi->flags |= CEPH_F_ATEND;

	/*
	 * if dir_release_count still matches the dir, no dentries
	 * were released during the whole readdir, and we should have
	 * the complete dir contents in our cache.
	 */
	spin_lock(&ci->i_ceph_lock);
	if (ci->i_release_count == fi->dir_release_count) {
		ceph_dir_set_complete(inode);
		ci->i_max_offset = filp->f_pos;
	}
	spin_unlock(&ci->i_ceph_lock);

	dout("readdir %p filp %p done.\n", inode, filp);
	return 0;
}

static void reset_readdir(struct ceph_file_info *fi)
{
	if (fi->last_readdir) {
		ceph_mdsc_put_request(fi->last_readdir);
		fi->last_readdir = NULL;
	}
	kfree(fi->last_name);
	fi->last_name = NULL;
	fi->next_offset = 2;  /* compensate for . and .. */
	if (fi->dentry) {
		dput(fi->dentry);
		fi->dentry = NULL;
	}
	fi->flags &= ~CEPH_F_ATEND;
}

static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int origin)
{
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file->f_mapping->host;
	loff_t old_offset = offset;
	loff_t retval;

	mutex_lock(&inode->i_mutex);
	retval = -EINVAL;
	switch (origin) {
	case SEEK_END:
		offset += inode->i_size + 2;   /* FIXME */
		break;
	case SEEK_CUR:
		offset += file->f_pos;
		/* fall through */
	case SEEK_SET:
		break;
	default:
		goto out;
	}

	if (offset >= 0 && offset <= inode->i_sb->s_maxbytes) {
		if (offset != file->f_pos) {
			file->f_pos = offset;
			file->f_version = 0;
			fi->flags &= ~CEPH_F_ATEND;
		}
		retval = offset;

		/*
		 * discard buffered readdir content on seekdir(0), or
		 * seek to new frag, or seek prior to current chunk.
		 */
		if (offset == 0 ||
		    fpos_frag(offset) != fpos_frag(old_offset) ||
		    fpos_off(offset) < fi->offset) {
			dout("dir_llseek dropping %p content\n", file);
			reset_readdir(fi);
		}

		/* bump dir_release_count if we did a forward seek */
		if (offset > old_offset)
			fi->dir_release_count--;
	}
out:
	mutex_unlock(&inode->i_mutex);
	return retval;
}

/*
 * Handle lookups for the hidden .snap directory.
 */
int ceph_handle_snapdir(struct ceph_mds_request *req,
			struct dentry *dentry, int err)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
	struct inode *parent = dentry->d_parent->d_inode; /* we hold i_mutex */

	/* .snap dir? */
	if (err == -ENOENT &&
	    ceph_snap(parent) == CEPH_NOSNAP &&
	    strcmp(dentry->d_name.name,
		   fsc->mount_options->snapdir_name) == 0) {
		struct inode *inode = ceph_get_snapdir(parent);
		dout("ENOENT on snapdir %p '%.*s', linking to snapdir %p\n",
		     dentry, dentry->d_name.len, dentry->d_name.name, inode);
		BUG_ON(!d_unhashed(dentry));
		d_add(dentry, inode);
		err = 0;
	}
	return err;
}

/*
 * Figure out final result of a lookup/open request.
 *
 * Mainly, make sure we return the final req->r_dentry (if it already
 * existed) in place of the original VFS-provided dentry when they
 * differ.
 *
 * Gracefully handle the case where the MDS replies with -ENOENT and
 * no trace (which it may do, at its discretion, e.g., if it doesn't
 * care to issue a lease on the negative dentry).
 */
struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
				  struct dentry *dentry, int err)
{
	if (err == -ENOENT) {
		/* no trace? */
		err = 0;
		if (!req->r_reply_info.head->is_dentry) {
			dout("ENOENT and no trace, dentry %p inode %p\n",
			     dentry, dentry->d_inode);
			if (dentry->d_inode) {
				d_drop(dentry);
				err = -ENOENT;
			} else {
				d_add(dentry, NULL);
			}
		}
	}
	if (err)
		dentry = ERR_PTR(err);
	else if (dentry != req->r_dentry)
		dentry = dget(req->r_dentry);   /* we got spliced */
	else
		dentry = NULL;
	return dentry;
}

static int is_root_ceph_dentry(struct inode *inode, struct dentry *dentry)
{
	return ceph_ino(inode) == CEPH_INO_ROOT &&
		strncmp(dentry->d_name.name, ".ceph", 5) == 0;
}

/*
 * Look up a single dir entry.  If there is a lookup intent, inform
 * the MDS so that it gets our 'caps wanted' value in a single op.
 */
static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
				  struct nameidata *nd)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int op;
	int err;

	dout("lookup %p dentry %p '%.*s'\n",
	     dir, dentry, dentry->d_name.len, dentry->d_name.name);

	if (dentry->d_name.len > NAME_MAX)
		return ERR_PTR(-ENAMETOOLONG);

	err = ceph_init_dentry(dentry);
	if (err < 0)
		return ERR_PTR(err);

	/* open (but not create!) intent? */
	if (nd &&
	    (nd->flags & LOOKUP_OPEN) &&
	    !(nd->intent.open.flags & O_CREAT)) {
		int mode = nd->intent.open.create_mode & ~current->fs->umask;
		return ceph_lookup_open(dir, dentry, nd, mode, 1);
	}

	/* can we conclude ENOENT locally? */
	if (dentry->d_inode == NULL) {
		struct ceph_inode_info *ci = ceph_inode(dir);
		struct ceph_dentry_info *di = ceph_dentry(dentry);

		spin_lock(&ci->i_ceph_lock);
		dout(" dir %p flags are %d\n", dir, ci->i_ceph_flags);
		if (strncmp(dentry->d_name.name,
			    fsc->mount_options->snapdir_name,
			    dentry->d_name.len) &&
		    !is_root_ceph_dentry(dir, dentry) &&
		    ceph_dir_test_complete(dir) &&
		    (__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1))) {
			spin_unlock(&ci->i_ceph_lock);
			dout(" dir %p complete, -ENOENT\n", dir);
			d_add(dentry, NULL);
			di->lease_shared_gen = ci->i_shared_gen;
			return NULL;
		}
		spin_unlock(&ci->i_ceph_lock);
	}

	op = ceph_snap(dir) == CEPH_SNAPDIR ?
		CEPH_MDS_OP_LOOKUPSNAP : CEPH_MDS_OP_LOOKUP;
	req = ceph_mdsc_create_request(mdsc, op, USE_ANY_MDS);
	if (IS_ERR(req))
		return ERR_CAST(req);
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	/* we only need inode linkage */
	req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE);
	req->r_locked_dir = dir;
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	err = ceph_handle_snapdir(req, dentry, err);
	dentry = ceph_finish_lookup(req, dentry, err);
	ceph_mdsc_put_request(req);  /* will dput(dentry) */
	dout("lookup result=%p\n", dentry);
	return dentry;
}

/*
 * If we do a create but get no trace back from the MDS, follow up with
 * a lookup (the VFS expects us to link up the provided dentry).
 */
int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry)
{
	struct dentry *result = ceph_lookup(dir, dentry, NULL);

	if (result && !IS_ERR(result)) {
		/*
		 * We created the item, then did a lookup, and found
		 * it was already linked to another inode we already
		 * had in our cache (and thus got spliced).  Link our
		 * dentry to that inode, but don't hash it, just in
		 * case the VFS wants to dereference it.
		 */
		BUG_ON(!result->d_inode);
		d_instantiate(dentry, result->d_inode);
		return 0;
	}
	return PTR_ERR(result);
}

static int ceph_mknod(struct inode *dir, struct dentry *dentry,
		      umode_t mode, dev_t rdev)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	dout("mknod in dir %p dentry %p mode 0%ho rdev %d\n",
	     dir, dentry, mode, rdev);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_MKNOD, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		d_drop(dentry);
		return PTR_ERR(req);
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_args.mknod.mode = cpu_to_le32(mode);
	req->r_args.mknod.rdev = cpu_to_le32(rdev);
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
	if (err)
		d_drop(dentry);
	return err;
}

static int ceph_create(struct inode *dir, struct dentry *dentry, umode_t mode,
		       struct nameidata *nd)
{
	dout("create in dir %p dentry %p name '%.*s'\n",
	     dir, dentry, dentry->d_name.len, dentry->d_name.name);

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	if (nd) {
		BUG_ON((nd->flags & LOOKUP_OPEN) == 0);
		dentry = ceph_lookup_open(dir, dentry, nd, mode, 0);
		/* hrm, what should i do here if we get aliased? */
		if (IS_ERR(dentry))
			return PTR_ERR(dentry);
		return 0;
	}

	/* fall back to mknod */
	return ceph_mknod(dir, dentry, (mode & ~S_IFMT) | S_IFREG, 0);
}

static int ceph_symlink(struct inode *dir, struct dentry *dentry,
			const char *dest)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	dout("symlink in dir %p dentry %p to '%s'\n", dir, dentry, dest);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SYMLINK, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		d_drop(dentry);
		return PTR_ERR(req);
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_path2 = kstrdup(dest, GFP_NOFS);
	req->r_locked_dir = dir;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
	if (err)
		d_drop(dentry);
	return err;
}

static int ceph_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err = -EROFS;
	int op;

	if (ceph_snap(dir) == CEPH_SNAPDIR) {
		/* mkdir .snap/foo is a MKSNAP */
		op = CEPH_MDS_OP_MKSNAP;
		dout("mksnap dir %p snap '%.*s' dn %p\n", dir,
		     dentry->d_name.len, dentry->d_name.name, dentry);
	} else if (ceph_snap(dir) == CEPH_NOSNAP) {
		dout("mkdir dir %p dn %p mode 0%ho\n", dir, dentry, mode);
		op = CEPH_MDS_OP_MKDIR;
	} else {
		goto out;
	}
	req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_args.mkdir.mode = cpu_to_le32(mode);
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
out:
	if (err < 0)
		d_drop(dentry);
	return err;
}

static int ceph_link(struct dentry *old_dentry, struct inode *dir,
		     struct dentry *dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	dout("link in dir %p old_dentry %p dentry %p\n", dir,
	     old_dentry, dentry);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LINK, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		d_drop(dentry);
		return PTR_ERR(req);
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_old_dentry = dget(old_dentry); /* or inode? hrm. */
	req->r_old_dentry_dir = ceph_get_dentry_parent_inode(old_dentry);
	req->r_locked_dir = dir;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (err) {
		d_drop(dentry);
	} else if (!req->r_reply_info.head->is_dentry) {
		ihold(old_dentry->d_inode);
		d_instantiate(dentry, old_dentry->d_inode);
	}
	ceph_mdsc_put_request(req);
	return err;
}

/*
 * For a soon-to-be unlinked file, drop the AUTH_RDCACHE caps.  If it
 * looks like the link count will hit 0, drop any other caps (other
 * than PIN) we don't specifically want (due to the file still being
 * open).
 */
static int drop_caps_for_unlink(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;

	spin_lock(&ci->i_ceph_lock);
	if (inode->i_nlink == 1) {
		drop |= ~(__ceph_caps_wanted(ci) | CEPH_CAP_PIN);
		ci->i_ceph_flags |= CEPH_I_NODELAY;
	}
	spin_unlock(&ci->i_ceph_lock);
	return drop;
}

/*
 * rmdir and unlink differ only by the metadata op code
 */
static int ceph_unlink(struct inode *dir, struct dentry *dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct inode *inode = dentry->d_inode;
	struct ceph_mds_request *req;
	int err = -EROFS;
	int op;

	if (ceph_snap(dir) == CEPH_SNAPDIR) {
		/* rmdir .snap/foo is RMSNAP */
		dout("rmsnap dir %p '%.*s' dn %p\n", dir, dentry->d_name.len,
		     dentry->d_name.name, dentry);
		op = CEPH_MDS_OP_RMSNAP;
	} else if (ceph_snap(dir) == CEPH_NOSNAP) {
		dout("unlink/rmdir dir %p dn %p inode %p\n",
		     dir, dentry, inode);
		op = S_ISDIR(dentry->d_inode->i_mode) ?
			CEPH_MDS_OP_RMDIR : CEPH_MDS_OP_UNLINK;
	} else
		goto out;
	req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	req->r_inode_drop = drop_caps_for_unlink(inode);
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		d_delete(dentry);
	ceph_mdsc_put_request(req);
out:
	return err;
}

static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry,
		       struct inode *new_dir, struct dentry *new_dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(old_dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(old_dir) != ceph_snap(new_dir))
		return -EXDEV;
	if (ceph_snap(old_dir) != CEPH_NOSNAP ||
	    ceph_snap(new_dir) != CEPH_NOSNAP)
		return -EROFS;
	dout("rename dir %p dentry %p to dir %p dentry %p\n",
	     old_dir, old_dentry, new_dir, new_dentry);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_RENAME, USE_AUTH_MDS);
	if (IS_ERR(req))
		return PTR_ERR(req);
	req->r_dentry = dget(new_dentry);
	req->r_num_caps = 2;
	req->r_old_dentry = dget(old_dentry);
	req->r_old_dentry_dir = ceph_get_dentry_parent_inode(old_dentry);
	req->r_locked_dir = new_dir;
	req->r_old_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_old_dentry_unless = CEPH_CAP_FILE_EXCL;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	/* release LINK_RDCACHE on source inode (mds will lock it) */
	req->r_old_inode_drop = CEPH_CAP_LINK_SHARED;
	if (new_dentry->d_inode)
		req->r_inode_drop = drop_caps_for_unlink(new_dentry->d_inode);
	err = ceph_mdsc_do_request(mdsc, old_dir, req);
	if (!err && !req->r_reply_info.head->is_dentry) {
		/*
		 * Normally d_move() is done by fill_trace (called by
		 * do_request, above).  If there is no trace, we need
		 * to do it here.
		 */

		/* d_move screws up d_subdirs order */
		ceph_dir_clear_complete(new_dir);

		d_move(old_dentry, new_dentry);

		/* ensure target dentry is invalidated, despite
		   rehashing bug in vfs_rename_dir */
		ceph_invalidate_dentry_lease(new_dentry);
	}
	ceph_mdsc_put_request(req);
	return err;
}

/*
 * Ensure a dentry lease will no longer revalidate.
 */
void ceph_invalidate_dentry_lease(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	dentry->d_time = jiffies;
	ceph_dentry(dentry)->lease_shared_gen = 0;
	spin_unlock(&dentry->d_lock);
}

/*
 * Check if dentry lease is valid.  If not, delete the lease.  Try to
 * renew if the lease is more than half up.
 */
static int dentry_lease_is_valid(struct dentry *dentry)
{
	struct ceph_dentry_info *di;
	struct ceph_mds_session *s;
	int valid = 0;
	u32 gen;
	unsigned long ttl;
	struct ceph_mds_session *session = NULL;
	struct inode *dir = NULL;
	u32 seq = 0;

	spin_lock(&dentry->d_lock);
	di = ceph_dentry(dentry);
	if (di->lease_session) {
		s = di->lease_session;
		spin_lock(&s->s_gen_ttl_lock);
		gen = s->s_cap_gen;
		ttl = s->s_cap_ttl;
		spin_unlock(&s->s_gen_ttl_lock);

		if (di->lease_gen == gen &&
		    time_before(jiffies, dentry->d_time) &&
		    time_before(jiffies, ttl)) {
			valid = 1;
			if (di->lease_renew_after &&
			    time_after(jiffies, di->lease_renew_after)) {
				/* we should renew */
				dir = dentry->d_parent->d_inode;
				session = ceph_get_mds_session(s);
				seq = di->lease_seq;
				di->lease_renew_after = 0;
				di->lease_renew_from = jiffies;
			}
		}
	}
	spin_unlock(&dentry->d_lock);

	if (session) {
		ceph_mdsc_lease_send_msg(session, dir, dentry,
					 CEPH_MDS_LEASE_RENEW, seq);
		ceph_put_mds_session(session);
	}
	dout("dentry_lease_is_valid - dentry %p = %d\n", dentry, valid);
	return valid;
}

/*
 * Check if directory-wide content lease/cap is valid.
 */
static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry)
{
	struct ceph_inode_info *ci = ceph_inode(dir);
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	int valid = 0;

	spin_lock(&ci->i_ceph_lock);
	if (ci->i_shared_gen == di->lease_shared_gen)
		valid = __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1);
	spin_unlock(&ci->i_ceph_lock);
	dout("dir_lease_is_valid dir %p v%u dentry %p v%u = %d\n",
	     dir, (unsigned)ci->i_shared_gen, dentry,
	     (unsigned)di->lease_shared_gen, valid);
	return valid;
}

/*
 * Check if cached dentry can be trusted.
 */
static int ceph_d_revalidate(struct dentry *dentry, struct nameidata *nd)
{
	int valid = 0;
	struct inode *dir;

	if (nd && nd->flags & LOOKUP_RCU)
		return -ECHILD;

	dout("d_revalidate %p '%.*s' inode %p offset %lld\n", dentry,
	     dentry->d_name.len, dentry->d_name.name, dentry->d_inode,
	     ceph_dentry(dentry)->offset);

	dir = ceph_get_dentry_parent_inode(dentry);

	/* always trust cached snapped dentries, snapdir dentry */
	if (ceph_snap(dir) != CEPH_NOSNAP) {
		dout("d_revalidate %p '%.*s' inode %p is SNAPPED\n", dentry,
		     dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
		valid = 1;
	} else if (dentry->d_inode &&
		   ceph_snap(dentry->d_inode) == CEPH_SNAPDIR) {
		valid = 1;
	} else if (dentry_lease_is_valid(dentry) ||
		   dir_lease_is_valid(dir, dentry)) {
		valid = 1;
	}

	dout("d_revalidate %p %s\n", dentry, valid ? "valid" : "invalid");
	if (valid)
		ceph_dentry_lru_touch(dentry);
	else
		d_drop(dentry);
	iput(dir);
	return valid;
}

/*
 * Release our ceph_dentry_info.
 */
static void ceph_d_release(struct dentry *dentry)
{
	struct ceph_dentry_info *di = ceph_dentry(dentry);

	dout("d_release %p\n", dentry);
	ceph_dentry_lru_del(dentry);
	if (di->lease_session)
		ceph_put_mds_session(di->lease_session);
	kmem_cache_free(ceph_dentry_cachep, di);
	dentry->d_fsdata = NULL;
}

static int ceph_snapdir_d_revalidate(struct dentry *dentry,
				     struct nameidata *nd)
{
	/*
	 * Eventually, we'll want to revalidate snapped metadata
	 * too... probably...
	 */
	return 1;
}

/*
 * Set/clear/test dir complete flag on the dir's dentry.
 */
void ceph_dir_set_complete(struct inode *inode)
{
	struct dentry *dentry = d_find_any_alias(inode);

	if (dentry && ceph_dentry(dentry) &&
	    ceph_test_mount_opt(ceph_sb_to_client(dentry->d_sb), DCACHE)) {
		dout(" marking %p (%p) complete\n", inode, dentry);
		set_bit(CEPH_D_COMPLETE, &ceph_dentry(dentry)->flags);
	}
	dput(dentry);
}

void ceph_dir_clear_complete(struct inode *inode)
{
	struct dentry *dentry = d_find_any_alias(inode);

	if (dentry && ceph_dentry(dentry)) {
		dout(" marking %p (%p) NOT complete\n", inode, dentry);
		clear_bit(CEPH_D_COMPLETE, &ceph_dentry(dentry)->flags);
	}
	dput(dentry);
}

bool ceph_dir_test_complete(struct inode *inode)
{
	struct dentry *dentry = d_find_any_alias(inode);
	bool complete = false;

	if (dentry && ceph_dentry(dentry))
		complete = test_bit(CEPH_D_COMPLETE,
				    &ceph_dentry(dentry)->flags);
	dput(dentry);
	return complete;
}

/*
 * When the VFS prunes a dentry from the cache, we need to clear the
 * complete flag on the parent directory.
 *
 * Called under dentry->d_lock.
 */
static void ceph_d_prune(struct dentry *dentry)
{
	struct ceph_dentry_info *di;

	dout("ceph_d_prune %p\n", dentry);

	/* do we have a valid parent? */
	if (!dentry->d_parent || IS_ROOT(dentry))
		return;

	/* if we are not hashed, we don't affect D_COMPLETE */
	if (d_unhashed(dentry))
		return;

	/*
	 * we hold d_lock, so d_parent is stable, and d_fsdata is never
	 * cleared until d_release
	 */
	di = ceph_dentry(dentry->d_parent);
	clear_bit(CEPH_D_COMPLETE, &di->flags);
}

/*
 * read() on a dir.  This weird interface hack only works if mounted
 * with '-o dirstat'.
 */
static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size,
			     loff_t *ppos)
{
	struct ceph_file_info *cf = file->private_data;
	struct inode *inode = file->f_dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int left;
	const int bufsize = 1024;

	if (!ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb), DIRSTAT))
		return -EISDIR;

	if (!cf->dir_info) {
		cf->dir_info = kmalloc(bufsize, GFP_NOFS);
		if (!cf->dir_info)
			return -ENOMEM;
		cf->dir_info_len =
			snprintf(cf->dir_info, bufsize,
				"entries:   %20lld\n"
				" files:    %20lld\n"
				" subdirs:  %20lld\n"
				"rentries:  %20lld\n"
				" rfiles:   %20lld\n"
				" rsubdirs: %20lld\n"
				"rbytes:    %20lld\n"
				"rctime:    %10ld.%09ld\n",
				ci->i_files + ci->i_subdirs,
				ci->i_files,
				ci->i_subdirs,
				ci->i_rfiles + ci->i_rsubdirs,
				ci->i_rfiles,
				ci->i_rsubdirs,
				ci->i_rbytes,
				(long)ci->i_rctime.tv_sec,
				(long)ci->i_rctime.tv_nsec);
	}

	if (*ppos >= cf->dir_info_len)
		return 0;
	size = min_t(unsigned, size, cf->dir_info_len-*ppos);
	left = copy_to_user(buf, cf->dir_info + *ppos, size);
	if (left == size)
		return -EFAULT;
	*ppos += (size - left);
	return size - left;
}

/*
 * an fsync() on a dir will wait for any uncommitted directory
 * operations to commit.
 */
static int ceph_dir_fsync(struct file *file, loff_t start, loff_t end,
			  int datasync)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct list_head *head = &ci->i_unsafe_dirops;
	struct ceph_mds_request *req;
	u64 last_tid;
	int ret = 0;

	dout("dir_fsync %p\n", inode);
	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (ret)
		return ret;
	mutex_lock(&inode->i_mutex);

	spin_lock(&ci->i_unsafe_lock);
	if (list_empty(head))
		goto out;

	req = list_entry(head->prev,
			 struct ceph_mds_request, r_unsafe_dir_item);
	last_tid = req->r_tid;

	do {
		ceph_mdsc_get_request(req);
		spin_unlock(&ci->i_unsafe_lock);
		dout("dir_fsync %p wait on tid %llu (until %llu)\n",
		     inode, req->r_tid, last_tid);
		if (req->r_timeout) {
			ret = wait_for_completion_timeout(
				&req->r_safe_completion, req->r_timeout);
			if (ret > 0)
				ret = 0;
			else if (ret == 0)
				ret = -EIO;  /* timed out */
		} else {
			wait_for_completion(&req->r_safe_completion);
		}
		ceph_mdsc_put_request(req);

		spin_lock(&ci->i_unsafe_lock);
		if (ret || list_empty(head))
			break;
		req = list_entry(head->next,
				 struct ceph_mds_request, r_unsafe_dir_item);
	} while (req->r_tid < last_tid);
out:
	spin_unlock(&ci->i_unsafe_lock);
	mutex_unlock(&inode->i_mutex);
	return ret;
}

/*
 * We maintain a private dentry LRU.
 *
 * FIXME: this needs to be changed to a per-mds lru to be useful.
 */
void ceph_dentry_lru_add(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_add %p %p '%.*s'\n", di, dn,
	     dn->d_name.len, dn->d_name.name);
	mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
	spin_lock(&mdsc->dentry_lru_lock);
	list_add_tail(&di->lru, &mdsc->dentry_lru);
	mdsc->num_dentry++;
	spin_unlock(&mdsc->dentry_lru_lock);
}

void ceph_dentry_lru_touch(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_touch %p %p '%.*s' (offset %lld)\n", di, dn,
	     dn->d_name.len, dn->d_name.name, di->offset);
	mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
	spin_lock(&mdsc->dentry_lru_lock);
	list_move_tail(&di->lru, &mdsc->dentry_lru);
	spin_unlock(&mdsc->dentry_lru_lock);
}

void ceph_dentry_lru_del(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_del %p %p '%.*s'\n", di, dn,
	     dn->d_name.len, dn->d_name.name);
	mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
	spin_lock(&mdsc->dentry_lru_lock);
	list_del_init(&di->lru);
	mdsc->num_dentry--;
	spin_unlock(&mdsc->dentry_lru_lock);
}

/*
 * Return name hash for a given dentry.  This is dependent on
 * the parent directory's hash function.
 */
unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn)
{
	struct ceph_inode_info *dci = ceph_inode(dir);

	switch (dci->i_dir_layout.dl_dir_hash) {
	case 0:	/* for backward compat */
	case CEPH_STR_HASH_LINUX:
		return dn->d_name.hash;

	default:
		return ceph_str_hash(dci->i_dir_layout.dl_dir_hash,
				     dn->d_name.name, dn->d_name.len);
	}
}

const struct file_operations ceph_dir_fops = {
	.read = ceph_read_dir,
	.readdir = ceph_readdir,
	.llseek = ceph_dir_llseek,
	.open = ceph_open,
	.release = ceph_release,
	.unlocked_ioctl = ceph_ioctl,
	.fsync = ceph_dir_fsync,
};

const struct inode_operations ceph_dir_iops = {
	.lookup = ceph_lookup,
	.permission = ceph_permission,
	.getattr = ceph_getattr,
	.setattr = ceph_setattr,
	.setxattr = ceph_setxattr,
	.getxattr = ceph_getxattr,
	.listxattr = ceph_listxattr,
	.removexattr = ceph_removexattr,
	.mknod = ceph_mknod,
	.symlink = ceph_symlink,
	.mkdir = ceph_mkdir,
	.link = ceph_link,
	.unlink = ceph_unlink,
	.rmdir = ceph_unlink,
	.rename = ceph_rename,
	.create = ceph_create,
};

const struct dentry_operations ceph_dentry_ops = {
	.d_revalidate = ceph_d_revalidate,
	.d_release = ceph_d_release,
	.d_prune = ceph_d_prune,
};

const struct dentry_operations ceph_snapdir_dentry_ops = {
	.d_revalidate = ceph_snapdir_d_revalidate,
	.d_release = ceph_d_release,
};

const struct dentry_operations ceph_snap_dentry_ops = {
	.d_release = ceph_d_release,
	.d_prune = ceph_d_prune,
};