#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/namei.h>
#include <linux/writeback.h>
#include <linux/vmalloc.h>
#include <linux/pagevec.h>

#include "super.h"
#include "mds_client.h"
#include <linux/ceph/decode.h>

/*
 * Ceph inode operations
 *
 * Implement basic inode helpers (get, alloc) and inode ops (getattr,
 * setattr, etc.), xattr helpers, and helpers for assimilating
 * metadata returned by the MDS into our cache.
 *
 * Also define helpers for doing asynchronous writeback, invalidation,
 * and truncation for the benefit of those who can't afford to block
 * (typically because they are in the message handler path).
 */

static const struct inode_operations ceph_symlink_iops;

static void ceph_invalidate_work(struct work_struct *work);
static void ceph_writeback_work(struct work_struct *work);
static void ceph_vmtruncate_work(struct work_struct *work);

/*
 * find or create an inode, given the ceph ino number
 */
static int ceph_set_ino_cb(struct inode *inode, void *data)
{
	ceph_inode(inode)->i_vino = *(struct ceph_vino *)data;
	inode->i_ino = ceph_vino_to_ino(*(struct ceph_vino *)data);
	return 0;
}

struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino)
{
	struct inode *inode;
	ino_t t = ceph_vino_to_ino(vino);

	inode = iget5_locked(sb, t, ceph_ino_compare, ceph_set_ino_cb, &vino);
	if (inode == NULL)
		return ERR_PTR(-ENOMEM);
	if (inode->i_state & I_NEW) {
		dout("get_inode created new inode %p %llx.%llx ino %llx\n",
		     inode, ceph_vinop(inode), (u64)inode->i_ino);
		unlock_new_inode(inode);
	}

	dout("get_inode on %lu=%llx.%llx got %p\n", inode->i_ino, vino.ino,
	     vino.snap, inode);
	return inode;
}
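
/*
 * A minimal usage sketch for ceph_get_inode() (hypothetical caller;
 * some_ino stands in for an ino obtained from an MDS reply). The caller
 * owns a reference on success and must iput() it when done:
 *
 *	struct ceph_vino vino = { .ino = some_ino, .snap = CEPH_NOSNAP };
 *	struct inode *inode = ceph_get_inode(sb, vino);
 *	if (IS_ERR(inode))
 *		return PTR_ERR(inode);
 *	...
 *	iput(inode);
 */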

/*
 * get/construct snapdir inode for a given directory
 */
struct inode *ceph_get_snapdir(struct inode *parent)
{
	struct ceph_vino vino = {
		.ino = ceph_ino(parent),
		.snap = CEPH_SNAPDIR,
	};
	struct inode *inode = ceph_get_inode(parent->i_sb, vino);
	struct ceph_inode_info *ci = ceph_inode(inode);

	BUG_ON(!S_ISDIR(parent->i_mode));
	if (IS_ERR(inode))
		return inode;
	inode->i_mode = parent->i_mode;
	inode->i_uid = parent->i_uid;
	inode->i_gid = parent->i_gid;
	inode->i_op = &ceph_dir_iops;
	inode->i_fop = &ceph_dir_fops;
	ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */
	ci->i_rbytes = 0;
	return inode;
}

const struct inode_operations ceph_file_iops = {
	.permission = ceph_permission,
	.setattr = ceph_setattr,
	.getattr = ceph_getattr,
	.setxattr = ceph_setxattr,
	.getxattr = ceph_getxattr,
	.listxattr = ceph_listxattr,
	.removexattr = ceph_removexattr,
};


/*
 * We use a 'frag tree' to keep track of the MDS's directory fragments
 * for a given inode (usually there is just a single fragment). We
 * need to know when a child frag is delegated to a new MDS, or when
 * it is flagged as replicated, so we can direct our requests
 * accordingly.
 */

/*
 * find/create a frag in the tree
 */
static struct ceph_inode_frag *__get_or_create_frag(struct ceph_inode_info *ci,
						    u32 f)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ceph_inode_frag *frag;
	int c;

	p = &ci->i_fragtree.rb_node;
	while (*p) {
		parent = *p;
		frag = rb_entry(parent, struct ceph_inode_frag, node);
		c = ceph_frag_compare(f, frag->frag);
		if (c < 0)
			p = &(*p)->rb_left;
		else if (c > 0)
			p = &(*p)->rb_right;
		else
			return frag;
	}

	frag = kmalloc(sizeof(*frag), GFP_NOFS);
	if (!frag) {
		pr_err("__get_or_create_frag ENOMEM on %p %llx.%llx "
		       "frag %x\n", &ci->vfs_inode,
		       ceph_vinop(&ci->vfs_inode), f);
		return ERR_PTR(-ENOMEM);
	}
	frag->frag = f;
	frag->split_by = 0;
	frag->mds = -1;
	frag->ndist = 0;

	rb_link_node(&frag->node, parent, p);
	rb_insert_color(&frag->node, &ci->i_fragtree);

	dout("get_or_create_frag added %llx.%llx frag %x\n",
	     ceph_vinop(&ci->vfs_inode), f);
	return frag;
}

/*
 * find a specific frag @f
 */
struct ceph_inode_frag *__ceph_find_frag(struct ceph_inode_info *ci, u32 f)
{
	struct rb_node *n = ci->i_fragtree.rb_node;

	while (n) {
		struct ceph_inode_frag *frag =
			rb_entry(n, struct ceph_inode_frag, node);
		int c = ceph_frag_compare(f, frag->frag);

		if (c < 0)
			n = n->rb_left;
		else if (c > 0)
			n = n->rb_right;
		else
			return frag;
	}
	return NULL;
}

/*
 * Choose frag containing the given value @v. If @pfrag is
 * specified, copy the frag delegation info to the caller if
 * it is present.
 */
u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
		     struct ceph_inode_frag *pfrag,
		     int *found)
{
	u32 t = ceph_frag_make(0, 0);
	struct ceph_inode_frag *frag;
	unsigned nway, i;
	u32 n;

	if (found)
		*found = 0;

	mutex_lock(&ci->i_fragtree_mutex);
	while (1) {
		WARN_ON(!ceph_frag_contains_value(t, v));
		frag = __ceph_find_frag(ci, t);
		if (!frag)
			break; /* t is a leaf */
		if (frag->split_by == 0) {
			if (pfrag)
				memcpy(pfrag, frag, sizeof(*pfrag));
			if (found)
				*found = 1;
			break;
		}

		/* choose child */
		nway = 1 << frag->split_by;
		dout("choose_frag(%x) %x splits by %d (%d ways)\n", v, t,
		     frag->split_by, nway);
		for (i = 0; i < nway; i++) {
			n = ceph_frag_make_child(t, frag->split_by, i);
			if (ceph_frag_contains_value(n, v)) {
				t = n;
				break;
			}
		}
		BUG_ON(i == nway);
	}
	dout("choose_frag(%x) = %x\n", v, t);

	mutex_unlock(&ci->i_fragtree_mutex);
	return t;
}
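
/*
 * Worked example of the descent above (assuming the usual ceph frag
 * encoding, where a frag packs a bit count plus a value and
 * ceph_frag_make(0, 0) is the root covering the whole hash range):
 * start at the root; if that frag has split_by = 1, it has
 * 1 << 1 = 2 children, and we step into the child whose range
 * contains @v. We repeat until we hit a frag with split_by == 0
 * (delegation info, copied to @pfrag) or no frag at all (a leaf).
 */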

/*
 * Process dirfrag (delegation) info from the mds. Include leaf
 * fragment in tree ONLY if ndist > 0. Otherwise, only
 * branches/splits are included in i_fragtree.
 */
static int ceph_fill_dirfrag(struct inode *inode,
			     struct ceph_mds_reply_dirfrag *dirinfo)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_inode_frag *frag;
	u32 id = le32_to_cpu(dirinfo->frag);
	int mds = le32_to_cpu(dirinfo->auth);
	int ndist = le32_to_cpu(dirinfo->ndist);
	int i;
	int err = 0;

	mutex_lock(&ci->i_fragtree_mutex);
	if (ndist == 0) {
		/* no delegation info needed. */
		frag = __ceph_find_frag(ci, id);
		if (!frag)
			goto out;
		if (frag->split_by == 0) {
			/* tree leaf, remove */
			dout("fill_dirfrag removed %llx.%llx frag %x"
			     " (no ref)\n", ceph_vinop(inode), id);
			rb_erase(&frag->node, &ci->i_fragtree);
			kfree(frag);
		} else {
			/* tree branch, keep and clear */
			dout("fill_dirfrag cleared %llx.%llx frag %x"
			     " referral\n", ceph_vinop(inode), id);
			frag->mds = -1;
			frag->ndist = 0;
		}
		goto out;
	}

	/* find/add this frag to store mds delegation info */
	frag = __get_or_create_frag(ci, id);
	if (IS_ERR(frag)) {
		/* this is not the end of the world; we can continue
		   with bad/inaccurate delegation info */
		pr_err("fill_dirfrag ENOMEM on mds ref %llx.%llx fg %x\n",
		       ceph_vinop(inode), le32_to_cpu(dirinfo->frag));
		err = -ENOMEM;
		goto out;
	}
	frag->mds = mds;
	frag->ndist = min_t(u32, ndist, CEPH_MAX_DIRFRAG_REP);
	for (i = 0; i < frag->ndist; i++)
		frag->dist[i] = le32_to_cpu(dirinfo->dist[i]);
	dout("fill_dirfrag %llx.%llx frag %x ndist=%d\n",
	     ceph_vinop(inode), frag->frag, frag->ndist);

out:
	mutex_unlock(&ci->i_fragtree_mutex);
	return err;
}
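
/*
 * In other words: ndist == 0 means the MDS sent no replication list,
 * so any stored delegation for this frag is stale; a leaf with no info
 * is dropped entirely, while an interior (split) node is kept but its
 * mds/ndist fields are cleared. A nonzero ndist stores up to
 * CEPH_MAX_DIRFRAG_REP replica entries alongside the auth mds.
 */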

/*
 * initialize a newly allocated inode.
 */
struct inode *ceph_alloc_inode(struct super_block *sb)
{
	struct ceph_inode_info *ci;
	int i;

	ci = kmem_cache_alloc(ceph_inode_cachep, GFP_NOFS);
	if (!ci)
		return NULL;

	dout("alloc_inode %p\n", &ci->vfs_inode);

	ci->i_version = 0;
	ci->i_time_warp_seq = 0;
	ci->i_ceph_flags = 0;
	ci->i_release_count = 0;
	ci->i_symlink = NULL;

	memset(&ci->i_dir_layout, 0, sizeof(ci->i_dir_layout));

	ci->i_fragtree = RB_ROOT;
	mutex_init(&ci->i_fragtree_mutex);

	ci->i_xattrs.blob = NULL;
	ci->i_xattrs.prealloc_blob = NULL;
	ci->i_xattrs.dirty = false;
	ci->i_xattrs.index = RB_ROOT;
	ci->i_xattrs.count = 0;
	ci->i_xattrs.names_size = 0;
	ci->i_xattrs.vals_size = 0;
	ci->i_xattrs.version = 0;
	ci->i_xattrs.index_version = 0;

	ci->i_caps = RB_ROOT;
	ci->i_auth_cap = NULL;
	ci->i_dirty_caps = 0;
	ci->i_flushing_caps = 0;
	INIT_LIST_HEAD(&ci->i_dirty_item);
	INIT_LIST_HEAD(&ci->i_flushing_item);
	ci->i_cap_flush_seq = 0;
	ci->i_cap_flush_last_tid = 0;
	memset(&ci->i_cap_flush_tid, 0, sizeof(ci->i_cap_flush_tid));
	init_waitqueue_head(&ci->i_cap_wq);
	ci->i_hold_caps_min = 0;
	ci->i_hold_caps_max = 0;
	INIT_LIST_HEAD(&ci->i_cap_delay_list);
	ci->i_cap_exporting_mds = 0;
	ci->i_cap_exporting_mseq = 0;
	ci->i_cap_exporting_issued = 0;
	INIT_LIST_HEAD(&ci->i_cap_snaps);
	ci->i_head_snapc = NULL;
	ci->i_snap_caps = 0;

	for (i = 0; i < CEPH_FILE_MODE_NUM; i++)
		ci->i_nr_by_mode[i] = 0;

	ci->i_truncate_seq = 0;
	ci->i_truncate_size = 0;
	ci->i_truncate_pending = 0;

	ci->i_max_size = 0;
	ci->i_reported_size = 0;
	ci->i_wanted_max_size = 0;
	ci->i_requested_max_size = 0;

	ci->i_pin_ref = 0;
	ci->i_rd_ref = 0;
	ci->i_rdcache_ref = 0;
	ci->i_wr_ref = 0;
	ci->i_wb_ref = 0;
	ci->i_wrbuffer_ref = 0;
	ci->i_wrbuffer_ref_head = 0;
	ci->i_shared_gen = 0;
	ci->i_rdcache_gen = 0;
	ci->i_rdcache_revoking = 0;

	INIT_LIST_HEAD(&ci->i_unsafe_writes);
	INIT_LIST_HEAD(&ci->i_unsafe_dirops);
	spin_lock_init(&ci->i_unsafe_lock);

	ci->i_snap_realm = NULL;
	INIT_LIST_HEAD(&ci->i_snap_realm_item);
	INIT_LIST_HEAD(&ci->i_snap_flush_item);

	INIT_WORK(&ci->i_wb_work, ceph_writeback_work);
	INIT_WORK(&ci->i_pg_inv_work, ceph_invalidate_work);
	INIT_WORK(&ci->i_vmtruncate_work, ceph_vmtruncate_work);

	return &ci->vfs_inode;
}

static void ceph_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	struct ceph_inode_info *ci = ceph_inode(inode);

	INIT_LIST_HEAD(&inode->i_dentry);
	kmem_cache_free(ceph_inode_cachep, ci);
}

void ceph_destroy_inode(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_inode_frag *frag;
	struct rb_node *n;

	dout("destroy_inode %p ino %llx.%llx\n", inode, ceph_vinop(inode));

	ceph_queue_caps_release(inode);

	/*
	 * we may still have a snap_realm reference if there are stray
	 * caps in i_cap_exporting_issued or i_snap_caps.
	 */
	if (ci->i_snap_realm) {
		struct ceph_mds_client *mdsc =
			ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
		struct ceph_snap_realm *realm = ci->i_snap_realm;

		dout(" dropping residual ref to snap realm %p\n", realm);
		spin_lock(&realm->inodes_with_caps_lock);
		list_del_init(&ci->i_snap_realm_item);
		spin_unlock(&realm->inodes_with_caps_lock);
		ceph_put_snap_realm(mdsc, realm);
	}

	kfree(ci->i_symlink);
	while ((n = rb_first(&ci->i_fragtree)) != NULL) {
		frag = rb_entry(n, struct ceph_inode_frag, node);
		rb_erase(n, &ci->i_fragtree);
		kfree(frag);
	}

	__ceph_destroy_xattrs(ci);
	if (ci->i_xattrs.blob)
		ceph_buffer_put(ci->i_xattrs.blob);
	if (ci->i_xattrs.prealloc_blob)
		ceph_buffer_put(ci->i_xattrs.prealloc_blob);

	call_rcu(&inode->i_rcu, ceph_i_callback);
}

/*
 * Helpers to fill in size, ctime, mtime, and atime. We have to be
 * careful because either the client or MDS may have more up to date
 * info, depending on which capabilities are held, and whether
 * time_warp_seq or truncate_seq have increased. (Ordinarily, mtime
 * and size are monotonically increasing, except when utimes() or
 * truncate() increments the corresponding _seq values.)
 */
int ceph_fill_file_size(struct inode *inode, int issued,
			u32 truncate_seq, u64 truncate_size, u64 size)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int queue_trunc = 0;

	if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) > 0 ||
	    (truncate_seq == ci->i_truncate_seq && size > inode->i_size)) {
		dout("size %lld -> %llu\n", inode->i_size, size);
		inode->i_size = size;
		inode->i_blocks = (size + (1<<9) - 1) >> 9;
		ci->i_reported_size = size;
		if (truncate_seq != ci->i_truncate_seq) {
			dout("truncate_seq %u -> %u\n",
			     ci->i_truncate_seq, truncate_seq);
			ci->i_truncate_seq = truncate_seq;
			/*
			 * If we hold relevant caps, or in the case where we're
			 * not the only client referencing this file and we
			 * don't hold those caps, then we need to check whether
			 * the file is either opened or mmaped
			 */
			if ((issued & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_RD|
				       CEPH_CAP_FILE_WR|CEPH_CAP_FILE_BUFFER|
				       CEPH_CAP_FILE_EXCL|
				       CEPH_CAP_FILE_LAZYIO)) ||
			    mapping_mapped(inode->i_mapping) ||
			    __ceph_caps_file_wanted(ci)) {
				ci->i_truncate_pending++;
				queue_trunc = 1;
			}
		}
	}
	if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) >= 0 &&
	    ci->i_truncate_size != truncate_size) {
		dout("truncate_size %lld -> %llu\n", ci->i_truncate_size,
		     truncate_size);
		ci->i_truncate_size = truncate_size;
	}
	return queue_trunc;
}
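
/*
 * Example: if the MDS reports truncate_seq 5 while we hold
 * i_truncate_seq 4, some client truncated the file; we adopt the new
 * size, and if we hold file caps, the file is mapped, or caps are
 * still wanted, we bump i_truncate_pending and return nonzero so the
 * caller queues an async vmtruncate. If the seqs match, size can only
 * have grown (writes), so we simply take the larger value.
 */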

void ceph_fill_file_time(struct inode *inode, int issued,
			 u64 time_warp_seq, struct timespec *ctime,
			 struct timespec *mtime, struct timespec *atime)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int warn = 0;

	if (issued & (CEPH_CAP_FILE_EXCL|
		      CEPH_CAP_FILE_WR|
		      CEPH_CAP_FILE_BUFFER|
		      CEPH_CAP_AUTH_EXCL|
		      CEPH_CAP_XATTR_EXCL)) {
		if (timespec_compare(ctime, &inode->i_ctime) > 0) {
			dout("ctime %ld.%09ld -> %ld.%09ld inc w/ cap\n",
			     inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
			     ctime->tv_sec, ctime->tv_nsec);
			inode->i_ctime = *ctime;
		}
		if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) {
			/* the MDS did a utimes() */
			dout("mtime %ld.%09ld -> %ld.%09ld "
			     "tw %d -> %d\n",
			     inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
			     mtime->tv_sec, mtime->tv_nsec,
			     ci->i_time_warp_seq, (int)time_warp_seq);
			inode->i_mtime = *mtime;
			inode->i_atime = *atime;
			ci->i_time_warp_seq = time_warp_seq;
		} else if (time_warp_seq == ci->i_time_warp_seq) {
			/* nobody did utimes(); take the max */
			if (timespec_compare(mtime, &inode->i_mtime) > 0) {
				dout("mtime %ld.%09ld -> %ld.%09ld inc\n",
				     inode->i_mtime.tv_sec,
				     inode->i_mtime.tv_nsec,
				     mtime->tv_sec, mtime->tv_nsec);
				inode->i_mtime = *mtime;
			}
			if (timespec_compare(atime, &inode->i_atime) > 0) {
				dout("atime %ld.%09ld -> %ld.%09ld inc\n",
				     inode->i_atime.tv_sec,
				     inode->i_atime.tv_nsec,
				     atime->tv_sec, atime->tv_nsec);
				inode->i_atime = *atime;
			}
		} else if (issued & CEPH_CAP_FILE_EXCL) {
			/* we did a utimes(); ignore mds values */
		} else {
			warn = 1;
		}
	} else {
		/* we have no write|excl caps; whatever the MDS says is true */
		if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) >= 0) {
			inode->i_ctime = *ctime;
			inode->i_mtime = *mtime;
			inode->i_atime = *atime;
			ci->i_time_warp_seq = time_warp_seq;
		} else {
			warn = 1;
		}
	}
	if (warn) /* time_warp_seq shouldn't go backwards */
		dout("%p mds time_warp_seq %llu < %u\n",
		     inode, time_warp_seq, ci->i_time_warp_seq);
}

/*
 * Populate an inode based on info from mds. May be called on new or
 * existing inodes.
 */
static int fill_inode(struct inode *inode,
		      struct ceph_mds_reply_info_in *iinfo,
		      struct ceph_mds_reply_dirfrag *dirinfo,
		      struct ceph_mds_session *session,
		      unsigned long ttl_from, int cap_fmode,
		      struct ceph_cap_reservation *caps_reservation)
{
	struct ceph_mds_reply_inode *info = iinfo->in;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int i;
	int issued, implemented;
	struct timespec mtime, atime, ctime;
	u32 nsplits;
	struct ceph_buffer *xattr_blob = NULL;
	int err = 0;
	int queue_trunc = 0;

	dout("fill_inode %p ino %llx.%llx v %llu had %llu\n",
	     inode, ceph_vinop(inode), le64_to_cpu(info->version),
	     ci->i_version);

	/*
	 * prealloc xattr data, if it looks like we'll need it. only
	 * if len > 4 (meaning there are actually xattrs; the first 4
	 * bytes are the xattr count).
	 */
	if (iinfo->xattr_len > 4) {
		xattr_blob = ceph_buffer_new(iinfo->xattr_len, GFP_NOFS);
		if (!xattr_blob)
			pr_err("fill_inode ENOMEM xattr blob %d bytes\n",
			       iinfo->xattr_len);
	}

	spin_lock(&inode->i_lock);

	/*
	 * provided version will be odd if inode value is projected,
	 * even if stable. skip the update if we have newer stable
	 * info (ours>=theirs, e.g. due to racing mds replies), unless
	 * we are getting projected (unstable) info (in which case the
	 * version is odd, and we want ours>theirs).
	 *   us   them
	 *   2    2     skip
	 *   3    2     skip
	 *   3    3     update
	 */
	if (le64_to_cpu(info->version) > 0 &&
	    (ci->i_version & ~1) >= le64_to_cpu(info->version))
		goto no_change;

	issued = __ceph_caps_issued(ci, &implemented);
	issued |= implemented | __ceph_caps_dirty(ci);

	/* update inode */
	ci->i_version = le64_to_cpu(info->version);
	inode->i_version++;
	inode->i_rdev = le32_to_cpu(info->rdev);

	if ((issued & CEPH_CAP_AUTH_EXCL) == 0) {
		inode->i_mode = le32_to_cpu(info->mode);
		inode->i_uid = le32_to_cpu(info->uid);
		inode->i_gid = le32_to_cpu(info->gid);
		dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
		     inode->i_uid, inode->i_gid);
	}

	if ((issued & CEPH_CAP_LINK_EXCL) == 0)
		inode->i_nlink = le32_to_cpu(info->nlink);

	/* be careful with mtime, atime, size */
	ceph_decode_timespec(&atime, &info->atime);
	ceph_decode_timespec(&mtime, &info->mtime);
	ceph_decode_timespec(&ctime, &info->ctime);
	queue_trunc = ceph_fill_file_size(inode, issued,
					  le32_to_cpu(info->truncate_seq),
					  le64_to_cpu(info->truncate_size),
					  le64_to_cpu(info->size));
	ceph_fill_file_time(inode, issued,
			    le32_to_cpu(info->time_warp_seq),
			    &ctime, &mtime, &atime);

	/* only update max_size on auth cap */
	if ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
	    ci->i_max_size != le64_to_cpu(info->max_size)) {
		dout("max_size %lld -> %llu\n", ci->i_max_size,
		     le64_to_cpu(info->max_size));
		ci->i_max_size = le64_to_cpu(info->max_size);
	}

	ci->i_layout = info->layout;
	inode->i_blkbits = fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;

	/* xattrs */
	/* note that if i_xattrs.len <= 4, i_xattrs.data will still be NULL. */
	if ((issued & CEPH_CAP_XATTR_EXCL) == 0 &&
	    le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) {
		if (ci->i_xattrs.blob)
			ceph_buffer_put(ci->i_xattrs.blob);
		ci->i_xattrs.blob = xattr_blob;
		if (xattr_blob)
			memcpy(ci->i_xattrs.blob->vec.iov_base,
			       iinfo->xattr_data, iinfo->xattr_len);
		ci->i_xattrs.version = le64_to_cpu(info->xattr_version);
		xattr_blob = NULL;
	}

	inode->i_mapping->a_ops = &ceph_aops;
	inode->i_mapping->backing_dev_info =
		&ceph_sb_to_client(inode->i_sb)->backing_dev_info;

	switch (inode->i_mode & S_IFMT) {
	case S_IFIFO:
	case S_IFBLK:
	case S_IFCHR:
	case S_IFSOCK:
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
		inode->i_op = &ceph_file_iops;
		break;
	case S_IFREG:
		inode->i_op = &ceph_file_iops;
		inode->i_fop = &ceph_file_fops;
		break;
	case S_IFLNK:
		inode->i_op = &ceph_symlink_iops;
		if (!ci->i_symlink) {
			int symlen = iinfo->symlink_len;
			char *sym;

			BUG_ON(symlen != inode->i_size);
			spin_unlock(&inode->i_lock);

			err = -ENOMEM;
			sym = kmalloc(symlen+1, GFP_NOFS);
			if (!sym)
				goto out;
			memcpy(sym, iinfo->symlink, symlen);
			sym[symlen] = 0;

			spin_lock(&inode->i_lock);
			if (!ci->i_symlink)
				ci->i_symlink = sym;
			else
				kfree(sym); /* lost a race */
		}
		break;
	case S_IFDIR:
		inode->i_op = &ceph_dir_iops;
		inode->i_fop = &ceph_dir_fops;

		ci->i_dir_layout = iinfo->dir_layout;

		ci->i_files = le64_to_cpu(info->files);
		ci->i_subdirs = le64_to_cpu(info->subdirs);
		ci->i_rbytes = le64_to_cpu(info->rbytes);
		ci->i_rfiles = le64_to_cpu(info->rfiles);
		ci->i_rsubdirs = le64_to_cpu(info->rsubdirs);
		ceph_decode_timespec(&ci->i_rctime, &info->rctime);

		/* set dir completion flag? */
		if (ci->i_files == 0 && ci->i_subdirs == 0 &&
		    ceph_snap(inode) == CEPH_NOSNAP &&
		    (le32_to_cpu(info->cap.caps) & CEPH_CAP_FILE_SHARED) &&
		    (issued & CEPH_CAP_FILE_EXCL) == 0 &&
		    (ci->i_ceph_flags & CEPH_I_COMPLETE) == 0) {
			dout(" marking %p complete (empty)\n", inode);
			/* ci->i_ceph_flags |= CEPH_I_COMPLETE; */
			ci->i_max_offset = 2;
		}
		break;
	default:
		pr_err("fill_inode %llx.%llx BAD mode 0%o\n",
		       ceph_vinop(inode), inode->i_mode);
	}

no_change:
	spin_unlock(&inode->i_lock);

	/* queue truncate if we saw i_size decrease */
	if (queue_trunc)
		ceph_queue_vmtruncate(inode);

	/* populate frag tree */
	/* FIXME: move me up, if/when version reflects fragtree changes */
	nsplits = le32_to_cpu(info->fragtree.nsplits);
	mutex_lock(&ci->i_fragtree_mutex);
	for (i = 0; i < nsplits; i++) {
		u32 id = le32_to_cpu(info->fragtree.splits[i].frag);
		struct ceph_inode_frag *frag = __get_or_create_frag(ci, id);

		if (IS_ERR(frag))
			continue;
		frag->split_by = le32_to_cpu(info->fragtree.splits[i].by);
		dout(" frag %x split by %d\n", frag->frag, frag->split_by);
	}
	mutex_unlock(&ci->i_fragtree_mutex);

	/* were we issued a capability? */
	if (info->cap.caps) {
		if (ceph_snap(inode) == CEPH_NOSNAP) {
			ceph_add_cap(inode, session,
				     le64_to_cpu(info->cap.cap_id),
				     cap_fmode,
				     le32_to_cpu(info->cap.caps),
				     le32_to_cpu(info->cap.wanted),
				     le32_to_cpu(info->cap.seq),
				     le32_to_cpu(info->cap.mseq),
				     le64_to_cpu(info->cap.realm),
				     info->cap.flags,
				     caps_reservation);
		} else {
			spin_lock(&inode->i_lock);
			dout(" %p got snap_caps %s\n", inode,
			     ceph_cap_string(le32_to_cpu(info->cap.caps)));
			ci->i_snap_caps |= le32_to_cpu(info->cap.caps);
			if (cap_fmode >= 0)
				__ceph_get_fmode(ci, cap_fmode);
			spin_unlock(&inode->i_lock);
		}
	} else if (cap_fmode >= 0) {
		pr_warning("mds issued no caps on %llx.%llx\n",
			   ceph_vinop(inode));
		__ceph_get_fmode(ci, cap_fmode);
	}

	/* update delegation info? */
	if (dirinfo)
		ceph_fill_dirfrag(inode, dirinfo);

	err = 0;

out:
	if (xattr_blob)
		ceph_buffer_put(xattr_blob);
	return err;
}
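
/*
 * Note on xattr_blob ownership in fill_inode(): the blob preallocated
 * near the top is either handed off to ci->i_xattrs.blob (and the local
 * pointer cleared) or released at the out label, so neither the success
 * nor the error path leaks it.
 */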

/*
 * caller should hold session s_mutex.
 */
static void update_dentry_lease(struct dentry *dentry,
				struct ceph_mds_reply_lease *lease,
				struct ceph_mds_session *session,
				unsigned long from_time)
{
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	long unsigned duration = le32_to_cpu(lease->duration_ms);
	long unsigned ttl = from_time + (duration * HZ) / 1000;
	long unsigned half_ttl = from_time + (duration * HZ / 2) / 1000;
	struct inode *dir;

	/* only track leases on regular dentries */
	if (dentry->d_op != &ceph_dentry_ops)
		return;

	spin_lock(&dentry->d_lock);
	dout("update_dentry_lease %p mask %d duration %lu ms ttl %lu\n",
	     dentry, le16_to_cpu(lease->mask), duration, ttl);

	/* make lease_rdcache_gen match directory */
	dir = dentry->d_parent->d_inode;
	di->lease_shared_gen = ceph_inode(dir)->i_shared_gen;

	if (lease->mask == 0)
		goto out_unlock;

	if (di->lease_gen == session->s_cap_gen &&
	    time_before(ttl, dentry->d_time))
		goto out_unlock; /* we already have a newer lease. */

	if (di->lease_session && di->lease_session != session)
		goto out_unlock;

	ceph_dentry_lru_touch(dentry);

	if (!di->lease_session)
		di->lease_session = ceph_get_mds_session(session);
	di->lease_gen = session->s_cap_gen;
	di->lease_seq = le32_to_cpu(lease->seq);
	di->lease_renew_after = half_ttl;
	di->lease_renew_from = 0;
	dentry->d_time = ttl;
out_unlock:
	spin_unlock(&dentry->d_lock);
	return;
}
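
/*
 * Lease timing example: with duration_ms = 30000 and HZ = 1000 (an
 * assumed config for illustration), ttl works out to
 * from_time + 30000 jiffies (30s) and half_ttl to from_time + 15000
 * jiffies; lease_renew_after means we try to renew once the lease is
 * half expired.
 */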

/*
 * Set dentry's directory position based on the current dir's max, and
 * order it in d_subdirs, so that dcache_readdir behaves.
 */
static void ceph_set_dentry_offset(struct dentry *dn)
{
	struct dentry *dir = dn->d_parent;
	struct inode *inode = dn->d_parent->d_inode;
	struct ceph_dentry_info *di;

	BUG_ON(!inode);

	di = ceph_dentry(dn);

	spin_lock(&inode->i_lock);
	if ((ceph_inode(inode)->i_ceph_flags & CEPH_I_COMPLETE) == 0) {
		spin_unlock(&inode->i_lock);
		return;
	}
	di->offset = ceph_inode(inode)->i_max_offset++;
	spin_unlock(&inode->i_lock);

	spin_lock(&dir->d_lock);
	spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED);
	list_move(&dn->d_u.d_child, &dir->d_subdirs);
	dout("set_dentry_offset %p %lld (%p %p)\n", dn, di->offset,
	     dn->d_u.d_child.prev, dn->d_u.d_child.next);
	spin_unlock(&dn->d_lock);
	spin_unlock(&dir->d_lock);
}

/*
 * splice a dentry to an inode.
 * caller must hold directory i_mutex for this to be safe.
 *
 * we will only rehash the resulting dentry if @prehash is
 * true; @prehash will be set to false (for the benefit of
 * the caller) if we fail.
 */
static struct dentry *splice_dentry(struct dentry *dn, struct inode *in,
				    bool *prehash, bool set_offset)
{
	struct dentry *realdn;

	BUG_ON(dn->d_inode);

	/* dn must be unhashed */
	if (!d_unhashed(dn))
		d_drop(dn);
	realdn = d_materialise_unique(dn, in);
	if (IS_ERR(realdn)) {
		pr_err("splice_dentry error %ld %p inode %p ino %llx.%llx\n",
		       PTR_ERR(realdn), dn, in, ceph_vinop(in));
		if (prehash)
			*prehash = false; /* don't rehash on error */
		dn = realdn; /* note realdn contains the error */
		goto out;
	} else if (realdn) {
		dout("dn %p (%d) spliced with %p (%d) "
		     "inode %p ino %llx.%llx\n",
		     dn, dn->d_count,
		     realdn, realdn->d_count,
		     realdn->d_inode, ceph_vinop(realdn->d_inode));
		dput(dn);
		dn = realdn;
	} else {
		BUG_ON(!ceph_dentry(dn));
		dout("dn %p attached to %p ino %llx.%llx\n",
		     dn, dn->d_inode, ceph_vinop(dn->d_inode));
	}
	if ((!prehash || *prehash) && d_unhashed(dn))
		d_rehash(dn);
	if (set_offset)
		ceph_set_dentry_offset(dn);
out:
	return dn;
}
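
/*
 * Usage note: callers must treat the return value as the dentry to use
 * from here on. On success it is either @dn itself or an existing
 * alias found by d_materialise_unique() (in which case the reference
 * on the original @dn has already been dropped); on failure it carries
 * the error for IS_ERR()/PTR_ERR().
 */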

/*
 * Incorporate results into the local cache. This is either just
 * one inode, or a directory, dentry, and possibly linked-to inode (e.g.,
 * after a lookup).
 *
 * A reply may contain:
 *   a directory inode along with a dentry.
 *   and/or a target inode
 *
 * Called with snap_rwsem (read).
 */
int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
		    struct ceph_mds_session *session)
{
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	struct inode *in = NULL;
	struct ceph_mds_reply_inode *ininfo;
	struct ceph_vino vino;
	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
	int i = 0;
	int err = 0;

	dout("fill_trace %p is_dentry %d is_target %d\n", req,
	     rinfo->head->is_dentry, rinfo->head->is_target);

#if 0
	/*
	 * Debugging hook:
	 *
	 * If we resend completed ops to a recovering mds, we get no
	 * trace. Since that is very rare, pretend this is the case
	 * to ensure the 'no trace' handlers in the callers behave.
	 *
	 * Fill in inodes unconditionally to avoid breaking cap
	 * invariants.
	 */
	if (rinfo->head->op & CEPH_MDS_OP_WRITE) {
		pr_info("fill_trace faking empty trace on %lld %s\n",
			req->r_tid, ceph_mds_op_name(rinfo->head->op));
		if (rinfo->head->is_dentry) {
			rinfo->head->is_dentry = 0;
			err = fill_inode(req->r_locked_dir,
					 &rinfo->diri, rinfo->dirfrag,
					 session, req->r_request_started, -1);
		}
		if (rinfo->head->is_target) {
			rinfo->head->is_target = 0;
			ininfo = rinfo->targeti.in;
			vino.ino = le64_to_cpu(ininfo->ino);
			vino.snap = le64_to_cpu(ininfo->snapid);
			in = ceph_get_inode(sb, vino);
			err = fill_inode(in, &rinfo->targeti, NULL,
					 session, req->r_request_started,
					 req->r_fmode);
			iput(in);
		}
	}
#endif

	if (!rinfo->head->is_target && !rinfo->head->is_dentry) {
		dout("fill_trace reply is empty!\n");
		if (rinfo->head->result == 0 && req->r_locked_dir)
			ceph_invalidate_dir_request(req);
		return 0;
	}

	if (rinfo->head->is_dentry) {
		struct inode *dir = req->r_locked_dir;

		err = fill_inode(dir, &rinfo->diri, rinfo->dirfrag,
				 session, req->r_request_started, -1,
				 &req->r_caps_reservation);
		if (err < 0)
			return err;
	}

	/*
	 * ignore null lease/binding on snapdir ENOENT, or else we
	 * will have trouble splicing in the virtual snapdir later
	 */
	if (rinfo->head->is_dentry && !req->r_aborted &&
	    (rinfo->head->is_target || strncmp(req->r_dentry->d_name.name,
					       fsc->mount_options->snapdir_name,
					       req->r_dentry->d_name.len))) {
		/*
		 * lookup link rename  : null -> possibly existing inode
		 * mknod symlink mkdir : null -> new inode
		 * unlink              : linked -> null
		 */
		struct inode *dir = req->r_locked_dir;
		struct dentry *dn = req->r_dentry;
		bool have_dir_cap, have_lease;

		BUG_ON(!dn);
		BUG_ON(!dir);
		BUG_ON(dn->d_parent->d_inode != dir);
		BUG_ON(ceph_ino(dir) !=
		       le64_to_cpu(rinfo->diri.in->ino));
		BUG_ON(ceph_snap(dir) !=
		       le64_to_cpu(rinfo->diri.in->snapid));

		/* do we have a lease on the whole dir? */
		have_dir_cap =
			(le32_to_cpu(rinfo->diri.in->cap.caps) &
			 CEPH_CAP_FILE_SHARED);

		/* do we have a dn lease? */
		have_lease = have_dir_cap ||
			(le16_to_cpu(rinfo->dlease->mask) &
			 CEPH_LOCK_DN);

		if (!have_lease)
			dout("fill_trace no dentry lease or dir cap\n");

		/* rename? */
		if (req->r_old_dentry && req->r_op == CEPH_MDS_OP_RENAME) {
			dout(" src %p '%.*s' dst %p '%.*s'\n",
			     req->r_old_dentry,
			     req->r_old_dentry->d_name.len,
			     req->r_old_dentry->d_name.name,
			     dn, dn->d_name.len, dn->d_name.name);
			dout("fill_trace doing d_move %p -> %p\n",
			     req->r_old_dentry, dn);

			d_move(req->r_old_dentry, dn);
			dout(" src %p '%.*s' dst %p '%.*s'\n",
			     req->r_old_dentry,
			     req->r_old_dentry->d_name.len,
			     req->r_old_dentry->d_name.name,
			     dn, dn->d_name.len, dn->d_name.name);

			/* ensure target dentry is invalidated, despite
			   rehashing bug in vfs_rename_dir */
			ceph_invalidate_dentry_lease(dn);

			/*
			 * d_move() puts the renamed dentry at the end of
			 * d_subdirs. We need to assign it an appropriate
			 * directory offset so we can behave when holding
			 * I_COMPLETE.
			 */
			ceph_set_dentry_offset(req->r_old_dentry);
			dout("dn %p gets new offset %lld\n", req->r_old_dentry,
			     ceph_dentry(req->r_old_dentry)->offset);

			dn = req->r_old_dentry; /* use old_dentry */
			in = dn->d_inode;
		}

		/* null dentry? */
		if (!rinfo->head->is_target) {
			dout("fill_trace null dentry\n");
			if (dn->d_inode) {
				dout("d_delete %p\n", dn);
				d_delete(dn);
			} else {
				dout("d_instantiate %p NULL\n", dn);
				d_instantiate(dn, NULL);
				if (have_lease && d_unhashed(dn))
					d_rehash(dn);
				update_dentry_lease(dn, rinfo->dlease,
						    session,
						    req->r_request_started);
			}
			goto done;
		}

		/* attach proper inode */
		ininfo = rinfo->targeti.in;
		vino.ino = le64_to_cpu(ininfo->ino);
		vino.snap = le64_to_cpu(ininfo->snapid);
		in = dn->d_inode;
		if (!in) {
			in = ceph_get_inode(sb, vino);
			if (IS_ERR(in)) {
				pr_err("fill_trace bad get_inode "
				       "%llx.%llx\n", vino.ino, vino.snap);
				err = PTR_ERR(in);
				d_delete(dn);
				goto done;
			}
			dn = splice_dentry(dn, in, &have_lease, true);
			if (IS_ERR(dn)) {
				err = PTR_ERR(dn);
				goto done;
			}
			req->r_dentry = dn; /* may have spliced */
			ihold(in);
		} else if (ceph_ino(in) == vino.ino &&
			   ceph_snap(in) == vino.snap) {
			ihold(in);
		} else {
			dout(" %p links to %p %llx.%llx, not %llx.%llx\n",
			     dn, in, ceph_ino(in), ceph_snap(in),
			     vino.ino, vino.snap);
			have_lease = false;
			in = NULL;
		}

		if (have_lease)
			update_dentry_lease(dn, rinfo->dlease, session,
					    req->r_request_started);
		dout(" final dn %p\n", dn);
		i++;
	} else if (req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
		   req->r_op == CEPH_MDS_OP_MKSNAP) {
		struct dentry *dn = req->r_dentry;

		/* fill out a snapdir LOOKUPSNAP dentry */
		BUG_ON(!dn);
		BUG_ON(!req->r_locked_dir);
		BUG_ON(ceph_snap(req->r_locked_dir) != CEPH_SNAPDIR);
		ininfo = rinfo->targeti.in;
		vino.ino = le64_to_cpu(ininfo->ino);
		vino.snap = le64_to_cpu(ininfo->snapid);
		in = ceph_get_inode(sb, vino);
		if (IS_ERR(in)) {
			pr_err("fill_inode get_inode badness %llx.%llx\n",
			       vino.ino, vino.snap);
			err = PTR_ERR(in);
			d_delete(dn);
			goto done;
		}
		dout(" linking snapped dir %p to dn %p\n", in, dn);
		dn = splice_dentry(dn, in, NULL, true);
		if (IS_ERR(dn)) {
			err = PTR_ERR(dn);
			goto done;
		}
		req->r_dentry = dn; /* may have spliced */
		ihold(in);
		rinfo->head->is_dentry = 1; /* fool notrace handlers */
	}

	if (rinfo->head->is_target) {
		vino.ino = le64_to_cpu(rinfo->targeti.in->ino);
		vino.snap = le64_to_cpu(rinfo->targeti.in->snapid);

		if (in == NULL || ceph_ino(in) != vino.ino ||
		    ceph_snap(in) != vino.snap) {
			in = ceph_get_inode(sb, vino);
			if (IS_ERR(in)) {
				err = PTR_ERR(in);
				goto done;
			}
		}
		req->r_target_inode = in;

		err = fill_inode(in,
				 &rinfo->targeti, NULL,
				 session, req->r_request_started,
				 (le32_to_cpu(rinfo->head->result) == 0) ?
				 req->r_fmode : -1,
				 &req->r_caps_reservation);
		if (err < 0) {
			pr_err("fill_inode badness %p %llx.%llx\n",
			       in, ceph_vinop(in));
			goto done;
		}
	}
done:
	dout("fill_trace done err=%d\n", err);
	return err;
}

/*
 * Prepopulate our cache with readdir results, leases, etc.
 */
int ceph_readdir_prepopulate(struct ceph_mds_request *req,
			     struct ceph_mds_session *session)
{
	struct dentry *parent = req->r_dentry;
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	struct qstr dname;
	struct dentry *dn;
	struct inode *in;
	int err = 0, i;
	struct inode *snapdir = NULL;
	struct ceph_mds_request_head *rhead = req->r_request->front.iov_base;
	u64 frag = le32_to_cpu(rhead->args.readdir.frag);
	struct ceph_dentry_info *di;

	if (le32_to_cpu(rinfo->head->op) == CEPH_MDS_OP_LSSNAP) {
		snapdir = ceph_get_snapdir(parent->d_inode);
		parent = d_find_alias(snapdir);
		dout("readdir_prepopulate %d items under SNAPDIR dn %p\n",
		     rinfo->dir_nr, parent);
	} else {
		dout("readdir_prepopulate %d items under dn %p\n",
		     rinfo->dir_nr, parent);
		if (rinfo->dir_dir)
			ceph_fill_dirfrag(parent->d_inode, rinfo->dir_dir);
	}

	for (i = 0; i < rinfo->dir_nr; i++) {
		struct ceph_vino vino;

		dname.name = rinfo->dir_dname[i];
		dname.len = rinfo->dir_dname_len[i];
		dname.hash = full_name_hash(dname.name, dname.len);

		vino.ino = le64_to_cpu(rinfo->dir_in[i].in->ino);
		vino.snap = le64_to_cpu(rinfo->dir_in[i].in->snapid);

retry_lookup:
		dn = d_lookup(parent, &dname);
		dout("d_lookup on parent=%p name=%.*s got %p\n",
		     parent, dname.len, dname.name, dn);

		if (!dn) {
			dn = d_alloc(parent, &dname);
			dout("d_alloc %p '%.*s' = %p\n", parent,
			     dname.len, dname.name, dn);
			if (dn == NULL) {
				dout("d_alloc badness\n");
				err = -ENOMEM;
				goto out;
			}
			err = ceph_init_dentry(dn);
			if (err < 0) {
				dput(dn);
				goto out;
			}
		} else if (dn->d_inode &&
			   (ceph_ino(dn->d_inode) != vino.ino ||
			    ceph_snap(dn->d_inode) != vino.snap)) {
			dout(" dn %p points to wrong inode %p\n",
			     dn, dn->d_inode);
			d_delete(dn);
			dput(dn);
			goto retry_lookup;
		} else {
			/* reorder parent's d_subdirs */
			spin_lock(&parent->d_lock);
			spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED);
			list_move(&dn->d_u.d_child, &parent->d_subdirs);
			spin_unlock(&dn->d_lock);
			spin_unlock(&parent->d_lock);
		}

		di = dn->d_fsdata;
		di->offset = ceph_make_fpos(frag, i + req->r_readdir_offset);

		/* inode */
		if (dn->d_inode) {
			in = dn->d_inode;
		} else {
			in = ceph_get_inode(parent->d_sb, vino);
			if (IS_ERR(in)) {
				dout("new_inode badness\n");
				d_delete(dn);
				dput(dn);
				err = PTR_ERR(in);
				goto out;
			}
			dn = splice_dentry(dn, in, NULL, false);
			if (IS_ERR(dn))
				dn = NULL;
		}

		if (fill_inode(in, &rinfo->dir_in[i], NULL, session,
			       req->r_request_started, -1,
			       &req->r_caps_reservation) < 0) {
			pr_err("fill_inode badness on %p\n", in);
			goto next_item;
		}
		if (dn)
			update_dentry_lease(dn, rinfo->dir_dlease[i],
					    req->r_session,
					    req->r_request_started);
next_item:
		if (dn)
			dput(dn);
	}
	req->r_did_prepopulate = true;

out:
	if (snapdir) {
		iput(snapdir);
		dput(parent);
	}
	dout("readdir_prepopulate done\n");
	return err;
}

int ceph_inode_set_size(struct inode *inode, loff_t size)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int ret = 0;

	spin_lock(&inode->i_lock);
	dout("set_size %p %llu -> %llu\n", inode, inode->i_size, size);
	inode->i_size = size;
	inode->i_blocks = (size + (1 << 9) - 1) >> 9;

	/* tell the MDS if we are approaching max_size */
	if ((size << 1) >= ci->i_max_size &&
	    (ci->i_reported_size << 1) < ci->i_max_size)
		ret = 1;

	spin_unlock(&inode->i_lock);
	return ret;
}
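
/*
 * The shift arithmetic above reads: return 1 (report to the MDS) once
 * size has crossed half of max_size (size << 1 >= max_size, i.e.
 * size >= max_size / 2) while the last size we reported had not yet
 * crossed that point.
 */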

/*
 * Write back inode data in a worker thread. (This can't be done
 * in the message handler context.)
 */
void ceph_queue_writeback(struct inode *inode)
{
	if (queue_work(ceph_inode_to_client(inode)->wb_wq,
		       &ceph_inode(inode)->i_wb_work)) {
		dout("ceph_queue_writeback %p\n", inode);
		ihold(inode);
	} else {
		dout("ceph_queue_writeback %p failed\n", inode);
	}
}
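
/*
 * The queue/ihold pattern above (also used by ceph_queue_invalidate
 * and ceph_queue_vmtruncate below) takes an inode reference only when
 * the work was actually queued; the matching iput happens at the end
 * of the worker. If the work was already pending, no extra reference
 * is taken.
 */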

static void ceph_writeback_work(struct work_struct *work)
{
	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
						  i_wb_work);
	struct inode *inode = &ci->vfs_inode;

	dout("writeback %p\n", inode);
	filemap_fdatawrite(&inode->i_data);
	iput(inode);
}

/*
 * queue an async invalidation
 */
void ceph_queue_invalidate(struct inode *inode)
{
	if (queue_work(ceph_inode_to_client(inode)->pg_inv_wq,
		       &ceph_inode(inode)->i_pg_inv_work)) {
		dout("ceph_queue_invalidate %p\n", inode);
		ihold(inode);
	} else {
		dout("ceph_queue_invalidate %p failed\n", inode);
	}
}

/*
 * invalidate any pages that are not dirty or under writeback. this
 * includes pages that are clean and mapped.
 */
static void ceph_invalidate_nondirty_pages(struct address_space *mapping)
{
	struct pagevec pvec;
	pgoff_t next = 0;
	int i;

	pagevec_init(&pvec, 0);
	while (pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t index;
			int skip_page =
				(PageDirty(page) || PageWriteback(page));

			if (!skip_page)
				skip_page = !trylock_page(page);

			/*
			 * We really shouldn't be looking at the ->index of an
			 * unlocked page. But we're not allowed to lock these
			 * pages. So we rely upon nobody altering the ->index
			 * of this (pinned-by-us) page.
			 */
			index = page->index;
			if (index > next)
				next = index;
			next++;

			if (skip_page)
				continue;

			generic_error_remove_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
}

/*
 * Invalidate inode pages in a worker thread. (This can't be done
 * in the message handler context.)
 */
static void ceph_invalidate_work(struct work_struct *work)
{
	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
						  i_pg_inv_work);
	struct inode *inode = &ci->vfs_inode;
	u32 orig_gen;
	int check = 0;

	spin_lock(&inode->i_lock);
	dout("invalidate_pages %p gen %d revoking %d\n", inode,
	     ci->i_rdcache_gen, ci->i_rdcache_revoking);
	if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
		/* nevermind! */
		spin_unlock(&inode->i_lock);
		goto out;
	}
	orig_gen = ci->i_rdcache_gen;
	spin_unlock(&inode->i_lock);

	ceph_invalidate_nondirty_pages(inode->i_mapping);

	spin_lock(&inode->i_lock);
	if (orig_gen == ci->i_rdcache_gen &&
	    orig_gen == ci->i_rdcache_revoking) {
		dout("invalidate_pages %p gen %d successful\n", inode,
		     ci->i_rdcache_gen);
		ci->i_rdcache_revoking--;
		check = 1;
	} else {
		dout("invalidate_pages %p gen %d raced, now %d revoking %d\n",
		     inode, orig_gen, ci->i_rdcache_gen,
		     ci->i_rdcache_revoking);
	}
	spin_unlock(&inode->i_lock);

	if (check)
		ceph_check_caps(ci, 0, NULL);
out:
	iput(inode);
}

/*
 * called by trunc_wq; takes i_mutex itself, since truncation can't be
 * done in the message handler context.
 */
static void ceph_vmtruncate_work(struct work_struct *work)
{
	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
						  i_vmtruncate_work);
	struct inode *inode = &ci->vfs_inode;

	dout("vmtruncate_work %p\n", inode);
	mutex_lock(&inode->i_mutex);
	__ceph_do_pending_vmtruncate(inode);
	mutex_unlock(&inode->i_mutex);
	iput(inode);
}

/*
 * Queue an async vmtruncate. If we fail to queue work, we will handle
 * the truncation the next time we call __ceph_do_pending_vmtruncate.
 */
void ceph_queue_vmtruncate(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	if (queue_work(ceph_sb_to_client(inode->i_sb)->trunc_wq,
		       &ci->i_vmtruncate_work)) {
		dout("ceph_queue_vmtruncate %p\n", inode);
		ihold(inode);
	} else {
		dout("ceph_queue_vmtruncate %p failed, pending=%d\n",
		     inode, ci->i_truncate_pending);
	}
}

/*
 * called with i_mutex held.
 *
 * Make sure any pending truncation is applied before doing anything
 * that may depend on it.
 */
void __ceph_do_pending_vmtruncate(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	u64 to;
	int wrbuffer_refs, wake = 0;

retry:
	spin_lock(&inode->i_lock);
	if (ci->i_truncate_pending == 0) {
		dout("__do_pending_vmtruncate %p none pending\n", inode);
		spin_unlock(&inode->i_lock);
		return;
	}

	/*
	 * make sure any dirty snapped pages are flushed before we
	 * possibly truncate them.. so write AND block!
	 */
	if (ci->i_wrbuffer_ref_head < ci->i_wrbuffer_ref) {
		dout("__do_pending_vmtruncate %p flushing snaps first\n",
		     inode);
		spin_unlock(&inode->i_lock);
		filemap_write_and_wait_range(&inode->i_data, 0,
					     inode->i_sb->s_maxbytes);
		goto retry;
	}

	to = ci->i_truncate_size;
	wrbuffer_refs = ci->i_wrbuffer_ref;
	dout("__do_pending_vmtruncate %p (%d) to %lld\n", inode,
	     ci->i_truncate_pending, to);
	spin_unlock(&inode->i_lock);

	truncate_inode_pages(inode->i_mapping, to);

	spin_lock(&inode->i_lock);
	ci->i_truncate_pending--;
	if (ci->i_truncate_pending == 0)
		wake = 1;
	spin_unlock(&inode->i_lock);

	if (wrbuffer_refs == 0)
		ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
	if (wake)
		wake_up_all(&ci->i_cap_wq);
}
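
/*
 * Note the retry loop above: flushing snapped dirty pages blocks and
 * drops i_lock, so the pending-truncate state must be re-read under
 * the lock before it can be trusted again.
 */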

/*
 * symlinks
 */
static void *ceph_sym_follow_link(struct dentry *dentry, struct nameidata *nd)
{
	struct ceph_inode_info *ci = ceph_inode(dentry->d_inode);

	nd_set_link(nd, ci->i_symlink);
	return NULL;
}

static const struct inode_operations ceph_symlink_iops = {
	.readlink = generic_readlink,
	.follow_link = ceph_sym_follow_link,
};
  1374. /*
  1375. * setattr
  1376. */
  1377. int ceph_setattr(struct dentry *dentry, struct iattr *attr)
  1378. {
  1379. struct inode *inode = dentry->d_inode;
  1380. struct ceph_inode_info *ci = ceph_inode(inode);
  1381. struct inode *parent_inode = dentry->d_parent->d_inode;
  1382. const unsigned int ia_valid = attr->ia_valid;
  1383. struct ceph_mds_request *req;
  1384. struct ceph_mds_client *mdsc = ceph_sb_to_client(dentry->d_sb)->mdsc;
  1385. int issued;
  1386. int release = 0, dirtied = 0;
  1387. int mask = 0;
  1388. int err = 0;
  1389. int inode_dirty_flags = 0;
  1390. if (ceph_snap(inode) != CEPH_NOSNAP)
  1391. return -EROFS;
  1392. __ceph_do_pending_vmtruncate(inode);
  1393. err = inode_change_ok(inode, attr);
  1394. if (err != 0)
  1395. return err;
  1396. req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETATTR,
  1397. USE_AUTH_MDS);
  1398. if (IS_ERR(req))
  1399. return PTR_ERR(req);
  1400. spin_lock(&inode->i_lock);
  1401. issued = __ceph_caps_issued(ci, NULL);
  1402. dout("setattr %p issued %s\n", inode, ceph_cap_string(issued));
        if (ia_valid & ATTR_UID) {
                dout("setattr %p uid %d -> %d\n", inode,
                     inode->i_uid, attr->ia_uid);
                if (issued & CEPH_CAP_AUTH_EXCL) {
                        inode->i_uid = attr->ia_uid;
                        dirtied |= CEPH_CAP_AUTH_EXCL;
                } else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
                           attr->ia_uid != inode->i_uid) {
                        req->r_args.setattr.uid = cpu_to_le32(attr->ia_uid);
                        mask |= CEPH_SETATTR_UID;
                        release |= CEPH_CAP_AUTH_SHARED;
                }
        }
        if (ia_valid & ATTR_GID) {
                dout("setattr %p gid %d -> %d\n", inode,
                     inode->i_gid, attr->ia_gid);
                if (issued & CEPH_CAP_AUTH_EXCL) {
                        inode->i_gid = attr->ia_gid;
                        dirtied |= CEPH_CAP_AUTH_EXCL;
                } else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
                           attr->ia_gid != inode->i_gid) {
                        req->r_args.setattr.gid = cpu_to_le32(attr->ia_gid);
                        mask |= CEPH_SETATTR_GID;
                        release |= CEPH_CAP_AUTH_SHARED;
                }
        }
        if (ia_valid & ATTR_MODE) {
                dout("setattr %p mode 0%o -> 0%o\n", inode, inode->i_mode,
                     attr->ia_mode);
                if (issued & CEPH_CAP_AUTH_EXCL) {
                        inode->i_mode = attr->ia_mode;
                        dirtied |= CEPH_CAP_AUTH_EXCL;
                } else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
                           attr->ia_mode != inode->i_mode) {
                        req->r_args.setattr.mode = cpu_to_le32(attr->ia_mode);
                        mask |= CEPH_SETATTR_MODE;
                        release |= CEPH_CAP_AUTH_SHARED;
                }
        }

        if (ia_valid & ATTR_ATIME) {
                dout("setattr %p atime %ld.%ld -> %ld.%ld\n", inode,
                     inode->i_atime.tv_sec, inode->i_atime.tv_nsec,
                     attr->ia_atime.tv_sec, attr->ia_atime.tv_nsec);
                if (issued & CEPH_CAP_FILE_EXCL) {
                        ci->i_time_warp_seq++;
                        inode->i_atime = attr->ia_atime;
                        dirtied |= CEPH_CAP_FILE_EXCL;
                } else if ((issued & CEPH_CAP_FILE_WR) &&
                           timespec_compare(&inode->i_atime,
                                            &attr->ia_atime) < 0) {
                        inode->i_atime = attr->ia_atime;
                        dirtied |= CEPH_CAP_FILE_WR;
                } else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
                           !timespec_equal(&inode->i_atime, &attr->ia_atime)) {
                        ceph_encode_timespec(&req->r_args.setattr.atime,
                                             &attr->ia_atime);
                        mask |= CEPH_SETATTR_ATIME;
                        release |= CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_RD |
                                CEPH_CAP_FILE_WR;
                }
        }
        if (ia_valid & ATTR_MTIME) {
                dout("setattr %p mtime %ld.%ld -> %ld.%ld\n", inode,
                     inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
                     attr->ia_mtime.tv_sec, attr->ia_mtime.tv_nsec);
                if (issued & CEPH_CAP_FILE_EXCL) {
                        ci->i_time_warp_seq++;
                        inode->i_mtime = attr->ia_mtime;
                        dirtied |= CEPH_CAP_FILE_EXCL;
                } else if ((issued & CEPH_CAP_FILE_WR) &&
                           timespec_compare(&inode->i_mtime,
                                            &attr->ia_mtime) < 0) {
                        inode->i_mtime = attr->ia_mtime;
                        dirtied |= CEPH_CAP_FILE_WR;
                } else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
                           !timespec_equal(&inode->i_mtime, &attr->ia_mtime)) {
                        ceph_encode_timespec(&req->r_args.setattr.mtime,
                                             &attr->ia_mtime);
                        mask |= CEPH_SETATTR_MTIME;
                        release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
                                CEPH_CAP_FILE_WR;
                }
        }
        if (ia_valid & ATTR_SIZE) {
                dout("setattr %p size %lld -> %lld\n", inode,
                     inode->i_size, attr->ia_size);
                if (attr->ia_size > inode->i_sb->s_maxbytes) {
                        err = -EINVAL;
                        goto out;
                }
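                /*
                 * Holding FILE_EXCL, we can grow the file locally; a
                 * shrink (or a change without the cap) is left to the MDS.
                 */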
                if ((issued & CEPH_CAP_FILE_EXCL) &&
                    attr->ia_size > inode->i_size) {
                        inode->i_size = attr->ia_size;
                        inode->i_blocks =
                                (attr->ia_size + (1 << 9) - 1) >> 9;
                        inode->i_ctime = attr->ia_ctime;
                        ci->i_reported_size = attr->ia_size;
                        dirtied |= CEPH_CAP_FILE_EXCL;
                } else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
                           attr->ia_size != inode->i_size) {
                        req->r_args.setattr.size = cpu_to_le64(attr->ia_size);
                        req->r_args.setattr.old_size =
                                cpu_to_le64(inode->i_size);
                        mask |= CEPH_SETATTR_SIZE;
                        release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
                                CEPH_CAP_FILE_WR;
                }
        }

        /* these do nothing */
        if (ia_valid & ATTR_CTIME) {
                bool only = (ia_valid & (ATTR_SIZE|ATTR_MTIME|ATTR_ATIME|
                                         ATTR_MODE|ATTR_UID|ATTR_GID)) == 0;
                dout("setattr %p ctime %ld.%ld -> %ld.%ld (%s)\n", inode,
                     inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
                     attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec,
                     only ? "ctime only" : "ignored");
                inode->i_ctime = attr->ia_ctime;
                if (only) {
                        /*
                         * if the kernel wants to dirty ctime but nothing
                         * else, we need to choose a cap to dirty under, or
                         * do an almost-no-op setattr
                         */
                        if (issued & CEPH_CAP_AUTH_EXCL)
                                dirtied |= CEPH_CAP_AUTH_EXCL;
                        else if (issued & CEPH_CAP_FILE_EXCL)
                                dirtied |= CEPH_CAP_FILE_EXCL;
                        else if (issued & CEPH_CAP_XATTR_EXCL)
                                dirtied |= CEPH_CAP_XATTR_EXCL;
                        else
                                mask |= CEPH_SETATTR_CTIME;
                }
        }
        if (ia_valid & ATTR_FILE)
                dout("setattr %p ATTR_FILE ... hrm!\n", inode);

        if (dirtied) {
                inode_dirty_flags = __ceph_mark_dirty_caps(ci, dirtied);
                inode->i_ctime = CURRENT_TIME;
        }
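
        /* only release caps that were actually issued to us */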
        release &= issued;
        spin_unlock(&inode->i_lock);

        if (inode_dirty_flags)
                __mark_inode_dirty(inode, inode_dirty_flags);

        if (mask) {
                req->r_inode = inode;
                ihold(inode);
                req->r_inode_drop = release;
                req->r_args.setattr.mask = cpu_to_le32(mask);
                req->r_num_caps = 1;
                err = ceph_mdsc_do_request(mdsc, parent_inode, req);
        }
        dout("setattr %p result=%d (%s locally, %d remote)\n", inode, err,
             ceph_cap_string(dirtied), mask);
        ceph_mdsc_put_request(req);
        __ceph_do_pending_vmtruncate(inode);
        return err;
out:
        spin_unlock(&inode->i_lock);
        ceph_mdsc_put_request(req);
        return err;
}

/*
 * Verify that we have a lease on the given mask.  If not,
 * do a getattr against an mds.
 */
int ceph_do_getattr(struct inode *inode, int mask)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        int err;

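        /* the snapdir is a locally synthesized directory; nothing to fetch */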
        if (ceph_snap(inode) == CEPH_SNAPDIR) {
                dout("do_getattr inode %p SNAPDIR\n", inode);
                return 0;
        }

        dout("do_getattr inode %p mask %s mode 0%o\n",
             inode, ceph_cap_string(mask), inode->i_mode);
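        /* if the caps we hold already cover the mask, skip the round trip */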
        if (ceph_caps_issued_mask(ceph_inode(inode), mask, 1))
                return 0;

        req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS);
        if (IS_ERR(req))
                return PTR_ERR(req);
        req->r_inode = inode;
        ihold(inode);
        req->r_num_caps = 1;
        req->r_args.getattr.mask = cpu_to_le32(mask);
        err = ceph_mdsc_do_request(mdsc, NULL, req);
        ceph_mdsc_put_request(req);
        dout("do_getattr result=%d\n", err);
        return err;
}

/*
 * Check inode permissions.  We verify we have a valid value for
 * the AUTH cap, then call the generic handler.
 */
int ceph_permission(struct inode *inode, int mask, unsigned int flags)
{
        int err;

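        /*
         * The getattr below may block, which is not allowed during an RCU
         * path walk; return -ECHILD so the VFS falls back to ref-walk.
         */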
        if (flags & IPERM_FLAG_RCU)
                return -ECHILD;

        err = ceph_do_getattr(inode, CEPH_CAP_AUTH_SHARED);
        if (!err)
                err = generic_permission(inode, mask, flags, NULL);
        return err;
}

/*
 * Get all attributes.  Hopefully someday we'll have a statlite()
 * and can limit the fields we require to be accurate.
 */
int ceph_getattr(struct vfsmount *mnt, struct dentry *dentry,
                 struct kstat *stat)
{
        struct inode *inode = dentry->d_inode;
        struct ceph_inode_info *ci = ceph_inode(inode);
        int err;

        err = ceph_do_getattr(inode, CEPH_STAT_CAP_INODE_ALL);
        if (!err) {
                generic_fillattr(inode, stat);
                stat->ino = ceph_translate_ino(inode->i_sb, inode->i_ino);
                if (ceph_snap(inode) != CEPH_NOSNAP)
                        stat->dev = ceph_snap(inode);
                else
                        stat->dev = 0;

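                /*
                 * Directories get a synthetic size: recursive bytes when
                 * the rbytes mount option is set, else the entry count.
                 */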
                if (S_ISDIR(inode->i_mode)) {
                        if (ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb),
                                                RBYTES))
                                stat->size = ci->i_rbytes;
                        else
                                stat->size = ci->i_files + ci->i_subdirs;
                        stat->blocks = 0;
                        stat->blksize = 65536;
                }
        }
        return err;
}