dir.c 48 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
7177817791780178117821783178417851786178717881789179017911792179317941795179617971798179918001801180218031804180518061807180818091810181118121813181418151816181718181819182018211822182318241825182618271828182918301831183218331834183518361837183818391840184118421843184418451846184718481849185018511852185318541855185618571858185918601861186218631864186518661867186818691870187118721873187418751876187718781879188018811882188318841885188618871888188918901891189218931894189518961897189818991900190119021903190419051906190719081909191019111912191319141915191619171918191919201921192219231924192519261927192819291930193119321933193419351936193719381939194019411942194319441945194619471948194919501951195219531954
  1. /*
  2. FUSE: Filesystem in Userspace
  3. Copyright (C) 2001-2008 Miklos Szeredi <miklos@szeredi.hu>
  4. This program can be distributed under the terms of the GNU GPL.
  5. See the file COPYING.
  6. */
  7. #include "fuse_i.h"
  8. #include <linux/pagemap.h>
  9. #include <linux/file.h>
  10. #include <linux/sched.h>
  11. #include <linux/namei.h>
  12. #include <linux/slab.h>
  13. static bool fuse_use_readdirplus(struct inode *dir, struct file *filp)
  14. {
  15. struct fuse_conn *fc = get_fuse_conn(dir);
  16. struct fuse_inode *fi = get_fuse_inode(dir);
  17. if (!fc->do_readdirplus)
  18. return false;
  19. if (!fc->readdirplus_auto)
  20. return true;
  21. if (test_and_clear_bit(FUSE_I_ADVISE_RDPLUS, &fi->state))
  22. return true;
  23. if (filp->f_pos == 0)
  24. return true;
  25. return false;
  26. }
  27. static void fuse_advise_use_readdirplus(struct inode *dir)
  28. {
  29. struct fuse_inode *fi = get_fuse_inode(dir);
  30. set_bit(FUSE_I_ADVISE_RDPLUS, &fi->state);
  31. }
#if BITS_PER_LONG >= 64
/* A 64-bit d_time holds the full jiffies64 deadline directly. */
static inline void fuse_dentry_settime(struct dentry *entry, u64 time)
{
	entry->d_time = time;
}

static inline u64 fuse_dentry_time(struct dentry *entry)
{
	return entry->d_time;
}
#else
/*
 * On 32 bit archs store the high 32 bits of time in d_fsdata
 */
static void fuse_dentry_settime(struct dentry *entry, u64 time)
{
	entry->d_time = time;
	entry->d_fsdata = (void *) (unsigned long) (time >> 32);
}

static u64 fuse_dentry_time(struct dentry *entry)
{
	/* Recombine low word (d_time) with high word (d_fsdata) */
	return (u64) entry->d_time +
		((u64) (unsigned long) entry->d_fsdata << 32);
}
#endif
  56. /*
  57. * FUSE caches dentries and attributes with separate timeout. The
  58. * time in jiffies until the dentry/attributes are valid is stored in
  59. * dentry->d_time and fuse_inode->i_time respectively.
  60. */
  61. /*
  62. * Calculate the time in jiffies until a dentry/attributes are valid
  63. */
  64. static u64 time_to_jiffies(unsigned long sec, unsigned long nsec)
  65. {
  66. if (sec || nsec) {
  67. struct timespec ts = {sec, nsec};
  68. return get_jiffies_64() + timespec_to_jiffies(&ts);
  69. } else
  70. return 0;
  71. }
  72. /*
  73. * Set dentry and possibly attribute timeouts from the lookup/mk*
  74. * replies
  75. */
  76. static void fuse_change_entry_timeout(struct dentry *entry,
  77. struct fuse_entry_out *o)
  78. {
  79. fuse_dentry_settime(entry,
  80. time_to_jiffies(o->entry_valid, o->entry_valid_nsec));
  81. }
  82. static u64 attr_timeout(struct fuse_attr_out *o)
  83. {
  84. return time_to_jiffies(o->attr_valid, o->attr_valid_nsec);
  85. }
  86. static u64 entry_attr_timeout(struct fuse_entry_out *o)
  87. {
  88. return time_to_jiffies(o->attr_valid, o->attr_valid_nsec);
  89. }
  90. /*
  91. * Mark the attributes as stale, so that at the next call to
  92. * ->getattr() they will be fetched from userspace
  93. */
  94. void fuse_invalidate_attr(struct inode *inode)
  95. {
  96. get_fuse_inode(inode)->i_time = 0;
  97. }
  98. /*
  99. * Just mark the entry as stale, so that a next attempt to look it up
  100. * will result in a new lookup call to userspace
  101. *
  102. * This is called when a dentry is about to become negative and the
  103. * timeout is unknown (unlink, rmdir, rename and in some cases
  104. * lookup)
  105. */
void fuse_invalidate_entry_cache(struct dentry *entry)
{
	/* Deadline 0 == expired: the next lookup goes to userspace */
	fuse_dentry_settime(entry, 0);
}
  110. /*
  111. * Same as fuse_invalidate_entry_cache(), but also try to remove the
  112. * dentry from the hash
  113. */
static void fuse_invalidate_entry(struct dentry *entry)
{
	/* Try to unhash the dentry (shrinking its subtree) ... */
	d_invalidate(entry);
	/* ... and expire it even if unhashing failed (e.g. busy dir) */
	fuse_invalidate_entry_cache(entry);
}
  119. static void fuse_lookup_init(struct fuse_conn *fc, struct fuse_req *req,
  120. u64 nodeid, struct qstr *name,
  121. struct fuse_entry_out *outarg)
  122. {
  123. memset(outarg, 0, sizeof(struct fuse_entry_out));
  124. req->in.h.opcode = FUSE_LOOKUP;
  125. req->in.h.nodeid = nodeid;
  126. req->in.numargs = 1;
  127. req->in.args[0].size = name->len + 1;
  128. req->in.args[0].value = name->name;
  129. req->out.numargs = 1;
  130. if (fc->minor < 9)
  131. req->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
  132. else
  133. req->out.args[0].size = sizeof(struct fuse_entry_out);
  134. req->out.args[0].value = outarg;
  135. }
  136. u64 fuse_get_attr_version(struct fuse_conn *fc)
  137. {
  138. u64 curr_version;
  139. /*
  140. * The spin lock isn't actually needed on 64bit archs, but we
  141. * don't yet care too much about such optimizations.
  142. */
  143. spin_lock(&fc->lock);
  144. curr_version = fc->attr_version;
  145. spin_unlock(&fc->lock);
  146. return curr_version;
  147. }
  148. /*
  149. * Check whether the dentry is still valid
  150. *
  151. * If the entry validity timeout has expired and the dentry is
  152. * positive, try to redo the lookup. If the lookup results in a
  153. * different inode, then let the VFS invalidate the dentry and redo
  154. * the lookup once more. If the lookup results in the same inode,
  155. * then refresh the attributes, timeouts and mark the dentry valid.
  156. */
/*
 * ->d_revalidate(): returns 1 if the dentry is still valid, 0 to make
 * the VFS invalidate it, -ECHILD to drop out of RCU-walk mode.
 * See the block comment above for the overall strategy.
 */
static int fuse_dentry_revalidate(struct dentry *entry, struct nameidata *nd)
{
	struct inode *inode;
	struct dentry *parent;
	struct fuse_conn *fc;

	inode = ACCESS_ONCE(entry->d_inode);
	if (inode && is_bad_inode(inode))
		return 0;
	else if (fuse_dentry_time(entry) < get_jiffies_64()) {
		/* Entry timeout has expired: redo the lookup */
		int err;
		struct fuse_entry_out outarg;
		struct fuse_req *req;
		struct fuse_forget_link *forget;
		u64 attr_version;

		/* For negative dentries, always do a fresh lookup */
		if (!inode)
			return 0;
		/* The blocking request below is not allowed in RCU-walk */
		if (nd && (nd->flags & LOOKUP_RCU))
			return -ECHILD;
		fc = get_fuse_conn(inode);
		req = fuse_get_req_nopages(fc);
		if (IS_ERR(req))
			return 0;
		forget = fuse_alloc_forget();
		if (!forget) {
			fuse_put_request(fc, req);
			return 0;
		}
		/* Snapshot before sending so attr updates can't be misordered */
		attr_version = fuse_get_attr_version(fc);
		parent = dget_parent(entry);
		fuse_lookup_init(fc, req, get_node_id(parent->d_inode),
				 &entry->d_name, &outarg);
		fuse_request_send(fc, req);
		dput(parent);
		err = req->out.h.error;
		fuse_put_request(fc, req);
		/* Zero nodeid is same as -ENOENT */
		if (!err && !outarg.nodeid)
			err = -ENOENT;
		if (!err) {
			struct fuse_inode *fi = get_fuse_inode(inode);
			if (outarg.nodeid != get_node_id(inode)) {
				/*
				 * Name now refers to a different inode:
				 * queue the forget to balance the lookup
				 * count, and invalidate the dentry.
				 */
				fuse_queue_forget(fc, forget, outarg.nodeid, 1);
				return 0;
			}
			/* Same inode: account the extra lookup reference */
			spin_lock(&fc->lock);
			fi->nlookup++;
			spin_unlock(&fc->lock);
		}
		kfree(forget);
		/* Invalidate on lookup failure or file-type change */
		if (err || (outarg.attr.mode ^ inode->i_mode) & S_IFMT)
			return 0;
		fuse_change_attributes(inode, &outarg.attr,
				       entry_attr_timeout(&outarg),
				       attr_version);
		fuse_change_entry_timeout(entry, &outarg);
	} else if (inode) {
		/* Still valid: optionally hint parent to use readdirplus */
		fc = get_fuse_conn(inode);
		if (fc->readdirplus_auto) {
			parent = dget_parent(entry);
			fuse_advise_use_readdirplus(parent->d_inode);
			dput(parent);
		}
	}
	return 1;
}
  223. /*
  224. * Get the canonical path. Since we must translate to a path, this must be done
  225. * in the context of the userspace daemon, however, the userspace daemon cannot
  226. * look up paths on its own. Instead, we handle the lookup as a special case
  227. * inside of the write request.
  228. */
static void fuse_dentry_canonical_path(const struct path *path, struct path *canonical_path) {
	struct inode *inode = path->dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	int err;
	char *path_name;

	req = fuse_get_req(fc, 1);
	err = PTR_ERR(req);
	if (IS_ERR(req))
		goto default_path;

	/* One page to receive the NUL-terminated path from the daemon */
	path_name = (char*)__get_free_page(GFP_KERNEL);
	if (!path_name) {
		fuse_put_request(fc, req);
		goto default_path;
	}

	req->in.h.opcode = FUSE_CANONICAL_PATH;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 0;
	req->out.numargs = 1;
	req->out.args[0].size = PATH_MAX;
	req->out.args[0].value = path_name;
	/*
	 * The reply handler resolves the returned name and fills in
	 * *canonical_path on success.
	 */
	req->canonical_path = canonical_path;
	req->out.argvar = 1;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	free_page((unsigned long)path_name);
	if (!err)
		return;
default_path:
	/* Fallback: the canonical path is the path itself (referenced) */
	canonical_path->dentry = path->dentry;
	canonical_path->mnt = path->mnt;
	path_get(canonical_path);
}
  263. static int invalid_nodeid(u64 nodeid)
  264. {
  265. return !nodeid || nodeid == FUSE_ROOT_ID;
  266. }
/* Dentry operations installed for all dentries on a FUSE mount. */
const struct dentry_operations fuse_dentry_operations = {
.d_revalidate = fuse_dentry_revalidate,
.d_canonical_path = fuse_dentry_canonical_path,
};
  271. int fuse_valid_type(int m)
  272. {
  273. return S_ISREG(m) || S_ISDIR(m) || S_ISLNK(m) || S_ISCHR(m) ||
  274. S_ISBLK(m) || S_ISFIFO(m) || S_ISSOCK(m);
  275. }
  276. /*
  277. * Add a directory inode to a dentry, ensuring that no other dentry
  278. * refers to this inode. Called with fc->inst_mutex.
  279. */
static struct dentry *fuse_d_add_directory(struct dentry *entry,
struct inode *inode)
{
	struct dentry *alias = d_find_alias(inode);
	if (alias && !(alias->d_flags & DCACHE_DISCONNECTED)) {
		/* This tries to shrink the subtree below alias */
		fuse_invalidate_entry(alias);
		dput(alias);
		/* Still aliased after shrinking: refuse a second dentry */
		if (!list_empty(&inode->i_dentry))
			return ERR_PTR(-EBUSY);
	} else {
		/* dput(NULL) is a no-op for the no-alias case */
		dput(alias);
	}
	return d_splice_alias(inode, entry);
}
  295. int fuse_lookup_name(struct super_block *sb, u64 nodeid, struct qstr *name,
  296. struct fuse_entry_out *outarg, struct inode **inode)
  297. {
  298. struct fuse_conn *fc = get_fuse_conn_super(sb);
  299. struct fuse_req *req;
  300. struct fuse_forget_link *forget;
  301. u64 attr_version;
  302. int err;
  303. *inode = NULL;
  304. err = -ENAMETOOLONG;
  305. if (name->len > FUSE_NAME_MAX)
  306. goto out;
  307. req = fuse_get_req_nopages(fc);
  308. err = PTR_ERR(req);
  309. if (IS_ERR(req))
  310. goto out;
  311. forget = fuse_alloc_forget();
  312. err = -ENOMEM;
  313. if (!forget) {
  314. fuse_put_request(fc, req);
  315. goto out;
  316. }
  317. attr_version = fuse_get_attr_version(fc);
  318. fuse_lookup_init(fc, req, nodeid, name, outarg);
  319. fuse_request_send(fc, req);
  320. err = req->out.h.error;
  321. fuse_put_request(fc, req);
  322. /* Zero nodeid is same as -ENOENT, but with valid timeout */
  323. if (err || !outarg->nodeid)
  324. goto out_put_forget;
  325. err = -EIO;
  326. if (!outarg->nodeid)
  327. goto out_put_forget;
  328. if (!fuse_valid_type(outarg->attr.mode))
  329. goto out_put_forget;
  330. *inode = fuse_iget(sb, outarg->nodeid, outarg->generation,
  331. &outarg->attr, entry_attr_timeout(outarg),
  332. attr_version);
  333. err = -ENOMEM;
  334. if (!*inode) {
  335. fuse_queue_forget(fc, forget, outarg->nodeid, 1);
  336. goto out;
  337. }
  338. err = 0;
  339. out_put_forget:
  340. kfree(forget);
  341. out:
  342. return err;
  343. }
/*
 * ->lookup(): resolve @entry in @dir, splicing in the found inode or
 * caching a negative dentry with the reply's timeout.
 */
static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry,
				  struct nameidata *nd)
{
	int err;
	struct fuse_entry_out outarg;
	struct inode *inode;
	struct dentry *newent;
	struct fuse_conn *fc = get_fuse_conn(dir);
	bool outarg_valid = true;

	err = fuse_lookup_name(dir->i_sb, get_node_id(dir), &entry->d_name,
			       &outarg, &inode);
	/* -ENOENT: negative entry; cache it without an entry timeout */
	if (err == -ENOENT) {
		outarg_valid = false;
		err = 0;
	}
	if (err)
		goto out_err;

	/* Userspace must never hand out the root nodeid for a child */
	err = -EIO;
	if (inode && get_node_id(inode) == FUSE_ROOT_ID)
		goto out_iput;

	if (inode && S_ISDIR(inode->i_mode)) {
		/* Directories may only have a single (connected) alias */
		mutex_lock(&fc->inst_mutex);
		newent = fuse_d_add_directory(entry, inode);
		mutex_unlock(&fc->inst_mutex);
		err = PTR_ERR(newent);
		if (IS_ERR(newent))
			goto out_iput;
	} else {
		newent = d_splice_alias(inode, entry);
	}

	/* d_splice_alias may have returned a different dentry to use */
	entry = newent ? newent : entry;
	if (outarg_valid)
		fuse_change_entry_timeout(entry, &outarg);
	else
		fuse_invalidate_entry_cache(entry);

	fuse_advise_use_readdirplus(dir);
	return newent;

 out_iput:
	iput(inode);
 out_err:
	return ERR_PTR(err);
}
  386. /*
  387. * Atomic create+open operation
  388. *
  389. * If the filesystem doesn't support this, then fall back to separate
  390. * 'mknod' + 'open' requests.
  391. */
static int fuse_create_open(struct inode *dir, struct dentry *entry,
			    umode_t mode, struct nameidata *nd)
{
	int err;
	struct inode *inode;
	struct fuse_conn *fc = get_fuse_conn(dir);
	struct fuse_req *req;
	struct fuse_forget_link *forget;
	struct fuse_create_in inarg;
	struct fuse_open_out outopen;
	struct fuse_entry_out outentry;
	struct fuse_file *ff;
	struct file *file;
	int flags = nd->intent.open.flags;

	/* Userspace expects S_IFREG in create mode */
	BUG_ON((mode & S_IFMT) != S_IFREG);

	/* -ENOSYS latched from a previous attempt: caller falls back */
	if (fc->no_create)
		return -ENOSYS;

	forget = fuse_alloc_forget();
	if (!forget)
		return -ENOMEM;

	req = fuse_get_req_nopages(fc);
	err = PTR_ERR(req);
	if (IS_ERR(req))
		goto out_put_forget_req;

	err = -ENOMEM;
	ff = fuse_file_alloc(fc);
	if (!ff)
		goto out_put_request;

	/* Apply the umask here only if userspace won't (FUSE_DONT_MASK) */
	if (!fc->dont_mask)
		mode &= ~current_umask();

	flags &= ~O_NOCTTY;
	memset(&inarg, 0, sizeof(inarg));
	memset(&outentry, 0, sizeof(outentry));
	inarg.flags = flags;
	inarg.mode = mode;
	inarg.umask = current_umask();
	req->in.h.opcode = FUSE_CREATE;
	req->in.h.nodeid = get_node_id(dir);
	req->in.numargs = 2;
	/* Pre-7.12 connections take the shorter open_in layout */
	req->in.args[0].size = fc->minor < 12 ? sizeof(struct fuse_open_in) :
						sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->in.args[1].size = entry->d_name.len + 1;
	req->in.args[1].value = entry->d_name.name;
	req->out.numargs = 2;
	/* Pre-7.9 connections use the shorter compat entry_out layout */
	if (fc->minor < 9)
		req->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
	else
		req->out.args[0].size = sizeof(outentry);
	req->out.args[0].value = &outentry;
	req->out.args[1].size = sizeof(outopen);
	req->out.args[1].value = &outopen;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	if (err) {
		/* Remember the lack of support to skip this path next time */
		if (err == -ENOSYS)
			fc->no_create = 1;
		goto out_free_ff;
	}

	err = -EIO;
	/* Reply must describe a regular file with a sane nodeid */
	if (!S_ISREG(outentry.attr.mode) || invalid_nodeid(outentry.nodeid))
		goto out_free_ff;

	fuse_put_request(fc, req);
	ff->fh = outopen.fh;
	ff->nodeid = outentry.nodeid;
	ff->open_flags = outopen.open_flags;
	inode = fuse_iget(dir->i_sb, outentry.nodeid, outentry.generation,
			  &outentry.attr, entry_attr_timeout(&outentry), 0);
	if (!inode) {
		/* Release the opened handle and the lookup reference */
		flags &= ~(O_CREAT | O_EXCL | O_TRUNC);
		fuse_sync_release(ff, flags);
		fuse_queue_forget(fc, forget, outentry.nodeid, 1);
		return -ENOMEM;
	}
	kfree(forget);
	d_instantiate(entry, inode);
	fuse_change_entry_timeout(entry, &outentry);
	fuse_invalidate_attr(dir);
	file = lookup_instantiate_filp(nd, entry, generic_file_open);
	if (IS_ERR(file)) {
		fuse_sync_release(ff, flags);
		return PTR_ERR(file);
	}
	file->private_data = fuse_file_get(ff);
	fuse_finish_open(inode, file);
	return 0;

 out_free_ff:
	fuse_file_free(ff);
 out_put_request:
	fuse_put_request(fc, req);
 out_put_forget_req:
	kfree(forget);
	return err;
}
  487. /*
  488. * Code shared between mknod, mkdir, symlink and link
  489. */
/*
 * Send the request prepared by the caller (opcode + in-args already
 * set), validate the entry reply and instantiate @entry.  Consumes
 * @req in all cases.  @mode is used only to cross-check the file type
 * userspace reports.
 */
static int create_new_entry(struct fuse_conn *fc, struct fuse_req *req,
			    struct inode *dir, struct dentry *entry,
			    umode_t mode)
{
	struct fuse_entry_out outarg;
	struct inode *inode;
	int err;
	struct fuse_forget_link *forget;

	forget = fuse_alloc_forget();
	if (!forget) {
		fuse_put_request(fc, req);
		return -ENOMEM;
	}

	memset(&outarg, 0, sizeof(outarg));
	req->in.h.nodeid = get_node_id(dir);
	req->out.numargs = 1;
	/* Pre-7.9 connections use the shorter compat entry_out layout */
	if (fc->minor < 9)
		req->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
	else
		req->out.args[0].size = sizeof(outarg);
	req->out.args[0].value = &outarg;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (err)
		goto out_put_forget_req;

	err = -EIO;
	if (invalid_nodeid(outarg.nodeid))
		goto out_put_forget_req;
	/* Userspace must report the same file type we asked to create */
	if ((outarg.attr.mode ^ mode) & S_IFMT)
		goto out_put_forget_req;

	inode = fuse_iget(dir->i_sb, outarg.nodeid, outarg.generation,
			  &outarg.attr, entry_attr_timeout(&outarg), 0);
	if (!inode) {
		/* Balance the lookup count held by the reply */
		fuse_queue_forget(fc, forget, outarg.nodeid, 1);
		return -ENOMEM;
	}
	kfree(forget);

	if (S_ISDIR(inode->i_mode)) {
		struct dentry *alias;
		/* A directory may have only one alias */
		mutex_lock(&fc->inst_mutex);
		alias = d_find_alias(inode);
		if (alias) {
			/* New directory must have moved since mkdir */
			mutex_unlock(&fc->inst_mutex);
			dput(alias);
			iput(inode);
			return -EBUSY;
		}
		d_instantiate(entry, inode);
		mutex_unlock(&fc->inst_mutex);
	} else
		d_instantiate(entry, inode);

	fuse_change_entry_timeout(entry, &outarg);
	fuse_invalidate_attr(dir);
	return 0;

 out_put_forget_req:
	kfree(forget);
	return err;
}
/*
 * ->mknod(): create a device node, fifo, socket or regular file.
 */
static int fuse_mknod(struct inode *dir, struct dentry *entry, umode_t mode,
		      dev_t rdev)
{
	struct fuse_mknod_in inarg;
	struct fuse_conn *fc = get_fuse_conn(dir);
	struct fuse_req *req = fuse_get_req_nopages(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* Apply the umask here only if userspace won't (FUSE_DONT_MASK) */
	if (!fc->dont_mask)
		mode &= ~current_umask();

	memset(&inarg, 0, sizeof(inarg));
	inarg.mode = mode;
	inarg.rdev = new_encode_dev(rdev);
	inarg.umask = current_umask();
	req->in.h.opcode = FUSE_MKNOD;
	req->in.numargs = 2;
	/* Pre-7.12 connections take the shorter mknod_in layout */
	req->in.args[0].size = fc->minor < 12 ? FUSE_COMPAT_MKNOD_IN_SIZE :
						sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->in.args[1].size = entry->d_name.len + 1;
	req->in.args[1].value = entry->d_name.name;
	/* create_new_entry() sends the request and consumes req */
	return create_new_entry(fc, req, dir, entry, mode);
}
  573. static int fuse_create(struct inode *dir, struct dentry *entry, umode_t mode,
  574. struct nameidata *nd)
  575. {
  576. if (nd) {
  577. int err = fuse_create_open(dir, entry, mode, nd);
  578. if (err != -ENOSYS)
  579. return err;
  580. /* Fall back on mknod */
  581. }
  582. return fuse_mknod(dir, entry, mode, 0);
  583. }
/*
 * ->mkdir(): create a directory; instantiation via create_new_entry().
 */
static int fuse_mkdir(struct inode *dir, struct dentry *entry, umode_t mode)
{
	struct fuse_mkdir_in inarg;
	struct fuse_conn *fc = get_fuse_conn(dir);
	struct fuse_req *req = fuse_get_req_nopages(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* Apply the umask here only if userspace won't (FUSE_DONT_MASK) */
	if (!fc->dont_mask)
		mode &= ~current_umask();

	memset(&inarg, 0, sizeof(inarg));
	inarg.mode = mode;
	inarg.umask = current_umask();
	req->in.h.opcode = FUSE_MKDIR;
	req->in.numargs = 2;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->in.args[1].size = entry->d_name.len + 1;
	req->in.args[1].value = entry->d_name.name;
	return create_new_entry(fc, req, dir, entry, S_IFDIR);
}
/*
 * ->symlink(): args are the new name followed by the target string.
 */
static int fuse_symlink(struct inode *dir, struct dentry *entry,
			const char *link)
{
	struct fuse_conn *fc = get_fuse_conn(dir);
	unsigned len = strlen(link) + 1;	/* include terminating NUL */
	struct fuse_req *req = fuse_get_req_nopages(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->in.h.opcode = FUSE_SYMLINK;
	req->in.numargs = 2;
	req->in.args[0].size = entry->d_name.len + 1;
	req->in.args[0].value = entry->d_name.name;
	req->in.args[1].size = len;
	req->in.args[1].value = link;
	return create_new_entry(fc, req, dir, entry, S_IFLNK);
}
/*
 * ->unlink(): remove @entry from @dir and keep the cached nlink /
 * attribute state consistent with the result.
 */
static int fuse_unlink(struct inode *dir, struct dentry *entry)
{
	int err;
	struct fuse_conn *fc = get_fuse_conn(dir);
	struct fuse_req *req = fuse_get_req_nopages(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->in.h.opcode = FUSE_UNLINK;
	req->in.h.nodeid = get_node_id(dir);
	req->in.numargs = 1;
	req->in.args[0].size = entry->d_name.len + 1;
	req->in.args[0].value = entry->d_name.name;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (!err) {
		struct inode *inode = entry->d_inode;
		struct fuse_inode *fi = get_fuse_inode(inode);

		/* Bump attr_version so stale replies can't undo the drop */
		spin_lock(&fc->lock);
		fi->attr_version = ++fc->attr_version;
		/*
		 * If i_nlink == 0 then unlink doesn't make sense, yet this can
		 * happen if userspace filesystem is careless.  It would be
		 * difficult to enforce correct nlink usage so just ignore this
		 * condition here
		 */
		if (inode->i_nlink > 0)
			drop_nlink(inode);
		spin_unlock(&fc->lock);
		fuse_invalidate_attr(inode);
		fuse_invalidate_attr(dir);
		fuse_invalidate_entry_cache(entry);
	} else if (err == -EINTR)
		/* Unknown whether the unlink happened: drop the dentry */
		fuse_invalidate_entry(entry);
	return err;
}
/*
 * ->rmdir(): remove the directory @entry from @dir.
 */
static int fuse_rmdir(struct inode *dir, struct dentry *entry)
{
	int err;
	struct fuse_conn *fc = get_fuse_conn(dir);
	struct fuse_req *req = fuse_get_req_nopages(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->in.h.opcode = FUSE_RMDIR;
	req->in.h.nodeid = get_node_id(dir);
	req->in.numargs = 1;
	req->in.args[0].size = entry->d_name.len + 1;
	req->in.args[0].value = entry->d_name.name;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (!err) {
		/* Directory is gone; its link count is now meaningless */
		clear_nlink(entry->d_inode);
		fuse_invalidate_attr(dir);
		fuse_invalidate_entry_cache(entry);
	} else if (err == -EINTR)
		/* Unknown whether the rmdir happened: drop the dentry */
		fuse_invalidate_entry(entry);
	return err;
}
/*
 * ->rename(): move @oldent from @olddir to @newent in @newdir.
 */
static int fuse_rename(struct inode *olddir, struct dentry *oldent,
		       struct inode *newdir, struct dentry *newent)
{
	int err;
	struct fuse_rename_in inarg;
	struct fuse_conn *fc = get_fuse_conn(olddir);
	struct fuse_req *req = fuse_get_req_nopages(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	memset(&inarg, 0, sizeof(inarg));
	inarg.newdir = get_node_id(newdir);
	req->in.h.opcode = FUSE_RENAME;
	req->in.h.nodeid = get_node_id(olddir);
	req->in.numargs = 3;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->in.args[1].size = oldent->d_name.len + 1;
	req->in.args[1].value = oldent->d_name.name;
	req->in.args[2].size = newent->d_name.len + 1;
	req->in.args[2].value = newent->d_name.name;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (!err) {
		/* ctime changes */
		fuse_invalidate_attr(oldent->d_inode);
		fuse_invalidate_attr(olddir);
		if (olddir != newdir)
			fuse_invalidate_attr(newdir);

		/* newent will end up negative */
		if (newent->d_inode) {
			fuse_invalidate_attr(newent->d_inode);
			fuse_invalidate_entry_cache(newent);
		}
	} else if (err == -EINTR) {
		/* If request was interrupted, DEITY only knows if the
		   rename actually took place.  If the invalidation
		   fails (e.g. some process has CWD under the renamed
		   directory), then there can be inconsistency between
		   the dcache and the real filesystem.  Tough luck. */
		fuse_invalidate_entry(oldent);
		if (newent->d_inode)
			fuse_invalidate_entry(newent);
	}
	return err;
}
/*
 * ->link(): hard-link @entry's inode as @newent in @newdir.
 */
static int fuse_link(struct dentry *entry, struct inode *newdir,
		     struct dentry *newent)
{
	int err;
	struct fuse_link_in inarg;
	struct inode *inode = entry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req = fuse_get_req_nopages(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	memset(&inarg, 0, sizeof(inarg));
	inarg.oldnodeid = get_node_id(inode);
	req->in.h.opcode = FUSE_LINK;
	req->in.numargs = 2;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->in.args[1].size = newent->d_name.len + 1;
	req->in.args[1].value = newent->d_name.name;
	/* create_new_entry() sends the request and consumes req */
	err = create_new_entry(fc, req, newdir, newent, inode->i_mode);
	/* Contrary to "normal" filesystems it can happen that link
	   makes two "logical" inodes point to the same "physical"
	   inode.  We invalidate the attributes of the old one, so it
	   will reflect changes in the backing inode (link count,
	   etc.)
	*/
	if (!err) {
		struct fuse_inode *fi = get_fuse_inode(inode);

		/* Bump attr_version so stale replies can't undo the bump */
		spin_lock(&fc->lock);
		fi->attr_version = ++fc->attr_version;
		if (likely(inode->i_nlink < UINT_MAX))
			inc_nlink(inode);
		spin_unlock(&fc->lock);
		fuse_invalidate_attr(inode);
	} else if (err == -EINTR) {
		/* Unknown whether the link happened: refetch attributes */
		fuse_invalidate_attr(inode);
	}
	return err;
}
  763. static void fuse_fillattr(struct inode *inode, struct fuse_attr *attr,
  764. struct kstat *stat)
  765. {
  766. unsigned int blkbits;
  767. stat->dev = inode->i_sb->s_dev;
  768. stat->ino = attr->ino;
  769. stat->mode = (inode->i_mode & S_IFMT) | (attr->mode & 07777);
  770. stat->nlink = attr->nlink;
  771. stat->uid = make_kuid(&init_user_ns, attr->uid);
  772. stat->gid = make_kgid(&init_user_ns, attr->gid);
  773. stat->rdev = inode->i_rdev;
  774. stat->atime.tv_sec = attr->atime;
  775. stat->atime.tv_nsec = attr->atimensec;
  776. stat->mtime.tv_sec = attr->mtime;
  777. stat->mtime.tv_nsec = attr->mtimensec;
  778. stat->ctime.tv_sec = attr->ctime;
  779. stat->ctime.tv_nsec = attr->ctimensec;
  780. stat->size = attr->size;
  781. stat->blocks = attr->blocks;
  782. if (attr->blksize != 0)
  783. blkbits = ilog2(attr->blksize);
  784. else
  785. blkbits = inode->i_sb->s_blocksize_bits;
  786. stat->blksize = 1 << blkbits;
  787. }
/*
 * Fetch fresh attributes from userspace with FUSE_GETATTR and update
 * the cached inode attributes.  If @file refers to an open regular
 * file its handle is passed along so the server can use it.
 *
 * Returns 0, a negative error from the server, or -EIO if the file
 * type changed under us (the inode is then marked bad).
 */
static int fuse_do_getattr(struct inode *inode, struct kstat *stat,
			   struct file *file)
{
	int err;
	struct fuse_getattr_in inarg;
	struct fuse_attr_out outarg;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	u64 attr_version;

	req = fuse_get_req_nopages(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* Snapshot before sending so concurrent updates are not clobbered */
	attr_version = fuse_get_attr_version(fc);

	memset(&inarg, 0, sizeof(inarg));
	memset(&outarg, 0, sizeof(outarg));
	/* Directories have separate file-handle space */
	if (file && S_ISREG(inode->i_mode)) {
		struct fuse_file *ff = file->private_data;

		inarg.getattr_flags |= FUSE_GETATTR_FH;
		inarg.fh = ff->fh;
	}
	req->in.h.opcode = FUSE_GETATTR;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->out.numargs = 1;
	/* Pre-7.9 protocol used a shorter fuse_attr_out */
	if (fc->minor < 9)
		req->out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
	else
		req->out.args[0].size = sizeof(outarg);
	req->out.args[0].value = &outarg;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (!err) {
		if ((inode->i_mode ^ outarg.attr.mode) & S_IFMT) {
			/* Inode type changed on the server: give up on it */
			make_bad_inode(inode);
			err = -EIO;
		} else {
			fuse_change_attributes(inode, &outarg.attr,
					       attr_timeout(&outarg),
					       attr_version);
			if (stat)
				fuse_fillattr(inode, &outarg.attr, stat);
		}
	}
	return err;
}
  837. int fuse_update_attributes(struct inode *inode, struct kstat *stat,
  838. struct file *file, bool *refreshed)
  839. {
  840. struct fuse_inode *fi = get_fuse_inode(inode);
  841. int err;
  842. bool r;
  843. if (fi->i_time < get_jiffies_64()) {
  844. r = true;
  845. err = fuse_do_getattr(inode, stat, file);
  846. } else {
  847. r = false;
  848. err = 0;
  849. if (stat) {
  850. generic_fillattr(inode, stat);
  851. stat->mode = fi->orig_i_mode;
  852. stat->ino = fi->orig_ino;
  853. }
  854. }
  855. if (refreshed != NULL)
  856. *refreshed = r;
  857. return err;
  858. }
/*
 * Invalidate a directory entry in response to a notification from the
 * filesystem daemon (reverse invalidation).
 *
 * @parent_nodeid: node ID of the directory containing @name
 * @child_nodeid:  if nonzero, the entry is being deleted and its inode
 *                 must match this node ID; rmdir/unlink-style checks
 *                 (mountpoint, emptiness) are applied before d_delete
 * @name:          name of the entry to invalidate
 */
int fuse_reverse_inval_entry(struct super_block *sb, u64 parent_nodeid,
			     u64 child_nodeid, struct qstr *name)
{
	int err = -ENOTDIR;
	struct inode *parent;
	struct dentry *dir;
	struct dentry *entry;

	parent = ilookup5(sb, parent_nodeid, fuse_inode_eq, &parent_nodeid);
	if (!parent)
		return -ENOENT;

	mutex_lock(&parent->i_mutex);
	if (!S_ISDIR(parent->i_mode))
		goto unlock;

	err = -ENOENT;
	dir = d_find_alias(parent);
	if (!dir)
		goto unlock;

	entry = d_lookup(dir, name);
	dput(dir);
	if (!entry)
		goto unlock;

	fuse_invalidate_attr(parent);
	fuse_invalidate_entry(entry);

	if (child_nodeid != 0 && entry->d_inode) {
		mutex_lock(&entry->d_inode->i_mutex);
		/* Make sure we are removing the entry we were told about */
		if (get_node_id(entry->d_inode) != child_nodeid) {
			err = -ENOENT;
			goto badentry;
		}
		if (d_mountpoint(entry)) {
			err = -EBUSY;
			goto badentry;
		}
		if (S_ISDIR(entry->d_inode->i_mode)) {
			/* Only an empty directory may be removed */
			shrink_dcache_parent(entry);
			if (!simple_empty(entry)) {
				err = -ENOTEMPTY;
				goto badentry;
			}
			entry->d_inode->i_flags |= S_DEAD;
		}
		dont_mount(entry);
		clear_nlink(entry->d_inode);
		err = 0;
 badentry:
		mutex_unlock(&entry->d_inode->i_mutex);
		if (!err)
			d_delete(entry);
	} else {
		err = 0;
	}
	dput(entry);

 unlock:
	mutex_unlock(&parent->i_mutex);
	iput(parent);
	return err;
}
  916. /*
  917. * Calling into a user-controlled filesystem gives the filesystem
  918. * daemon ptrace-like capabilities over the current process. This
  919. * means, that the filesystem daemon is able to record the exact
  920. * filesystem operations performed, and can also control the behavior
  921. * of the requester process in otherwise impossible ways. For example
  922. * it can delay the operation for arbitrary length of time allowing
  923. * DoS against the requester.
  924. *
  925. * For this reason only those processes can call into the filesystem,
  926. * for which the owner of the mount has ptrace privilege. This
  927. * excludes processes started by other users, suid or sgid processes.
  928. */
  929. int fuse_allow_current_process(struct fuse_conn *fc)
  930. {
  931. const struct cred *cred;
  932. if (fc->flags & FUSE_ALLOW_OTHER)
  933. return 1;
  934. cred = current_cred();
  935. if (uid_eq(cred->euid, fc->user_id) &&
  936. uid_eq(cred->suid, fc->user_id) &&
  937. uid_eq(cred->uid, fc->user_id) &&
  938. gid_eq(cred->egid, fc->group_id) &&
  939. gid_eq(cred->sgid, fc->group_id) &&
  940. gid_eq(cred->gid, fc->group_id))
  941. return 1;
  942. return 0;
  943. }
/*
 * Send FUSE_ACCESS so the server can check permissions.  If the
 * server doesn't implement it (-ENOSYS), remember that on the
 * connection and treat all later access checks as successful.
 */
static int fuse_access(struct inode *inode, int mask)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	struct fuse_access_in inarg;
	int err;

	if (fc->no_access)
		return 0;

	req = fuse_get_req_nopages(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	memset(&inarg, 0, sizeof(inarg));
	/* Only the rwx bits are meaningful to the server */
	inarg.mask = mask & (MAY_READ | MAY_WRITE | MAY_EXEC);
	req->in.h.opcode = FUSE_ACCESS;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (err == -ENOSYS) {
		fc->no_access = 1;
		err = 0;
	}
	return err;
}
  971. static int fuse_perm_getattr(struct inode *inode, int mask)
  972. {
  973. if (mask & MAY_NOT_BLOCK)
  974. return -ECHILD;
  975. return fuse_do_getattr(inode, NULL, NULL);
  976. }
  977. /*
  978. * Check permission. The two basic access models of FUSE are:
  979. *
  980. * 1) Local access checking ('default_permissions' mount option) based
  981. * on file mode. This is the plain old disk filesystem permission
982. * model.
  983. *
  984. * 2) "Remote" access checking, where server is responsible for
  985. * checking permission in each inode operation. An exception to this
  986. * is if ->permission() was invoked from sys_access() in which case an
  987. * access request is sent. Execute permission is still checked
  988. * locally based on file mode.
  989. */
/*
 * ->permission() implementation; see the comment above for the two
 * access models.  @mask may include MAY_NOT_BLOCK (RCU walk), in
 * which case any path that would sleep must return -ECHILD.
 */
static int fuse_permission(struct inode *inode, int mask)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	bool refreshed = false;
	int err = 0;

	if (!fuse_allow_current_process(fc))
		return -EACCES;

	/*
	 * If attributes are needed, refresh them before proceeding
	 */
	if ((fc->flags & FUSE_DEFAULT_PERMISSIONS) ||
	    ((mask & MAY_EXEC) && S_ISREG(inode->i_mode))) {
		struct fuse_inode *fi = get_fuse_inode(inode);

		/* Attribute cache expired? */
		if (fi->i_time < get_jiffies_64()) {
			refreshed = true;
			err = fuse_perm_getattr(inode, mask);
			if (err)
				return err;
		}
	}

	if (fc->flags & FUSE_DEFAULT_PERMISSIONS) {
		err = generic_permission(inode, mask);

		/* If permission is denied, try to refresh file
		   attributes.  This is also needed, because the root
		   node will at first have no permissions */
		if (err == -EACCES && !refreshed) {
			err = fuse_perm_getattr(inode, mask);
			if (!err)
				err = generic_permission(inode, mask);
		}

		/* Note: the opposite of the above test does not
		   exist.  So if permissions are revoked this won't be
		   noticed immediately, only after the attribute
		   timeout has expired */
	} else if (mask & (MAY_ACCESS | MAY_CHDIR)) {
		/* Remote model: ask the server (access(2)/chdir case) */
		if (mask & MAY_NOT_BLOCK)
			return -ECHILD;
		err = fuse_access(inode, mask);
	} else if ((mask & MAY_EXEC) && S_ISREG(inode->i_mode)) {
		/* Exec is still checked locally against cached mode bits */
		if (!(inode->i_mode & S_IXUGO)) {
			if (refreshed)
				return -EACCES;

			err = fuse_perm_getattr(inode, mask);
			if (!err && !(inode->i_mode & S_IXUGO))
				return -EACCES;
		}
	}
	return err;
}
  1039. static int parse_dirfile(char *buf, size_t nbytes, struct file *file,
  1040. void *dstbuf, filldir_t filldir)
  1041. {
  1042. while (nbytes >= FUSE_NAME_OFFSET) {
  1043. struct fuse_dirent *dirent = (struct fuse_dirent *) buf;
  1044. size_t reclen = FUSE_DIRENT_SIZE(dirent);
  1045. int over;
  1046. if (!dirent->namelen || dirent->namelen > FUSE_NAME_MAX)
  1047. return -EIO;
  1048. if (reclen > nbytes)
  1049. break;
  1050. if (memchr(dirent->name, '/', dirent->namelen) != NULL)
  1051. return -EIO;
  1052. over = filldir(dstbuf, dirent->name, dirent->namelen,
  1053. file->f_pos, dirent->ino, dirent->type);
  1054. if (over)
  1055. break;
  1056. buf += reclen;
  1057. nbytes -= reclen;
  1058. file->f_pos = dirent->off;
  1059. }
  1060. return 0;
  1061. }
/*
 * Instantiate or revalidate one READDIRPLUS entry in the dcache so a
 * later lookup can be served without a round trip to userspace.
 * Bumps nlookup on the inode (directly, or via fuse_iget()).
 */
static int fuse_direntplus_link(struct file *file,
				struct fuse_direntplus *direntplus,
				u64 attr_version)
{
	int err;
	struct fuse_entry_out *o = &direntplus->entry_out;
	struct fuse_dirent *dirent = &direntplus->dirent;
	struct dentry *parent = file->f_path.dentry;
	struct qstr name = {
		.len = dirent->namelen,
		.name = dirent->name,
	};
	struct dentry *dentry;
	struct dentry *alias;
	struct inode *dir = parent->d_inode;
	struct fuse_conn *fc;
	struct inode *inode;

	if (!o->nodeid) {
		/*
		 * Unlike in the case of fuse_lookup, zero nodeid does not mean
		 * ENOENT.  Instead, it only means the userspace filesystem did
		 * not want to return attributes/handle for this entry.
		 *
		 * So do nothing.
		 */
		return 0;
	}

	if (name.name[0] == '.') {
		/*
		 * We could potentially refresh the attributes of the directory
		 * and its parent?
		 */
		if (name.len == 1)
			return 0;
		if (name.name[1] == '.' && name.len == 2)
			return 0;
	}
	fc = get_fuse_conn(dir);

	name.hash = full_name_hash(name.name, name.len);
	dentry = d_lookup(parent, &name);
	if (dentry && dentry->d_inode) {
		inode = dentry->d_inode;
		if (get_node_id(inode) == o->nodeid) {
			/* Existing dentry still matches: just count the
			   lookup the server performed for us */
			struct fuse_inode *fi;
			fi = get_fuse_inode(inode);
			spin_lock(&fc->lock);
			fi->nlookup++;
			spin_unlock(&fc->lock);

			/*
			 * The other branch to 'found' comes via fuse_iget()
			 * which bumps nlookup inside
			 */
			goto found;
		}
		/* Node ID changed: the cached dentry is stale, drop it */
		err = d_invalidate(dentry);
		if (err)
			goto out;

		dput(dentry);
		dentry = NULL;
	}

	dentry = d_alloc(parent, &name);
	err = -ENOMEM;
	if (!dentry)
		goto out;

	inode = fuse_iget(dir->i_sb, o->nodeid, o->generation,
			  &o->attr, entry_attr_timeout(o), attr_version);
	if (!inode)
		goto out;

	/* May hand back an existing alias (e.g. for directory inodes) */
	alias = d_materialise_unique(dentry, inode);
	err = PTR_ERR(alias);
	if (IS_ERR(alias))
		goto out;

	if (alias) {
		dput(dentry);
		dentry = alias;
	}

found:
	fuse_change_attributes(inode, &o->attr, entry_attr_timeout(o),
			       attr_version);

	fuse_change_entry_timeout(dentry, o);

	err = 0;
out:
	if (dentry)
		dput(dentry);
	return err;
}
/*
 * Parse a FUSE_READDIRPLUS reply buffer: emit entries into @dstbuf
 * via @filldir and pre-populate the dcache for each one with
 * fuse_direntplus_link().
 */
static int parse_dirplusfile(char *buf, size_t nbytes, struct file *file,
			     void *dstbuf, filldir_t filldir, u64 attr_version)
{
	struct fuse_direntplus *direntplus;
	struct fuse_dirent *dirent;
	size_t reclen;
	int over = 0;
	int ret;

	while (nbytes >= FUSE_NAME_OFFSET_DIRENTPLUS) {
		direntplus = (struct fuse_direntplus *) buf;
		dirent = &direntplus->dirent;
		reclen = FUSE_DIRENTPLUS_SIZE(direntplus);

		if (!dirent->namelen || dirent->namelen > FUSE_NAME_MAX)
			return -EIO;
		if (reclen > nbytes)
			break;

		if (!over) {
			/* We fill entries into dstbuf only as much as
			   it can hold.  But we still continue iterating
			   over remaining entries to link them.  If not,
			   we need to send a FORGET for each of those
			   which we did not link.
			*/
			over = filldir(dstbuf, dirent->name, dirent->namelen,
				       file->f_pos, dirent->ino,
				       dirent->type);
			file->f_pos = dirent->off;
		}

		buf += reclen;
		nbytes -= reclen;

		ret = fuse_direntplus_link(file, direntplus, attr_version);
		if (ret)
			/* Could not link it: undo the server-side lookup */
			fuse_force_forget(file, direntplus->entry_out.nodeid);
	}

	return 0;
}
/*
 * ->readdir(): read one page worth of directory entries, using
 * READDIRPLUS when fuse_use_readdirplus() says so, plain READDIR
 * otherwise.
 */
static int fuse_readdir(struct file *file, void *dstbuf, filldir_t filldir)
{
	int plus, err;
	size_t nbytes;
	struct page *page;
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	u64 attr_version = 0;

	if (is_bad_inode(inode))
		return -EIO;

	req = fuse_get_req(fc, 1);
	if (IS_ERR(req))
		return PTR_ERR(req);

	page = alloc_page(GFP_KERNEL);
	if (!page) {
		fuse_put_request(fc, req);
		return -ENOMEM;
	}

	plus = fuse_use_readdirplus(inode, file);
	/* The reply lands directly in the freshly allocated page */
	req->out.argpages = 1;
	req->num_pages = 1;
	req->pages[0] = page;
	req->page_descs[0].length = PAGE_SIZE;
	if (plus) {
		/* Snapshot before sending, as in fuse_do_getattr() */
		attr_version = fuse_get_attr_version(fc);
		fuse_read_fill(req, file, file->f_pos, PAGE_SIZE,
			       FUSE_READDIRPLUS);
	} else {
		fuse_read_fill(req, file, file->f_pos, PAGE_SIZE,
			       FUSE_READDIR);
	}
	fuse_request_send(fc, req);
	nbytes = req->out.args[0].size;
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (!err) {
		if (plus) {
			err = parse_dirplusfile(page_address(page), nbytes,
						file, dstbuf, filldir,
						attr_version);
		} else {
			err = parse_dirfile(page_address(page), nbytes, file,
					    dstbuf, filldir);
		}
	}

	__free_page(page);
	fuse_invalidate_attr(inode); /* atime changed */
	return err;
}
/*
 * Read a symlink target with FUSE_READLINK into a freshly allocated
 * page.  Returns the NUL-terminated target, or an ERR_PTR on failure.
 * The result is released with free_link().
 */
static char *read_link(struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req = fuse_get_req_nopages(fc);
	char *link;

	if (IS_ERR(req))
		return ERR_CAST(req);

	link = (char *) __get_free_page(GFP_KERNEL);
	if (!link) {
		link = ERR_PTR(-ENOMEM);
		goto out;
	}
	req->in.h.opcode = FUSE_READLINK;
	req->in.h.nodeid = get_node_id(inode);
	req->out.argvar = 1;
	req->out.numargs = 1;
	/* Leave room for the terminating NUL */
	req->out.args[0].size = PAGE_SIZE - 1;
	req->out.args[0].value = link;
	fuse_request_send(fc, req);
	if (req->out.h.error) {
		free_page((unsigned long) link);
		link = ERR_PTR(req->out.h.error);
	} else
		link[req->out.args[0].size] = '\0';
 out:
	fuse_put_request(fc, req);
	fuse_invalidate_attr(inode); /* atime changed */
	return link;
}
  1264. static void free_link(char *link)
  1265. {
  1266. if (!IS_ERR(link))
  1267. free_page((unsigned long) link);
  1268. }
  1269. static void *fuse_follow_link(struct dentry *dentry, struct nameidata *nd)
  1270. {
  1271. nd_set_link(nd, read_link(dentry));
  1272. return NULL;
  1273. }
  1274. static void fuse_put_link(struct dentry *dentry, struct nameidata *nd, void *c)
  1275. {
  1276. free_link(nd_get_link(nd));
  1277. }
/* ->open() for directories: fuse_open_common() with isdir == true */
static int fuse_dir_open(struct inode *inode, struct file *file)
{
	return fuse_open_common(inode, file, true);
}
/* ->release() for directories: common release with the dir opcode */
static int fuse_dir_release(struct inode *inode, struct file *file)
{
	fuse_release_common(file, FUSE_RELEASEDIR);

	return 0;
}
/* ->fsync() for directories; the final argument selects the dir variant */
static int fuse_dir_fsync(struct file *file, loff_t start, loff_t end,
			  int datasync)
{
	return fuse_fsync_common(file, start, end, datasync, 1);
}
  1292. static long fuse_dir_ioctl(struct file *file, unsigned int cmd,
  1293. unsigned long arg)
  1294. {
  1295. struct fuse_conn *fc = get_fuse_conn(file->f_mapping->host);
  1296. /* FUSE_IOCTL_DIR only supported for API version >= 7.18 */
  1297. if (fc->minor < 18)
  1298. return -ENOTTY;
  1299. return fuse_ioctl_common(file, cmd, arg, FUSE_IOCTL_DIR);
  1300. }
  1301. static long fuse_dir_compat_ioctl(struct file *file, unsigned int cmd,
  1302. unsigned long arg)
  1303. {
  1304. struct fuse_conn *fc = get_fuse_conn(file->f_mapping->host);
  1305. if (fc->minor < 18)
  1306. return -ENOTTY;
  1307. return fuse_ioctl_common(file, cmd, arg,
  1308. FUSE_IOCTL_COMPAT | FUSE_IOCTL_DIR);
  1309. }
  1310. static bool update_mtime(unsigned ivalid)
  1311. {
  1312. /* Always update if mtime is explicitly set */
  1313. if (ivalid & ATTR_MTIME_SET)
  1314. return true;
  1315. /* If it's an open(O_TRUNC) or an ftruncate(), don't update */
  1316. if ((ivalid & ATTR_SIZE) && (ivalid & (ATTR_OPEN | ATTR_FILE)))
  1317. return false;
  1318. /* In all other cases update */
  1319. return true;
  1320. }
  1321. static void iattr_to_fattr(struct iattr *iattr, struct fuse_setattr_in *arg)
  1322. {
  1323. unsigned ivalid = iattr->ia_valid;
  1324. if (ivalid & ATTR_MODE)
  1325. arg->valid |= FATTR_MODE, arg->mode = iattr->ia_mode;
  1326. if (ivalid & ATTR_UID)
  1327. arg->valid |= FATTR_UID, arg->uid = from_kuid(&init_user_ns, iattr->ia_uid);
  1328. if (ivalid & ATTR_GID)
  1329. arg->valid |= FATTR_GID, arg->gid = from_kgid(&init_user_ns, iattr->ia_gid);
  1330. if (ivalid & ATTR_SIZE)
  1331. arg->valid |= FATTR_SIZE, arg->size = iattr->ia_size;
  1332. if (ivalid & ATTR_ATIME) {
  1333. arg->valid |= FATTR_ATIME;
  1334. arg->atime = iattr->ia_atime.tv_sec;
  1335. arg->atimensec = iattr->ia_atime.tv_nsec;
  1336. if (!(ivalid & ATTR_ATIME_SET))
  1337. arg->valid |= FATTR_ATIME_NOW;
  1338. }
  1339. if ((ivalid & ATTR_MTIME) && update_mtime(ivalid)) {
  1340. arg->valid |= FATTR_MTIME;
  1341. arg->mtime = iattr->ia_mtime.tv_sec;
  1342. arg->mtimensec = iattr->ia_mtime.tv_nsec;
  1343. if (!(ivalid & ATTR_MTIME_SET))
  1344. arg->valid |= FATTR_MTIME_NOW;
  1345. }
  1346. }
  1347. /*
  1348. * Prevent concurrent writepages on inode
  1349. *
  1350. * This is done by adding a negative bias to the inode write counter
  1351. * and waiting for all pending writes to finish.
  1352. */
void fuse_set_nowrite(struct inode *inode)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);

	/* Caller must already hold i_mutex */
	BUG_ON(!mutex_is_locked(&inode->i_mutex));

	spin_lock(&fc->lock);
	BUG_ON(fi->writectr < 0);
	/* Add the large negative bias; new writepages will queue up */
	fi->writectr += FUSE_NOWRITE;
	spin_unlock(&fc->lock);
	/* Wait for all in-flight writes: counter back at exactly the bias */
	wait_event(fi->page_waitq, fi->writectr == FUSE_NOWRITE);
}
  1364. /*
  1365. * Allow writepages on inode
  1366. *
  1367. * Remove the bias from the writecounter and send any queued
  1368. * writepages.
  1369. */
/* Caller holds fc->lock (see fuse_release_nowrite / fuse_do_setattr) */
static void __fuse_release_nowrite(struct inode *inode)
{
	struct fuse_inode *fi = get_fuse_inode(inode);

	BUG_ON(fi->writectr != FUSE_NOWRITE);
	fi->writectr = 0;
	/* Kick off the writepages that queued while writes were blocked */
	fuse_flush_writepages(inode);
}
/* Lock-taking wrapper around __fuse_release_nowrite() */
void fuse_release_nowrite(struct inode *inode)
{
	struct fuse_conn *fc = get_fuse_conn(inode);

	spin_lock(&fc->lock);
	__fuse_release_nowrite(inode);
	spin_unlock(&fc->lock);
}
  1384. /*
  1385. * Set attributes, and at the same time refresh them.
  1386. *
  1387. * Truncation is slightly complicated, because the 'truncate' request
  1388. * may fail, in which case we don't want to touch the mapping.
  1389. * vmtruncate() doesn't allow for this case, so do the rlimit checking
  1390. * and the actual truncation by hand.
  1391. */
/*
 * Send FUSE_SETATTR and update the cached attributes from the reply.
 *
 * For truncation, writepages are blocked (fuse_set_nowrite) for the
 * duration of the request so the size change cannot race with pending
 * writeback; the page cache is trimmed only after the server has
 * acknowledged the new size.
 */
int fuse_do_setattr(struct inode *inode, struct iattr *attr,
		    struct file *file)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	struct fuse_setattr_in inarg;
	struct fuse_attr_out outarg;
	bool is_truncate = false;
	loff_t oldsize;
	int err;

	/* Without default_permissions the server does its own checking */
	if (!(fc->flags & FUSE_DEFAULT_PERMISSIONS))
		attr->ia_valid |= ATTR_FORCE;

	err = inode_change_ok(inode, attr);
	if (err)
		return err;

	if (attr->ia_valid & ATTR_OPEN) {
		/* With atomic_o_trunc the open request did the truncate */
		if (fc->atomic_o_trunc)
			return 0;
		file = NULL;
	}

	if (attr->ia_valid & ATTR_SIZE)
		is_truncate = true;

	req = fuse_get_req_nopages(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	if (is_truncate)
		fuse_set_nowrite(inode);

	memset(&inarg, 0, sizeof(inarg));
	memset(&outarg, 0, sizeof(outarg));
	iattr_to_fattr(attr, &inarg);
	if (file) {
		struct fuse_file *ff = file->private_data;
		inarg.valid |= FATTR_FH;
		inarg.fh = ff->fh;
	}
	if (attr->ia_valid & ATTR_SIZE) {
		/* For mandatory locking in truncate */
		inarg.valid |= FATTR_LOCKOWNER;
		inarg.lock_owner = fuse_lock_owner_id(fc, current->files);
	}
	req->in.h.opcode = FUSE_SETATTR;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->out.numargs = 1;
	/* Pre-7.9 protocol used a shorter fuse_attr_out */
	if (fc->minor < 9)
		req->out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
	else
		req->out.args[0].size = sizeof(outarg);
	req->out.args[0].value = &outarg;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (err) {
		/* Interrupted: attributes may or may not have changed */
		if (err == -EINTR)
			fuse_invalidate_attr(inode);
		goto error;
	}

	if ((inode->i_mode ^ outarg.attr.mode) & S_IFMT) {
		/* Inode type changed on the server: give up on it */
		make_bad_inode(inode);
		err = -EIO;
		goto error;
	}

	spin_lock(&fc->lock);
	fuse_change_attributes_common(inode, &outarg.attr,
				      attr_timeout(&outarg));
	oldsize = inode->i_size;
	i_size_write(inode, outarg.attr.size);

	if (is_truncate) {
		/* NOTE: this may release/reacquire fc->lock */
		__fuse_release_nowrite(inode);
	}
	spin_unlock(&fc->lock);

	/*
	 * Only call invalidate_inode_pages2() after removing
	 * FUSE_NOWRITE, otherwise fuse_launder_page() would deadlock.
	 */
	if (S_ISREG(inode->i_mode) && oldsize != outarg.attr.size) {
		truncate_pagecache(inode, oldsize, outarg.attr.size);
		invalidate_inode_pages2(inode->i_mapping);
	}

	return 0;

error:
	if (is_truncate)
		fuse_release_nowrite(inode);

	return err;
}
  1480. static int fuse_setattr(struct dentry *entry, struct iattr *attr)
  1481. {
  1482. struct inode *inode = entry->d_inode;
  1483. if (!fuse_allow_current_process(get_fuse_conn(inode)))
  1484. return -EACCES;
  1485. if (attr->ia_valid & ATTR_FILE)
  1486. return fuse_do_setattr(inode, attr, attr->ia_file);
  1487. else
  1488. return fuse_do_setattr(inode, attr, NULL);
  1489. }
  1490. static int fuse_getattr(struct vfsmount *mnt, struct dentry *entry,
  1491. struct kstat *stat)
  1492. {
  1493. struct inode *inode = entry->d_inode;
  1494. struct fuse_conn *fc = get_fuse_conn(inode);
  1495. if (!fuse_allow_current_process(fc))
  1496. return -EACCES;
  1497. return fuse_update_attributes(inode, stat, NULL, NULL);
  1498. }
/*
 * ->setxattr(): forward to the server as FUSE_SETXATTR.  -ENOSYS from
 * the server disables the operation for the whole connection.
 */
static int fuse_setxattr(struct dentry *entry, const char *name,
			 const void *value, size_t size, int flags)
{
	struct inode *inode = entry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	struct fuse_setxattr_in inarg;
	int err;

	if (fc->no_setxattr)
		return -EOPNOTSUPP;

	req = fuse_get_req_nopages(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	memset(&inarg, 0, sizeof(inarg));
	inarg.size = size;
	inarg.flags = flags;
	req->in.h.opcode = FUSE_SETXATTR;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 3;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->in.args[1].size = strlen(name) + 1;
	req->in.args[1].value = name;
	req->in.args[2].size = size;
	req->in.args[2].value = value;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (err == -ENOSYS) {
		fc->no_setxattr = 1;
		err = -EOPNOTSUPP;
	}
	if (!err)
		/* xattrs can affect e.g. ctime: refetch attributes */
		fuse_invalidate_attr(inode);
	return err;
}
/*
 * ->getxattr(): FUSE_GETXATTR.  With @size == 0 only the required
 * buffer size is requested; otherwise the value itself is returned
 * ("two different operations rolled into one").
 */
static ssize_t fuse_getxattr(struct dentry *entry, const char *name,
			     void *value, size_t size)
{
	struct inode *inode = entry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	struct fuse_getxattr_in inarg;
	struct fuse_getxattr_out outarg;
	ssize_t ret;

	if (fc->no_getxattr)
		return -EOPNOTSUPP;

	req = fuse_get_req_nopages(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	memset(&inarg, 0, sizeof(inarg));
	inarg.size = size;
	req->in.h.opcode = FUSE_GETXATTR;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 2;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->in.args[1].size = strlen(name) + 1;
	req->in.args[1].value = name;
	/* This is really two different operations rolled into one */
	req->out.numargs = 1;
	if (size) {
		/* Variable-sized reply: the attribute value */
		req->out.argvar = 1;
		req->out.args[0].size = size;
		req->out.args[0].value = value;
	} else {
		/* Size probe: the server replies with the needed size */
		req->out.args[0].size = sizeof(outarg);
		req->out.args[0].value = &outarg;
	}
	fuse_request_send(fc, req);
	ret = req->out.h.error;
	if (!ret)
		ret = size ? req->out.args[0].size : outarg.size;
	else {
		if (ret == -ENOSYS) {
			fc->no_getxattr = 1;
			ret = -EOPNOTSUPP;
		}
	}
	fuse_put_request(fc, req);
	return ret;
}
/*
 * ->listxattr(): FUSE_LISTXATTR.  Same size-probe-vs-data convention
 * as fuse_getxattr(): @size == 0 asks only for the required length.
 */
static ssize_t fuse_listxattr(struct dentry *entry, char *list, size_t size)
{
	struct inode *inode = entry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	struct fuse_getxattr_in inarg;
	struct fuse_getxattr_out outarg;
	ssize_t ret;

	if (!fuse_allow_current_process(fc))
		return -EACCES;

	if (fc->no_listxattr)
		return -EOPNOTSUPP;

	req = fuse_get_req_nopages(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	memset(&inarg, 0, sizeof(inarg));
	inarg.size = size;
	req->in.h.opcode = FUSE_LISTXATTR;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	/* This is really two different operations rolled into one */
	req->out.numargs = 1;
	if (size) {
		/* Variable-sized reply: the name list */
		req->out.argvar = 1;
		req->out.args[0].size = size;
		req->out.args[0].value = list;
	} else {
		/* Size probe: the server replies with the needed size */
		req->out.args[0].size = sizeof(outarg);
		req->out.args[0].value = &outarg;
	}
	fuse_request_send(fc, req);
	ret = req->out.h.error;
	if (!ret)
		ret = size ? req->out.args[0].size : outarg.size;
	else {
		if (ret == -ENOSYS) {
			fc->no_listxattr = 1;
			ret = -EOPNOTSUPP;
		}
	}
	fuse_put_request(fc, req);
	return ret;
}
/*
 * ->removexattr(): FUSE_REMOVEXATTR; only the attribute name is sent.
 * -ENOSYS from the server disables the op for the whole connection.
 */
static int fuse_removexattr(struct dentry *entry, const char *name)
{
	struct inode *inode = entry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	int err;

	if (fc->no_removexattr)
		return -EOPNOTSUPP;

	req = fuse_get_req_nopages(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->in.h.opcode = FUSE_REMOVEXATTR;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = strlen(name) + 1;
	req->in.args[0].value = name;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (err == -ENOSYS) {
		fc->no_removexattr = 1;
		err = -EOPNOTSUPP;
	}
	if (!err)
		/* xattrs can affect e.g. ctime: refetch attributes */
		fuse_invalidate_attr(inode);
	return err;
}
/* Inode operations for FUSE directories */
static const struct inode_operations fuse_dir_inode_operations = {
	.lookup = fuse_lookup,
	.mkdir = fuse_mkdir,
	.symlink = fuse_symlink,
	.unlink = fuse_unlink,
	.rmdir = fuse_rmdir,
	.rename = fuse_rename,
	.link = fuse_link,
	.setattr = fuse_setattr,
	.create = fuse_create,
	.mknod = fuse_mknod,
	.permission = fuse_permission,
	.getattr = fuse_getattr,
	.setxattr = fuse_setxattr,
	.getxattr = fuse_getxattr,
	.listxattr = fuse_listxattr,
	.removexattr = fuse_removexattr,
};
/* File operations for FUSE directories */
static const struct file_operations fuse_dir_operations = {
	.llseek = generic_file_llseek,
	.read = generic_read_dir,
	.readdir = fuse_readdir,
	.open = fuse_dir_open,
	.release = fuse_dir_release,
	.fsync = fuse_dir_fsync,
	.unlocked_ioctl = fuse_dir_ioctl,
	.compat_ioctl = fuse_dir_compat_ioctl,
};
/* Inode operations shared by regular files and special nodes */
static const struct inode_operations fuse_common_inode_operations = {
	.setattr = fuse_setattr,
	.permission = fuse_permission,
	.getattr = fuse_getattr,
	.setxattr = fuse_setxattr,
	.getxattr = fuse_getxattr,
	.listxattr = fuse_listxattr,
	.removexattr = fuse_removexattr,
};
/* Inode operations for FUSE symlinks */
static const struct inode_operations fuse_symlink_inode_operations = {
	.setattr = fuse_setattr,
	.follow_link = fuse_follow_link,
	.put_link = fuse_put_link,
	.readlink = generic_readlink,
	.getattr = fuse_getattr,
	.setxattr = fuse_setxattr,
	.getxattr = fuse_getxattr,
	.listxattr = fuse_listxattr,
	.removexattr = fuse_removexattr,
};
/* Install the common inode operations (regular files, special nodes) */
void fuse_init_common(struct inode *inode)
{
	inode->i_op = &fuse_common_inode_operations;
}
/* Install the directory inode and file operations */
void fuse_init_dir(struct inode *inode)
{
	inode->i_op = &fuse_dir_inode_operations;
	inode->i_fop = &fuse_dir_operations;
}
/* Install the symlink inode operations */
void fuse_init_symlink(struct inode *inode)
{
	inode->i_op = &fuse_symlink_inode_operations;
}