inotify_user.c

/*
 * fs/inotify_user.c - inotify support for userspace
 *
 * Authors:
 *	John McCutchan	<ttb@tentacle.dhs.org>
 *	Robert Love	<rml@novell.com>
 *
 * Copyright (C) 2005 John McCutchan
 * Copyright 2006 Hewlett-Packard Development Company, L.P.
 *
 * Copyright (C) 2009 Eric Paris <Red Hat Inc>
 * inotify was largely rewritten to make use of the fsnotify infrastructure
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/file.h>
#include <linux/fs.h> /* struct inode */
#include <linux/fsnotify_backend.h>
#include <linux/idr.h>
#include <linux/init.h> /* module_init */
#include <linux/inotify.h>
#include <linux/kernel.h> /* roundup() */
#include <linux/namei.h> /* LOOKUP_FOLLOW */
#include <linux/sched.h> /* struct user */
#include <linux/slab.h> /* struct kmem_cache */
#include <linux/syscalls.h>
#include <linux/types.h>
#include <linux/anon_inodes.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/wait.h>

#include "inotify.h"

#include <asm/ioctls.h>

/* these are configurable via /proc/sys/fs/inotify/ */
static int inotify_max_user_instances __read_mostly;
static int inotify_max_queued_events __read_mostly;
static int inotify_max_user_watches __read_mostly;

static struct kmem_cache *inotify_inode_mark_cachep __read_mostly;
struct kmem_cache *event_priv_cachep __read_mostly;
#ifdef CONFIG_SYSCTL

#include <linux/sysctl.h>

static int zero;

ctl_table inotify_table[] = {
	{
		.procname	= "max_user_instances",
		.data		= &inotify_max_user_instances,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &zero,
	},
	{
		.procname	= "max_user_watches",
		.data		= &inotify_max_user_watches,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &zero,
	},
	{
		.procname	= "max_queued_events",
		.data		= &inotify_max_queued_events,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &zero
	},
	{ }
};
#endif /* CONFIG_SYSCTL */
static inline __u32 inotify_arg_to_mask(u32 arg)
{
	__u32 mask;

	/*
	 * every mark should accept its own ignored events, care about
	 * children, and receive events when the inode is unmounted
	 */
	mask = (FS_IN_IGNORED | FS_EVENT_ON_CHILD | FS_UNMOUNT);

	/* keep only the bits userspace may set in the watch mask */
	mask |= (arg & (IN_ALL_EVENTS | IN_ONESHOT | IN_EXCL_UNLINK));

	return mask;
}

static inline u32 inotify_mask_to_arg(__u32 mask)
{
	return mask & (IN_ALL_EVENTS | IN_ISDIR | IN_UNMOUNT | IN_IGNORED |
		       IN_Q_OVERFLOW);
}
/* inotify userspace file descriptor functions */
static unsigned int inotify_poll(struct file *file, poll_table *wait)
{
	struct fsnotify_group *group = file->private_data;
	int ret = 0;

	poll_wait(file, &group->notification_waitq, wait);
	mutex_lock(&group->notification_mutex);
	if (!fsnotify_notify_queue_is_empty(group))
		ret = POLLIN | POLLRDNORM;
	mutex_unlock(&group->notification_mutex);

	return ret;
}
/*
 * Get an event from the notification queue if one exists and is small
 * enough to fit in "count". Return an error pointer if "count" is not
 * large enough.
 *
 * Called with the group->notification_mutex held.
 */
static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
					    size_t count)
{
	size_t event_size = sizeof(struct inotify_event);
	struct fsnotify_event *event;

	if (fsnotify_notify_queue_is_empty(group))
		return NULL;

	event = fsnotify_peek_notify_event(group);

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	if (event->name_len)
		event_size += roundup(event->name_len + 1, event_size);

	if (event_size > count)
		return ERR_PTR(-EINVAL);

	/* held the notification_mutex the whole time, so this is the
	 * same event we peeked above */
	fsnotify_remove_notify_event(group);

	return event;
}
/*
 * Copy an event to user space, returning how much we copied.
 *
 * We already checked that the event size is smaller than the
 * buffer we had in "get_one_event()" above.
 */
static ssize_t copy_event_to_user(struct fsnotify_group *group,
				  struct fsnotify_event *event,
				  char __user *buf)
{
	struct inotify_event inotify_event;
	struct fsnotify_event_private_data *fsn_priv;
	struct inotify_event_private_data *priv;
	size_t event_size = sizeof(struct inotify_event);
	size_t name_len = 0;

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	/* we get the inotify watch descriptor from the event private data */
	spin_lock(&event->lock);
	fsn_priv = fsnotify_remove_priv_from_event(group, event);
	spin_unlock(&event->lock);

	if (!fsn_priv)
		inotify_event.wd = -1;
	else {
		priv = container_of(fsn_priv, struct inotify_event_private_data,
				    fsnotify_event_priv_data);
		inotify_event.wd = priv->wd;
		inotify_free_event_priv(fsn_priv);
	}

	/*
	 * round up event->name_len so it is a multiple of event_size
	 * plus an extra byte for the terminating '\0'.
	 */
	if (event->name_len)
		name_len = roundup(event->name_len + 1, event_size);
	inotify_event.len = name_len;

	inotify_event.mask = inotify_mask_to_arg(event->mask);
	inotify_event.cookie = event->sync_cookie;

	/* send the main event */
	if (copy_to_user(buf, &inotify_event, event_size))
		return -EFAULT;

	buf += event_size;

	/*
	 * fsnotify only stores the pathname, so here we have to send the pathname
	 * and then pad that pathname out to a multiple of sizeof(inotify_event)
	 * with zeros. The padding bytes are zeroed with clear_user().
	 */
	if (name_len) {
		unsigned int len_to_zero = name_len - event->name_len;
		/* copy the path name */
		if (copy_to_user(buf, event->file_name, event->name_len))
			return -EFAULT;
		buf += event->name_len;

		/* fill userspace with 0's */
		if (clear_user(buf, len_to_zero))
			return -EFAULT;
		buf += len_to_zero;
		event_size += name_len;
	}

	return event_size;
}
static ssize_t inotify_read(struct file *file, char __user *buf,
			    size_t count, loff_t *pos)
{
	struct fsnotify_group *group;
	struct fsnotify_event *kevent;
	char __user *start;
	int ret;
	DEFINE_WAIT(wait);

	start = buf;
	group = file->private_data;

	while (1) {
		prepare_to_wait(&group->notification_waitq, &wait, TASK_INTERRUPTIBLE);

		mutex_lock(&group->notification_mutex);
		kevent = get_one_event(group, count);
		mutex_unlock(&group->notification_mutex);

		pr_debug("%s: group=%p kevent=%p\n", __func__, group, kevent);

		if (kevent) {
			ret = PTR_ERR(kevent);
			if (IS_ERR(kevent))
				break;
			ret = copy_event_to_user(group, kevent, buf);
			fsnotify_put_event(kevent);
			if (ret < 0)
				break;
			buf += ret;
			count -= ret;
			continue;
		}

		ret = -EAGAIN;
		if (file->f_flags & O_NONBLOCK)
			break;
		ret = -EINTR;
		if (signal_pending(current))
			break;

		if (start != buf)
			break;

		schedule();
	}

	finish_wait(&group->notification_waitq, &wait);
	if (start != buf && ret != -EFAULT)
		ret = buf - start;
	return ret;
}
static int inotify_fasync(int fd, struct file *file, int on)
{
	struct fsnotify_group *group = file->private_data;

	return fasync_helper(fd, file, on, &group->inotify_data.fa) >= 0 ? 0 : -EIO;
}

static int inotify_release(struct inode *ignored, struct file *file)
{
	struct fsnotify_group *group = file->private_data;

	pr_debug("%s: group=%p\n", __func__, group);

	fsnotify_clear_marks_by_group(group);

	/* free this group, matching get was inotify_init->inotify_new_group */
	fsnotify_put_group(group);

	return 0;
}
static long inotify_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	struct fsnotify_group *group;
	struct fsnotify_event_holder *holder;
	struct fsnotify_event *event;
	void __user *p;
	int ret = -ENOTTY;
	size_t send_len = 0;

	group = file->private_data;
	p = (void __user *) arg;

	pr_debug("%s: group=%p cmd=%u\n", __func__, group, cmd);

	switch (cmd) {
	case FIONREAD:
		mutex_lock(&group->notification_mutex);
		list_for_each_entry(holder, &group->notification_list, event_list) {
			event = holder->event;
			send_len += sizeof(struct inotify_event);
			if (event->name_len)
				send_len += roundup(event->name_len + 1,
						    sizeof(struct inotify_event));
		}
		mutex_unlock(&group->notification_mutex);
		ret = put_user(send_len, (int __user *) p);
		break;
	}

	return ret;
}

static const struct file_operations inotify_fops = {
	.poll		= inotify_poll,
	.read		= inotify_read,
	.fasync		= inotify_fasync,
	.release	= inotify_release,
	.unlocked_ioctl	= inotify_ioctl,
	.compat_ioctl	= inotify_ioctl,
	.llseek		= noop_llseek,
};
/*
 * inotify_find_inode - resolve a user-given path to a specific inode
 */
static int inotify_find_inode(const char __user *dirname, struct path *path, unsigned flags)
{
	int error;

	error = user_path_at(AT_FDCWD, dirname, flags, path);
	if (error)
		return error;
	/* you can only watch an inode if you have read permissions on it */
	error = inode_permission2(path->mnt, path->dentry->d_inode, MAY_READ);
	if (error)
		path_put(path);
	return error;
}
static int inotify_add_to_idr(struct idr *idr, spinlock_t *idr_lock,
			      int *last_wd,
			      struct inotify_inode_mark *i_mark)
{
	int ret;

	do {
		if (unlikely(!idr_pre_get(idr, GFP_KERNEL)))
			return -ENOMEM;

		spin_lock(idr_lock);
		ret = idr_get_new_above(idr, i_mark, *last_wd + 1,
					&i_mark->wd);
		/* we added the mark to the idr, take a reference */
		if (!ret) {
			*last_wd = i_mark->wd;
			fsnotify_get_mark(&i_mark->fsn_mark);
		}
		spin_unlock(idr_lock);
	} while (ret == -EAGAIN);

	return ret;
}
static struct inotify_inode_mark *inotify_idr_find_locked(struct fsnotify_group *group,
							   int wd)
{
	struct idr *idr = &group->inotify_data.idr;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;
	struct inotify_inode_mark *i_mark;

	assert_spin_locked(idr_lock);

	i_mark = idr_find(idr, wd);
	if (i_mark) {
		struct fsnotify_mark *fsn_mark = &i_mark->fsn_mark;

		fsnotify_get_mark(fsn_mark);
		/* One ref for being in the idr, one ref we just took */
		BUG_ON(atomic_read(&fsn_mark->refcnt) < 2);
	}

	return i_mark;
}

static struct inotify_inode_mark *inotify_idr_find(struct fsnotify_group *group,
						   int wd)
{
	struct inotify_inode_mark *i_mark;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;

	spin_lock(idr_lock);
	i_mark = inotify_idr_find_locked(group, wd);
	spin_unlock(idr_lock);

	return i_mark;
}

static void do_inotify_remove_from_idr(struct fsnotify_group *group,
				       struct inotify_inode_mark *i_mark)
{
	struct idr *idr = &group->inotify_data.idr;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;
	int wd = i_mark->wd;

	assert_spin_locked(idr_lock);

	idr_remove(idr, wd);

	/* removed from the idr, drop that ref */
	fsnotify_put_mark(&i_mark->fsn_mark);
}
/*
 * Remove the mark from the idr (if present) and drop the reference
 * on the mark because it was in the idr.
 */
static void inotify_remove_from_idr(struct fsnotify_group *group,
				    struct inotify_inode_mark *i_mark)
{
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;
	struct inotify_inode_mark *found_i_mark = NULL;
	int wd;

	spin_lock(idr_lock);
	wd = i_mark->wd;

	/*
	 * does this i_mark think it is in the idr? we shouldn't get called
	 * if it wasn't....
	 */
	if (wd == -1) {
		WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p"
			" i_mark->inode=%p\n", __func__, i_mark, i_mark->wd,
			i_mark->fsn_mark.group, i_mark->fsn_mark.i.inode);
		goto out;
	}

	/* Let's look in the idr to see if we find it */
	found_i_mark = inotify_idr_find_locked(group, wd);
	if (unlikely(!found_i_mark)) {
		WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p"
			" i_mark->inode=%p\n", __func__, i_mark, i_mark->wd,
			i_mark->fsn_mark.group, i_mark->fsn_mark.i.inode);
		goto out;
	}

	/*
	 * We found a mark in the idr at the right wd, but it's
	 * not the mark we were told to remove. eparis seriously
	 * fucked up somewhere.
	 */
	if (unlikely(found_i_mark != i_mark)) {
		WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p "
			"mark->inode=%p found_i_mark=%p found_i_mark->wd=%d "
			"found_i_mark->group=%p found_i_mark->inode=%p\n",
			__func__, i_mark, i_mark->wd, i_mark->fsn_mark.group,
			i_mark->fsn_mark.i.inode, found_i_mark, found_i_mark->wd,
			found_i_mark->fsn_mark.group,
			found_i_mark->fsn_mark.i.inode);
		goto out;
	}

	/*
	 * One ref for being in the idr
	 * one ref held by the caller trying to kill us
	 * one ref grabbed by inotify_idr_find
	 */
	if (unlikely(atomic_read(&i_mark->fsn_mark.refcnt) < 3)) {
		printk(KERN_ERR "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p"
			" i_mark->inode=%p\n", __func__, i_mark, i_mark->wd,
			i_mark->fsn_mark.group, i_mark->fsn_mark.i.inode);
		/* we can't really recover with bad ref cnting.. */
		BUG();
	}

	do_inotify_remove_from_idr(group, i_mark);
out:
	/* match the ref taken by inotify_idr_find_locked() */
	if (found_i_mark)
		fsnotify_put_mark(&found_i_mark->fsn_mark);
	i_mark->wd = -1;
	spin_unlock(idr_lock);
}
/*
 * Send IN_IGNORED for this wd, remove this wd from the idr.
 */
void inotify_ignored_and_remove_idr(struct fsnotify_mark *fsn_mark,
				    struct fsnotify_group *group)
{
	struct inotify_inode_mark *i_mark;
	struct fsnotify_event *ignored_event, *notify_event;
	struct inotify_event_private_data *event_priv;
	struct fsnotify_event_private_data *fsn_event_priv;
	int ret;

	ignored_event = fsnotify_create_event(NULL, FS_IN_IGNORED, NULL,
					      FSNOTIFY_EVENT_NONE, NULL, 0,
					      GFP_NOFS);
	if (!ignored_event)
		return;

	i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);

	event_priv = kmem_cache_alloc(event_priv_cachep, GFP_NOFS);
	if (unlikely(!event_priv))
		goto skip_send_ignore;

	fsn_event_priv = &event_priv->fsnotify_event_priv_data;

	fsn_event_priv->group = group;
	event_priv->wd = i_mark->wd;

	notify_event = fsnotify_add_notify_event(group, ignored_event, fsn_event_priv, NULL);
	if (notify_event) {
		if (IS_ERR(notify_event))
			ret = PTR_ERR(notify_event);
		else
			fsnotify_put_event(notify_event);
		inotify_free_event_priv(fsn_event_priv);
	}

skip_send_ignore:

	/* matches the reference taken when the event was created */
	fsnotify_put_event(ignored_event);

	/* remove this mark from the idr */
	inotify_remove_from_idr(group, i_mark);

	atomic_dec(&group->inotify_data.user->inotify_watches);
}
/* ding dong the mark is dead */
static void inotify_free_mark(struct fsnotify_mark *fsn_mark)
{
	struct inotify_inode_mark *i_mark;

	i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);

	kmem_cache_free(inotify_inode_mark_cachep, i_mark);
}
static int inotify_update_existing_watch(struct fsnotify_group *group,
					 struct inode *inode,
					 u32 arg)
{
	struct fsnotify_mark *fsn_mark;
	struct inotify_inode_mark *i_mark;
	__u32 old_mask, new_mask;
	__u32 mask;
	int add = (arg & IN_MASK_ADD);
	int ret;

	mask = inotify_arg_to_mask(arg);

	fsn_mark = fsnotify_find_inode_mark(group, inode);
	if (!fsn_mark)
		return -ENOENT;

	i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);

	spin_lock(&fsn_mark->lock);

	old_mask = fsn_mark->mask;
	if (add)
		fsnotify_set_mark_mask_locked(fsn_mark, (fsn_mark->mask | mask));
	else
		fsnotify_set_mark_mask_locked(fsn_mark, mask);
	new_mask = fsn_mark->mask;

	spin_unlock(&fsn_mark->lock);

	if (old_mask != new_mask) {
		/* more bits in old than in new? */
		int dropped = (old_mask & ~new_mask);
		/* more bits in this fsn_mark than the inode's mask? */
		int do_inode = (new_mask & ~inode->i_fsnotify_mask);

		/* update the inode with this new fsn_mark */
		if (dropped || do_inode)
			fsnotify_recalc_inode_mask(inode);
	}

	/* return the wd */
	ret = i_mark->wd;

	/* match the get from fsnotify_find_inode_mark() */
	fsnotify_put_mark(fsn_mark);

	return ret;
}
static int inotify_new_watch(struct fsnotify_group *group,
			     struct inode *inode,
			     u32 arg)
{
	struct inotify_inode_mark *tmp_i_mark;
	__u32 mask;
	int ret;
	struct idr *idr = &group->inotify_data.idr;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;

	mask = inotify_arg_to_mask(arg);

	tmp_i_mark = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
	if (unlikely(!tmp_i_mark))
		return -ENOMEM;

	fsnotify_init_mark(&tmp_i_mark->fsn_mark, inotify_free_mark);
	tmp_i_mark->fsn_mark.mask = mask;
	tmp_i_mark->wd = -1;

	ret = -ENOSPC;
	if (atomic_read(&group->inotify_data.user->inotify_watches) >= inotify_max_user_watches)
		goto out_err;

	ret = inotify_add_to_idr(idr, idr_lock, &group->inotify_data.last_wd,
				 tmp_i_mark);
	if (ret)
		goto out_err;

	/* we are on the idr, now get on the inode */
	ret = fsnotify_add_mark(&tmp_i_mark->fsn_mark, group, inode, NULL, 0);
	if (ret) {
		/* we failed to get on the inode, get off the idr */
		inotify_remove_from_idr(group, tmp_i_mark);
		goto out_err;
	}

	/* increment the number of watches the user has */
	atomic_inc(&group->inotify_data.user->inotify_watches);

	/* return the watch descriptor for this new mark */
	ret = tmp_i_mark->wd;

out_err:
	/* match the ref from fsnotify_init_mark() */
	fsnotify_put_mark(&tmp_i_mark->fsn_mark);

	return ret;
}
static int inotify_update_watch(struct fsnotify_group *group, struct inode *inode, u32 arg)
{
	int ret = 0;

retry:
	/* try to update an existing watch with the new arg */
	ret = inotify_update_existing_watch(group, inode, arg);
	/* no mark present, try to add a new one */
	if (ret == -ENOENT)
		ret = inotify_new_watch(group, inode, arg);
	/*
	 * inotify_new_watch could race with another thread which did an
	 * inotify_new_watch between the update_existing and the add watch
	 * here, go back and try to update an existing mark again.
	 */
	if (ret == -EEXIST)
		goto retry;

	return ret;
}
static struct fsnotify_group *inotify_new_group(unsigned int max_events)
{
	struct fsnotify_group *group;

	group = fsnotify_alloc_group(&inotify_fsnotify_ops);
	if (IS_ERR(group))
		return group;

	group->max_events = max_events;

	spin_lock_init(&group->inotify_data.idr_lock);
	idr_init(&group->inotify_data.idr);
	group->inotify_data.last_wd = 0;
	group->inotify_data.fa = NULL;
	group->inotify_data.user = get_current_user();

	if (atomic_inc_return(&group->inotify_data.user->inotify_devs) >
	    inotify_max_user_instances) {
		fsnotify_put_group(group);
		return ERR_PTR(-EMFILE);
	}

	return group;
}
/* inotify syscalls */
SYSCALL_DEFINE1(inotify_init1, int, flags)
{
	struct fsnotify_group *group;
	int ret;

	/* Check the IN_* constants for consistency. */
	BUILD_BUG_ON(IN_CLOEXEC != O_CLOEXEC);
	BUILD_BUG_ON(IN_NONBLOCK != O_NONBLOCK);

	if (flags & ~(IN_CLOEXEC | IN_NONBLOCK))
		return -EINVAL;

	/* inotify_new_group took a reference to group, we put this when we kill the file in the end */
	group = inotify_new_group(inotify_max_queued_events);
	if (IS_ERR(group))
		return PTR_ERR(group);

	ret = anon_inode_getfd("inotify", &inotify_fops, group,
			       O_RDONLY | flags);
	if (ret < 0)
		fsnotify_put_group(group);

	return ret;
}

SYSCALL_DEFINE0(inotify_init)
{
	return sys_inotify_init1(0);
}
SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
		u32, mask)
{
	struct fsnotify_group *group;
	struct inode *inode;
	struct path path;
	struct path alteredpath;
	struct path *canonical_path = &path;
	struct file *filp;
	int ret, fput_needed;
	unsigned flags = 0;

	/* don't allow invalid bits: we don't want flags set */
	if (unlikely(!(mask & ALL_INOTIFY_BITS)))
		return -EINVAL;

	filp = fget_light(fd, &fput_needed);
	if (unlikely(!filp))
		return -EBADF;

	/* verify that this is indeed an inotify instance */
	if (unlikely(filp->f_op != &inotify_fops)) {
		ret = -EINVAL;
		goto fput_and_out;
	}

	if (!(mask & IN_DONT_FOLLOW))
		flags |= LOOKUP_FOLLOW;
	if (mask & IN_ONLYDIR)
		flags |= LOOKUP_DIRECTORY;

	ret = inotify_find_inode(pathname, &path, flags);
	if (ret)
		goto fput_and_out;

	/* support stacked filesystems */
	if (path.dentry && path.dentry->d_op) {
		if (path.dentry->d_op->d_canonical_path) {
			path.dentry->d_op->d_canonical_path(&path, &alteredpath);
			canonical_path = &alteredpath;
			path_put(&path);
		}
	}

	/* inode held in place by reference to path; group by fget on fd */
	inode = canonical_path->dentry->d_inode;
	group = filp->private_data;

	/* create/update an inode mark */
	ret = inotify_update_watch(group, inode, mask);
	path_put(canonical_path);

fput_and_out:
	fput_light(filp, fput_needed);
	return ret;
}
SYSCALL_DEFINE2(inotify_rm_watch, int, fd, __s32, wd)
{
	struct fsnotify_group *group;
	struct inotify_inode_mark *i_mark;
	struct file *filp;
	int ret = 0, fput_needed;

	filp = fget_light(fd, &fput_needed);
	if (unlikely(!filp))
		return -EBADF;

	/* verify that this is indeed an inotify instance */
	ret = -EINVAL;
	if (unlikely(filp->f_op != &inotify_fops))
		goto out;

	group = filp->private_data;

	ret = -EINVAL;
	i_mark = inotify_idr_find(group, wd);
	if (unlikely(!i_mark))
		goto out;

	ret = 0;

	fsnotify_destroy_mark(&i_mark->fsn_mark);

	/* match ref taken by inotify_idr_find */
	fsnotify_put_mark(&i_mark->fsn_mark);

out:
	fput_light(filp, fput_needed);
	return ret;
}
/*
 * inotify_user_setup - Our initialization function. Note that we cannot return
 * error because we have compiled-in VFS hooks. So an (unlikely) failure here
 * must result in panic().
 */
static int __init inotify_user_setup(void)
{
	BUILD_BUG_ON(IN_ACCESS != FS_ACCESS);
	BUILD_BUG_ON(IN_MODIFY != FS_MODIFY);
	BUILD_BUG_ON(IN_ATTRIB != FS_ATTRIB);
	BUILD_BUG_ON(IN_CLOSE_WRITE != FS_CLOSE_WRITE);
	BUILD_BUG_ON(IN_CLOSE_NOWRITE != FS_CLOSE_NOWRITE);
	BUILD_BUG_ON(IN_OPEN != FS_OPEN);
	BUILD_BUG_ON(IN_MOVED_FROM != FS_MOVED_FROM);
	BUILD_BUG_ON(IN_MOVED_TO != FS_MOVED_TO);
	BUILD_BUG_ON(IN_CREATE != FS_CREATE);
	BUILD_BUG_ON(IN_DELETE != FS_DELETE);
	BUILD_BUG_ON(IN_DELETE_SELF != FS_DELETE_SELF);
	BUILD_BUG_ON(IN_MOVE_SELF != FS_MOVE_SELF);
	BUILD_BUG_ON(IN_UNMOUNT != FS_UNMOUNT);
	BUILD_BUG_ON(IN_Q_OVERFLOW != FS_Q_OVERFLOW);
	BUILD_BUG_ON(IN_IGNORED != FS_IN_IGNORED);
	BUILD_BUG_ON(IN_EXCL_UNLINK != FS_EXCL_UNLINK);
	BUILD_BUG_ON(IN_ISDIR != FS_ISDIR);
	BUILD_BUG_ON(IN_ONESHOT != FS_IN_ONESHOT);

	BUG_ON(hweight32(ALL_INOTIFY_BITS) != 21);

	inotify_inode_mark_cachep = KMEM_CACHE(inotify_inode_mark, SLAB_PANIC);
	event_priv_cachep = KMEM_CACHE(inotify_event_private_data, SLAB_PANIC);

	inotify_max_queued_events = 16384;
	inotify_max_user_instances = 128;
	inotify_max_user_watches = 8192;

	return 0;
}
module_init(inotify_user_setup);
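
For reference, below is a minimal userspace sketch of the syscall interface implemented above. It is not part of the kernel source: it uses the standard glibc wrappers from <sys/inotify.h>, and the watched path "/tmp", the IN_CREATE | IN_DELETE mask, and the 4 KiB buffer are arbitrary choices made only for illustration.

/*
 * Illustrative only: exercises inotify_init1(), inotify_add_watch(),
 * FIONREAD (handled by inotify_ioctl() above), read() (inotify_read()
 * above) and inotify_rm_watch().
 */
#include <sys/inotify.h>
#include <sys/ioctl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	char buf[4096] __attribute__((aligned(__alignof__(struct inotify_event))));
	int fd, wd, pending = 0;
	ssize_t len;
	char *p;

	fd = inotify_init1(IN_CLOEXEC);	/* SYSCALL_DEFINE1(inotify_init1, ...) */
	if (fd < 0) {
		perror("inotify_init1");
		return EXIT_FAILURE;
	}

	/* creates an inode mark; the returned wd lives in the group's idr */
	wd = inotify_add_watch(fd, "/tmp", IN_CREATE | IN_DELETE);
	if (wd < 0) {
		perror("inotify_add_watch");
		return EXIT_FAILURE;
	}

	/* bytes currently queued, as computed by the FIONREAD case above */
	if (ioctl(fd, FIONREAD, &pending) == 0)
		printf("%d bytes of events pending\n", pending);

	/* one blocking read; events are laid out as copy_event_to_user() wrote them */
	len = read(fd, buf, sizeof(buf));
	for (p = buf; len > 0 && p < buf + len; ) {
		struct inotify_event *ev = (struct inotify_event *)p;

		printf("wd=%d mask=%#x name=%s\n", ev->wd, ev->mask,
		       ev->len ? ev->name : "");
		p += sizeof(*ev) + ev->len;	/* ev->len already includes the NUL padding */
	}

	inotify_rm_watch(fd, wd);	/* queues IN_IGNORED for this wd */
	close(fd);
	return 0;
}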