inode_mark.c

/*
 * Copyright (C) 2008 Red Hat, Inc., Eric Paris <eparis@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>

#include <asm/atomic.h>

#include <linux/fsnotify_backend.h>
#include "fsnotify.h"

#include "../internal.h"

/*
 * Recalculate the mask of events relevant to a given inode locked.
 */
static void fsnotify_recalc_inode_mask_locked(struct inode *inode)
{
        struct fsnotify_mark *mark;
        struct hlist_node *pos;
        __u32 new_mask = 0;

        assert_spin_locked(&inode->i_lock);

        hlist_for_each_entry(mark, pos, &inode->i_fsnotify_marks, i.i_list)
                new_mask |= mark->mask;
        inode->i_fsnotify_mask = new_mask;
}

/*
 * Recalculate the inode->i_fsnotify_mask, or the mask of all FS_* event types
 * any notifier is interested in hearing for this inode.
 */
void fsnotify_recalc_inode_mask(struct inode *inode)
{
        spin_lock(&inode->i_lock);
        fsnotify_recalc_inode_mask_locked(inode);
        spin_unlock(&inode->i_lock);

        __fsnotify_update_child_dentry_flags(inode);
}
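
/*
 * Detach a mark from its inode: take it off the inode's mark list, clear
 * mark->i.inode, and recompute the inode's i_fsnotify_mask.  The caller
 * must already hold mark->lock and the group's mark_lock.
 */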
void fsnotify_destroy_inode_mark(struct fsnotify_mark *mark)
{
        struct inode *inode = mark->i.inode;

        assert_spin_locked(&mark->lock);
        assert_spin_locked(&mark->group->mark_lock);

        spin_lock(&inode->i_lock);

        hlist_del_init_rcu(&mark->i.i_list);
        mark->i.inode = NULL;

        /*
         * this mark is now off the inode->i_fsnotify_marks list and we
         * hold the inode->i_lock, so this is the perfect time to update the
         * inode->i_fsnotify_mask
         */
        fsnotify_recalc_inode_mask_locked(inode);

        spin_unlock(&inode->i_lock);
}

/*
 * Given an inode, destroy all of the marks associated with that inode.
 */
void fsnotify_clear_marks_by_inode(struct inode *inode)
{
        struct fsnotify_mark *mark, *lmark;
        struct hlist_node *pos, *n;
        LIST_HEAD(free_list);
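
        /*
         * Pull each mark off the inode onto a private list while holding
         * i_lock, then destroy and drop the marks after the lock is released.
         */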
        spin_lock(&inode->i_lock);
        hlist_for_each_entry_safe(mark, pos, n, &inode->i_fsnotify_marks, i.i_list) {
                list_add(&mark->i.free_i_list, &free_list);
                hlist_del_init_rcu(&mark->i.i_list);
                fsnotify_get_mark(mark);
        }
        spin_unlock(&inode->i_lock);

        list_for_each_entry_safe(mark, lmark, &free_list, i.free_i_list) {
                fsnotify_destroy_mark(mark);
                fsnotify_put_mark(mark);
        }
}

/*
 * Given a group clear all of the inode marks associated with that group.
 */
void fsnotify_clear_inode_marks_by_group(struct fsnotify_group *group)
{
        fsnotify_clear_marks_by_group_flags(group, FSNOTIFY_MARK_FLAG_INODE);
}

/*
 * given a group and inode, find the mark associated with that combination.
 * if found take a reference to that mark and return it, else return NULL.
 * the caller must hold inode->i_lock.
 */
struct fsnotify_mark *fsnotify_find_inode_mark_locked(struct fsnotify_group *group,
                                                      struct inode *inode)
{
        struct fsnotify_mark *mark;
        struct hlist_node *pos;

        assert_spin_locked(&inode->i_lock);

        hlist_for_each_entry(mark, pos, &inode->i_fsnotify_marks, i.i_list) {
                if (mark->group == group) {
                        fsnotify_get_mark(mark);
                        return mark;
                }
        }
        return NULL;
}

/*
 * given a group and inode, find the mark associated with that combination.
 * if found take a reference to that mark and return it, else return NULL
 */
struct fsnotify_mark *fsnotify_find_inode_mark(struct fsnotify_group *group,
                                               struct inode *inode)
{
        struct fsnotify_mark *mark;

        spin_lock(&inode->i_lock);
        mark = fsnotify_find_inode_mark_locked(group, inode);
        spin_unlock(&inode->i_lock);

        return mark;
}

/*
 * If we are setting a mark mask on an inode mark we should pin the inode
 * in memory.
 */
void fsnotify_set_inode_mark_mask_locked(struct fsnotify_mark *mark,
                                         __u32 mask)
{
        struct inode *inode;

        assert_spin_locked(&mark->lock);

        if (mask &&
            mark->i.inode &&
            !(mark->flags & FSNOTIFY_MARK_FLAG_OBJECT_PINNED)) {
                mark->flags |= FSNOTIFY_MARK_FLAG_OBJECT_PINNED;
                inode = igrab(mark->i.inode);
                /*
                 * we shouldn't be able to get here if the inode wasn't
                 * already safely held in memory.  But bug in case it
                 * ever is wrong.
                 */
                BUG_ON(!inode);
        }
}

/*
 * Attach an initialized mark to a given inode.
 * These marks may be used for the fsnotify backend to determine which
 * event types should be delivered to which group and for which inodes.  These
 * marks are ordered according to priority, highest number first, and then by
 * the group's location in memory.
 */
int fsnotify_add_inode_mark(struct fsnotify_mark *mark,
                            struct fsnotify_group *group, struct inode *inode,
                            int allow_dups)
{
        struct fsnotify_mark *lmark;
        struct hlist_node *node, *last = NULL;
        int ret = 0;

        mark->flags |= FSNOTIFY_MARK_FLAG_INODE;

        assert_spin_locked(&mark->lock);
        assert_spin_locked(&group->mark_lock);

        spin_lock(&inode->i_lock);

        mark->i.inode = inode;

        /* is mark the first mark? */
        if (hlist_empty(&inode->i_fsnotify_marks)) {
                hlist_add_head_rcu(&mark->i.i_list, &inode->i_fsnotify_marks);
                goto out;
        }

        /* should mark be in the middle of the current list? */
        hlist_for_each_entry(lmark, node, &inode->i_fsnotify_marks, i.i_list) {
                last = node;

                if ((lmark->group == group) && !allow_dups) {
                        ret = -EEXIST;
                        goto out;
                }

                if (mark->group->priority < lmark->group->priority)
                        continue;

                if ((mark->group->priority == lmark->group->priority) &&
                    (mark->group < lmark->group))
                        continue;

                hlist_add_before_rcu(&mark->i.i_list, &lmark->i.i_list);
                goto out;
        }

        BUG_ON(last == NULL);
        /* mark should be the last entry.  last is the current last entry */
        hlist_add_after_rcu(last, &mark->i.i_list);
out:
        fsnotify_recalc_inode_mask_locked(inode);
        spin_unlock(&inode->i_lock);

        return ret;
}

/**
 * fsnotify_unmount_inodes - an sb is unmounting.  handle any watched inodes.
 * @list: list of inodes being unmounted (sb->s_inodes)
 *
 * Called during unmount with no locks held, so needs to be safe against
 * concurrent modifiers.  We temporarily drop inode_sb_list_lock and CAN block.
 */
void fsnotify_unmount_inodes(struct list_head *list)
{
        struct inode *inode, *next_i, *need_iput = NULL;

        spin_lock(&inode_sb_list_lock);
        list_for_each_entry_safe(inode, next_i, list, i_sb_list) {
                struct inode *need_iput_tmp;

                /*
                 * We cannot __iget() an inode in state I_FREEING,
                 * I_WILL_FREE, or I_NEW which is fine because by that point
                 * the inode cannot have any associated watches.
                 */
                spin_lock(&inode->i_lock);
                if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) {
                        spin_unlock(&inode->i_lock);
                        continue;
                }

                /*
                 * If i_count is zero, the inode cannot have any watches and
                 * doing an __iget/iput with MS_ACTIVE clear would actually
                 * evict all inodes with zero i_count from icache which is
                 * unnecessarily violent and may in fact be illegal to do.
                 */
                if (!atomic_read(&inode->i_count)) {
                        spin_unlock(&inode->i_lock);
                        continue;
                }

                need_iput_tmp = need_iput;
                need_iput = NULL;

                /* In case fsnotify_inode_delete() drops a reference. */
                if (inode != need_iput_tmp)
                        __iget(inode);
                else
                        need_iput_tmp = NULL;
                spin_unlock(&inode->i_lock);

                /* In case the dropping of a reference would nuke next_i. */
                if ((&next_i->i_sb_list != list) &&
                    atomic_read(&next_i->i_count)) {
                        spin_lock(&next_i->i_lock);
                        if (!(next_i->i_state & (I_FREEING | I_WILL_FREE))) {
                                __iget(next_i);
                                need_iput = next_i;
                        }
                        spin_unlock(&next_i->i_lock);
                }

                /*
                 * We can safely drop inode_sb_list_lock here because we hold
                 * references on both inode and next_i.  Also no new inodes
                 * will be added since the umount has begun.
                 */
                spin_unlock(&inode_sb_list_lock);

                if (need_iput_tmp)
                        iput(need_iput_tmp);

                /* for each watch, send FS_UNMOUNT and then remove it */
                fsnotify(inode, FS_UNMOUNT, inode, FSNOTIFY_EVENT_INODE, NULL, 0);

                fsnotify_inode_delete(inode);

                iput(inode);

                spin_lock(&inode_sb_list_lock);
        }
        spin_unlock(&inode_sb_list_lock);
}