inode_mark.c

/*
 * Copyright (C) 2008 Red Hat, Inc., Eric Paris <eparis@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>

#include <linux/atomic.h>

#include <linux/fsnotify_backend.h>
#include "fsnotify.h"

#include "../internal.h"
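
/*
 * This file implements the inode side of fsnotify marks: attaching marks to
 * inode->i_fsnotify_marks, keeping inode->i_fsnotify_mask in sync with the
 * marks on that list, finding and destroying individual marks, and tearing
 * everything down when a superblock is unmounted.
 */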

/*
 * Recalculate the mask of events relevant to a given inode.  The caller
 * must already hold inode->i_lock.
 */
static void fsnotify_recalc_inode_mask_locked(struct inode *inode)
{
	struct fsnotify_mark *mark;
	struct hlist_node *pos;
	__u32 new_mask = 0;

	assert_spin_locked(&inode->i_lock);

	hlist_for_each_entry(mark, pos, &inode->i_fsnotify_marks, i.i_list)
		new_mask |= mark->mask;
	inode->i_fsnotify_mask = new_mask;
}

/*
 * Recalculate the inode->i_fsnotify_mask, or the mask of all FS_* event types
 * any notifier is interested in hearing for this inode.
 */
void fsnotify_recalc_inode_mask(struct inode *inode)
{
	spin_lock(&inode->i_lock);
	fsnotify_recalc_inode_mask_locked(inode);
	spin_unlock(&inode->i_lock);

	__fsnotify_update_child_dentry_flags(inode);
}
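
/*
 * Detach a mark from its inode: unhook it from inode->i_fsnotify_marks,
 * clear mark->i.inode and bring inode->i_fsnotify_mask back in sync.
 * The caller must hold the owning group's mark_mutex and mark->lock,
 * which is what the two assertions below check.
 */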
void fsnotify_destroy_inode_mark(struct fsnotify_mark *mark)
{
	struct inode *inode = mark->i.inode;

	BUG_ON(!mutex_is_locked(&mark->group->mark_mutex));
	assert_spin_locked(&mark->lock);

	spin_lock(&inode->i_lock);

	hlist_del_init_rcu(&mark->i.i_list);
	mark->i.inode = NULL;

	/*
	 * this mark is now off the inode->i_fsnotify_marks list and we
	 * hold the inode->i_lock, so this is the perfect time to update the
	 * inode->i_fsnotify_mask
	 */
	fsnotify_recalc_inode_mask_locked(inode);

	spin_unlock(&inode->i_lock);
}

/*
 * Given an inode, destroy all of the marks associated with that inode.
 */
void fsnotify_clear_marks_by_inode(struct inode *inode)
{
	struct fsnotify_mark *mark, *lmark;
	struct hlist_node *pos, *n;
	LIST_HEAD(free_list);

	/*
	 * Collect the marks on a private list under i_lock; the actual
	 * teardown happens after i_lock is dropped, because
	 * fsnotify_destroy_mark() takes the group's mark_mutex and may sleep.
	 */
	spin_lock(&inode->i_lock);
	hlist_for_each_entry_safe(mark, pos, n, &inode->i_fsnotify_marks, i.i_list) {
		list_add(&mark->i.free_i_list, &free_list);
		hlist_del_init_rcu(&mark->i.i_list);
		fsnotify_get_mark(mark);
	}
	spin_unlock(&inode->i_lock);

	list_for_each_entry_safe(mark, lmark, &free_list, i.free_i_list) {
		struct fsnotify_group *group;

		spin_lock(&mark->lock);
		fsnotify_get_group(mark->group);
		group = mark->group;
		spin_unlock(&mark->lock);

		fsnotify_destroy_mark(mark, group);
		fsnotify_put_mark(mark);
		fsnotify_put_group(group);
	}
}

/*
 * Given a group, clear all of the inode marks associated with that group.
 */
void fsnotify_clear_inode_marks_by_group(struct fsnotify_group *group)
{
	fsnotify_clear_marks_by_group_flags(group, FSNOTIFY_MARK_FLAG_INODE);
}

/*
 * given a group and inode, find the mark associated with that combination.
 * if found take a reference to that mark and return it, else return NULL
 */
static struct fsnotify_mark *fsnotify_find_inode_mark_locked(
		struct fsnotify_group *group,
		struct inode *inode)
{
	struct fsnotify_mark *mark;
	struct hlist_node *pos;

	assert_spin_locked(&inode->i_lock);

	hlist_for_each_entry(mark, pos, &inode->i_fsnotify_marks, i.i_list) {
		if (mark->group == group) {
			fsnotify_get_mark(mark);
			return mark;
		}
	}
	return NULL;
}
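
/*
 * A typical caller of fsnotify_find_inode_mark() below looks roughly like
 * this (a sketch only; see the inotify and fanotify backends for real users):
 *
 *	mark = fsnotify_find_inode_mark(group, inode);
 *	if (mark) {
 *		... adjust mark->mask as needed ...
 *		fsnotify_put_mark(mark);
 *	}
 */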

/*
 * given a group and inode, find the mark associated with that combination.
 * if found take a reference to that mark and return it, else return NULL.
 * The caller is responsible for dropping that reference with
 * fsnotify_put_mark() when it is done with the mark.
 */
struct fsnotify_mark *fsnotify_find_inode_mark(struct fsnotify_group *group,
					       struct inode *inode)
{
	struct fsnotify_mark *mark;

	spin_lock(&inode->i_lock);
	mark = fsnotify_find_inode_mark_locked(group, inode);
	spin_unlock(&inode->i_lock);

	return mark;
}

/*
 * If we are setting a mark mask on an inode mark we should pin the inode
 * in memory.
 */
void fsnotify_set_inode_mark_mask_locked(struct fsnotify_mark *mark,
					 __u32 mask)
{
	struct inode *inode;

	assert_spin_locked(&mark->lock);

	if (mask &&
	    mark->i.inode &&
	    !(mark->flags & FSNOTIFY_MARK_FLAG_OBJECT_PINNED)) {
		mark->flags |= FSNOTIFY_MARK_FLAG_OBJECT_PINNED;
		inode = igrab(mark->i.inode);
		/*
		 * we shouldn't be able to get here if the inode wasn't
		 * already safely held in memory, but BUG() in case that
		 * assumption is ever wrong.
		 */
		BUG_ON(!inode);
	}
}
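
/*
 * Note: backends do not normally call fsnotify_add_inode_mark() directly.
 * They go through fsnotify_add_mark(), which takes group->mark_mutex and
 * mark->lock before dispatching here for inode marks.  Roughly (a sketch,
 * with my_free_mark standing in for a backend-supplied destructor):
 *
 *	fsnotify_init_mark(mark, my_free_mark);
 *	ret = fsnotify_add_mark(mark, group, inode, NULL, 0);
 */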

/*
 * Attach an initialized mark to a given inode.
 * These marks may be used for the fsnotify backend to determine which
 * event types should be delivered to which group and for which inodes.  These
 * marks are ordered according to priority, highest number first, and then by
 * the group's location in memory.
 */
int fsnotify_add_inode_mark(struct fsnotify_mark *mark,
			    struct fsnotify_group *group, struct inode *inode,
			    int allow_dups)
{
	struct fsnotify_mark *lmark;
	struct hlist_node *node, *last = NULL;
	int ret = 0;

	mark->flags |= FSNOTIFY_MARK_FLAG_INODE;

	BUG_ON(!mutex_is_locked(&group->mark_mutex));
	assert_spin_locked(&mark->lock);

	spin_lock(&inode->i_lock);

	mark->i.inode = inode;

	/* is mark the first mark? */
	if (hlist_empty(&inode->i_fsnotify_marks)) {
		hlist_add_head_rcu(&mark->i.i_list, &inode->i_fsnotify_marks);
		goto out;
	}

	/* should mark be in the middle of the current list? */
	hlist_for_each_entry(lmark, node, &inode->i_fsnotify_marks, i.i_list) {
		last = node;

		if ((lmark->group == group) && !allow_dups) {
			ret = -EEXIST;
			goto out;
		}

		if (mark->group->priority < lmark->group->priority)
			continue;

		if ((mark->group->priority == lmark->group->priority) &&
		    (mark->group < lmark->group))
			continue;

		hlist_add_before_rcu(&mark->i.i_list, &lmark->i.i_list);
		goto out;
	}

	BUG_ON(last == NULL);
	/* mark should be the last entry.  last is the current last entry */
	hlist_add_after_rcu(last, &mark->i.i_list);
out:
	fsnotify_recalc_inode_mask_locked(inode);
	spin_unlock(&inode->i_lock);

	return ret;
}

/**
 * fsnotify_unmount_inodes - an sb is unmounting.  handle any watched inodes.
 * @list: list of inodes being unmounted (sb->s_inodes)
 *
 * Called during unmount with no locks held, so needs to be safe against
 * concurrent modifiers. We temporarily drop inode_sb_list_lock and CAN block.
 */
void fsnotify_unmount_inodes(struct list_head *list)
{
	struct inode *inode, *next_i, *need_iput = NULL;

	spin_lock(&inode_sb_list_lock);
	list_for_each_entry_safe(inode, next_i, list, i_sb_list) {
		struct inode *need_iput_tmp;

		/*
		 * We cannot __iget() an inode in state I_FREEING,
		 * I_WILL_FREE, or I_NEW which is fine because by that point
		 * the inode cannot have any associated watches.
		 */
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) {
			spin_unlock(&inode->i_lock);
			continue;
		}

		/*
		 * If i_count is zero, the inode cannot have any watches and
		 * doing an __iget/iput with MS_ACTIVE clear would actually
		 * evict all inodes with zero i_count from icache which is
		 * unnecessarily violent and may in fact be illegal to do.
		 */
		if (!atomic_read(&inode->i_count)) {
			spin_unlock(&inode->i_lock);
			continue;
		}

		need_iput_tmp = need_iput;
		need_iput = NULL;

		/* In case fsnotify_inode_delete() drops a reference. */
		if (inode != need_iput_tmp)
			__iget(inode);
		else
			need_iput_tmp = NULL;
		spin_unlock(&inode->i_lock);

		/* In case the dropping of a reference would nuke next_i. */
		if ((&next_i->i_sb_list != list) &&
		    atomic_read(&next_i->i_count)) {
			spin_lock(&next_i->i_lock);
			if (!(next_i->i_state & (I_FREEING | I_WILL_FREE))) {
				__iget(next_i);
				need_iput = next_i;
			}
			spin_unlock(&next_i->i_lock);
		}

		/*
		 * We can safely drop inode_sb_list_lock here because we hold
		 * references on both inode and next_i.  Also no new inodes
		 * will be added since the umount has begun.
		 */
		spin_unlock(&inode_sb_list_lock);

		if (need_iput_tmp)
			iput(need_iput_tmp);

		/* for each watch, send FS_UNMOUNT and then remove it */
		fsnotify(inode, FS_UNMOUNT, inode, FSNOTIFY_EVENT_INODE, NULL, 0);

		fsnotify_inode_delete(inode);

		iput(inode);

		spin_lock(&inode_sb_list_lock);
	}
	spin_unlock(&inode_sb_list_lock);
}