@@ -121,18 +121,18 @@ void fsnotify_put_mark(struct fsnotify_mark *mark)
  * The caller had better be holding a reference to this mark so we don't actually
  * do the final put under the mark->lock
  */
-void fsnotify_destroy_mark(struct fsnotify_mark *mark,
-			   struct fsnotify_group *group)
+void fsnotify_destroy_mark_locked(struct fsnotify_mark *mark,
+				  struct fsnotify_group *group)
 {
 	struct inode *inode = NULL;
 
-	mutex_lock(&group->mark_mutex);
+	BUG_ON(!mutex_is_locked(&group->mark_mutex));
+
 	spin_lock(&mark->lock);
 
 	/* something else already called this function on this mark */
 	if (!(mark->flags & FSNOTIFY_MARK_FLAG_ALIVE)) {
 		spin_unlock(&mark->lock);
-		mutex_unlock(&group->mark_mutex);
 		return;
 	}
 
@@ -149,6 +149,8 @@ void fsnotify_destroy_mark(struct fsnotify_mark *mark,
 	list_del_init(&mark->g_list);
 
 	spin_unlock(&mark->lock);
+
+	/* release lock temporarily */
 	mutex_unlock(&group->mark_mutex);
 
 	spin_lock(&destroy_lock);
@@ -184,6 +186,16 @@ void fsnotify_destroy_mark(struct fsnotify_mark *mark,
 	 */
 
 	atomic_dec(&group->num_marks);
+
+	mutex_lock(&group->mark_mutex);
+}
+
+void fsnotify_destroy_mark(struct fsnotify_mark *mark,
+			   struct fsnotify_group *group)
+{
+	mutex_lock(&group->mark_mutex);
+	fsnotify_destroy_mark_locked(mark, group);
+	mutex_unlock(&group->mark_mutex);
 }
 
 void fsnotify_set_mark_mask_locked(struct fsnotify_mark *mark, __u32 mask)
@@ -208,14 +220,15 @@ void fsnotify_set_mark_ignored_mask_locked(struct fsnotify_mark *mark, __u32 mas
  * These marks may be used for the fsnotify backend to determine which
  * event types should be delivered to which group.
  */
-int fsnotify_add_mark(struct fsnotify_mark *mark,
-		      struct fsnotify_group *group, struct inode *inode,
-		      struct vfsmount *mnt, int allow_dups)
+int fsnotify_add_mark_locked(struct fsnotify_mark *mark,
+			     struct fsnotify_group *group, struct inode *inode,
+			     struct vfsmount *mnt, int allow_dups)
 {
 	int ret = 0;
 
 	BUG_ON(inode && mnt);
 	BUG_ON(!inode && !mnt);
+	BUG_ON(!mutex_is_locked(&group->mark_mutex));
 
 	/*
 	 * LOCKING ORDER!!!!
@@ -223,8 +236,6 @@ int fsnotify_add_mark(struct fsnotify_mark *mark,
 	 * mark->lock
 	 * inode->i_lock
 	 */
-	mutex_lock(&group->mark_mutex);
-
 	spin_lock(&mark->lock);
 	mark->flags |= FSNOTIFY_MARK_FLAG_ALIVE;
 
@@ -250,8 +261,6 @@ int fsnotify_add_mark(struct fsnotify_mark *mark,
 	fsnotify_set_mark_mask_locked(mark, mark->mask);
 	spin_unlock(&mark->lock);
 
-	mutex_unlock(&group->mark_mutex);
-
 	if (inode)
 		__fsnotify_update_child_dentry_flags(inode);
 
@@ -264,7 +273,6 @@ err:
 	atomic_dec(&group->num_marks);
 
 	spin_unlock(&mark->lock);
-	mutex_unlock(&group->mark_mutex);
 
 	spin_lock(&destroy_lock);
 	list_add(&mark->destroy_list, &destroy_list);
@@ -274,6 +282,16 @@ err:
 	return ret;
 }
 
+int fsnotify_add_mark(struct fsnotify_mark *mark, struct fsnotify_group *group,
+		      struct inode *inode, struct vfsmount *mnt, int allow_dups)
+{
+	int ret;
+	mutex_lock(&group->mark_mutex);
+	ret = fsnotify_add_mark_locked(mark, group, inode, mnt, allow_dups);
+	mutex_unlock(&group->mark_mutex);
+	return ret;
+}
+
 /*
  * clear any marks in a group in which mark->flags & flags is true
  */