@@ -688,23 +688,27 @@ static int dqinit_needed(struct inode *inode, int type)
 
 /* This routine is guarded by dqonoff_mutex mutex */
 static void add_dquot_ref(struct super_block *sb, int type)
 {
-	struct list_head *p;
+	struct inode *inode;
 
 restart:
-	file_list_lock();
-	list_for_each(p, &sb->s_files) {
-		struct file *filp = list_entry(p, struct file, f_u.fu_list);
-		struct inode *inode = filp->f_path.dentry->d_inode;
-		if (filp->f_mode & FMODE_WRITE && dqinit_needed(inode, type)) {
-			struct dentry *dentry = dget(filp->f_path.dentry);
-			file_list_unlock();
-			sb->dq_op->initialize(inode, type);
-			dput(dentry);
-			/* As we may have blocked we had better restart... */
-			goto restart;
-		}
+	spin_lock(&inode_lock);
+	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
+		if (!atomic_read(&inode->i_writecount))
+			continue;
+		if (!dqinit_needed(inode, type))
+			continue;
+		if (inode->i_state & (I_FREEING|I_WILL_FREE))
+			continue;
+
+		__iget(inode);
+		spin_unlock(&inode_lock);
+
+		sb->dq_op->initialize(inode, type);
+		iput(inode);
+		/* As we may have blocked we had better restart... */
+		goto restart;
 	}
-	file_list_unlock();
+	spin_unlock(&inode_lock);
 }
 
 /* Return 0 if dqput() won't block (note that 1 doesn't necessarily mean blocking) */