@@ -963,6 +963,43 @@ static void kill_suid(struct dentry *dentry)
 	mutex_unlock(&dentry->d_inode->i_mutex);
 }
 
+/*
+ * Gathered writes: If another process is currently writing to the file,
+ * there's a high chance this is another nfsd (triggered by a bulk write
+ * from a client's biod). Rather than syncing the file with each write
+ * request, we sleep for 10 msec.
+ *
+ * I don't know if this roughly approximates C. Juszak's idea of
+ * gathered writes, but it's a nice and simple solution (IMHO), and it
+ * seems to work:-)
+ *
+ * Note: we do this only in the NFSv2 case, since v3 and higher have a
+ * better tool (separate unstable writes and commits) for solving this
+ * problem.
+ */
+static int wait_for_concurrent_writes(struct file *file)
+{
+	struct inode *inode = file->f_path.dentry->d_inode;
+	static ino_t last_ino;
+	static dev_t last_dev;
+	int err = 0;
+
+	if (atomic_read(&inode->i_writecount) > 1
+	    || (last_ino == inode->i_ino && last_dev == inode->i_sb->s_dev)) {
+		dprintk("nfsd: write defer %d\n", task_pid_nr(current));
+		msleep(10);
+		dprintk("nfsd: write resume %d\n", task_pid_nr(current));
+	}
+
+	if (inode->i_state & I_DIRTY) {
+		dprintk("nfsd: write sync %d\n", task_pid_nr(current));
+		err = nfsd_sync(file);
+	}
+	last_ino = inode->i_ino;
+	last_dev = inode->i_sb->s_dev;
+	return err;
+}
+
 static __be32
 nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
 				loff_t offset, struct kvec *vec, int vlen,
@@ -1026,36 +1063,8 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
 	if (host_err >= 0 && (inode->i_mode & (S_ISUID | S_ISGID)))
 		kill_suid(dentry);
 
-	if (host_err >= 0 && stable && use_wgather) {
-		static ino_t last_ino;
-		static dev_t last_dev;
-
-		/*
-		 * Gathered writes: If another process is currently
-		 * writing to the file, there's a high chance
-		 * this is another nfsd (triggered by a bulk write
-		 * from a client's biod). Rather than syncing the
-		 * file with each write request, we sleep for 10 msec.
-		 *
-		 * I don't know if this roughly approximates
-		 * C. Juszak's idea of gathered writes, but it's a
-		 * nice and simple solution (IMHO), and it seems to
-		 * work:-)
-		 */
-		if (atomic_read(&inode->i_writecount) > 1
-		    || (last_ino == inode->i_ino && last_dev == inode->i_sb->s_dev)) {
-			dprintk("nfsd: write defer %d\n", task_pid_nr(current));
-			msleep(10);
-			dprintk("nfsd: write resume %d\n", task_pid_nr(current));
-		}
-
-		if (inode->i_state & I_DIRTY) {
-			dprintk("nfsd: write sync %d\n", task_pid_nr(current));
-			host_err=nfsd_sync(file);
-		}
-		last_ino = inode->i_ino;
-		last_dev = inode->i_sb->s_dev;
-	}
+	if (host_err >= 0 && stable && use_wgather)
+		host_err = wait_for_concurrent_writes(file);
 
dprintk("nfsd: write complete host_err=%d\n", host_err);
|
|
dprintk("nfsd: write complete host_err=%d\n", host_err);
|
|
if (host_err >= 0)
|
|
if (host_err >= 0)
|
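
[Illustration, not part of the patch: a minimal userspace sketch of the
heuristic wait_for_concurrent_writes() implements -- remember the
(dev, ino) of the last file we synced, defer ~10 msec when the same file
is written again or other writers are active, and flush only if data is
still dirty. The gathered_sync() name and the other_writers/dirty
parameters are hypothetical stand-ins for the kernel-side
i_writecount/I_DIRTY checks, with fsync(2) standing in for nfsd_sync().]

#include <stdio.h>
#include <time.h>
#include <unistd.h>
#include <sys/stat.h>

static ino_t last_ino;
static dev_t last_dev;

/* other_writers/dirty approximate i_writecount and the I_DIRTY state */
static int gathered_sync(int fd, int other_writers, int dirty)
{
	struct timespec delay = { 0, 10 * 1000 * 1000 };	/* 10 msec */
	struct stat st;
	int err = 0;

	if (fstat(fd, &st) < 0)
		return -1;

	/*
	 * Someone else is writing, or we just synced this same file:
	 * defer so neighbouring writes can share one flush.
	 */
	if (other_writers > 1
	    || (last_ino == st.st_ino && last_dev == st.st_dev)) {
		fprintf(stderr, "gathered_sync: defer %ld\n", (long)getpid());
		nanosleep(&delay, NULL);
		fprintf(stderr, "gathered_sync: resume %ld\n", (long)getpid());
	}

	/* Sync only if the gathered data still needs flushing. */
	if (dirty)
		err = fsync(fd);

	last_ino = st.st_ino;
	last_dev = st.st_dev;
	return err;
}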