
ceph: cleanup async writeback, truncation, invalidate helpers

Grab inode ref in helper.  Make work functions static, with consistent
naming.

Signed-off-by: Sage Weil <sage@newdream.net>
Sage Weil, 15 years ago
commit 3c6f6b79a6
4 changed files with 65 additions and 43 deletions
  1. fs/ceph/addr.c (+1, -2)
  2. fs/ceph/caps.c (+8, -17)
  3. fs/ceph/inode.c (+52, -9)
  4. fs/ceph/super.h (+4, -15)
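
The pattern this cleanup settles on: each ceph_queue_*() helper takes the inode reference itself, and only when queue_work() reports that the work was actually queued; the matching static *_work() function drops that reference with iput() when it finishes, so callers no longer pair the queueing call with an igrab(). Below is a minimal sketch of that pattern, not the actual ceph code: the example_* names, system_wq, and the bare filemap_fdatawrite() call are illustrative stand-ins for the real ceph workqueues, ceph_inode_info, and writeback path.

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/workqueue.h>

/* hypothetical per-inode state; the real code keeps this in ceph_inode_info */
struct example_inode_info {
	struct inode vfs_inode;
	struct work_struct wb_work;
};

static inline struct example_inode_info *EXAMPLE_I(struct inode *inode)
{
	return container_of(inode, struct example_inode_info, vfs_inode);
}

/* work function: static, named for what it does, drops the ref it was handed */
static void example_writeback_work(struct work_struct *work)
{
	struct example_inode_info *ei =
		container_of(work, struct example_inode_info, wb_work);
	struct inode *inode = &ei->vfs_inode;

	filemap_fdatawrite(inode->i_mapping);
	iput(inode);		/* balances the igrab() in the queue helper */
}

/* called once when the inode is set up */
void example_inode_init_work(struct example_inode_info *ei)
{
	INIT_WORK(&ei->wb_work, example_writeback_work);
}

/* queue helper: grabs the inode ref only if the work was really queued */
void example_queue_writeback(struct inode *inode)
{
	if (queue_work(system_wq, &EXAMPLE_I(inode)->wb_work))
		igrab(inode);	/* work is pending; keep the inode alive until it runs */
	/* else: already queued, covered by the reference taken earlier */
}

With the reference handling inside the helpers, the call sites in fs/ceph/addr.c and fs/ceph/caps.c below collapse to plain ceph_queue_writeback(inode) / ceph_queue_invalidate(inode) calls.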

+ 1 - 2
fs/ceph/addr.c

@@ -947,8 +947,7 @@ retry_locked:
 			 */
 			snapc = ceph_get_snap_context((void *)page->private);
 			unlock_page(page);
-			if (ceph_queue_writeback(inode))
-				igrab(inode);
+			ceph_queue_writeback(inode);
 			wait_event_interruptible(ci->i_cap_wq,
 			       context_is_writeable_or_written(inode, snapc));
 			ceph_put_snap_context(snapc);

+ 8 - 17
fs/ceph/caps.c

@@ -1602,8 +1602,7 @@ ack:
 	spin_unlock(&inode->i_lock);
 
 	if (queue_invalidate)
-		if (ceph_queue_page_invalidation(inode))
-			igrab(inode);
+		ceph_queue_invalidate(inode);
 
 	if (session && drop_session_lock)
 		mutex_unlock(&session->s_mutex);
@@ -2178,7 +2177,7 @@ static int handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant,
 	int wake = 0;
 	int writeback = 0;
 	int revoked_rdcache = 0;
-	int invalidate_async = 0;
+	int queue_invalidate = 0;
 	int tried_invalidate = 0;
 	int ret;
 
@@ -2205,7 +2204,7 @@ restart:
 			/* there were locked pages.. invalidate later
 			   in a separate thread. */
 			if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
-				invalidate_async = 1;
+				queue_invalidate = 1;
 				ci->i_rdcache_revoking = ci->i_rdcache_gen;
 			}
 		} else {
@@ -2319,21 +2318,15 @@ restart:
 	}
 
 	spin_unlock(&inode->i_lock);
-	if (writeback) {
+	if (writeback)
 		/*
 		 * queue inode for writeback: we can't actually call
 		 * filemap_write_and_wait, etc. from message handler
 		 * context.
 		 */
-		dout("queueing %p for writeback\n", inode);
-		if (ceph_queue_writeback(inode))
-			igrab(inode);
-	}
-	if (invalidate_async) {
-		dout("queueing %p for page invalidation\n", inode);
-		if (ceph_queue_page_invalidation(inode))
-			igrab(inode);
-	}
+		ceph_queue_writeback(inode);
+	if (queue_invalidate)
+		ceph_queue_invalidate(inode);
 	if (wake)
 		wake_up(&ci->i_cap_wq);
 	return reply;
@@ -2479,9 +2472,7 @@ static void handle_cap_trunc(struct inode *inode,
 	spin_unlock(&inode->i_lock);
 
 	if (queue_trunc)
-		if (queue_work(ceph_client(inode->i_sb)->trunc_wq,
-			       &ci->i_vmtruncate_work))
-			igrab(inode);
+		ceph_queue_vmtruncate(inode);
 }
 
 /*

+ 52 - 9
fs/ceph/inode.c

@@ -28,7 +28,9 @@
 
 static const struct inode_operations ceph_symlink_iops;
 
-static void ceph_inode_invalidate_pages(struct work_struct *work);
+static void ceph_invalidate_work(struct work_struct *work);
+static void ceph_writeback_work(struct work_struct *work);
+static void ceph_vmtruncate_work(struct work_struct *work);
 
 /*
  * find or create an inode, given the ceph ino number
@@ -357,8 +359,8 @@ struct inode *ceph_alloc_inode(struct super_block *sb)
 	INIT_LIST_HEAD(&ci->i_snap_realm_item);
 	INIT_LIST_HEAD(&ci->i_snap_flush_item);
 
-	INIT_WORK(&ci->i_wb_work, ceph_inode_writeback);
-	INIT_WORK(&ci->i_pg_inv_work, ceph_inode_invalidate_pages);
+	INIT_WORK(&ci->i_wb_work, ceph_writeback_work);
+	INIT_WORK(&ci->i_pg_inv_work, ceph_invalidate_work);
 
 	INIT_WORK(&ci->i_vmtruncate_work, ceph_vmtruncate_work);
 
@@ -675,9 +677,7 @@ no_change:
 
 	/* queue truncate if we saw i_size decrease */
 	if (queue_trunc)
-		if (queue_work(ceph_client(inode->i_sb)->trunc_wq,
-			       &ci->i_vmtruncate_work))
-			igrab(inode);
+		ceph_queue_vmtruncate(inode);
 
 	/* populate frag tree */
 	/* FIXME: move me up, if/when version reflects fragtree changes */
@@ -1243,7 +1243,18 @@ int ceph_inode_set_size(struct inode *inode, loff_t size)
  * Write back inode data in a worker thread.  (This can't be done
  * in the message handler context.)
  */
-void ceph_inode_writeback(struct work_struct *work)
+void ceph_queue_writeback(struct inode *inode)
+{
+	if (queue_work(ceph_inode_to_client(inode)->wb_wq,
+		       &ceph_inode(inode)->i_wb_work)) {
+		dout("ceph_queue_writeback %p\n", inode);
+		igrab(inode);
+	} else {
+		dout("ceph_queue_writeback %p failed\n", inode);
+	}
+}
+
+static void ceph_writeback_work(struct work_struct *work)
 {
 	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
 						  i_wb_work);
@@ -1254,11 +1265,25 @@ void ceph_inode_writeback(struct work_struct *work)
 	iput(inode);
 }
 
+/*
+ * queue an async invalidation
+ */
+void ceph_queue_invalidate(struct inode *inode)
+{
+	if (queue_work(ceph_inode_to_client(inode)->pg_inv_wq,
+		       &ceph_inode(inode)->i_pg_inv_work)) {
+		dout("ceph_queue_invalidate %p\n", inode);
+		igrab(inode);
+	} else {
+		dout("ceph_queue_invalidate %p failed\n", inode);
+	}
+}
+
 /*
  * Invalidate inode pages in a worker thread.  (This can't be done
  * in the message handler context.)
  */
-static void ceph_inode_invalidate_pages(struct work_struct *work)
+static void ceph_invalidate_work(struct work_struct *work)
 {
 	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
 						  i_pg_inv_work);
@@ -1307,7 +1332,7 @@ out:
  *
  * We also truncate in a separate thread as well.
  */
-void ceph_vmtruncate_work(struct work_struct *work)
+static void ceph_vmtruncate_work(struct work_struct *work)
 {
 	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
 						  i_vmtruncate_work);
@@ -1320,6 +1345,24 @@ void ceph_vmtruncate_work(struct work_struct *work)
 	iput(inode);
 }
 
+/*
+ * Queue an async vmtruncate.  If we fail to queue work, we will handle
+ * the truncation the next time we call __ceph_do_pending_vmtruncate.
+ */
+void ceph_queue_vmtruncate(struct inode *inode)
+{
+	struct ceph_inode_info *ci = ceph_inode(inode);
+
+	if (queue_work(ceph_client(inode->i_sb)->trunc_wq,
+		       &ci->i_vmtruncate_work)) {
+		dout("ceph_queue_vmtruncate %p\n", inode);
+		igrab(inode);
+	} else {
+		dout("ceph_queue_vmtruncate %p failed, pending=%d\n",
+		     inode, ci->i_truncate_pending);
+	}
+}
+
 /*
  * called with i_mutex held.
  *

+ 4 - 15
fs/ceph/super.h

@@ -573,18 +573,6 @@ static inline struct ceph_client *ceph_sb_to_client(struct super_block *sb)
 	return (struct ceph_client *)sb->s_fs_info;
 }
 
-static inline int ceph_queue_writeback(struct inode *inode)
-{
-	return queue_work(ceph_inode_to_client(inode)->wb_wq,
-		   &ceph_inode(inode)->i_wb_work);
-}
-
-static inline int ceph_queue_page_invalidation(struct inode *inode)
-{
-	return queue_work(ceph_inode_to_client(inode)->pg_inv_wq,
-		   &ceph_inode(inode)->i_pg_inv_work);
-}
-
 
 /*
  * we keep buffered readdir results attached to file->private_data
@@ -772,10 +760,11 @@ extern int ceph_readdir_prepopulate(struct ceph_mds_request *req,
 extern int ceph_inode_holds_cap(struct inode *inode, int mask);
 
 extern int ceph_inode_set_size(struct inode *inode, loff_t size);
-extern void ceph_inode_writeback(struct work_struct *work);
-extern void ceph_vmtruncate_work(struct work_struct *work);
 extern void __ceph_do_pending_vmtruncate(struct inode *inode);
-extern void __ceph_queue_vmtruncate(struct inode *inode);
+extern void ceph_queue_vmtruncate(struct inode *inode);
+
+extern void ceph_queue_invalidate(struct inode *inode);
+extern void ceph_queue_writeback(struct inode *inode);
 
 extern int ceph_do_getattr(struct inode *inode, int mask);
 extern int ceph_permission(struct inode *inode, int mask);