
ceph: clean up readdir caps reservation

Use a global counter for the minimum number of allocated caps instead of
hard coding a check against readdir_max.  This takes into account multiple
client instances, and avoids examining the superblock mount options when a
cap is dropped.

Signed-off-by: Sage Weil <sage@newdream.net>
Sage Weil, 15 years ago
parent commit 85ccce43a3

4 files changed, 33 insertions(+), 13 deletions(-):
  1. fs/ceph/caps.c     (+17, -6)
  2. fs/ceph/debugfs.c  (+7, -6)
  3. fs/ceph/super.c    (+5, -0)
  4. fs/ceph/super.h    (+4, -1)
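
Before the per-file hunks, here is a rough standalone sketch of the pattern the commit introduces. This is not the kernel code: it is a userspace illustration that keeps the counter names from the diff but swaps spin_lock/kmem_cache for a pthread mutex and plain malloc/free, and the main() driver is hypothetical. The point it shows is that a single module-global caps_min_count accumulates each client's contribution, so the free path only compares against that counter and never has to inspect a superblock's mount options.

/*
 * Userspace sketch only -- simplified locking and allocation,
 * not the ceph kernel client.
 */
#include <assert.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t caps_list_lock = PTHREAD_MUTEX_INITIALIZER;
static int caps_total_count;    /* total caps allocated */
static int caps_use_count;      /* in use */
static int caps_reserve_count;  /* unused, reserved */
static int caps_avail_count;    /* unused, unreserved */
static int caps_min_count;      /* keep at least this many (unreserved) */

/* Each client adds its contribution at mount time, removes it at unmount. */
static void adjust_min_caps(int delta)
{
	pthread_mutex_lock(&caps_list_lock);
	caps_min_count += delta;
	assert(caps_min_count >= 0);
	pthread_mutex_unlock(&caps_list_lock);
}

/* Free path: keep a cushion of (reserved + min) caps, really free the rest. */
static void put_cap(void *cap)
{
	pthread_mutex_lock(&caps_list_lock);
	caps_use_count--;
	if (caps_avail_count >= caps_reserve_count + caps_min_count) {
		caps_total_count--;
		free(cap);              /* pool is big enough; give it back */
	} else {
		caps_avail_count++;     /* keep it preallocated for reuse */
		/* a real pool would also push 'cap' onto a free list here */
	}
	pthread_mutex_unlock(&caps_list_lock);
}

int main(void)
{
	adjust_min_caps(1024);   /* client mounts: contribute its max_readdir */

	caps_total_count++;      /* pretend one cap was allocated and used */
	caps_use_count++;
	put_cap(malloc(32));     /* kept: avail (0) < reserved (0) + min (1024) */

	printf("total=%d avail=%d min=%d\n",
	       caps_total_count, caps_avail_count, caps_min_count);

	adjust_min_caps(-1024);  /* client unmounts: remove its contribution */
	return 0;
}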

+ 17 - 6
fs/ceph/caps.c

@@ -128,6 +128,7 @@ static int caps_total_count;        /* total caps allocated */
 static int caps_use_count;          /* in use */
 static int caps_reserve_count;      /* unused, reserved */
 static int caps_avail_count;        /* unused, unreserved */
+static int caps_min_count;          /* keep at least this many (unreserved) */
 
 void __init ceph_caps_init(void)
 {
@@ -149,6 +150,15 @@ void ceph_caps_finalize(void)
 	caps_avail_count = 0;
 	caps_use_count = 0;
 	caps_reserve_count = 0;
+	caps_min_count = 0;
+	spin_unlock(&caps_list_lock);
+}
+
+void ceph_adjust_min_caps(int delta)
+{
+	spin_lock(&caps_list_lock);
+	caps_min_count += delta;
+	BUG_ON(caps_min_count < 0);
 	spin_unlock(&caps_list_lock);
 }
 
@@ -265,12 +275,10 @@ static void put_cap(struct ceph_cap *cap,
 	     caps_reserve_count, caps_avail_count);
 	caps_use_count--;
 	/*
-	 * Keep some preallocated caps around, at least enough to do a
-	 * readdir (which needs to preallocate lots of them), to avoid
-	 * lots of free/alloc churn.
+	 * Keep some preallocated caps around (ceph_min_count), to
+	 * avoid lots of free/alloc churn.
 	 */
-	if (caps_avail_count >= caps_reserve_count +
-	    ceph_client(cap->ci->vfs_inode.i_sb)->mount_args->max_readdir) {
+	if (caps_avail_count >= caps_reserve_count + caps_min_count) {
 		caps_total_count--;
 		kmem_cache_free(ceph_cap_cachep, cap);
 	} else {
@@ -289,7 +297,8 @@ static void put_cap(struct ceph_cap *cap,
 }
 
 void ceph_reservation_status(struct ceph_client *client,
-			     int *total, int *avail, int *used, int *reserved)
+			     int *total, int *avail, int *used, int *reserved,
+			     int *min)
 {
 	if (total)
 		*total = caps_total_count;
@@ -299,6 +308,8 @@ void ceph_reservation_status(struct ceph_client *client,
 		*used = caps_use_count;
 	if (reserved)
 		*reserved = caps_reserve_count;
+	if (min)
+		*min = caps_min_count;
 }
 
 /*

+ 7 - 6
fs/ceph/debugfs.c

@@ -255,14 +255,15 @@ static int osdc_show(struct seq_file *s, void *pp)
 static int caps_show(struct seq_file *s, void *p)
 {
 	struct ceph_client *client = p;
-	int total, avail, used, reserved;
+	int total, avail, used, reserved, min;
 
-	ceph_reservation_status(client, &total, &avail, &used, &reserved);
+	ceph_reservation_status(client, &total, &avail, &used, &reserved, &min);
 	seq_printf(s, "total\t\t%d\n"
-		      "avail\t\t%d\n"
-		      "used\t\t%d\n"
-		      "reserved\t%d\n",
-		   total, avail, used, reserved);
+		   "avail\t\t%d\n"
+		   "used\t\t%d\n"
+		   "reserved\t%d\n"
+		   "min\t%d\n",
+		   total, avail, used, reserved, min);
 	return 0;
 }
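
For reference, with the extra field a read of the per-client "caps" file under debugfs would now look roughly like this (numbers and spacing purely illustrative):

	total           1024
	avail           1000
	used            24
	reserved        0
	min     1024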
 

+ 5 - 0
fs/ceph/super.c

@@ -578,6 +578,9 @@ static struct ceph_client *ceph_create_client(struct ceph_mount_args *args)
 	if (!client->wb_pagevec_pool)
 		goto fail_trunc_wq;
 
+	/* caps */
+	client->min_caps = args->max_readdir;
+	ceph_adjust_min_caps(client->min_caps);
 
 	/* subsystems */
 	err = ceph_monc_init(&client->monc, client);
@@ -619,6 +622,8 @@ static void ceph_destroy_client(struct ceph_client *client)
 	ceph_monc_stop(&client->monc);
 	ceph_osdc_stop(&client->osdc);
 
+	ceph_adjust_min_caps(-client->min_caps);
+
 	ceph_debugfs_client_cleanup(client);
 	destroy_workqueue(client->wb_wq);
 	destroy_workqueue(client->pg_inv_wq);

+ 4 - 1
fs/ceph/super.h

@@ -129,6 +129,8 @@ struct ceph_client {
 
 	int auth_err;
 
+	int min_caps;                  /* min caps i added */
+
 	struct ceph_messenger *msgr;   /* messenger instance */
 	struct ceph_mon_client monc;
 	struct ceph_mds_client mdsc;
@@ -557,11 +559,12 @@ extern int __ceph_caps_mds_wanted(struct ceph_inode_info *ci);
 
 extern void ceph_caps_init(void);
 extern void ceph_caps_finalize(void);
+extern void ceph_adjust_min_caps(int delta);
 extern int ceph_reserve_caps(struct ceph_cap_reservation *ctx, int need);
 extern int ceph_unreserve_caps(struct ceph_cap_reservation *ctx);
 extern void ceph_reservation_status(struct ceph_client *client,
 				    int *total, int *avail, int *used,
-				    int *reserved);
+				    int *reserved, int *min);
 
 static inline struct ceph_client *ceph_inode_to_client(struct inode *inode)
 {