@@ -19,7 +19,6 @@
 #include <linux/list.h>
 #include <linux/wait.h>
 #include <linux/module.h>
-#include <linux/rwsem.h>
 #include <asm/uaccess.h>
 #include <linux/seq_file.h>
 #include <linux/debugfs.h>
@@ -60,7 +59,6 @@ static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl);
 #define GLOCK_BUG_ON(gl,x) do { if (unlikely(x)) { __dump_glock(NULL, gl); BUG(); } } while(0)
 static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);
 
-static DECLARE_RWSEM(gfs2_umount_flush_sem);
 static struct dentry *gfs2_root;
 static struct workqueue_struct *glock_workqueue;
 struct workqueue_struct *gfs2_delete_workqueue;
@@ -154,12 +152,14 @@ static unsigned int gl_hash(const struct gfs2_sbd *sdp,
 static void glock_free(struct gfs2_glock *gl)
 {
 	struct gfs2_sbd *sdp = gl->gl_sbd;
-	struct inode *aspace = gl->gl_aspace;
+	struct address_space *mapping = gfs2_glock2aspace(gl);
+	struct kmem_cache *cachep = gfs2_glock_cachep;
 
-	if (aspace)
-		gfs2_aspace_put(aspace);
+	GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
 	trace_gfs2_glock_put(gl);
-	sdp->sd_lockstruct.ls_ops->lm_put_lock(gfs2_glock_cachep, gl);
+	if (mapping)
+		cachep = gfs2_glock_aspace_cachep;
+	sdp->sd_lockstruct.ls_ops->lm_put_lock(cachep, gl);
 }
 
 /**
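
Note: glock_free() above relies on gfs2_glock2aspace(), introduced elsewhere in this patch (fs/gfs2/glock.h). A minimal sketch of that helper, assuming GLOF_ASPACE glocks are allocated with the struct address_space placed immediately after the glock itself:

static inline struct address_space *gfs2_glock2aspace(struct gfs2_glock *gl)
{
	/* GLOF_ASPACE glocks come from a larger slab cache that is sized
	   to hold a struct address_space directly after the glock. */
	if (gl->gl_ops->go_flags & GLOF_ASPACE)
		return (struct address_space *)(gl + 1);
	return NULL;
}

A non-NULL mapping therefore implies the object came from the larger aspace cache, which is how glock_free() picks the matching cache for lm_put_lock().
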
@@ -712,7 +712,6 @@ static void glock_work_func(struct work_struct *work)
 		finish_xmote(gl, gl->gl_reply);
 		drop_ref = 1;
 	}
-	down_read(&gfs2_umount_flush_sem);
 	spin_lock(&gl->gl_spin);
 	if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
 	    gl->gl_state != LM_ST_UNLOCKED &&
@@ -725,7 +724,6 @@ static void glock_work_func(struct work_struct *work)
 	}
 	run_queue(gl, 0);
 	spin_unlock(&gl->gl_spin);
-	up_read(&gfs2_umount_flush_sem);
 	if (!delay ||
 	    queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
 		gfs2_glock_put(gl);
@@ -750,10 +748,11 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
 		   const struct gfs2_glock_operations *glops, int create,
 		   struct gfs2_glock **glp)
 {
+	struct super_block *s = sdp->sd_vfs;
 	struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type };
 	struct gfs2_glock *gl, *tmp;
 	unsigned int hash = gl_hash(sdp, &name);
-	int error;
+	struct address_space *mapping;
 
 	read_lock(gl_lock_addr(hash));
 	gl = search_bucket(hash, sdp, &name);
@@ -765,7 +764,10 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
 	if (!create)
 		return -ENOENT;
 
-	gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL);
+	if (glops->go_flags & GLOF_ASPACE)
+		gl = kmem_cache_alloc(gfs2_glock_aspace_cachep, GFP_KERNEL);
+	else
+		gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL);
 	if (!gl)
 		return -ENOMEM;
 
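
Note: the GLOF_ASPACE branch above presupposes a second slab cache sized for the glock plus its embedded mapping. A sketch of how the two caches would be set up at module init (the patch does this in fs/gfs2/main.c; the init-once constructor names here are assumptions):

	/* Plain glocks: just the glock structure itself. */
	gfs2_glock_cachep = kmem_cache_create("gfs2_glock",
					      sizeof(struct gfs2_glock),
					      0, 0, gfs2_init_glock_once);

	/* GLOF_ASPACE glocks: an address_space follows the glock, so
	   gfs2_glock2aspace() can find it at (gl + 1). */
	gfs2_glock_aspace_cachep = kmem_cache_create("gfs2_glock(aspace)",
						     sizeof(struct gfs2_glock) +
						     sizeof(struct address_space),
						     0, 0, gfs2_init_gl_aspace_once);
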
@@ -784,18 +786,18 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
 	gl->gl_tchange = jiffies;
 	gl->gl_object = NULL;
 	gl->gl_sbd = sdp;
-	gl->gl_aspace = NULL;
 	INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
 	INIT_WORK(&gl->gl_delete, delete_work_func);
 
-	/* If this glock protects actual on-disk data or metadata blocks,
-	   create a VFS inode to manage the pages/buffers holding them. */
-	if (glops == &gfs2_inode_glops || glops == &gfs2_rgrp_glops) {
-		gl->gl_aspace = gfs2_aspace_get(sdp);
-		if (!gl->gl_aspace) {
-			error = -ENOMEM;
-			goto fail;
-		}
+	mapping = gfs2_glock2aspace(gl);
+	if (mapping) {
+		mapping->a_ops = &gfs2_meta_aops;
+		mapping->host = s->s_bdev->bd_inode;
+		mapping->flags = 0;
+		mapping_set_gfp_mask(mapping, GFP_NOFS);
+		mapping->assoc_mapping = NULL;
+		mapping->backing_dev_info = s->s_bdi;
+		mapping->writeback_index = 0;
 	}
 
 	write_lock(gl_lock_addr(hash));
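
Note: whether a glock carries an embedded mapping is now decided purely by the GLOF_ASPACE flag, replacing the explicit glops == &gfs2_inode_glops || glops == &gfs2_rgrp_glops test, so the inode and resource-group glops tables presumably set it in fs/gfs2/glops.c. An illustrative fragment (the other callbacks in the real table are omitted):

const struct gfs2_glock_operations gfs2_inode_glops = {
	/* ... go_xmote_th, go_inval, etc. unchanged ... */
	.go_type = LM_TYPE_INODE,
	.go_flags = GLOF_ASPACE,
};
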
@@ -812,10 +814,6 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
 	*glp = gl;
 
 	return 0;
-
-fail:
-	kmem_cache_free(gfs2_glock_cachep, gl);
-	return error;
 }
 
 /**
@@ -1510,35 +1508,10 @@ void gfs2_glock_thaw(struct gfs2_sbd *sdp)
 
 void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
 {
-	unsigned long t;
 	unsigned int x;
-	int cont;
 
-	t = jiffies;
-
-	for (;;) {
-		cont = 0;
-		for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
-			if (examine_bucket(clear_glock, sdp, x))
-				cont = 1;
-		}
-
-		if (!cont)
-			break;
-
-		if (time_after_eq(jiffies,
-				  t + gfs2_tune_get(sdp, gt_stall_secs) * HZ)) {
-			fs_warn(sdp, "Unmount seems to be stalled. "
-				     "Dumping lock state...\n");
-			gfs2_dump_lockstate(sdp);
-			t = jiffies;
-		}
-
-		down_write(&gfs2_umount_flush_sem);
-		invalidate_inodes(sdp->sd_vfs);
-		up_write(&gfs2_umount_flush_sem);
-		msleep(10);
-	}
+	for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
+		examine_bucket(clear_glock, sdp, x);
 	flush_workqueue(glock_workqueue);
 	wait_event(sdp->sd_glock_wait, atomic_read(&sdp->sd_glock_disposal) == 0);
 	gfs2_dump_lockstate(sdp);
@@ -1685,7 +1658,7 @@ static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
 	dtime *= 1000000/HZ; /* demote time in uSec */
 	if (!test_bit(GLF_DEMOTE, &gl->gl_flags))
 		dtime = 0;
-	gfs2_print_dbg(seq, "G: s:%s n:%u/%llu f:%s t:%s d:%s/%llu a:%d r:%d\n",
+	gfs2_print_dbg(seq, "G: s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d r:%d\n",
 		       state2str(gl->gl_state),
 		       gl->gl_name.ln_type,
 		       (unsigned long long)gl->gl_name.ln_number,