@@ -678,7 +678,7 @@ void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
 	} else {
 		BUG_ON(mapping->assoc_mapping != buffer_mapping);
 	}
-	if (list_empty(&bh->b_assoc_buffers)) {
+	if (!bh->b_assoc_map) {
 		spin_lock(&buffer_mapping->private_lock);
 		list_move_tail(&bh->b_assoc_buffers,
 				&mapping->private_list);
@@ -794,6 +794,7 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
 {
 	struct buffer_head *bh;
 	struct list_head tmp;
+	struct address_space *mapping;
 	int err = 0, err2;
 
 	INIT_LIST_HEAD(&tmp);
@@ -801,9 +802,14 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
 	spin_lock(lock);
 	while (!list_empty(list)) {
 		bh = BH_ENTRY(list->next);
+		mapping = bh->b_assoc_map;
 		__remove_assoc_queue(bh);
+		/* Avoid race with mark_buffer_dirty_inode() which does
+		 * a lockless check and we rely on seeing the dirty bit */
+		smp_mb();
 		if (buffer_dirty(bh) || buffer_locked(bh)) {
 			list_add(&bh->b_assoc_buffers, &tmp);
+			bh->b_assoc_map = mapping;
 			if (buffer_dirty(bh)) {
 				get_bh(bh);
 				spin_unlock(lock);
@@ -822,8 +828,17 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
 
 	while (!list_empty(&tmp)) {
 		bh = BH_ENTRY(tmp.prev);
-		list_del_init(&bh->b_assoc_buffers);
 		get_bh(bh);
+		mapping = bh->b_assoc_map;
+		__remove_assoc_queue(bh);
+		/* Avoid race with mark_buffer_dirty_inode() which does
+		 * a lockless check and we rely on seeing the dirty bit */
+		smp_mb();
+		if (buffer_dirty(bh)) {
+			list_add(&bh->b_assoc_buffers,
+				 &bh->b_assoc_map->private_list);
+			bh->b_assoc_map = mapping;
+		}
 		spin_unlock(lock);
 		wait_on_buffer(bh);
 		if (!buffer_uptodate(bh))
@@ -1195,7 +1210,7 @@ void __brelse(struct buffer_head * buf)
 void __bforget(struct buffer_head *bh)
 {
 	clear_buffer_dirty(bh);
-	if (!list_empty(&bh->b_assoc_buffers)) {
+	if (bh->b_assoc_map) {
 		struct address_space *buffer_mapping = bh->b_page->mapping;
 
 		spin_lock(&buffer_mapping->private_lock);
@@ -3022,7 +3037,7 @@ drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
 	do {
 		struct buffer_head *next = bh->b_this_page;
 
-		if (!list_empty(&bh->b_assoc_buffers))
+		if (bh->b_assoc_map)
 			__remove_assoc_queue(bh);
 		bh = next;
 	} while (bh != head);
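
The correctness of this change hinges on the added smp_mb() pairing with the lockless !bh->b_assoc_map check in mark_buffer_dirty_inode(): one CPU sets the dirty bit and then tests b_assoc_map, the other clears b_assoc_map and then tests the dirty bit, so at least one side must observe the other's store and take responsibility for (re)linking the buffer. Below is a minimal stand-alone sketch of that store-buffer pattern using C11 atomics in place of the kernel primitives; the harness and all names in it are illustrative, not kernel APIs, and only the ordering pattern is taken from the patch.

	/*
	 * Hypothetical model of the race the smp_mb() above closes.
	 * Build with: cc -std=c11 -pthread sb_model.c
	 */
	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_int dirty;	/* models the BH_Dirty bit */
	static atomic_int assoc_map;	/* models bh->b_assoc_map (1 = non-NULL) */
	static int saw_dirty, saw_map;

	/* fsync_buffers_list() side: unlink the buffer, smp_mb(), test dirty. */
	static void *fsync_side(void *unused)
	{
		atomic_store_explicit(&assoc_map, 0, memory_order_relaxed);
		atomic_thread_fence(memory_order_seq_cst);	/* the added smp_mb() */
		saw_dirty = atomic_load_explicit(&dirty, memory_order_relaxed);
		return NULL;
	}

	/* mark_buffer_dirty_inode() side: set dirty, then the lockless check. */
	static void *mark_side(void *unused)
	{
		atomic_store_explicit(&dirty, 1, memory_order_relaxed);
		atomic_thread_fence(memory_order_seq_cst);	/* stands in for the
							barrier implied by the
							atomic test-and-set of
							the dirty bit */
		saw_map = atomic_load_explicit(&assoc_map, memory_order_relaxed);
		return NULL;
	}

	int main(void)
	{
		for (int i = 0; i < 100000; i++) {
			pthread_t a, b;

			atomic_store(&dirty, 0);
			atomic_store(&assoc_map, 1);
			pthread_create(&a, NULL, fsync_side, NULL);
			pthread_create(&b, NULL, mark_side, NULL);
			pthread_join(a, NULL);
			pthread_join(b, NULL);
			/*
			 * Forbidden outcome: fsync_side() saw the buffer clean
			 * and mark_side() saw it still linked, so neither side
			 * re-links it and a dirty buffer silently drops off the
			 * private_list.
			 */
			if (saw_dirty == 0 && saw_map == 1)
				printf("iteration %d: dirty buffer lost\n", i);
		}
		return 0;
	}

With both fences in place the forbidden outcome cannot occur; dropping either one re-admits it on weakly ordered hardware, which is exactly the lost-buffer race the new smp_mb() closes. On the mark_buffer_dirty_inode() side the kernel does not need an explicit barrier because, as I understand it, the value-returning atomic test-and-set that dirties the buffer already implies one.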