
Btrfs: make sure to flush queued bios if write_cache_pages waits

write_cache_pages tries to build up a large bio to stuff down the pipe.
But if it needs to wait for a page lock, it needs to make sure to send
down any pending writes first, so we don't deadlock with anyone who holds
the page lock and is waiting for writeback of things inside the bio.

Dave Sterba triggered this as a deadlock between the autodefrag code and
the extent write_cache_pages code.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
Chris Mason 2011-11-01 10:08:06 -04:00
parent e688b7252f
commit 01d658f2ca
4 changed files with 22 additions and 10 deletions
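The pattern behind the fix is worth spelling out: the writeback loop accumulates pages into one large pending bio, so it must never block on a page lock while that bio is still queued, because the lock holder may itself be waiting for writeback of pages sitting inside that bio. Instead, it tries the lock first and, only if that fails, flushes whatever is queued before taking the blocking lock. Below is a minimal userspace sketch of that try-lock/flush/lock pattern, assuming a pthread mutex in place of the page lock; pending_io, flush_fn and lock_page_or_flush are illustrative names, not identifiers from the kernel sources.

#include <pthread.h>
#include <stdio.h>

/* Illustrative stand-in for the queued-but-unsubmitted I/O carried in 'data'. */
struct pending_io {
	int queued;		/* writes buffered but not yet sent down */
};

/* Hypothetical flush callback: submit everything queued so far, so that
 * whoever we are about to wait on is not stuck behind our own queue. */
static void flush_fn(void *data)
{
	struct pending_io *io = data;

	if (io->queued) {
		printf("flushing %d queued writes before blocking\n", io->queued);
		io->queued = 0;
	}
}

/* The commit's pattern: try the lock without blocking; only if that fails,
 * flush the pending queue and then take the lock for real. */
static void lock_page_or_flush(pthread_mutex_t *page_lock, void *data)
{
	if (pthread_mutex_trylock(page_lock) != 0) {
		flush_fn(data);
		pthread_mutex_lock(page_lock);
	}
}

int main(void)
{
	pthread_mutex_t page_lock = PTHREAD_MUTEX_INITIALIZER;
	struct pending_io io = { .queued = 3 };

	lock_page_or_flush(&page_lock, &io);	/* uncontended here, so no flush */
	/* ... add the page to the bio under the lock ... */
	pthread_mutex_unlock(&page_lock);
	return 0;
}

Build with cc -pthread. The same shape appears twice in the diff below: once for the extent buffer lock (btrfs_try_tree_write_lock) and once for the page lock (trylock_page).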

@@ -2735,7 +2735,8 @@ int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
 	return ret;
 }
 
-int btree_lock_page_hook(struct page *page)
+static int btree_lock_page_hook(struct page *page, void *data,
+				void (*flush_fn)(void *))
 {
 	struct inode *inode = page->mapping->host;
 	struct btrfs_root *root = BTRFS_I(inode)->root;
@@ -2752,7 +2753,10 @@ int btree_lock_page_hook(struct page *page)
 	if (!eb)
 		goto out;
 
-	btrfs_tree_lock(eb);
+	if (!btrfs_try_tree_write_lock(eb)) {
+		flush_fn(data);
+		btrfs_tree_lock(eb);
+	}
 	btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
 
 	if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
@@ -2767,7 +2771,10 @@ int btree_lock_page_hook(struct page *page)
 	btrfs_tree_unlock(eb);
 	free_extent_buffer(eb);
 out:
-	lock_page(page);
+	if (!trylock_page(page)) {
+		flush_fn(data);
+		lock_page(page);
+	}
 	return 0;
 }

@@ -83,8 +83,6 @@ int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
 			     struct btrfs_fs_info *fs_info);
 int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
 		       struct btrfs_root *root);
-
-int btree_lock_page_hook(struct page *page);
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 void btrfs_init_lockdep(void);

@@ -2613,10 +2613,16 @@ retry:
 			 * swizzled back from swapper_space to tmpfs file
 			 * mapping
 			 */
-			if (tree->ops && tree->ops->write_cache_pages_lock_hook)
-				tree->ops->write_cache_pages_lock_hook(page);
-			else
-				lock_page(page);
+			if (tree->ops &&
+			    tree->ops->write_cache_pages_lock_hook) {
+				tree->ops->write_cache_pages_lock_hook(page,
+							       data, flush_fn);
+			} else {
+				if (!trylock_page(page)) {
+					flush_fn(data);
+					lock_page(page);
+				}
+			}
 
 			if (unlikely(page->mapping != mapping)) {
 				unlock_page(page);

@@ -86,7 +86,8 @@ struct extent_io_ops {
 				      struct extent_state *other);
 	void (*split_extent_hook)(struct inode *inode,
 				  struct extent_state *orig, u64 split);
-	int (*write_cache_pages_lock_hook)(struct page *page);
+	int (*write_cache_pages_lock_hook)(struct page *page, void *data,
+					   void (*flush_fn)(void *));
 };
 
 struct extent_io_tree {