Btrfs: Don't drop extent_map cache during releasepage on the btree inode

The btree inode should only have a single extent_map in the cache;
it doesn't make sense to ever drop it.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
Author: Chris Mason, 2008-04-18 10:29:50 -04:00
parent 7ae9c09d8f
commit 7b13b7b119
3 changed files with 44 additions and 20 deletions
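
The change in brief: btree_releasepage() stops calling try_release_extent_mapping(), which can also drop cached extent_maps, and instead calls a new helper, try_release_extent_state(), which only clears per-range state bits, so the btree inode's single cached extent_map survives page release. A minimal standalone sketch of that split, assuming nothing beyond the commit itself (ordinary userspace C, not kernel code; every name below is invented for illustration):

	/* toy_release.c -- models the state-only vs. mapping-dropping release */
	#include <stdbool.h>
	#include <stdio.h>

	struct toy_inode {
		bool map_cached;      /* stands in for the btree inode's one extent_map */
		unsigned state_bits;  /* stands in for EXTENT_* bits on the page range */
	};

	#define TOY_UPTODATE 0x1
	#define TOY_IOBITS   0x6  /* "locked or under IO" */

	/* like try_release_extent_state(): refuse if busy, else clear state only */
	static int toy_release_state(struct toy_inode *i)
	{
		if (i->state_bits & TOY_IOBITS)
			return 0;
		i->state_bits &= ~TOY_UPTODATE;
		return 1;
	}

	/* like try_release_extent_mapping(): drops the cached mapping as well */
	static int toy_release_mapping(struct toy_inode *i)
	{
		i->map_cached = false;  /* exactly what the btree inode must avoid */
		return toy_release_state(i);
	}

	int main(void)
	{
		struct toy_inode btree = { .map_cached = true, .state_bits = TOY_UPTODATE };
		struct toy_inode file  = { .map_cached = true, .state_bits = TOY_UPTODATE };

		toy_release_state(&btree);    /* the new btree_releasepage() path */
		toy_release_mapping(&file);   /* the regular-file path */
		printf("btree mapping cached=%d, file mapping cached=%d\n",
		       btree.map_cached, file.map_cached);
		return 0;
	}

The point of the split shows in main(): releasing the btree page leaves its mapping cached, which is the behavior the commit message asks for.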

fs/btrfs/disk-io.c

@@ -76,13 +76,12 @@ struct extent_map *btree_get_extent(struct inode *inode, struct page *page,
 	struct extent_map *em;
 	int ret;
 
-again:
 	spin_lock(&em_tree->lock);
 	em = lookup_extent_mapping(em_tree, start, len);
 	spin_unlock(&em_tree->lock);
-	if (em) {
+	if (em)
 		goto out;
-	}
+
 	em = alloc_extent_map(GFP_NOFS);
 	if (!em) {
 		em = ERR_PTR(-ENOMEM);
@@ -95,15 +94,21 @@ again:
 	spin_lock(&em_tree->lock);
 	ret = add_extent_mapping(em_tree, em);
-	spin_unlock(&em_tree->lock);
 	if (ret == -EEXIST) {
 		free_extent_map(em);
-		em = NULL;
-		goto again;
+		em = lookup_extent_mapping(em_tree, start, len);
+		if (em)
+			ret = 0;
+		else
+			ret = -EIO;
 	} else if (ret) {
-		em = ERR_PTR(ret);
+		free_extent_map(em);
+		em = NULL;
 	}
+	spin_unlock(&em_tree->lock);
+	if (ret)
+		em = ERR_PTR(ret);
 out:
 	return em;
 }
@@ -496,7 +501,7 @@ static int btree_releasepage(struct page *page, gfp_t gfp_flags)
 	}
 	tree = &BTRFS_I(page->mapping->host)->io_tree;
 	map = &BTRFS_I(page->mapping->host)->extent_tree;
-	ret = try_release_extent_mapping(map, tree, page, gfp_flags);
+	ret = try_release_extent_state(map, tree, page, gfp_flags);
 	if (ret == 1) {
 		invalidate_extent_lru(tree, page_offset(page), PAGE_CACHE_SIZE);
 		ClearPagePrivate(page);
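
The first two hunks above also rework btree_get_extent(): rather than unlocking after add_extent_mapping() and retrying from the top via goto again when the insert returns -EEXIST, the function now keeps em_tree->lock held and resolves the lost race with a second lookup_extent_mapping() under the same lock. A userspace sketch of that insert-or-lookup-under-one-lock shape (one-slot "tree", pthread mutex, all names invented):

	#include <errno.h>
	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;
	static int slot_used;
	static int slot_val;

	static int add_mapping(int val)  /* like add_extent_mapping() */
	{
		if (slot_used)
			return -EEXIST;
		slot_used = 1;
		slot_val = val;
		return 0;
	}

	static int get_mapping(int val, int *out)
	{
		int ret;

		pthread_mutex_lock(&tree_lock);
		ret = add_mapping(val);
		if (ret == -EEXIST) {
			/* lost the race: reuse the existing entry instead of
			 * unlocking and retrying from the top */
			*out = slot_val;
			ret = 0;
		} else if (!ret) {
			*out = slot_val;
		}
		pthread_mutex_unlock(&tree_lock);
		return ret;
	}

	int main(void)
	{
		int v = 0;
		printf("insert: ret=%d val=%d\n", get_mapping(42, &v), v);
		printf("race:   ret=%d val=%d\n", get_mapping(99, &v), v);
		return 0;
	}

Holding the lock across the insert and the fallback lookup is what lets the retry loop (and its again: label) go away.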

fs/btrfs/extent_io.c

@@ -2463,6 +2463,31 @@ err:
 }
 EXPORT_SYMBOL(extent_prepare_write);
 
+/*
+ * a helper for releasepage, this tests for areas of the page that
+ * are locked or under IO and drops the related state bits if it is safe
+ * to drop the page.
+ */
+int try_release_extent_state(struct extent_map_tree *map,
+			     struct extent_io_tree *tree, struct page *page,
+			     gfp_t mask)
+{
+	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
+	u64 end = start + PAGE_CACHE_SIZE - 1;
+	int ret = 1;
+
+	if (test_range_bit(tree, start, end, EXTENT_IOBITS, 0))
+		ret = 0;
+	else {
+		if ((mask & GFP_NOFS) == GFP_NOFS)
+			mask = GFP_NOFS;
+		clear_extent_bit(tree, start, end, EXTENT_UPTODATE,
+				 1, 1, mask);
+	}
+	return ret;
+}
+EXPORT_SYMBOL(try_release_extent_state);
+
 /*
  * a helper for releasepage. As long as there are no locked extents
  * in the range corresponding to the page, both state records and extent
@@ -2475,8 +2500,7 @@ int try_release_extent_mapping(struct extent_map_tree *map,
 	struct extent_map *em;
 	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
 	u64 end = start + PAGE_CACHE_SIZE - 1;
-	u64 orig_start = start;
-	int ret = 1;
+
 	if ((mask & __GFP_WAIT) &&
 	    page->mapping->host->i_size > 16 * 1024 * 1024) {
 		u64 len;
@@ -2507,15 +2531,7 @@ int try_release_extent_mapping(struct extent_map_tree *map,
 			free_extent_map(em);
 		}
 	}
-	if (test_range_bit(tree, orig_start, end, EXTENT_IOBITS, 0))
-		ret = 0;
-	else {
-		if ((mask & GFP_NOFS) == GFP_NOFS)
-			mask = GFP_NOFS;
-		clear_extent_bit(tree, orig_start, end, EXTENT_UPTODATE,
-				 1, 1, mask);
-	}
-	return ret;
+	return try_release_extent_state(map, tree, page, mask);
 }
 EXPORT_SYMBOL(try_release_extent_mapping);
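
Design note on the refactor above: the busy-range check and EXTENT_UPTODATE clearing that used to sit at the tail of try_release_extent_mapping() now live in try_release_extent_state(), which try_release_extent_mapping() tail-calls. Regular files keep their old releasepage behavior (unpinned extent_maps dropped, then state cleared), while the btree inode can call the state-only half directly and never lose its single cached extent_map.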

fs/btrfs/extent_io.h

@@ -110,6 +110,9 @@ void extent_io_tree_empty_lru(struct extent_io_tree *tree);
 int try_release_extent_mapping(struct extent_map_tree *map,
 			       struct extent_io_tree *tree, struct page *page,
 			       gfp_t mask);
+int try_release_extent_state(struct extent_map_tree *map,
+			     struct extent_io_tree *tree, struct page *page,
+			     gfp_t mask);
 int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask);
 int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask);
 int extent_read_full_page(struct extent_io_tree *tree, struct page *page,