dect / linux-2.6

mm: change __remove_from_page_cache()

Now that remove_from_page_cache() has been renamed to
delete_from_page_cache(), rename the internal page cache handling
function as well, for consistency with __remove_from_swap_cache() and
remove_from_swap_cache().

Signed-off-by: Minchan Kim <minchan.kim@gmail.com>
Cc: Christoph Hellwig <hch@infradead.org>
Acked-by: Hugh Dickins <hughd@google.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Reviewed-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Minchan Kim 2011-03-22 16:32:44 -07:00 committed by Linus Torvalds
parent 702cfbf93a
commit e64a782fec
5 changed files with 8 additions and 8 deletions
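For reference, the split between the two functions is unchanged by the rename: __delete_from_page_cache() still expects the page to be locked and the mapping's tree_lock to be held by the caller, while delete_from_page_cache() is the public entry point that takes the lock itself. A minimal sketch of the wrapper as it reads after this patch (based on the mm/filemap.c hunk below; the lines after mem_cgroup_uncharge_cache_page() are reproduced from memory of the surrounding 2.6.38-era source and are not part of this diff):

void delete_from_page_cache(struct page *page)
{
	struct address_space *mapping = page->mapping;
	void (*freepage)(struct page *);

	/* Caller must hold a reference and have the page locked. */
	BUG_ON(!PageLocked(page));

	freepage = mapping->a_ops->freepage;
	spin_lock_irq(&mapping->tree_lock);
	__delete_from_page_cache(page);	/* was __remove_from_page_cache() */
	spin_unlock_irq(&mapping->tree_lock);
	mem_cgroup_uncharge_cache_page(page);

	if (freepage)
		freepage(page);
	page_cache_release(page);
}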

include/linux/pagemap.h

@@ -456,7 +456,7 @@ int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
 int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
 				pgoff_t index, gfp_t gfp_mask);
 extern void delete_from_page_cache(struct page *page);
-extern void __remove_from_page_cache(struct page *page);
+extern void __delete_from_page_cache(struct page *page);
 int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);
 /*

mm/filemap.c

@@ -108,11 +108,11 @@
  */
 /*
- * Remove a page from the page cache and free it. Caller has to make
+ * Delete a page from the page cache and free it. Caller has to make
  * sure the page is locked and that nobody else uses it - or that usage
  * is safe. The caller must hold the mapping's tree_lock.
  */
-void __remove_from_page_cache(struct page *page)
+void __delete_from_page_cache(struct page *page)
 {
 	struct address_space *mapping = page->mapping;
@@ -154,7 +154,7 @@ void delete_from_page_cache(struct page *page)
 	freepage = mapping->a_ops->freepage;
 	spin_lock_irq(&mapping->tree_lock);
-	__remove_from_page_cache(page);
+	__delete_from_page_cache(page);
 	spin_unlock_irq(&mapping->tree_lock);
 	mem_cgroup_uncharge_cache_page(page);
@@ -444,7 +444,7 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
 		new->index = offset;
 		spin_lock_irq(&mapping->tree_lock);
-		__remove_from_page_cache(old);
+		__delete_from_page_cache(old);
 		error = radix_tree_insert(&mapping->page_tree, offset, new);
 		BUG_ON(error);
 		mapping->nrpages++;

mm/memory-failure.c

@@ -1130,7 +1130,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
 	/*
 	 * Now take care of user space mappings.
-	 * Abort on fail: __remove_from_page_cache() assumes unmapped page.
+	 * Abort on fail: __delete_from_page_cache() assumes unmapped page.
 	 */
 	if (hwpoison_user_mappings(p, pfn, trapno) != SWAP_SUCCESS) {
 		printk(KERN_ERR "MCE %#lx: cannot unmap page, give up\n", pfn);

mm/truncate.c

@@ -388,7 +388,7 @@ invalidate_complete_page2(struct address_space *mapping, struct page *page)
 	clear_page_mlock(page);
 	BUG_ON(page_has_private(page));
-	__remove_from_page_cache(page);
+	__delete_from_page_cache(page);
 	spin_unlock_irq(&mapping->tree_lock);
 	mem_cgroup_uncharge_cache_page(page);

mm/vmscan.c

@@ -514,7 +514,7 @@ static int __remove_mapping(struct address_space *mapping, struct page *page)
 		freepage = mapping->a_ops->freepage;
-		__remove_from_page_cache(page);
+		__delete_from_page_cache(page);
 		spin_unlock_irq(&mapping->tree_lock);
 		mem_cgroup_uncharge_cache_page(page);