mm: PageBuddy and mapcount robustness

Change the _mapcount value indicating PageBuddy from -2 to -128 for
more robustness against page_mapcount() underflows.
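
A page with no mappings has _mapcount == -1, so under the old scheme a single
spurious decrement (one underflow) already lands on -2 and makes the page look
free to the buddy allocator; reaching -128 instead would take 127 consecutive
underflows. A stand-alone sketch of that arithmetic (my illustration, not
kernel code; the OLD_BUDDY_MAGIC name is made up here):

    #include <stdio.h>

    #define OLD_BUDDY_MAGIC           (-2)    /* value before this patch */
    #define PAGE_BUDDY_MAPCOUNT_VALUE (-128)  /* value after this patch */

    int main(void)
    {
            int mapcount = -1;      /* _mapcount of a page with no mappings */

            mapcount--;             /* a single buggy underflow */

            printf("old check (== -2):   mistaken for PageBuddy? %s\n",
                   mapcount == OLD_BUDDY_MAGIC ? "yes" : "no");
            printf("new check (== -128): mistaken for PageBuddy? %s\n",
                   mapcount == PAGE_BUDDY_MAPCOUNT_VALUE ? "yes" : "no");
            return 0;
    }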

Use reset_page_mapcount() instead of __ClearPageBuddy() in bad_page() so
that the previous return value of PageBuddy() is ignored.
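
For context, this is roughly how the two helpers looked in kernels of this era
(paraphrased, not part of this diff): __ClearPageBuddy() asserts via VM_BUG_ON()
that the page really is marked PageBuddy before clearing the marker, whereas
reset_page_mapcount() unconditionally writes -1 back into _mapcount. bad_page()
handles pages whose state cannot be trusted, so the unconditional reset is the
safer choice:

    /* Paraphrased sketch of the helpers of this era; not part of this patch. */
    static inline void __ClearPageBuddy(struct page *page)
    {
            VM_BUG_ON(!PageBuddy(page));      /* trips unless _mapcount holds the magic value */
            atomic_set(&page->_mapcount, -1);
    }

    static inline void reset_page_mapcount(struct page *page)
    {
            atomic_set(&page->_mapcount, -1); /* works no matter what _mapcount holds */
    }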

Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Reported-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -402,16 +402,23 @@ static inline void init_page_count(struct page *page)
 /*
  * PageBuddy() indicate that the page is free and in the buddy system
  * (see mm/page_alloc.c).
+ *
+ * PAGE_BUDDY_MAPCOUNT_VALUE must be <= -2 but better not too close to
+ * -2 so that an underflow of the page_mapcount() won't be mistaken
+ * for a genuine PAGE_BUDDY_MAPCOUNT_VALUE. -128 can be created very
+ * efficiently by most CPU architectures.
  */
+#define PAGE_BUDDY_MAPCOUNT_VALUE (-128)
+
 static inline int PageBuddy(struct page *page)
 {
-	return atomic_read(&page->_mapcount) == -2;
+	return atomic_read(&page->_mapcount) == PAGE_BUDDY_MAPCOUNT_VALUE;
 }
 
 static inline void __SetPageBuddy(struct page *page)
 {
 	VM_BUG_ON(atomic_read(&page->_mapcount) != -1);
-	atomic_set(&page->_mapcount, -2);
+	atomic_set(&page->_mapcount, PAGE_BUDDY_MAPCOUNT_VALUE);
 }
 
 static inline void __ClearPageBuddy(struct page *page)
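
A side note on the new comment's claim that -128 "can be created very
efficiently by most CPU architectures" (my own observation, not text from the
patch): -128 is INT8_MIN, the most negative value that still fits in a
sign-extended 8-bit immediate, so it can typically be materialised by a single
short instruction. A trivial stand-alone check of that property:

    #include <assert.h>
    #include <stdint.h>

    #define PAGE_BUDDY_MAPCOUNT_VALUE (-128)  /* as defined above */

    int main(void)
    {
            /* -128 survives a round-trip through a signed byte, i.e. it is
             * representable as a sign-extended 8-bit immediate. */
            assert(PAGE_BUDDY_MAPCOUNT_VALUE == INT8_MIN);
            assert((int8_t)PAGE_BUDDY_MAPCOUNT_VALUE == PAGE_BUDDY_MAPCOUNT_VALUE);
            return 0;
    }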

--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -286,7 +286,7 @@ static void bad_page(struct page *page)
 	/* Don't complain about poisoned pages */
 	if (PageHWPoison(page)) {
-		__ClearPageBuddy(page);
+		reset_page_mapcount(page); /* remove PageBuddy */
 		return;
 	}
@@ -317,7 +317,7 @@ static void bad_page(struct page *page)
 	dump_stack();
 out:
 	/* Leave bad fields for debug, except PageBuddy could make trouble */
-	__ClearPageBuddy(page);
+	reset_page_mapcount(page); /* remove PageBuddy */
 	add_taint(TAINT_BAD_PAGE);
 }