diff --git a/arch/i386/mm/pageattr.c b/arch/i386/mm/pageattr.c
index 0887b34bc59b..353a836ed63c 100644
--- a/arch/i386/mm/pageattr.c
+++ b/arch/i386/mm/pageattr.c
@@ -229,8 +229,8 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
 	if (PageHighMem(page))
 		return;
 	if (!enable)
-		mutex_debug_check_no_locks_freed(page_address(page),
-						numpages * PAGE_SIZE);
+		debug_check_no_locks_freed(page_address(page),
+						numpages * PAGE_SIZE);
 
 	/* the return value is ignored - the calls cannot fail,
 	 * large pages are disabled at boot time.
diff --git a/include/linux/mm.h b/include/linux/mm.h
index ff1fa87df8d0..0ac255720f34 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1030,13 +1030,19 @@ static inline void vm_stat_account(struct mm_struct *mm,
 }
 #endif /* CONFIG_PROC_FS */
 
+static inline void
+debug_check_no_locks_freed(const void *from, unsigned long len)
+{
+	mutex_debug_check_no_locks_freed(from, len);
+}
+
 #ifndef CONFIG_DEBUG_PAGEALLOC
 static inline void
 kernel_map_pages(struct page *page, int numpages, int enable)
 {
 	if (!PageHighMem(page) && !enable)
-		mutex_debug_check_no_locks_freed(page_address(page),
-					numpages * PAGE_SIZE);
+		debug_check_no_locks_freed(page_address(page),
+					numpages * PAGE_SIZE);
 }
 #endif
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index dafd12ec7a0c..084a2de7e52a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -446,8 +446,8 @@ static void __free_pages_ok(struct page *page, unsigned int order)
 	arch_free_page(page, order);
 	if (!PageHighMem(page))
-		mutex_debug_check_no_locks_freed(page_address(page),
-						PAGE_SIZE<
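
For readers unfamiliar with the hook, the new include/linux/mm.h wrapper only generalizes the entry point: callers hand it the start address and length of the memory being freed, and for now the check is simply forwarded to the existing mutex debugging code. The following is a minimal userspace sketch of that call pattern, not kernel code; the printf() stub stands in for the real mutex_debug_check_no_locks_freed(), and the malloc()'d buffer stands in for page_address(page) at a call site like __free_pages_ok().

/* Userspace illustration of the wrapper pattern from this patch. */
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL	/* illustrative page size */

/* Stand-in for the kernel's mutex debugging checker. */
static void mutex_debug_check_no_locks_freed(const void *from, unsigned long len)
{
	printf("checking that no held locks live in %lu bytes at %p\n",
	       len, (const void *)from);
}

/* Generic entry point, mirroring the inline added to include/linux/mm.h. */
static inline void debug_check_no_locks_freed(const void *from, unsigned long len)
{
	mutex_debug_check_no_locks_freed(from, len);
}

int main(void)
{
	int order = 2;				/* freeing a 1 << order page block */
	unsigned long len = PAGE_SIZE << order;	/* length of the freed range */
	void *pages = malloc(len);		/* stands in for page_address(page) */

	/* Call sites now use the generic name rather than the mutex-specific one. */
	debug_check_no_locks_freed(pages, len);
	free(pages);
	return 0;
}

Because the generic name is a trivial static inline, call sites such as kernel_map_pages() and __free_pages_ok() no longer reference the mutex debugging internals directly, so a different or broader lock checker can later be slotted in behind debug_check_no_locks_freed() without touching them again.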