From c03cb3149daed3e411657e3212d05ae27cf1a874 Mon Sep 17 00:00:00 2001
From: Arjan van de Ven
Date: Sun, 11 Oct 2009 10:33:02 -0700
Subject: [PATCH 1/3] x86: Relegate CONFIG_PAT and CONFIG_MTRR configurability to EMBEDDED

MTRR and PAT support (which were added to CPUs over 10 years ago) are
no longer really optional: more and more things depend on PAT just
working, including various drivers and newer versions of X, to say
nothing of MTRR. Having these as regular config options just no longer
makes sense.

This patch relegates CONFIG_MTRR and CONFIG_X86_PAT to the EMBEDDED
category, so ultra-embedded configurations can still disable them if
they really need to.

Also-Suggested-by: Roland Dreier
Signed-off-by: Arjan van de Ven
Cc: Linus Torvalds
Cc: Henrique de Moraes Holschuh
LKML-Reference: <20091011103302.62bded41@infradead.org>
Signed-off-by: Ingo Molnar
---
 arch/x86/Kconfig | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index c876bace8fdc..a67363bbe825 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1321,7 +1321,9 @@ config MATH_EMULATION
           kernel, it won't hurt.
 
 config MTRR
-        bool "MTRR (Memory Type Range Register) support"
+        bool
+        default y
+        prompt "MTRR (Memory Type Range Register) support" if EMBEDDED
         ---help---
           On Intel P6 family processors (Pentium Pro, Pentium II and later)
           the Memory Type Range Registers (MTRRs) may be used to control
@@ -1387,7 +1389,8 @@ config MTRR_SANITIZER_SPARE_REG_NR_DEFAULT
 
 config X86_PAT
         bool
-        prompt "x86 PAT support"
+        default y
+        prompt "x86 PAT support" if EMBEDDED
         depends on MTRR
         ---help---
           Use PAT attributes to setup page level cache control.
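To make the "drivers depend on PAT just working" point concrete:
write-combining mappings set up with ioremap_wc() only deliver WC when
PAT is available; with CONFIG_X86_PAT disabled they quietly fall back
to an uncached mapping, so drivers keep working but streaming writes to
a framebuffer become much slower. A minimal sketch of such a mapping
(the map_framebuffer() helper is hypothetical; ioremap_wc(),
pci_resource_start() and pci_resource_len() are real kernel APIs):

/*
 * Hypothetical PCI driver fragment: map a framebuffer BAR with
 * write-combining.  ioremap_wc() returns a WC mapping when PAT is
 * enabled and degrades to an uncached (UC_MINUS) mapping when it is
 * not -- the driver works either way, which is why the option can
 * default to y and hide its prompt behind EMBEDDED.
 */
#include <linux/pci.h>
#include <linux/io.h>

static void __iomem *map_framebuffer(struct pci_dev *pdev, int bar)
{
        resource_size_t start = pci_resource_start(pdev, bar);
        resource_size_t len = pci_resource_len(pdev, bar);

        /* WC with PAT, UC_MINUS fallback without */
        return ioremap_wc(start, len);
}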
From 83ea05ea69290b2e30da795527dbe304db1e2331 Mon Sep 17 00:00:00 2001
From: Xiaotian Feng
Date: Tue, 10 Nov 2009 17:23:07 +0800
Subject: [PATCH 2/3] x86: pat: Clean up req_type special case for reserve_memtype()

Commit b6ff32d ("x86, PAT: Consolidate code in pat_x_mtrr_type() and
reserve_memtype()") consolidated the code in pat_x_mtrr_type() and
reserve_memtype(), which removed the special case (req_type is -1)
from the PAT-enabled path. Update the comments and the PAT-disabled
path to match.

Signed-off-by: Xiaotian Feng
Cc: Suresh Siddha
Cc: Venkatesh Pallipadi
LKML-Reference: <1257844987-7906-1-git-send-email-dfeng@redhat.com>
Signed-off-by: Ingo Molnar
---
 arch/x86/mm/pat.c | 7 +------
 1 file changed, 1 insertion(+), 6 deletions(-)

diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index e78cd0ec2bcf..81fb75344cd3 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -355,9 +355,6 @@ static int free_ram_pages_type(u64 start, u64 end)
  * - _PAGE_CACHE_UC_MINUS
  * - _PAGE_CACHE_UC
  *
- * req_type will have a special case value '-1', when requester want to inherit
- * the memory type from mtrr (if WB), existing PAT, defaulting to UC_MINUS.
- *
  * If new_type is NULL, function will return an error if it cannot reserve the
  * region with req_type. If new_type is non-NULL, function will return
  * available type in new_type in case of no error. In case of any error
@@ -377,9 +374,7 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
         if (!pat_enabled) {
                 /* This is identical to page table setting without PAT */
                 if (new_type) {
-                        if (req_type == -1)
-                                *new_type = _PAGE_CACHE_WB;
-                        else if (req_type == _PAGE_CACHE_WC)
+                        if (req_type == _PAGE_CACHE_WC)
                                 *new_type = _PAGE_CACHE_UC_MINUS;
                         else
                                 *new_type = req_type & _PAGE_CACHE_MASK;

From 2fb8f4e6a83dcaec15c1dd0ee8a6f618e7ece7f0 Mon Sep 17 00:00:00 2001
From: Xiaotian Feng
Date: Tue, 10 Nov 2009 17:23:25 +0800
Subject: [PATCH 3/3] x86: pat: Remove ioremap_default()

Commit b6ff32d ("x86, PAT: Consolidate code in pat_x_mtrr_type() and
reserve_memtype()") consolidated reserve_memtype() and
pat_x_mtrr_type(), which made ioremap_default() identical to
ioremap_cache(). Remove the redundant function and convert its only
caller to use ioremap_cache().

Signed-off-by: Xiaotian Feng
Cc: Suresh Siddha
Cc: Venkatesh Pallipadi
LKML-Reference: <1257845005-7938-1-git-send-email-dfeng@redhat.com>
Signed-off-by: Ingo Molnar
---
 arch/x86/mm/ioremap.c | 26 +------------------------
 1 file changed, 1 insertion(+), 25 deletions(-)

diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 334e63ca7b2b..3af10dee0147 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -283,30 +283,6 @@ void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
 }
 EXPORT_SYMBOL(ioremap_cache);
 
-static void __iomem *ioremap_default(resource_size_t phys_addr,
-                                     unsigned long size)
-{
-        unsigned long flags;
-        void __iomem *ret;
-        int err;
-
-        /*
-         * - WB for WB-able memory and no other conflicting mappings
-         * - UC_MINUS for non-WB-able memory with no other conflicting mappings
-         * - Inherit from confliting mappings otherwise
-         */
-        err = reserve_memtype(phys_addr, phys_addr + size,
-                              _PAGE_CACHE_WB, &flags);
-        if (err < 0)
-                return NULL;
-
-        ret = __ioremap_caller(phys_addr, size, flags,
-                               __builtin_return_address(0));
-
-        free_memtype(phys_addr, phys_addr + size);
-        return ret;
-}
-
 void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
                            unsigned long prot_val)
 {
@@ -382,7 +358,7 @@ void *xlate_dev_mem_ptr(unsigned long phys)
         if (page_is_ram(start >> PAGE_SHIFT))
                 return __va(phys);
 
-        addr = (void __force *)ioremap_default(start, PAGE_SIZE);
+        addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
         if (addr)
                 addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));
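For context on the call site converted above: xlate_dev_mem_ptr() is
the helper the /dev/mem code uses to obtain a temporary kernel mapping
of an arbitrary physical page, and after this patch the non-RAM case
boils down to an ioremap_cache()/iounmap() pair. A minimal sketch of
how a caller pairs it with unxlate_dev_mem_ptr() (the read_phys_page()
helper is hypothetical and simplified from the /dev/mem read path;
xlate_dev_mem_ptr() and unxlate_dev_mem_ptr() are the real x86 APIs):

/*
 * Simplified /dev/mem-style consumer of xlate_dev_mem_ptr(): map one
 * physical page, copy out of it, then drop the mapping again with
 * unxlate_dev_mem_ptr().  The caller must keep phys + count within a
 * single page, as the real /dev/mem read loop does.
 */
#include <linux/io.h>
#include <linux/errno.h>
#include <linux/string.h>

static int read_phys_page(unsigned long phys, void *buf, size_t count)
{
        void *ptr = xlate_dev_mem_ptr(phys);    /* ioremap_cache() for non-RAM */

        if (!ptr)
                return -EFAULT;

        memcpy(buf, ptr, count);
        unxlate_dev_mem_ptr(phys, ptr);         /* iounmap() for non-RAM */
        return 0;
}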