x86: revert "x86: CPA: avoid split of alias mappings"

Revert:

  commit 8be8f54bae
  Author: Thomas Gleixner <tglx@linutronix.de>
  Date:   Sat Feb 23 20:43:21 2008 +0100

      x86: CPA: avoid split of alias mappings

because it clearly mishandles the case when __change_page_attr(), called
from __change_page_attr_set_clr(), changes cpa->processed to 1 and
cpa_process_alias(cpa) is executed right after that.

This crashes my x86-64 test box early in the boot process
(ref. http://bugzilla.kernel.org/show_bug.cgi?id=10140#c4).
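
For reference, the bookkeeping contract that the revert restores in
__change_page_attr_set_clr() can be shown with a minimal stand-alone
sketch; the helper names change_one()/change_range() and the single-page
value are invented for illustration and this is not the kernel source.
Each iteration the callee may only shrink cpa->numpages to the number of
pages it actually handled, and the loop then advances by exactly that
amount; anything that rewrites the field between the primary call and the
bookkeeping (such as the alias pass described above) breaks that
accounting.

#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Simplified stand-in for the kernel's struct cpa_data after the revert. */
struct cpa_data {
	unsigned long	vaddr;
	int		numpages;	/* in: pages still to do; out: pages handled */
};

/*
 * Stand-in for __change_page_attr(): it may only shrink cpa->numpages to
 * what it really handled (1 for a 4k update, or the pages covered by a
 * preserved large page).  The value 1 is just the illustrative case.
 */
static int change_one(struct cpa_data *cpa)
{
	int handled = 1;

	if (cpa->numpages < handled)
		handled = cpa->numpages;
	cpa->numpages = handled;
	return 0;
}

/* Shape of the loop in __change_page_attr_set_clr() after the revert. */
static int change_range(struct cpa_data *cpa)
{
	int numpages = cpa->numpages;

	while (numpages) {
		cpa->numpages = numpages;	/* upper bound for this round */

		change_one(cpa);

		/* Mirrors the kernel's BUG_ON(): only holds if nothing else
		 * rewrote cpa->numpages since the call above. */
		assert(cpa->numpages > 0 && cpa->numpages <= numpages);

		numpages -= cpa->numpages;
		cpa->vaddr += cpa->numpages * PAGE_SIZE;
	}
	return 0;
}

int main(void)
{
	struct cpa_data cpa = { .vaddr = 0, .numpages = 4 };

	change_range(&cpa);
	printf("advanced by %lu pages\n", cpa.vaddr / PAGE_SIZE);
	return 0;
}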

Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Authored by Rafael J. Wysocki on 2008-03-03 01:17:37 +01:00, committed by Ingo Molnar
parent 038f2f7255
commit 9b5cf48b06
1 changed file with 10 additions and 11 deletions

arch/x86/mm/pageattr.c

@@ -26,7 +26,6 @@ struct cpa_data {
 	pgprot_t	mask_set;
 	pgprot_t	mask_clr;
 	int		numpages;
-	int		processed;
 	int		flushtlb;
 	unsigned long	pfn;
 };
@@ -291,8 +290,8 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
 	 */
 	nextpage_addr = (address + psize) & pmask;
 	numpages = (nextpage_addr - address) >> PAGE_SHIFT;
-	if (numpages < cpa->processed)
-		cpa->processed = numpages;
+	if (numpages < cpa->numpages)
+		cpa->numpages = numpages;
 
 	/*
 	 * We are safe now. Check whether the new pgprot is the same:
@@ -319,7 +318,7 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
 	 */
 	addr = address + PAGE_SIZE;
 	pfn++;
-	for (i = 1; i < cpa->processed; i++, addr += PAGE_SIZE, pfn++) {
+	for (i = 1; i < cpa->numpages; i++, addr += PAGE_SIZE, pfn++) {
 		pgprot_t chk_prot = static_protections(new_prot, addr, pfn);
 
 		if (pgprot_val(chk_prot) != pgprot_val(new_prot))
@@ -343,7 +342,7 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
 	 * that we limited the number of possible pages already to
 	 * the number of pages in the large page.
 	 */
-	if (address == (nextpage_addr - psize) && cpa->processed == numpages) {
+	if (address == (nextpage_addr - psize) && cpa->numpages == numpages) {
 		/*
 		 * The address is aligned and the number of pages
 		 * covers the full page.
@@ -573,7 +572,7 @@ repeat:
 			set_pte_atomic(kpte, new_pte);
 			cpa->flushtlb = 1;
 		}
-		cpa->processed = 1;
+		cpa->numpages = 1;
 		return 0;
 	}
 
@@ -584,7 +583,7 @@ repeat:
 	do_split = try_preserve_large_page(kpte, address, cpa);
 	/*
 	 * When the range fits into the existing large page,
-	 * return. cp->processed and cpa->tlbflush have been updated in
+	 * return. cp->numpages and cpa->tlbflush have been updated in
 	 * try_large_page:
 	 */
 	if (do_split <= 0)
@@ -663,7 +662,7 @@ static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
 		 * Store the remaining nr of pages for the large page
 		 * preservation check.
 		 */
-		cpa->numpages = cpa->processed = numpages;
+		cpa->numpages = numpages;
 
 		ret = __change_page_attr(cpa, checkalias);
 		if (ret)
@@ -680,9 +679,9 @@ static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
 		 * CPA operation. Either a large page has been
 		 * preserved or a single page update happened.
 		 */
-		BUG_ON(cpa->processed > numpages);
-		numpages -= cpa->processed;
-		cpa->vaddr += cpa->processed * PAGE_SIZE;
+		BUG_ON(cpa->numpages > numpages);
+		numpages -= cpa->numpages;
+		cpa->vaddr += cpa->numpages * PAGE_SIZE;
 	}
 	return 0;
 }