powerpc/mm: update radix__ptep_set_access_flags to not do full mm tlb flush

When we are updating a pte, we only need to flush the TLB entry that
maps that pte. Right now we do a full mm flush because we don't track
the page size. Now that we have the page size details in the pte, use
them to do an optimized single-page flush.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
commit b3603e174f (parent 6d3a0379eb)
Author: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>, 2016-11-28 11:47:02 +05:30
Committer: Michael Ellerman <mpe@ellerman.id.au>
7 files changed, 15 insertions(+), 15 deletions(-)
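
For orientation, here is radix__ptep_set_access_flags() as it reads with
the patch applied, reassembled from the hunks below. This is a sketch,
not a verbatim extract: the lines the hunks don't show (the POWER9 DD1
feature check, the pte clear via __radix_pte_update(), and the ptesync
barrier) are recalled from the kernel of this era and may differ in detail.

/*
 * Sketch: the radix access-flags update after this patch.  Lines
 * marked "assumed" are not visible in the hunks below.
 */
static inline void radix__ptep_set_access_flags(struct mm_struct *mm,
						pte_t *ptep, pte_t entry,
						unsigned long address)
{
	unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_ACCESSED |
					      _PAGE_RW | _PAGE_EXEC);

	if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {	/* assumed guard */
		unsigned long old_pte, new_pte;

		/* assumed: clear the pte, then order the update */
		old_pte = __radix_pte_update(ptep, ~0ul, 0);
		asm volatile("ptesync" : : : "memory");
		/* new value of pte */
		new_pte = old_pte | set;
		/* flush only this pte's translation, not the whole mm */
		radix__flush_tlb_pte_p9_dd1(old_pte, mm, address);
		__radix_pte_update(ptep, 0, new_pte);
	} else
		__radix_pte_update(ptep, 0, set);
}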

@@ -224,7 +224,8 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
 static inline void __ptep_set_access_flags(struct mm_struct *mm,
-					   pte_t *ptep, pte_t entry)
+					   pte_t *ptep, pte_t entry,
+					   unsigned long address)
 {
 	unsigned long set = pte_val(entry) &
 		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);

@@ -578,10 +578,11 @@ static inline bool check_pte_access(unsigned long access, unsigned long ptev)
  */
 static inline void __ptep_set_access_flags(struct mm_struct *mm,
-					   pte_t *ptep, pte_t entry)
+					   pte_t *ptep, pte_t entry,
+					   unsigned long address)
 {
 	if (radix_enabled())
-		return radix__ptep_set_access_flags(mm, ptep, entry);
+		return radix__ptep_set_access_flags(mm, ptep, entry, address);
 	return hash__ptep_set_access_flags(ptep, entry);
 }

@@ -167,7 +167,8 @@ static inline unsigned long radix__pte_update(struct mm_struct *mm,
  * function doesn't need to invalidate tlb.
  */
 static inline void radix__ptep_set_access_flags(struct mm_struct *mm,
-						pte_t *ptep, pte_t entry)
+						pte_t *ptep, pte_t entry,
+						unsigned long address)
 {
 	unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_ACCESSED |
@@ -183,13 +184,7 @@ static inline void radix__ptep_set_access_flags(struct mm_struct *mm,
 		 * new value of pte
 		 */
 		new_pte = old_pte | set;
-		/*
-		 * For now let's do heavy pid flush
-		 * radix__flush_tlb_page_psize(mm, addr, mmu_virtual_psize);
-		 */
-		radix__flush_tlb_mm(mm);
+		radix__flush_tlb_pte_p9_dd1(old_pte, mm, address);
 		__radix_pte_update(ptep, 0, new_pte);
 	} else
 		__radix_pte_update(ptep, 0, set);
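
The helper introduced above is where the page-size tracking pays off. A
minimal sketch of the idea, assuming this era's _PAGE_LARGE software bit
and the MMU_PAGE_2M size constant (the in-tree
radix__flush_tlb_pte_p9_dd1() may differ in detail):

/*
 * Targeted flush sketch: derive the flush size from the old pte
 * instead of invalidating the whole mm (PID).
 */
void radix__flush_tlb_pte_p9_dd1(unsigned long old_pte, struct mm_struct *mm,
				 unsigned long address)
{
	if (old_pte & _PAGE_LARGE)
		/* the pte mapped a 2M hugepage: flush at that size */
		radix__flush_tlb_page_psize(mm, address, MMU_PAGE_2M);
	else
		/* otherwise flush at the base page size */
		radix__flush_tlb_page_psize(mm, address, mmu_virtual_psize);
}

Either way exactly one translation is invalidated, where
radix__flush_tlb_mm() used to throw away the TLB footprint of the entire
address space.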

@@ -268,7 +268,8 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
 static inline void __ptep_set_access_flags(struct mm_struct *mm,
-					   pte_t *ptep, pte_t entry)
+					   pte_t *ptep, pte_t entry,
+					   unsigned long address)
 {
 	unsigned long set = pte_val(entry) &
 		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);

@@ -289,7 +289,8 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
  * function doesn't need to flush the hash entry
  */
 static inline void __ptep_set_access_flags(struct mm_struct *mm,
-					   pte_t *ptep, pte_t entry)
+					   pte_t *ptep, pte_t entry,
+					   unsigned long address)
 {
 	unsigned long bits = pte_val(entry) &
 		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);

@@ -35,7 +35,8 @@ int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
 #endif
 	changed = !pmd_same(*(pmdp), entry);
 	if (changed) {
-		__ptep_set_access_flags(vma->vm_mm, pmdp_ptep(pmdp), pmd_pte(entry));
+		__ptep_set_access_flags(vma->vm_mm, pmdp_ptep(pmdp),
+					pmd_pte(entry), address);
 		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
 	}
 	return changed;

@@ -224,7 +224,7 @@ int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
 	if (changed) {
 		if (!is_vm_hugetlb_page(vma))
 			assert_pte_locked(vma->vm_mm, address);
-		__ptep_set_access_flags(vma->vm_mm, ptep, entry);
+		__ptep_set_access_flags(vma->vm_mm, ptep, entry, address);
 		flush_tlb_page(vma, address);
 	}
 	return changed;