diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 3b91c981a27..184a02a4d87 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -318,12 +318,42 @@ void flush_tlb_mm(struct mm_struct *mm)
 
 #define FLUSHALL_BAR	16
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline unsigned long has_large_page(struct mm_struct *mm,
+				unsigned long start, unsigned long end)
+{
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+	unsigned long addr = ALIGN(start, HPAGE_SIZE);
+	for (; addr < end; addr += HPAGE_SIZE) {
+		pgd = pgd_offset(mm, addr);
+		if (likely(!pgd_none(*pgd))) {
+			pud = pud_offset(pgd, addr);
+			if (likely(!pud_none(*pud))) {
+				pmd = pmd_offset(pud, addr);
+				if (likely(!pmd_none(*pmd)))
+					if (pmd_large(*pmd))
+						return addr;
+			}
+		}
+	}
+	return 0;
+}
+#else
+static inline unsigned long has_large_page(struct mm_struct *mm,
+				unsigned long start, unsigned long end)
+{
+	return 0;
+}
+#endif
 void flush_tlb_range(struct vm_area_struct *vma,
 				   unsigned long start, unsigned long end)
 {
 	struct mm_struct *mm;
 
 	if (!cpu_has_invlpg || vma->vm_flags & VM_HUGETLB) {
+flush_all:
 		flush_tlb_mm(vma->vm_mm);
 		return;
 	}
@@ -346,6 +376,10 @@ void flush_tlb_range(struct vm_area_struct *vma,
 		if ((end - start)/PAGE_SIZE > act_entries/FLUSHALL_BAR)
 			local_flush_tlb();
 		else {
+			if (has_large_page(mm, start, end)) {
+				preempt_enable();
+				goto flush_all;
+			}
			for (addr = start; addr < end; addr += PAGE_SIZE)
 				__flush_tlb_single(addr);
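
For reference, the decision the patch makes in flush_tlb_range() can be sketched in
user space: invalidate page by page with INVLPG unless the range covers more than
1/FLUSHALL_BAR of the TLB, or a transparent huge page sits in the range, in which
case a full flush is cheaper/safer. The sketch below is illustrative only, not
kernel code: the PAGE_SIZE/HPAGE_SIZE values, the 512-entry TLB, and the names
flush_range()/range_has_large_page()/thp_addr are hypothetical stand-ins for the
real page-table walk in has_large_page().

/*
 * Minimal user-space sketch of the flush_tlb_range() heuristic above.
 * All names and sizes here are illustrative stand-ins, not kernel API.
 */
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define HPAGE_SIZE	(2UL * 1024 * 1024)
#define FLUSHALL_BAR	16

/*
 * Stand-in for has_large_page(): walk the range in HPAGE_SIZE steps,
 * starting from start rounded up to a huge-page boundary, exactly as
 * ALIGN(start, HPAGE_SIZE) does in the patch.  The "page table" here
 * is a single known huge-page address instead of a pgd/pud/pmd walk.
 */
static unsigned long range_has_large_page(unsigned long start,
					  unsigned long end,
					  unsigned long thp_addr)
{
	unsigned long addr = (start + HPAGE_SIZE - 1) & ~(HPAGE_SIZE - 1);

	for (; addr < end; addr += HPAGE_SIZE)
		if (addr == thp_addr)	/* real code: pmd_large(*pmd) */
			return addr;
	return 0;
}

static void flush_range(unsigned long start, unsigned long end,
			unsigned long tlb_entries, unsigned long thp_addr)
{
	unsigned long addr;

	/* A full flush wins once the range covers more than
	 * 1/FLUSHALL_BAR of the TLB entries. */
	if ((end - start) / PAGE_SIZE > tlb_entries / FLUSHALL_BAR) {
		printf("full TLB flush (range too large)\n");
		return;
	}
	/* A huge page in the range also forces the full-flush path. */
	if (range_has_large_page(start, end, thp_addr)) {
		printf("full TLB flush (huge page in range)\n");
		return;
	}
	for (addr = start; addr < end; addr += PAGE_SIZE)
		printf("invlpg %#lx\n", addr);
}

int main(void)
{
	/* Assume a 512-entry TLB: 512/16 = 32 pages is the cutoff. */
	flush_range(0x400000, 0x400000 +  4 * PAGE_SIZE, 512, 0);
	flush_range(0x400000, 0x400000 + 64 * PAGE_SIZE, 512, 0);
	flush_range(0x200000, 0x200000 +  4 * PAGE_SIZE, 512, 0x200000);
	return 0;
}

Built with gcc, this prints four per-page invlpg lines for the 4-page range, then
falls back to a full flush for the 64-page range (64 > 512/16) and for the range
that contains a huge page, mirroring the three paths the patch takes.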