Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
[cascardo/linux.git] / arch / mips / mm / tlb-r4k.c
index c17d762..bba9c14 100644 (file)
@@ -14,7 +14,7 @@
 #include <linux/smp.h>
 #include <linux/mm.h>
 #include <linux/hugetlb.h>
-#include <linux/module.h>
+#include <linux/export.h>
 
 #include <asm/cpu.h>
 #include <asm/cpu-type.h>
 extern void build_tlb_refill_handler(void);
 
 /*
- * LOONGSON2/3 has a 4 entry itlb which is a subset of dtlb,
- * unfortunately, itlb is not totally transparent to software.
+ * LOONGSON-2 has a 4 entry itlb which is a subset of jtlb, LOONGSON-3 has
+ * a 4 entry itlb and a 4 entry dtlb which are subsets of jtlb. Unfortunately,
+ * itlb/dtlb are not totally transparent to software.
  */
-static inline void flush_itlb(void)
+static inline void flush_micro_tlb(void)
 {
        switch (current_cpu_type()) {
        case CPU_LOONGSON2:
+               write_c0_diag(LOONGSON_DIAG_ITLB);
+               break;
        case CPU_LOONGSON3:
-               write_c0_diag(4);
+               write_c0_diag(LOONGSON_DIAG_ITLB | LOONGSON_DIAG_DTLB);
                break;
        default:
                break;
        }
 }
 
-static inline void flush_itlb_vm(struct vm_area_struct *vma)
+static inline void flush_micro_tlb_vm(struct vm_area_struct *vma)
 {
        if (vma->vm_flags & VM_EXEC)
-               flush_itlb();
+               flush_micro_tlb();
 }
 
 void local_flush_tlb_all(void)
@@ -64,8 +67,11 @@ void local_flush_tlb_all(void)
 
        entry = read_c0_wired();
 
-       /* Blast 'em all away. */
-       if (cpu_has_tlbinv) {
+       /*
+        * Blast 'em all away.
+        * If there are any wired entries, fall back to iterating one index at a time.
+        */
+       if (cpu_has_tlbinv && !entry) {
                if (current_cpu_data.tlbsizevtlb) {
                        write_c0_index(0);
                        mtc0_tlbw_hazard();
@@ -93,7 +99,7 @@ void local_flush_tlb_all(void)
        tlbw_use_hazard();
        write_c0_entryhi(old_ctx);
        htw_start();
-       flush_itlb();
+       flush_micro_tlb();
        local_irq_restore(flags);
 }
 EXPORT_SYMBOL(local_flush_tlb_all);
@@ -159,7 +165,7 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                } else {
                        drop_mmu_context(mm, cpu);
                }
-               flush_itlb();
+               flush_micro_tlb();
                local_irq_restore(flags);
        }
 }
@@ -205,7 +211,7 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
        } else {
                local_flush_tlb_all();
        }
-       flush_itlb();
+       flush_micro_tlb();
        local_irq_restore(flags);
 }
 
@@ -240,7 +246,7 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
        finish:
                write_c0_entryhi(oldpid);
                htw_start();
-               flush_itlb_vm(vma);
+               flush_micro_tlb_vm(vma);
                local_irq_restore(flags);
        }
 }
@@ -274,7 +280,7 @@ void local_flush_tlb_one(unsigned long page)
        }
        write_c0_entryhi(oldpid);
        htw_start();
-       flush_itlb();
+       flush_micro_tlb();
        local_irq_restore(flags);
 }
 
@@ -301,7 +307,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
        local_irq_save(flags);
 
        htw_stop();
-       pid = read_c0_entryhi() & ASID_MASK;
+       pid = read_c0_entryhi() & cpu_asid_mask(&current_cpu_data);
        address &= (PAGE_MASK << 1);
        write_c0_entryhi(address | pid);
        pgdp = pgd_offset(vma->vm_mm, address);
@@ -336,10 +342,12 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
 #if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
 #ifdef CONFIG_XPA
                write_c0_entrylo0(pte_to_entrylo(ptep->pte_high));
-               writex_c0_entrylo0(ptep->pte_low & _PFNX_MASK);
+               if (cpu_has_xpa)
+                       writex_c0_entrylo0(ptep->pte_low & _PFNX_MASK);
                ptep++;
                write_c0_entrylo1(pte_to_entrylo(ptep->pte_high));
-               writex_c0_entrylo1(ptep->pte_low & _PFNX_MASK);
+               if (cpu_has_xpa)
+                       writex_c0_entrylo1(ptep->pte_low & _PFNX_MASK);
 #else
                write_c0_entrylo0(ptep->pte_high);
                ptep++;
@@ -357,7 +365,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
        }
        tlbw_use_hazard();
        htw_start();
-       flush_itlb_vm(vma);
+       flush_micro_tlb_vm(vma);
        local_irq_restore(flags);
 }
 
@@ -400,19 +408,20 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 
-int __init has_transparent_hugepage(void)
+int has_transparent_hugepage(void)
 {
-       unsigned int mask;
-       unsigned long flags;
-
-       local_irq_save(flags);
-       write_c0_pagemask(PM_HUGE_MASK);
-       back_to_back_c0_hazard();
-       mask = read_c0_pagemask();
-       write_c0_pagemask(PM_DEFAULT_MASK);
+       static unsigned int mask = -1;
 
-       local_irq_restore(flags);
+       if (mask == -1) {       /* probe hardware once; first call comes during __init */
+               unsigned long flags;
 
+               local_irq_save(flags);
+               write_c0_pagemask(PM_HUGE_MASK);
+               back_to_back_c0_hazard();
+               mask = read_c0_pagemask();
+               write_c0_pagemask(PM_DEFAULT_MASK);
+               local_irq_restore(flags);
+       }
        return mask == PM_HUGE_MASK;
 }