MIPS: Add 1074K CPU support explicitly.
arch/mips/mm/c-r4k.c [cascardo/linux.git]
1 /*
2  * This file is subject to the terms and conditions of the GNU General Public
3  * License.  See the file "COPYING" in the main directory of this archive
4  * for more details.
5  *
6  * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
7  * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Ralf Baechle (ralf@gnu.org)
8  * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
9  */
10 #include <linux/hardirq.h>
11 #include <linux/init.h>
12 #include <linux/highmem.h>
13 #include <linux/kernel.h>
14 #include <linux/linkage.h>
15 #include <linux/preempt.h>
16 #include <linux/sched.h>
17 #include <linux/smp.h>
18 #include <linux/mm.h>
19 #include <linux/module.h>
20 #include <linux/bitops.h>
21
22 #include <asm/bcache.h>
23 #include <asm/bootinfo.h>
24 #include <asm/cache.h>
25 #include <asm/cacheops.h>
26 #include <asm/cpu.h>
27 #include <asm/cpu-features.h>
28 #include <asm/cpu-type.h>
29 #include <asm/io.h>
30 #include <asm/page.h>
31 #include <asm/pgtable.h>
32 #include <asm/r4kcache.h>
33 #include <asm/sections.h>
34 #include <asm/mmu_context.h>
35 #include <asm/war.h>
36 #include <asm/cacheflush.h> /* for run_uncached() */
37 #include <asm/traps.h>
38 #include <asm/dma-coherence.h>
39
40 /*
41  * Special Variant of smp_call_function for use by cache functions:
42  *
43  *  o No return value
44  *  o collapses to normal function call on UP kernels
45  *  o collapses to normal function call on systems with a single shared
46  *    primary cache.
47  *  o doesn't disable interrupts on the local CPU
48  */
49 static inline void r4k_on_each_cpu(void (*func) (void *info), void *info)
50 {
51         preempt_disable();
52
53 #if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC)
54         smp_call_function(func, info, 1);
55 #endif
56         func(info);
57         preempt_enable();
58 }
59
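/*
 * Whether it is safe to fall back to whole-cache *index* ops (e.g.
 * r4k_blast_dcache()) when a flush range is at least as large as the
 * cache; see r4k_dma_cache_wback_inv() and friends below.  On
 * CONFIG_MIPS_CMP this is disabled, presumably because index ops only
 * act on the local core's caches, so address-based (hit) ops are used
 * instead.
 */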
60 #if defined(CONFIG_MIPS_CMP)
61 #define cpu_has_safe_index_cacheops 0
62 #else
63 #define cpu_has_safe_index_cacheops 1
64 #endif
65
66 /*
67  * Must die.
68  */
69 static unsigned long icache_size __read_mostly;
70 static unsigned long dcache_size __read_mostly;
71 static unsigned long scache_size __read_mostly;
72
73 /*
74  * Dummy cache handling routines for machines without boardcaches
75  */
76 static void cache_noop(void) {}
77
78 static struct bcache_ops no_sc_ops = {
79         .bc_enable = (void *)cache_noop,
80         .bc_disable = (void *)cache_noop,
81         .bc_wback_inv = (void *)cache_noop,
82         .bc_inv = (void *)cache_noop
83 };
84
85 struct bcache_ops *bcops = &no_sc_ops;
86
87 #define cpu_is_r4600_v1_x()     ((read_c0_prid() & 0xfffffff0) == 0x00002010)
88 #define cpu_is_r4600_v2_x()     ((read_c0_prid() & 0xfffffff0) == 0x00002020)
89
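/*
 * Workaround wrapper used before hit-type cache ops.  On R4600 V2.x
 * cores an uncached load (the CKSEG1 dereference below) is issued
 * first, and on V1.x cores a few nops are inserted; both are believed
 * to avoid erratic behaviour of hit cacheops on those revisions (see
 * R4600_V1_HIT_CACHEOP_WAR / R4600_V2_HIT_CACHEOP_WAR in <asm/war.h>).
 */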
90 #define R4600_HIT_CACHEOP_WAR_IMPL                                      \
91 do {                                                                    \
92         if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())            \
93                 *(volatile unsigned long *)CKSEG1;                      \
94         if (R4600_V1_HIT_CACHEOP_WAR)                                   \
95                 __asm__ __volatile__("nop;nop;nop;nop");                \
96 } while (0)
97
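/*
 * The r4k_blast_* entry points are function pointers that the *_setup()
 * routines below fill in once, at boot, based on the probed cache line
 * size (and any CPU-specific workarounds).  Hot paths then call through
 * the pointer without re-checking the line size every time.
 */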
98 static void (*r4k_blast_dcache_page)(unsigned long addr);
99
100 static inline void r4k_blast_dcache_page_dc32(unsigned long addr)
101 {
102         R4600_HIT_CACHEOP_WAR_IMPL;
103         blast_dcache32_page(addr);
104 }
105
106 static inline void r4k_blast_dcache_page_dc64(unsigned long addr)
107 {
108         R4600_HIT_CACHEOP_WAR_IMPL;
109         blast_dcache64_page(addr);
110 }
111
112 static void r4k_blast_dcache_page_setup(void)
113 {
114         unsigned long  dc_lsize = cpu_dcache_line_size();
115
116         if (dc_lsize == 0)
117                 r4k_blast_dcache_page = (void *)cache_noop;
118         else if (dc_lsize == 16)
119                 r4k_blast_dcache_page = blast_dcache16_page;
120         else if (dc_lsize == 32)
121                 r4k_blast_dcache_page = r4k_blast_dcache_page_dc32;
122         else if (dc_lsize == 64)
123                 r4k_blast_dcache_page = r4k_blast_dcache_page_dc64;
124 }
125
126 static void (* r4k_blast_dcache_page_indexed)(unsigned long addr);
127
128 static void r4k_blast_dcache_page_indexed_setup(void)
129 {
130         unsigned long dc_lsize = cpu_dcache_line_size();
131
132         if (dc_lsize == 0)
133                 r4k_blast_dcache_page_indexed = (void *)cache_noop;
134         else if (dc_lsize == 16)
135                 r4k_blast_dcache_page_indexed = blast_dcache16_page_indexed;
136         else if (dc_lsize == 32)
137                 r4k_blast_dcache_page_indexed = blast_dcache32_page_indexed;
138         else if (dc_lsize == 64)
139                 r4k_blast_dcache_page_indexed = blast_dcache64_page_indexed;
140 }
141
142 void (* r4k_blast_dcache)(void);
143 EXPORT_SYMBOL(r4k_blast_dcache);
144
145 static void r4k_blast_dcache_setup(void)
146 {
147         unsigned long dc_lsize = cpu_dcache_line_size();
148
149         if (dc_lsize == 0)
150                 r4k_blast_dcache = (void *)cache_noop;
151         else if (dc_lsize == 16)
152                 r4k_blast_dcache = blast_dcache16;
153         else if (dc_lsize == 32)
154                 r4k_blast_dcache = blast_dcache32;
155         else if (dc_lsize == 64)
156                 r4k_blast_dcache = blast_dcache64;
157 }
158
159 /* force code alignment (used for TX49XX_ICACHE_INDEX_INV_WAR) */
160 #define JUMP_TO_ALIGN(order) \
161         __asm__ __volatile__( \
162                 "b\t1f\n\t" \
163                 ".align\t" #order "\n\t" \
164                 "1:\n\t" \
165                 )
166 #define CACHE32_UNROLL32_ALIGN  JUMP_TO_ALIGN(10) /* 32 * 32 = 1024 */
167 #define CACHE32_UNROLL32_ALIGN2 JUMP_TO_ALIGN(11)
168
169 static inline void blast_r4600_v1_icache32(void)
170 {
171         unsigned long flags;
172
173         local_irq_save(flags);
174         blast_icache32();
175         local_irq_restore(flags);
176 }
177
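/*
 * TX49XX_ICACHE_INDEX_INV_WAR: invalidate the icache in two passes so
 * that the code doing the invalidation never wipes out the 1kB-aligned
 * chunk of icache it is currently executing from.  The CACHE32_UNROLL32
 * alignment macros place each loop in a known chunk ("even" or "odd"),
 * and each loop only touches the opposite chunks.
 */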
178 static inline void tx49_blast_icache32(void)
179 {
180         unsigned long start = INDEX_BASE;
181         unsigned long end = start + current_cpu_data.icache.waysize;
182         unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
183         unsigned long ws_end = current_cpu_data.icache.ways <<
184                                current_cpu_data.icache.waybit;
185         unsigned long ws, addr;
186
187         CACHE32_UNROLL32_ALIGN2;
188         /* We're in an even chunk; blast the odd chunks */
189         for (ws = 0; ws < ws_end; ws += ws_inc)
190                 for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
191                         cache32_unroll32(addr|ws, Index_Invalidate_I);
192         CACHE32_UNROLL32_ALIGN;
193         /* We're in an odd chunk; blast the even chunks */
194         for (ws = 0; ws < ws_end; ws += ws_inc)
195                 for (addr = start; addr < end; addr += 0x400 * 2)
196                         cache32_unroll32(addr|ws, Index_Invalidate_I);
197 }
198
199 static inline void blast_icache32_r4600_v1_page_indexed(unsigned long page)
200 {
201         unsigned long flags;
202
203         local_irq_save(flags);
204         blast_icache32_page_indexed(page);
205         local_irq_restore(flags);
206 }
207
208 static inline void tx49_blast_icache32_page_indexed(unsigned long page)
209 {
210         unsigned long indexmask = current_cpu_data.icache.waysize - 1;
211         unsigned long start = INDEX_BASE + (page & indexmask);
212         unsigned long end = start + PAGE_SIZE;
213         unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
214         unsigned long ws_end = current_cpu_data.icache.ways <<
215                                current_cpu_data.icache.waybit;
216         unsigned long ws, addr;
217
218         CACHE32_UNROLL32_ALIGN2;
219         /* We're in an even chunk; blast the odd chunks */
220         for (ws = 0; ws < ws_end; ws += ws_inc)
221                 for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
222                         cache32_unroll32(addr|ws, Index_Invalidate_I);
223         CACHE32_UNROLL32_ALIGN;
224         /* We're in an odd chunk; blast the even chunks */
225         for (ws = 0; ws < ws_end; ws += ws_inc)
226                 for (addr = start; addr < end; addr += 0x400 * 2)
227                         cache32_unroll32(addr|ws, Index_Invalidate_I);
228 }
229
230 static void (* r4k_blast_icache_page)(unsigned long addr);
231
232 static void r4k_blast_icache_page_setup(void)
233 {
234         unsigned long ic_lsize = cpu_icache_line_size();
235
236         if (ic_lsize == 0)
237                 r4k_blast_icache_page = (void *)cache_noop;
238         else if (ic_lsize == 16)
239                 r4k_blast_icache_page = blast_icache16_page;
240         else if (ic_lsize == 32 && current_cpu_type() == CPU_LOONGSON2)
241                 r4k_blast_icache_page = loongson2_blast_icache32_page;
242         else if (ic_lsize == 32)
243                 r4k_blast_icache_page = blast_icache32_page;
244         else if (ic_lsize == 64)
245                 r4k_blast_icache_page = blast_icache64_page;
246 }
247
248
249 static void (* r4k_blast_icache_page_indexed)(unsigned long addr);
250
251 static void r4k_blast_icache_page_indexed_setup(void)
252 {
253         unsigned long ic_lsize = cpu_icache_line_size();
254
255         if (ic_lsize == 0)
256                 r4k_blast_icache_page_indexed = (void *)cache_noop;
257         else if (ic_lsize == 16)
258                 r4k_blast_icache_page_indexed = blast_icache16_page_indexed;
259         else if (ic_lsize == 32) {
260                 if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
261                         r4k_blast_icache_page_indexed =
262                                 blast_icache32_r4600_v1_page_indexed;
263                 else if (TX49XX_ICACHE_INDEX_INV_WAR)
264                         r4k_blast_icache_page_indexed =
265                                 tx49_blast_icache32_page_indexed;
266                 else if (current_cpu_type() == CPU_LOONGSON2)
267                         r4k_blast_icache_page_indexed =
268                                 loongson2_blast_icache32_page_indexed;
269                 else
270                         r4k_blast_icache_page_indexed =
271                                 blast_icache32_page_indexed;
272         } else if (ic_lsize == 64)
273                 r4k_blast_icache_page_indexed = blast_icache64_page_indexed;
274 }
275
276 void (* r4k_blast_icache)(void);
277 EXPORT_SYMBOL(r4k_blast_icache);
278
279 static void r4k_blast_icache_setup(void)
280 {
281         unsigned long ic_lsize = cpu_icache_line_size();
282
283         if (ic_lsize == 0)
284                 r4k_blast_icache = (void *)cache_noop;
285         else if (ic_lsize == 16)
286                 r4k_blast_icache = blast_icache16;
287         else if (ic_lsize == 32) {
288                 if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
289                         r4k_blast_icache = blast_r4600_v1_icache32;
290                 else if (TX49XX_ICACHE_INDEX_INV_WAR)
291                         r4k_blast_icache = tx49_blast_icache32;
292                 else if (current_cpu_type() == CPU_LOONGSON2)
293                         r4k_blast_icache = loongson2_blast_icache32;
294                 else
295                         r4k_blast_icache = blast_icache32;
296         } else if (ic_lsize == 64)
297                 r4k_blast_icache = blast_icache64;
298 }
299
300 static void (* r4k_blast_scache_page)(unsigned long addr);
301
302 static void r4k_blast_scache_page_setup(void)
303 {
304         unsigned long sc_lsize = cpu_scache_line_size();
305
306         if (scache_size == 0)
307                 r4k_blast_scache_page = (void *)cache_noop;
308         else if (sc_lsize == 16)
309                 r4k_blast_scache_page = blast_scache16_page;
310         else if (sc_lsize == 32)
311                 r4k_blast_scache_page = blast_scache32_page;
312         else if (sc_lsize == 64)
313                 r4k_blast_scache_page = blast_scache64_page;
314         else if (sc_lsize == 128)
315                 r4k_blast_scache_page = blast_scache128_page;
316 }
317
318 static void (* r4k_blast_scache_page_indexed)(unsigned long addr);
319
320 static void r4k_blast_scache_page_indexed_setup(void)
321 {
322         unsigned long sc_lsize = cpu_scache_line_size();
323
324         if (scache_size == 0)
325                 r4k_blast_scache_page_indexed = (void *)cache_noop;
326         else if (sc_lsize == 16)
327                 r4k_blast_scache_page_indexed = blast_scache16_page_indexed;
328         else if (sc_lsize == 32)
329                 r4k_blast_scache_page_indexed = blast_scache32_page_indexed;
330         else if (sc_lsize == 64)
331                 r4k_blast_scache_page_indexed = blast_scache64_page_indexed;
332         else if (sc_lsize == 128)
333                 r4k_blast_scache_page_indexed = blast_scache128_page_indexed;
334 }
335
336 static void (* r4k_blast_scache)(void);
337
338 static void r4k_blast_scache_setup(void)
339 {
340         unsigned long sc_lsize = cpu_scache_line_size();
341
342         if (scache_size == 0)
343                 r4k_blast_scache = (void *)cache_noop;
344         else if (sc_lsize == 16)
345                 r4k_blast_scache = blast_scache16;
346         else if (sc_lsize == 32)
347                 r4k_blast_scache = blast_scache32;
348         else if (sc_lsize == 64)
349                 r4k_blast_scache = blast_scache64;
350         else if (sc_lsize == 128)
351                 r4k_blast_scache = blast_scache128;
352 }
353
354 static inline void local_r4k___flush_cache_all(void * args)
355 {
356         switch (current_cpu_type()) {
357         case CPU_LOONGSON2:
358         case CPU_R4000SC:
359         case CPU_R4000MC:
360         case CPU_R4400SC:
361         case CPU_R4400MC:
362         case CPU_R10000:
363         case CPU_R12000:
364         case CPU_R14000:
365                 /*
366                  * These caches are inclusive caches, that is, if something
367                  * is not cached in the S-cache, we know it also won't be
368                  * in one of the primary caches.
369                  */
370                 r4k_blast_scache();
371                 break;
372
373         default:
374                 r4k_blast_dcache();
375                 r4k_blast_icache();
376                 break;
377         }
378 }
379
380 static void r4k___flush_cache_all(void)
381 {
382         r4k_on_each_cpu(local_r4k___flush_cache_all, NULL);
383 }
384
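/*
 * Return whether the mm could have anything live in the caches: on MT
 * SMP/SMTC kernels the mm may hold a valid ASID on any online CPU, so
 * all of them are checked; on other configurations only the current
 * CPU's context matters.
 */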
385 static inline int has_valid_asid(const struct mm_struct *mm)
386 {
387 #if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
388         int i;
389
390         for_each_online_cpu(i)
391                 if (cpu_context(i, mm))
392                         return 1;
393
394         return 0;
395 #else
396         return cpu_context(smp_processor_id(), mm);
397 #endif
398 }
399
400 static void r4k__flush_cache_vmap(void)
401 {
402         r4k_blast_dcache();
403 }
404
405 static void r4k__flush_cache_vunmap(void)
406 {
407         r4k_blast_dcache();
408 }
409
410 static inline void local_r4k_flush_cache_range(void * args)
411 {
412         struct vm_area_struct *vma = args;
413         int exec = vma->vm_flags & VM_EXEC;
414
415         if (!(has_valid_asid(vma->vm_mm)))
416                 return;
417
418         r4k_blast_dcache();
419         if (exec)
420                 r4k_blast_icache();
421 }
422
423 static void r4k_flush_cache_range(struct vm_area_struct *vma,
424         unsigned long start, unsigned long end)
425 {
426         int exec = vma->vm_flags & VM_EXEC;
427
428         if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc))
429                 r4k_on_each_cpu(local_r4k_flush_cache_range, vma);
430 }
431
432 static inline void local_r4k_flush_cache_mm(void * args)
433 {
434         struct mm_struct *mm = args;
435
436         if (!has_valid_asid(mm))
437                 return;
438
439         /*
440          * Kludge alert.  For obscure reasons R4000SC and R4400SC go nuts if we
441          * only flush the primary caches but R10000 and R12000 behave sanely ...
442          * R4000SC and R4400SC indexed S-cache ops also invalidate primary
443          * caches, so we can bail out early.
444          */
445         if (current_cpu_type() == CPU_R4000SC ||
446             current_cpu_type() == CPU_R4000MC ||
447             current_cpu_type() == CPU_R4400SC ||
448             current_cpu_type() == CPU_R4400MC) {
449                 r4k_blast_scache();
450                 return;
451         }
452
453         r4k_blast_dcache();
454 }
455
456 static void r4k_flush_cache_mm(struct mm_struct *mm)
457 {
458         if (!cpu_has_dc_aliases)
459                 return;
460
461         r4k_on_each_cpu(local_r4k_flush_cache_mm, mm);
462 }
463
464 struct flush_cache_page_args {
465         struct vm_area_struct *vma;
466         unsigned long addr;
467         unsigned long pfn;
468 };
469
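/*
 * Flush one user page from the caches.  The page table is walked to
 * find the pte; if the page belongs to an mm other than the current
 * one it is temporarily mapped (kmap_coherent() or kmap_atomic()) so
 * that hit-type cache ops can reach it, then the dcache page and, for
 * executable mappings, the icache page are blasted.
 */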
470 static inline void local_r4k_flush_cache_page(void *args)
471 {
472         struct flush_cache_page_args *fcp_args = args;
473         struct vm_area_struct *vma = fcp_args->vma;
474         unsigned long addr = fcp_args->addr;
475         struct page *page = pfn_to_page(fcp_args->pfn);
476         int exec = vma->vm_flags & VM_EXEC;
477         struct mm_struct *mm = vma->vm_mm;
478         int map_coherent = 0;
479         pgd_t *pgdp;
480         pud_t *pudp;
481         pmd_t *pmdp;
482         pte_t *ptep;
483         void *vaddr;
484
485         /*
486          * If the owning mm has no valid ASID yet, it cannot possibly have
487          * gotten this page into the cache.
488          */
489         if (!has_valid_asid(mm))
490                 return;
491
492         addr &= PAGE_MASK;
493         pgdp = pgd_offset(mm, addr);
494         pudp = pud_offset(pgdp, addr);
495         pmdp = pmd_offset(pudp, addr);
496         ptep = pte_offset(pmdp, addr);
497
498         /*
499          * If the page isn't marked valid, the page cannot possibly be
500          * in the cache.
501          */
502         if (!(pte_present(*ptep)))
503                 return;
504
505         if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID))
506                 vaddr = NULL;
507         else {
508                 /*
509          * Use kmap_coherent or kmap_atomic to do flushes on behalf of
510          * an ASID other than the current one.
511                  */
512                 map_coherent = (cpu_has_dc_aliases &&
513                                 page_mapped(page) && !Page_dcache_dirty(page));
514                 if (map_coherent)
515                         vaddr = kmap_coherent(page, addr);
516                 else
517                         vaddr = kmap_atomic(page);
518                 addr = (unsigned long)vaddr;
519         }
520
521         if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
522                 r4k_blast_dcache_page(addr);
523                 if (exec && !cpu_icache_snoops_remote_store)
524                         r4k_blast_scache_page(addr);
525         }
526         if (exec) {
527                 if (vaddr && cpu_has_vtag_icache && mm == current->active_mm) {
528                         int cpu = smp_processor_id();
529
530                         if (cpu_context(cpu, mm) != 0)
531                                 drop_mmu_context(mm, cpu);
532                 } else
533                         r4k_blast_icache_page(addr);
534         }
535
536         if (vaddr) {
537                 if (map_coherent)
538                         kunmap_coherent();
539                 else
540                         kunmap_atomic(vaddr);
541         }
542 }
543
544 static void r4k_flush_cache_page(struct vm_area_struct *vma,
545         unsigned long addr, unsigned long pfn)
546 {
547         struct flush_cache_page_args args;
548
549         args.vma = vma;
550         args.addr = addr;
551         args.pfn = pfn;
552
553         r4k_on_each_cpu(local_r4k_flush_cache_page, &args);
554 }
555
556 static inline void local_r4k_flush_data_cache_page(void * addr)
557 {
558         r4k_blast_dcache_page((unsigned long) addr);
559 }
560
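/*
 * When called from atomic context, smp_call_function() must not be
 * used, so only the local CPU's dcache page is flushed; otherwise the
 * flush is broadcast via r4k_on_each_cpu().
 */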
561 static void r4k_flush_data_cache_page(unsigned long addr)
562 {
563         if (in_atomic())
564                 local_r4k_flush_data_cache_page((void *)addr);
565         else
566                 r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr);
567 }
568
569 struct flush_icache_range_args {
570         unsigned long start;
571         unsigned long end;
572 };
573
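/*
 * Flush an icache range: unless the icache fills straight from the
 * dcache, the dcache lines are written back first so the icache sees
 * the new instructions, then the icache range is invalidated.  Whole-
 * cache blasts are used once the range exceeds the cache size.
 */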
574 static inline void local_r4k_flush_icache_range(unsigned long start, unsigned long end)
575 {
576         if (!cpu_has_ic_fills_f_dc) {
577                 if (end - start >= dcache_size) {
578                         r4k_blast_dcache();
579                 } else {
580                         R4600_HIT_CACHEOP_WAR_IMPL;
581                         protected_blast_dcache_range(start, end);
582                 }
583         }
584
585         if (end - start > icache_size)
586                 r4k_blast_icache();
587         else {
588                 switch (boot_cpu_type()) {
589                 case CPU_LOONGSON2:
590                         protected_loongson2_blast_icache_range(start, end);
591                         break;
592
593                 default:
594                         protected_blast_icache_range(start, end);
595                         break;
596                 }
597         }
598 }
599
600 static inline void local_r4k_flush_icache_range_ipi(void *args)
601 {
602         struct flush_icache_range_args *fir_args = args;
603         unsigned long start = fir_args->start;
604         unsigned long end = fir_args->end;
605
606         local_r4k_flush_icache_range(start, end);
607 }
608
609 static void r4k_flush_icache_range(unsigned long start, unsigned long end)
610 {
611         struct flush_icache_range_args args;
612
613         args.start = start;
614         args.end = end;
615
616         r4k_on_each_cpu(local_r4k_flush_icache_range_ipi, &args);
617         instruction_hazard();
618 }
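/*
 * Hypothetical caller sketch (not part of this file): after copying or
 * patching instructions, the range has to be flushed before it may be
 * executed, e.g.:
 *
 *	memcpy(dst, insns, len);
 *	flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
 */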
619
620 #ifdef CONFIG_DMA_NONCOHERENT
621
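/*
 * DMA maintenance for non-coherent systems: dma_cache_wback_inv()
 * writes dirty lines back and invalidates them (used before a device
 * accesses the buffer), while dma_cache_inv() only needs to discard
 * the lines so stale data is not read back after the device has
 * written to memory.  If the S-cache is inclusive of the primaries,
 * operating on the S-cache alone is enough.
 */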
622 static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
623 {
624         /* Catch bad driver code */
625         BUG_ON(size == 0);
626
627         preempt_disable();
628         if (cpu_has_inclusive_pcaches) {
629                 if (size >= scache_size)
630                         r4k_blast_scache();
631                 else
632                         blast_scache_range(addr, addr + size);
633                 preempt_enable();
634                 __sync();
635                 return;
636         }
637
638         /*
639          * Either there is no secondary cache or the available caches don't
640          * have the subset property, so we have to flush the primary caches
641          * explicitly.
642          */
643         if (cpu_has_safe_index_cacheops && size >= dcache_size) {
644                 r4k_blast_dcache();
645         } else {
646                 R4600_HIT_CACHEOP_WAR_IMPL;
647                 blast_dcache_range(addr, addr + size);
648         }
649         preempt_enable();
650
651         bc_wback_inv(addr, size);
652         __sync();
653 }
654
655 static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
656 {
657         /* Catch bad driver code */
658         BUG_ON(size == 0);
659
660         preempt_disable();
661         if (cpu_has_inclusive_pcaches) {
662                 if (size >= scache_size)
663                         r4k_blast_scache();
664                 else {
665                         /*
666                          * There is no clearly documented alignment requirement
667                          * for the cache instruction on MIPS processors; some
668                          * processors, among them the QED RM5200 and RM7000,
669                          * will throw an address error for cache hit ops with
670                          * insufficient alignment.  This is solved by aligning
671                          * the address to the cache line size.
672                          */
673                         blast_inv_scache_range(addr, addr + size);
674                 }
675                 preempt_enable();
676                 __sync();
677                 return;
678         }
679
680         if (cpu_has_safe_index_cacheops && size >= dcache_size) {
681                 r4k_blast_dcache();
682         } else {
683                 R4600_HIT_CACHEOP_WAR_IMPL;
684                 blast_inv_dcache_range(addr, addr + size);
685         }
686         preempt_enable();
687
688         bc_inv(addr, size);
689         __sync();
690 }
691 #endif /* CONFIG_DMA_NONCOHERENT */
692
693 /*
694  * Since we're protected against bad userland addresses, we don't care
695  * very much about what happens in that case.  Usually a segmentation
696  * fault will dump the process later on anyway ...
697  */
698 static void local_r4k_flush_cache_sigtramp(void * arg)
699 {
700         unsigned long ic_lsize = cpu_icache_line_size();
701         unsigned long dc_lsize = cpu_dcache_line_size();
702         unsigned long sc_lsize = cpu_scache_line_size();
703         unsigned long addr = (unsigned long) arg;
704
705         R4600_HIT_CACHEOP_WAR_IMPL;
706         if (dc_lsize)
707                 protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
708         if (!cpu_icache_snoops_remote_store && scache_size)
709                 protected_writeback_scache_line(addr & ~(sc_lsize - 1));
710         if (ic_lsize)
711                 protected_flush_icache_line(addr & ~(ic_lsize - 1));
712         if (MIPS4K_ICACHE_REFILL_WAR) {
713                 __asm__ __volatile__ (
714                         ".set push\n\t"
715                         ".set noat\n\t"
716                         ".set mips3\n\t"
717 #ifdef CONFIG_32BIT
718                         "la     $at,1f\n\t"
719 #endif
720 #ifdef CONFIG_64BIT
721                         "dla    $at,1f\n\t"
722 #endif
723                         "cache  %0,($at)\n\t"
724                         "nop; nop; nop\n"
725                         "1:\n\t"
726                         ".set pop"
727                         :
728                         : "i" (Hit_Invalidate_I));
729         }
730         if (MIPS_CACHE_SYNC_WAR)
731                 __asm__ __volatile__ ("sync");
732 }
733
734 static void r4k_flush_cache_sigtramp(unsigned long addr)
735 {
736         r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr);
737 }
738
739 static void r4k_flush_icache_all(void)
740 {
741         if (cpu_has_vtag_icache)
742                 r4k_blast_icache();
743 }
744
745 struct flush_kernel_vmap_range_args {
746         unsigned long   vaddr;
747         int             size;
748 };
749
750 static inline void local_r4k_flush_kernel_vmap_range(void *args)
751 {
752         struct flush_kernel_vmap_range_args *vmra = args;
753         unsigned long vaddr = vmra->vaddr;
754         int size = vmra->size;
755
756         /*
757          * Aliases only affect the primary caches so don't bother with
758          * S-caches or T-caches.
759          */
760         if (cpu_has_safe_index_cacheops && size >= dcache_size)
761                 r4k_blast_dcache();
762         else {
763                 R4600_HIT_CACHEOP_WAR_IMPL;
764                 blast_dcache_range(vaddr, vaddr + size);
765         }
766 }
767
768 static void r4k_flush_kernel_vmap_range(unsigned long vaddr, int size)
769 {
770         struct flush_kernel_vmap_range_args args;
771
772         args.vaddr = (unsigned long) vaddr;
773         args.size = size;
774
775         r4k_on_each_cpu(local_r4k_flush_kernel_vmap_range, &args);
776 }
777
778 static inline void rm7k_erratum31(void)
779 {
780         const unsigned long ic_lsize = 32;
781         unsigned long addr;
782
783         /* RM7000 erratum #31. The icache is screwed at startup. */
784         write_c0_taglo(0);
785         write_c0_taghi(0);
786
787         for (addr = INDEX_BASE; addr <= INDEX_BASE + 4096; addr += ic_lsize) {
788                 __asm__ __volatile__ (
789                         ".set push\n\t"
790                         ".set noreorder\n\t"
791                         ".set mips3\n\t"
792                         "cache\t%1, 0(%0)\n\t"
793                         "cache\t%1, 0x1000(%0)\n\t"
794                         "cache\t%1, 0x2000(%0)\n\t"
795                         "cache\t%1, 0x3000(%0)\n\t"
796                         "cache\t%2, 0(%0)\n\t"
797                         "cache\t%2, 0x1000(%0)\n\t"
798                         "cache\t%2, 0x2000(%0)\n\t"
799                         "cache\t%2, 0x3000(%0)\n\t"
800                         "cache\t%1, 0(%0)\n\t"
801                         "cache\t%1, 0x1000(%0)\n\t"
802                         "cache\t%1, 0x2000(%0)\n\t"
803                         "cache\t%1, 0x3000(%0)\n\t"
804                         ".set pop\n"
805                         :
806                         : "r" (addr), "i" (Index_Store_Tag_I), "i" (Fill));
807         }
808 }
809
810 static inline void alias_74k_erratum(struct cpuinfo_mips *c)
811 {
812         unsigned int imp = c->processor_id & PRID_IMP_MASK;
813         unsigned int rev = c->processor_id & PRID_REV_MASK;
814
815         /*
816          * Early versions of the 74K do not update the cache tags on a
817          * vtag miss/ptag hit which can occur in the case of KSEG0/KUSEG
818          * aliases. In this case it is better to treat the cache as always
819          * having aliases.
820          */
821         switch (imp) {
822         case PRID_IMP_74K:
823                 if (rev <= PRID_REV_ENCODE_332(2, 4, 0))
824                         c->dcache.flags |= MIPS_CACHE_VTAG;
825                 if (rev == PRID_REV_ENCODE_332(2, 4, 0))
826                         write_c0_config6(read_c0_config6() | MIPS_CONF6_SYND);
827                 break;
828         case PRID_IMP_1074K:
829                 if (rev <= PRID_REV_ENCODE_332(1, 1, 0)) {
830                         c->dcache.flags |= MIPS_CACHE_VTAG;
831                         write_c0_config6(read_c0_config6() | MIPS_CONF6_SYND);
832                 }
833                 break;
834         default:
835                 BUG();
836         }
837 }
838
839 static char *way_string[] = { NULL, "direct mapped", "2-way",
840         "3-way", "4-way", "5-way", "6-way", "7-way", "8-way"
841 };
842
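/*
 * Probe the primary caches: for the older cores below the geometry is
 * hard-coded or derived from the classic Config register bits, while
 * the generic MIPS32/MIPS64 path at the end decodes Config1.  The
 * results end up in current_cpu_data and in icache_size/dcache_size.
 */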
843 static void probe_pcache(void)
844 {
845         struct cpuinfo_mips *c = &current_cpu_data;
846         unsigned int config = read_c0_config();
847         unsigned int prid = read_c0_prid();
848         unsigned long config1;
849         unsigned int lsize;
850
851         switch (current_cpu_type()) {
852         case CPU_R4600:                 /* QED style two way caches? */
853         case CPU_R4700:
854         case CPU_R5000:
855         case CPU_NEVADA:
856                 icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
857                 c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
858                 c->icache.ways = 2;
859                 c->icache.waybit = __ffs(icache_size/2);
860
861                 dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
862                 c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
863                 c->dcache.ways = 2;
864                 c->dcache.waybit= __ffs(dcache_size/2);
865
866                 c->options |= MIPS_CPU_CACHE_CDEX_P;
867                 break;
868
869         case CPU_R5432:
870         case CPU_R5500:
871                 icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
872                 c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
873                 c->icache.ways = 2;
874                 c->icache.waybit= 0;
875
876                 dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
877                 c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
878                 c->dcache.ways = 2;
879                 c->dcache.waybit = 0;
880
881                 c->options |= MIPS_CPU_CACHE_CDEX_P | MIPS_CPU_PREFETCH;
882                 break;
883
884         case CPU_TX49XX:
885                 icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
886                 c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
887                 c->icache.ways = 4;
888                 c->icache.waybit= 0;
889
890                 dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
891                 c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
892                 c->dcache.ways = 4;
893                 c->dcache.waybit = 0;
894
895                 c->options |= MIPS_CPU_CACHE_CDEX_P;
896                 c->options |= MIPS_CPU_PREFETCH;
897                 break;
898
899         case CPU_R4000PC:
900         case CPU_R4000SC:
901         case CPU_R4000MC:
902         case CPU_R4400PC:
903         case CPU_R4400SC:
904         case CPU_R4400MC:
905         case CPU_R4300:
906                 icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
907                 c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
908                 c->icache.ways = 1;
909                 c->icache.waybit = 0;   /* doesn't matter */
910
911                 dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
912                 c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
913                 c->dcache.ways = 1;
914                 c->dcache.waybit = 0;   /* does not matter */
915
916                 c->options |= MIPS_CPU_CACHE_CDEX_P;
917                 break;
918
919         case CPU_R10000:
920         case CPU_R12000:
921         case CPU_R14000:
922                 icache_size = 1 << (12 + ((config & R10K_CONF_IC) >> 29));
923                 c->icache.linesz = 64;
924                 c->icache.ways = 2;
925                 c->icache.waybit = 0;
926
927                 dcache_size = 1 << (12 + ((config & R10K_CONF_DC) >> 26));
928                 c->dcache.linesz = 32;
929                 c->dcache.ways = 2;
930                 c->dcache.waybit = 0;
931
932                 c->options |= MIPS_CPU_PREFETCH;
933                 break;
934
935         case CPU_VR4133:
936                 write_c0_config(config & ~VR41_CONF_P4K);
937         case CPU_VR4131:
938                 /* Workaround for cache instruction bug of VR4131 */
939                 if (c->processor_id == 0x0c80U || c->processor_id == 0x0c81U ||
940                     c->processor_id == 0x0c82U) {
941                         config |= 0x00400000U;
942                         if (c->processor_id == 0x0c80U)
943                                 config |= VR41_CONF_BP;
944                         write_c0_config(config);
945                 } else
946                         c->options |= MIPS_CPU_CACHE_CDEX_P;
947
948                 icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
949                 c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
950                 c->icache.ways = 2;
951                 c->icache.waybit = __ffs(icache_size/2);
952
953                 dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
954                 c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
955                 c->dcache.ways = 2;
956                 c->dcache.waybit = __ffs(dcache_size/2);
957                 break;
958
959         case CPU_VR41XX:
960         case CPU_VR4111:
961         case CPU_VR4121:
962         case CPU_VR4122:
963         case CPU_VR4181:
964         case CPU_VR4181A:
965                 icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
966                 c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
967                 c->icache.ways = 1;
968                 c->icache.waybit = 0;   /* doesn't matter */
969
970                 dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
971                 c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
972                 c->dcache.ways = 1;
973                 c->dcache.waybit = 0;   /* does not matter */
974
975                 c->options |= MIPS_CPU_CACHE_CDEX_P;
976                 break;
977
978         case CPU_RM7000:
979                 rm7k_erratum31();
980
981                 icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
982                 c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
983                 c->icache.ways = 4;
984                 c->icache.waybit = __ffs(icache_size / c->icache.ways);
985
986                 dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
987                 c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
988                 c->dcache.ways = 4;
989                 c->dcache.waybit = __ffs(dcache_size / c->dcache.ways);
990
991                 c->options |= MIPS_CPU_CACHE_CDEX_P;
992                 c->options |= MIPS_CPU_PREFETCH;
993                 break;
994
995         case CPU_LOONGSON2:
996                 icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
997                 c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
998                 if (prid & 0x3)
999                         c->icache.ways = 4;
1000                 else
1001                         c->icache.ways = 2;
1002                 c->icache.waybit = 0;
1003
1004                 dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
1005                 c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
1006                 if (prid & 0x3)
1007                         c->dcache.ways = 4;
1008                 else
1009                         c->dcache.ways = 2;
1010                 c->dcache.waybit = 0;
1011                 break;
1012
1013         default:
1014                 if (!(config & MIPS_CONF_M))
1015                         panic("Don't know how to probe P-caches on this cpu.");
1016
1017                 /*
1018                  * We seem to be a MIPS32 or MIPS64 CPU,
1019                  * so let's probe the I-cache ...
1020                  */
1021                 config1 = read_c0_config1();
1022
1023                 lsize = (config1 >> 19) & 7;
1024
1025                 /* IL == 7 is reserved */
1026                 if (lsize == 7)
1027                         panic("Invalid icache line size");
1028
1029                 c->icache.linesz = lsize ? 2 << lsize : 0;
1030
1031                 c->icache.sets = 32 << (((config1 >> 22) + 1) & 7);
1032                 c->icache.ways = 1 + ((config1 >> 16) & 7);
1033
1034                 icache_size = c->icache.sets *
1035                               c->icache.ways *
1036                               c->icache.linesz;
1037                 c->icache.waybit = __ffs(icache_size/c->icache.ways);
1038
1039                 if (config & 0x8)               /* VI bit */
1040                         c->icache.flags |= MIPS_CACHE_VTAG;
1041
1042                 /*
1043                  * Now probe the MIPS32 / MIPS64 data cache.
1044                  */
1045                 c->dcache.flags = 0;
1046
1047                 lsize = (config1 >> 10) & 7;
1048
1049                 /* DL == 7 is reserved */
1050                 if (lsize == 7)
1051                         panic("Invalid dcache line size");
1052
1053                 c->dcache.linesz = lsize ? 2 << lsize : 0;
1054
1055                 c->dcache.sets = 32 << (((config1 >> 13) + 1) & 7);
1056                 c->dcache.ways = 1 + ((config1 >> 7) & 7);
1057
1058                 dcache_size = c->dcache.sets *
1059                               c->dcache.ways *
1060                               c->dcache.linesz;
1061                 c->dcache.waybit = __ffs(dcache_size/c->dcache.ways);
1062
1063                 c->options |= MIPS_CPU_PREFETCH;
1064                 break;
1065         }
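/*
 * Worked example of the Config1 decode above, with made-up field
 * values: IL = 4 gives a line size of 2 << 4 = 32 bytes, IS = 2 gives
 * 32 << 3 = 256 sets, IA = 3 gives 4 ways, so
 * icache_size = 256 * 4 * 32 = 32kB and waybit = __ffs(32768 / 4) = 13.
 */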
1066
1067         /*
1068          * Processor configuration sanity check for the R4000SC erratum
1069          * #5.  With page sizes larger than 32kB it is no longer possible
1070          * to get a VCE exception, so we don't care about this
1071          * misconfiguration.  The case is rather theoretical anyway;
1072          * presumably no vendor is shipping its hardware in the "bad"
1073          * configuration.
1074          */
1075         if ((prid & PRID_IMP_MASK) == PRID_IMP_R4000 &&
1076             (prid & PRID_REV_MASK) < PRID_REV_R4400 &&
1077             !(config & CONF_SC) && c->icache.linesz != 16 &&
1078             PAGE_SIZE <= 0x8000)
1079                 panic("Improper R4000SC processor configuration detected");
1080
1081         /* compute a couple of other cache variables */
1082         c->icache.waysize = icache_size / c->icache.ways;
1083         c->dcache.waysize = dcache_size / c->dcache.ways;
1084
1085         c->icache.sets = c->icache.linesz ?
1086                 icache_size / (c->icache.linesz * c->icache.ways) : 0;
1087         c->dcache.sets = c->dcache.linesz ?
1088                 dcache_size / (c->dcache.linesz * c->dcache.ways) : 0;
1089
1090         /*
1091          * R10000 and R12000 P-caches are odd in a positive way.  They're 32kB
1092          * 2-way virtually indexed caches, so normally they'd suffer from
1093          * aliases, but magic in the hardware deals with that for us so we
1094          * don't need to take care ourselves.
1095          */
1096         switch (current_cpu_type()) {
1097         case CPU_20KC:
1098         case CPU_25KF:
1099         case CPU_SB1:
1100         case CPU_SB1A:
1101         case CPU_XLR:
1102                 c->dcache.flags |= MIPS_CACHE_PINDEX;
1103                 break;
1104
1105         case CPU_R10000:
1106         case CPU_R12000:
1107         case CPU_R14000:
1108                 break;
1109
1110         case CPU_M14KC:
1111         case CPU_M14KEC:
1112         case CPU_24K:
1113         case CPU_34K:
1114         case CPU_74K:
1115         case CPU_1004K:
1116         case CPU_1074K:
1117         case CPU_INTERAPTIV:
1118         case CPU_PROAPTIV:
1119                 if ((c->cputype == CPU_74K) || (c->cputype == CPU_1074K))
1120                         alias_74k_erratum(c);
1121                 if (!(read_c0_config7() & MIPS_CONF7_IAR) &&
1122                     (c->icache.waysize > PAGE_SIZE))
1123                         c->icache.flags |= MIPS_CACHE_ALIASES;
1124                 if (read_c0_config7() & MIPS_CONF7_AR) {
1125                         /*
1126                          * Effectively physically indexed dcache,
1127                          * thus no virtual aliases.
1128                          */
1129                         c->dcache.flags |= MIPS_CACHE_PINDEX;
1130                         break;
1131                 }
1132         default:
1133                 if (c->dcache.waysize > PAGE_SIZE)
1134                         c->dcache.flags |= MIPS_CACHE_ALIASES;
1135         }
1136
1137         switch (current_cpu_type()) {
1138         case CPU_20KC:
1139                 /*
1140                  * Some older 20Kc chips don't have the 'VI' bit in
1141                  * the config register.
1142                  */
1143                 c->icache.flags |= MIPS_CACHE_VTAG;
1144                 break;
1145
1146         case CPU_ALCHEMY:
1147                 c->icache.flags |= MIPS_CACHE_IC_F_DC;
1148                 break;
1149
1150         case CPU_LOONGSON2:
1151                 /*
1152                  * LOONGSON2 has a 4-way icache, but when using an indexed cache op,
1153                  * one op will act on all 4 ways.
1154                  */
1155                 c->icache.ways = 1;
1156         }
1157
1158         printk("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n",
1159                icache_size >> 10,
1160                c->icache.flags & MIPS_CACHE_VTAG ? "VIVT" : "VIPT",
1161                way_string[c->icache.ways], c->icache.linesz);
1162
1163         printk("Primary data cache %ldkB, %s, %s, %s, linesize %d bytes\n",
1164                dcache_size >> 10, way_string[c->dcache.ways],
1165                (c->dcache.flags & MIPS_CACHE_PINDEX) ? "PIPT" : "VIPT",
1166                (c->dcache.flags & MIPS_CACHE_ALIASES) ?
1167                         "cache aliases" : "no aliases",
1168                c->dcache.linesz);
1169 }
1170
1171 /*
1172  * If you even _breathe_ on this function, look at the gcc output and make sure
1173  * it does not pop things on and off the stack for the cache sizing loop that
1174  * executes in KSEG1 space or else you will crash and burn badly.  You have
1175  * been warned.
1176  */
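/*
 * Size the external S-cache the hard way: prime the tags at power-of-two
 * strides, store a zero (invalid) tag at the base index, then read tags
 * back at growing power-of-two offsets until the zero tag reappears --
 * that wrap-around point is the S-cache size.
 */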
1177 static int probe_scache(void)
1178 {
1179         unsigned long flags, addr, begin, end, pow2;
1180         unsigned int config = read_c0_config();
1181         struct cpuinfo_mips *c = &current_cpu_data;
1182
1183         if (config & CONF_SC)
1184                 return 0;
1185
1186         begin = (unsigned long) &_stext;
1187         begin &= ~((4 * 1024 * 1024) - 1);
1188         end = begin + (4 * 1024 * 1024);
1189
1190         /*
1191          * This is such a bitch, you'd think they would make it easy to do
1192          * this.  Away you daemons of stupidity!
1193          */
1194         local_irq_save(flags);
1195
1196         /* Fill each size-multiple cache line with a valid tag. */
1197         pow2 = (64 * 1024);
1198         for (addr = begin; addr < end; addr = (begin + pow2)) {
1199                 unsigned long *p = (unsigned long *) addr;
1200                 __asm__ __volatile__("nop" : : "r" (*p)); /* whee... */
1201                 pow2 <<= 1;
1202         }
1203
1204         /* Load first line with zero (therefore invalid) tag. */
1205         write_c0_taglo(0);
1206         write_c0_taghi(0);
1207         __asm__ __volatile__("nop; nop; nop; nop;"); /* avoid the hazard */
1208         cache_op(Index_Store_Tag_I, begin);
1209         cache_op(Index_Store_Tag_D, begin);
1210         cache_op(Index_Store_Tag_SD, begin);
1211
1212         /* Now search for the wrap around point. */
1213         pow2 = (128 * 1024);
1214         for (addr = begin + (128 * 1024); addr < end; addr = begin + pow2) {
1215                 cache_op(Index_Load_Tag_SD, addr);
1216                 __asm__ __volatile__("nop; nop; nop; nop;"); /* hazard... */
1217                 if (!read_c0_taglo())
1218                         break;
1219                 pow2 <<= 1;
1220         }
1221         local_irq_restore(flags);
1222         addr -= begin;
1223
1224         scache_size = addr;
1225         c->scache.linesz = 16 << ((config & R4K_CONF_SB) >> 22);
1226         c->scache.ways = 1;
1227         c->dcache.waybit = 0;           /* does not matter */
1228
1229         return 1;
1230 }
1231
1232 static void __init loongson2_sc_init(void)
1233 {
1234         struct cpuinfo_mips *c = &current_cpu_data;
1235
1236         scache_size = 512*1024;
1237         c->scache.linesz = 32;
1238         c->scache.ways = 4;
1239         c->scache.waybit = 0;
1240         c->scache.waysize = scache_size / (c->scache.ways);
1241         c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);
1242         pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
1243                scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);
1244
1245         c->options |= MIPS_CPU_INCLUSIVE_CACHES;
1246 }
1247
1248 extern int r5k_sc_init(void);
1249 extern int rm7k_sc_init(void);
1250 extern int mips_sc_init(void);
1251
1252 static void setup_scache(void)
1253 {
1254         struct cpuinfo_mips *c = &current_cpu_data;
1255         unsigned int config = read_c0_config();
1256         int sc_present = 0;
1257
1258         /*
1259          * Do the probing thing on R4000SC and R4400SC processors.  Other
1260          * processors don't have an S-cache that would be relevant to the
1261          * Linux memory management.
1262          */
1263         switch (current_cpu_type()) {
1264         case CPU_R4000SC:
1265         case CPU_R4000MC:
1266         case CPU_R4400SC:
1267         case CPU_R4400MC:
1268                 sc_present = run_uncached(probe_scache);
1269                 if (sc_present)
1270                         c->options |= MIPS_CPU_CACHE_CDEX_S;
1271                 break;
1272
1273         case CPU_R10000:
1274         case CPU_R12000:
1275         case CPU_R14000:
1276                 scache_size = 0x80000 << ((config & R10K_CONF_SS) >> 16);
1277                 c->scache.linesz = 64 << ((config >> 13) & 1);
1278                 c->scache.ways = 2;
1279                 c->scache.waybit= 0;
1280                 sc_present = 1;
1281                 break;
1282
1283         case CPU_R5000:
1284         case CPU_NEVADA:
1285 #ifdef CONFIG_R5000_CPU_SCACHE
1286                 r5k_sc_init();
1287 #endif
1288                 return;
1289
1290         case CPU_RM7000:
1291 #ifdef CONFIG_RM7000_CPU_SCACHE
1292                 rm7k_sc_init();
1293 #endif
1294                 return;
1295
1296         case CPU_LOONGSON2:
1297                 loongson2_sc_init();
1298                 return;
1299
1300         case CPU_XLP:
1301                 /* don't need to worry about L2, fully coherent */
1302                 return;
1303
1304         default:
1305                 if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
1306                                     MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)) {
1307 #ifdef CONFIG_MIPS_CPU_SCACHE
1308                         if (mips_sc_init ()) {
1309                                 scache_size = c->scache.ways * c->scache.sets * c->scache.linesz;
1310                                 printk("MIPS secondary cache %ldkB, %s, linesize %d bytes.\n",
1311                                        scache_size >> 10,
1312                                        way_string[c->scache.ways], c->scache.linesz);
1313                         }
1314 #else
1315                         if (!(c->scache.flags & MIPS_CACHE_NOT_PRESENT))
1316                                 panic("Dunno how to handle MIPS32 / MIPS64 second level cache");
1317 #endif
1318                         return;
1319                 }
1320                 sc_present = 0;
1321         }
1322
1323         if (!sc_present)
1324                 return;
1325
1326         /* compute a couple of other cache variables */
1327         c->scache.waysize = scache_size / c->scache.ways;
1328
1329         c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);
1330
1331         printk("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
1332                scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);
1333
1334         c->options |= MIPS_CPU_INCLUSIVE_CACHES;
1335 }
1336
1337 void au1x00_fixup_config_od(void)
1338 {
1339         /*
1340          * c0_config.od (bit 19) was write only (and read as 0)
1341          * on the early revisions of Alchemy SOCs.  It disables the bus
1342          * transaction overlapping and needs to be set to fix various errata.
1343          */
1344         switch (read_c0_prid()) {
1345         case 0x00030100: /* Au1000 DA */
1346         case 0x00030201: /* Au1000 HA */
1347         case 0x00030202: /* Au1000 HB */
1348         case 0x01030200: /* Au1500 AB */
1349         /*
1350          * The Au1100 errata are actually silent about this bit, so we set it
1351          * just in case for those revisions that require it to be set according
1352          * to the (now gone) cpu table.
1353          */
1354         case 0x02030200: /* Au1100 AB */
1355         case 0x02030201: /* Au1100 BA */
1356         case 0x02030202: /* Au1100 BC */
1357                 set_c0_config(1 << 19);
1358                 break;
1359         }
1360 }
1361
1362 /* CP0 hazard avoidance. */
1363 #define NXP_BARRIER()                                                   \
1364          __asm__ __volatile__(                                          \
1365         ".set noreorder\n\t"                                            \
1366         "nop; nop; nop; nop; nop; nop;\n\t"                             \
1367         ".set reorder\n\t")
1368
1369 static void nxp_pr4450_fixup_config(void)
1370 {
1371         unsigned long config0;
1372
1373         config0 = read_c0_config();
1374
1375         /* clear all three cache coherency fields */
1376         config0 &= ~(0x7 | (7 << 25) | (7 << 28));
1377         config0 |= (((_page_cachable_default >> _CACHE_SHIFT) <<  0) |
1378                     ((_page_cachable_default >> _CACHE_SHIFT) << 25) |
1379                     ((_page_cachable_default >> _CACHE_SHIFT) << 28));
1380         write_c0_config(config0);
1381         NXP_BARRIER();
1382 }
1383
1384 static int cca = -1;
1385
1386 static int __init cca_setup(char *str)
1387 {
1388         get_option(&str, &cca);
1389
1390         return 0;
1391 }
1392
1393 early_param("cca", cca_setup);
1394
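/*
 * Pick the cache coherency algorithm (CCA) used for KSEG0 and as the
 * default page cacheability (_page_cachable_default): the "cca=" early
 * parameter above overrides it, otherwise the value currently
 * programmed in Config.K0 is kept.  For example, booting with "cca=2"
 * would (on most cores) force uncached accesses, mainly useful for
 * debugging.
 */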
1395 static void coherency_setup(void)
1396 {
1397         if (cca < 0 || cca > 7)
1398                 cca = read_c0_config() & CONF_CM_CMASK;
1399         _page_cachable_default = cca << _CACHE_SHIFT;
1400
1401         pr_debug("Using cache attribute %d\n", cca);
1402         change_c0_config(CONF_CM_CMASK, cca);
1403
1404         /*
1405          * c0_status.cu=0 specifies that updates by the sc instruction use
1406          * the coherency mode specified by the TLB; 1 means cachable
1407          * coherent update on write will be used.  Not all processors have
1408          * this bit; some wire it to zero, others like Toshiba had the
1409          * silly idea of putting something else there ...
1410          */
1411         switch (current_cpu_type()) {
1412         case CPU_R4000PC:
1413         case CPU_R4000SC:
1414         case CPU_R4000MC:
1415         case CPU_R4400PC:
1416         case CPU_R4400SC:
1417         case CPU_R4400MC:
1418                 clear_c0_config(CONF_CU);
1419                 break;
1420         /*
1421          * We need to catch the early Alchemy SOCs with
1422          * the write-only c0_config.od bit and set it back to one on:
1423          * Au1000 rev DA, HA, HB;  Au1100 AB, BA, BC, Au1500 AB
1424          */
1425         case CPU_ALCHEMY:
1426                 au1x00_fixup_config_od();
1427                 break;
1428
1429         case PRID_IMP_PR4450:
1430                 nxp_pr4450_fixup_config();
1431                 break;
1432         }
1433 }
1434
1435 static void r4k_cache_error_setup(void)
1436 {
1437         extern char __weak except_vec2_generic;
1438         extern char __weak except_vec2_sb1;
1439
1440         switch (current_cpu_type()) {
1441         case CPU_SB1:
1442         case CPU_SB1A:
1443                 set_uncached_handler(0x100, &except_vec2_sb1, 0x80);
1444                 break;
1445
1446         default:
1447                 set_uncached_handler(0x100, &except_vec2_generic, 0x80);
1448                 break;
1449         }
1450 }
1451
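/*
 * Top-level cache initialisation: probe the primary and secondary
 * caches, install the line-size specific blast helpers, set
 * shm_align_mask to cover dcache aliasing, hook up the generic MIPS
 * cache-flush function pointers, and (for non-coherent DMA) the
 * dma_cache_* operations.
 */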
1452 void r4k_cache_init(void)
1453 {
1454         extern void build_clear_page(void);
1455         extern void build_copy_page(void);
1456         struct cpuinfo_mips *c = &current_cpu_data;
1457
1458         probe_pcache();
1459         setup_scache();
1460
1461         r4k_blast_dcache_page_setup();
1462         r4k_blast_dcache_page_indexed_setup();
1463         r4k_blast_dcache_setup();
1464         r4k_blast_icache_page_setup();
1465         r4k_blast_icache_page_indexed_setup();
1466         r4k_blast_icache_setup();
1467         r4k_blast_scache_page_setup();
1468         r4k_blast_scache_page_indexed_setup();
1469         r4k_blast_scache_setup();
1470
1471         /*
1472          * Some MIPS32 and MIPS64 processors have physically indexed caches.
1473          * This code supports virtually indexed processors and will be
1474          * unnecessarily inefficient on physically indexed processors.
1475          */
1476         if (c->dcache.linesz)
1477                 shm_align_mask = max_t( unsigned long,
1478                                         c->dcache.sets * c->dcache.linesz - 1,
1479                                         PAGE_SIZE - 1);
1480         else
1481                 shm_align_mask = PAGE_SIZE-1;
1482
1483         __flush_cache_vmap      = r4k__flush_cache_vmap;
1484         __flush_cache_vunmap    = r4k__flush_cache_vunmap;
1485
1486         flush_cache_all         = cache_noop;
1487         __flush_cache_all       = r4k___flush_cache_all;
1488         flush_cache_mm          = r4k_flush_cache_mm;
1489         flush_cache_page        = r4k_flush_cache_page;
1490         flush_cache_range       = r4k_flush_cache_range;
1491
1492         __flush_kernel_vmap_range = r4k_flush_kernel_vmap_range;
1493
1494         flush_cache_sigtramp    = r4k_flush_cache_sigtramp;
1495         flush_icache_all        = r4k_flush_icache_all;
1496         local_flush_data_cache_page     = local_r4k_flush_data_cache_page;
1497         flush_data_cache_page   = r4k_flush_data_cache_page;
1498         flush_icache_range      = r4k_flush_icache_range;
1499         local_flush_icache_range        = local_r4k_flush_icache_range;
1500
1501 #if defined(CONFIG_DMA_NONCOHERENT)
1502         if (coherentio) {
1503                 _dma_cache_wback_inv    = (void *)cache_noop;
1504                 _dma_cache_wback        = (void *)cache_noop;
1505                 _dma_cache_inv          = (void *)cache_noop;
1506         } else {
1507                 _dma_cache_wback_inv    = r4k_dma_cache_wback_inv;
1508                 _dma_cache_wback        = r4k_dma_cache_wback_inv;
1509                 _dma_cache_inv          = r4k_dma_cache_inv;
1510         }
1511 #endif
1512
1513         build_clear_page();
1514         build_copy_page();
1515
1516         /*
1517          * We want to run CMP kernels on cores with and without coherent
1518          * caches. Therefore, do not use CONFIG_MIPS_CMP to decide whether
1519          * or not to flush caches.
1520          */
1521         local_r4k___flush_cache_all(NULL);
1522
1523         coherency_setup();
1524         board_cache_error_setup = r4k_cache_error_setup;
1525 }