/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003, 04, 05 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2007  Maciej W. Rozycki
 * Copyright (C) 2008  Thiemo Seufer
 * Copyright (C) 2012  MIPS Technologies, Inc.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>

#include <asm/bugs.h>
#include <asm/cacheops.h>
#include <asm/cpu-type.h>
#include <asm/inst.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/prefetch.h>
#include <asm/bootinfo.h>
#include <asm/mipsregs.h>
#include <asm/mmu_context.h>
#include <asm/cpu.h>
#include <asm/war.h>

#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
#include <asm/sibyte/sb1250.h>
#include <asm/sibyte/sb1250_regs.h>
#include <asm/sibyte/sb1250_dma.h>
#endif

#include <asm/uasm.h>

/* Registers used in the assembled routines. */
#define ZERO 0
#define AT 2
#define A0 4
#define A1 5
#define A2 6
#define T0 8
#define T1 9
#define T2 10
#define T3 11
#define T9 25
#define RA 31

/* Handle labels (which must be positive integers). */
enum label_id {
	label_clear_nopref = 1,
	label_clear_pref,
	label_copy_nopref,
	label_copy_pref_both,
	label_copy_pref_store,
};

UASM_L_LA(_clear_nopref)
UASM_L_LA(_clear_pref)
UASM_L_LA(_copy_nopref)
UASM_L_LA(_copy_pref_both)
UASM_L_LA(_copy_pref_store)

/* We need one branch and therefore one relocation per target label. */
static struct uasm_label labels[5];
static struct uasm_reloc relocs[5];

#define cpu_is_r4600_v1_x()	((read_c0_prid() & 0xfffffff0) == 0x00002010)
#define cpu_is_r4600_v2_x()	((read_c0_prid() & 0xfffffff0) == 0x00002020)

/*
 * MIPS R6 restricts the pref instruction to a signed 9-bit offset.
 * Skip the prefetch if the offset cannot be encoded.
 */
#define _uasm_i_pref(a, b, c, d)		\
do {						\
	if (cpu_has_mips_r6) {			\
		if (c <= 0xff && c >= -0x100)	\
			uasm_i_pref(a, b, c, d);\
	} else {				\
		uasm_i_pref(a, b, c, d);	\
	}					\
} while (0)

static int pref_bias_clear_store;
static int pref_bias_copy_load;
static int pref_bias_copy_store;

static u32 pref_src_mode;
static u32 pref_dst_mode;

static int clear_word_size;
static int copy_word_size;

static int half_clear_loop_size;
static int half_copy_loop_size;

static int cache_line_size;
#define cache_line_mask() (cache_line_size - 1)

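/*
 * Emit code that adds the (possibly wider than 16-bit) offset 'off' to
 * 'reg2' and leaves the result in 'reg1'.  On 64-bit CPUs affected by
 * the R4000/R4400 daddiu erratum the immediate is materialized in T9
 * and added with daddu instead.
 */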
static inline void
pg_addiu(u32 **buf, unsigned int reg1, unsigned int reg2, unsigned int off)
{
	if (cpu_has_64bit_gp_regs && DADDI_WAR && r4k_daddiu_bug()) {
		if (off > 0x7fff) {
			uasm_i_lui(buf, T9, uasm_rel_hi(off));
			uasm_i_addiu(buf, T9, T9, uasm_rel_lo(off));
		} else
			uasm_i_addiu(buf, T9, ZERO, off);
		uasm_i_daddu(buf, reg1, reg2, T9);
	} else {
		if (off > 0x7fff) {
			uasm_i_lui(buf, T9, uasm_rel_hi(off));
			uasm_i_addiu(buf, T9, T9, uasm_rel_lo(off));
			UASM_i_ADDU(buf, reg1, reg2, T9);
		} else
			UASM_i_ADDIU(buf, reg1, reg2, off);
	}
}

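/*
 * Pick the word size for the clear/copy loops and, per CPU type, the
 * prefetch hints and how far ahead of the current access each
 * prefetch should reach (the "bias" values below).
 */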
static void set_prefetch_parameters(void)
{
	if (cpu_has_64bit_gp_regs || cpu_has_64bit_zero_reg)
		clear_word_size = 8;
	else
		clear_word_size = 4;

	if (cpu_has_64bit_gp_regs)
		copy_word_size = 8;
	else
		copy_word_size = 4;

	/*
	 * The prefetches used here employ "streaming" hints, which cause
	 * the copied data to be kicked out of the cache sooner.  A page
	 * copy often ends up copying a lot more data than is commonly
	 * used, so this seems to make sense in terms of reducing cache
	 * pollution, but I've no real performance data to back this up.
	 */
	if (cpu_has_prefetch) {
		/*
		 * XXX: Most prefetch bias values in here are based on
		 * guesswork.
		 */
		cache_line_size = cpu_dcache_line_size();
		switch (current_cpu_type()) {
		case CPU_R5500:
		case CPU_TX49XX:
			/* These processors only support the Pref_Load hint. */
			pref_bias_copy_load = 256;
			break;

		case CPU_R10000:
		case CPU_R12000:
		case CPU_R14000:
		case CPU_R16000:
			/*
			 * Those values have been experimentally tuned for an
			 * Origin 200.
			 */
			pref_bias_clear_store = 512;
			pref_bias_copy_load = 256;
			pref_bias_copy_store = 256;
			pref_src_mode = Pref_LoadStreamed;
			pref_dst_mode = Pref_StoreStreamed;
			break;

		case CPU_SB1:
		case CPU_SB1A:
			pref_bias_clear_store = 128;
			pref_bias_copy_load = 128;
			pref_bias_copy_store = 128;
			/*
			 * SB1 pass1 Pref_LoadStreamed/Pref_StoreStreamed
			 * hints are broken.
			 */
			if (current_cpu_type() == CPU_SB1 &&
			    (current_cpu_data.processor_id & 0xff) < 0x02) {
				pref_src_mode = Pref_Load;
				pref_dst_mode = Pref_Store;
			} else {
				pref_src_mode = Pref_LoadStreamed;
				pref_dst_mode = Pref_StoreStreamed;
			}
			break;

		case CPU_LOONGSON3:
			/* Loongson-3 only supports the Pref_Load/Pref_Store hints. */
			pref_bias_clear_store = 128;
			pref_bias_copy_load = 128;
			pref_bias_copy_store = 128;
			pref_src_mode = Pref_Load;
			pref_dst_mode = Pref_Store;
			break;

		default:
			pref_bias_clear_store = 128;
			pref_bias_copy_load = 256;
			pref_bias_copy_store = 128;
			pref_src_mode = Pref_LoadStreamed;
			if (cpu_has_mips_r6)
				/*
				 * Bit 30 (Pref_PrepareForStore) has been
				 * removed from MIPS R6. Use bit 5
				 * (Pref_StoreStreamed).
				 */
				pref_dst_mode = Pref_StoreStreamed;
			else
				pref_dst_mode = Pref_PrepareForStore;
			break;
		}
	} else {
		if (cpu_has_cache_cdex_s)
			cache_line_size = cpu_scache_line_size();
		else if (cpu_has_cache_cdex_p)
			cache_line_size = cpu_dcache_line_size();
	}
	/*
	 * Too much unrolling will overflow the available space in
	 * clear_space_array / copy_page_array.
	 */
	half_clear_loop_size = min(16 * clear_word_size,
				   max(cache_line_size >> 1,
				       4 * clear_word_size));
	half_copy_loop_size = min(16 * copy_word_size,
				  max(cache_line_size >> 1,
				      4 * copy_word_size));
}

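/* Emit a single zero store, 64 bits wide where the CPU allows it. */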
static void build_clear_store(u32 **buf, int off)
{
	if (cpu_has_64bit_gp_regs || cpu_has_64bit_zero_reg) {
		uasm_i_sd(buf, ZERO, off, A0);
	} else {
		uasm_i_sw(buf, ZERO, off, A0);
	}
}

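/*
 * Emit at most one prefetch per cache line.  Without a usable
 * prefetch bias this falls back to a cache Create_Dirty_Exclusive
 * op, applying the R4600 cacheop workarounds where required.
 */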
static inline void build_clear_pref(u32 **buf, int off)
{
	if (off & cache_line_mask())
		return;

	if (pref_bias_clear_store) {
		_uasm_i_pref(buf, pref_dst_mode, pref_bias_clear_store + off,
			    A0);
	} else if (cache_line_size == (half_clear_loop_size << 1)) {
		if (cpu_has_cache_cdex_s) {
			uasm_i_cache(buf, Create_Dirty_Excl_SD, off, A0);
		} else if (cpu_has_cache_cdex_p) {
			if (R4600_V1_HIT_CACHEOP_WAR && cpu_is_r4600_v1_x()) {
				uasm_i_nop(buf);
				uasm_i_nop(buf);
				uasm_i_nop(buf);
				uasm_i_nop(buf);
			}

			if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
				uasm_i_lw(buf, ZERO, ZERO, AT);

			uasm_i_cache(buf, Create_Dirty_Excl_D, off, A0);
		}
	}
}

extern u32 __clear_page_start;
extern u32 __clear_page_end;
extern u32 __copy_page_start;
extern u32 __copy_page_end;

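/*
 * Synthesize the clear_page handler: an unrolled store loop that
 * prefetches pref_bias_clear_store bytes ahead, followed (when
 * prefetching) by a trailing loop that clears the final, already
 * prefetched part of the page without further prefetches.
 */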
void build_clear_page(void)
{
	int off;
	u32 *buf = &__clear_page_start;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
	int i;
	static atomic_t run_once = ATOMIC_INIT(0);

	if (atomic_xchg(&run_once, 1)) {
		return;
	}

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	set_prefetch_parameters();

	/*
	 * This algorithm makes the following assumptions:
	 *   - The prefetch bias is a multiple of 2 words.
	 *   - The prefetch bias is less than one page.
	 */
	BUG_ON(pref_bias_clear_store % (2 * clear_word_size));
	BUG_ON(PAGE_SIZE < pref_bias_clear_store);

	off = PAGE_SIZE - pref_bias_clear_store;
	if (off > 0xffff || !pref_bias_clear_store)
		pg_addiu(&buf, A2, A0, off);
	else
		uasm_i_ori(&buf, A2, A0, off);

	if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
		uasm_i_lui(&buf, AT, uasm_rel_hi(0xa0000000));

	off = cache_line_size ? min(8, pref_bias_clear_store / cache_line_size)
				* cache_line_size : 0;
	while (off) {
		build_clear_pref(&buf, -off);
		off -= cache_line_size;
	}
	uasm_l_clear_pref(&l, buf);
	do {
		build_clear_pref(&buf, off);
		build_clear_store(&buf, off);
		off += clear_word_size;
	} while (off < half_clear_loop_size);
	pg_addiu(&buf, A0, A0, 2 * off);
	off = -off;
	do {
		build_clear_pref(&buf, off);
		if (off == -clear_word_size)
			uasm_il_bne(&buf, &r, A0, A2, label_clear_pref);
		build_clear_store(&buf, off);
		off += clear_word_size;
	} while (off < 0);

	if (pref_bias_clear_store) {
		pg_addiu(&buf, A2, A0, pref_bias_clear_store);
		uasm_l_clear_nopref(&l, buf);
		off = 0;
		do {
			build_clear_store(&buf, off);
			off += clear_word_size;
		} while (off < half_clear_loop_size);
		pg_addiu(&buf, A0, A0, 2 * off);
		off = -off;
		do {
			if (off == -clear_word_size)
				uasm_il_bne(&buf, &r, A0, A2,
					    label_clear_nopref);
			build_clear_store(&buf, off);
			off += clear_word_size;
		} while (off < 0);
	}

	uasm_i_jr(&buf, RA);
	uasm_i_nop(&buf);

	BUG_ON(buf > &__clear_page_end);

	uasm_resolve_relocs(relocs, labels);

	pr_debug("Synthesized clear page handler (%u instructions).\n",
		 (u32)(buf - &__clear_page_start));

	pr_debug("\t.set push\n");
	pr_debug("\t.set noreorder\n");
	for (i = 0; i < (buf - &__clear_page_start); i++)
		pr_debug("\t.word 0x%08x\n", (&__clear_page_start)[i]);
	pr_debug("\t.set pop\n");
}

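/*
 * The two helpers below emit a single load resp. store of one copy
 * word, using ld/sd on CPUs with 64-bit GP registers.
 */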
static void build_copy_load(u32 **buf, int reg, int off)
{
	if (cpu_has_64bit_gp_regs) {
		uasm_i_ld(buf, reg, off, A1);
	} else {
		uasm_i_lw(buf, reg, off, A1);
	}
}

static void build_copy_store(u32 **buf, int reg, int off)
{
	if (cpu_has_64bit_gp_regs) {
		uasm_i_sd(buf, reg, off, A0);
	} else {
		uasm_i_sw(buf, reg, off, A0);
	}
}

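/*
 * As for clearing: at most one prefetch per cache line, on the
 * source (A1) for loads and the destination (A0) for stores; only
 * the store side has the cache-op fallback.
 */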
static inline void build_copy_load_pref(u32 **buf, int off)
{
	if (off & cache_line_mask())
		return;

	if (pref_bias_copy_load)
		_uasm_i_pref(buf, pref_src_mode, pref_bias_copy_load + off, A1);
}

static inline void build_copy_store_pref(u32 **buf, int off)
{
	if (off & cache_line_mask())
		return;

	if (pref_bias_copy_store) {
		_uasm_i_pref(buf, pref_dst_mode, pref_bias_copy_store + off,
			    A0);
	} else if (cache_line_size == (half_copy_loop_size << 1)) {
		if (cpu_has_cache_cdex_s) {
			uasm_i_cache(buf, Create_Dirty_Excl_SD, off, A0);
		} else if (cpu_has_cache_cdex_p) {
			if (R4600_V1_HIT_CACHEOP_WAR && cpu_is_r4600_v1_x()) {
				uasm_i_nop(buf);
				uasm_i_nop(buf);
				uasm_i_nop(buf);
				uasm_i_nop(buf);
			}

			if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
				uasm_i_lw(buf, ZERO, ZERO, AT);

			uasm_i_cache(buf, Create_Dirty_Excl_D, off, A0);
		}
	}
}

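/*
 * Synthesize the copy_page handler.  The main loop moves four copy
 * words per iteration through T0-T3, prefetching both source and
 * destination.  Once the load prefetches would run past the end of
 * the page only store prefetches are issued, and the final
 * pref_bias_copy_store bytes are copied with no prefetching at all.
 */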
void build_copy_page(void)
{
	int off;
	u32 *buf = &__copy_page_start;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
	int i;
	static atomic_t run_once = ATOMIC_INIT(0);

	if (atomic_xchg(&run_once, 1)) {
		return;
	}

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	set_prefetch_parameters();

	/*
	 * This algorithm makes the following assumptions:
	 *   - All prefetch biases are multiples of 8 words.
	 *   - The prefetch biases are less than one page.
	 *   - The store prefetch bias isn't greater than the load
	 *     prefetch bias.
	 */
	BUG_ON(pref_bias_copy_load % (8 * copy_word_size));
	BUG_ON(pref_bias_copy_store % (8 * copy_word_size));
	BUG_ON(PAGE_SIZE < pref_bias_copy_load);
	BUG_ON(pref_bias_copy_store > pref_bias_copy_load);

	off = PAGE_SIZE - pref_bias_copy_load;
	if (off > 0xffff || !pref_bias_copy_load)
		pg_addiu(&buf, A2, A0, off);
	else
		uasm_i_ori(&buf, A2, A0, off);

	if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
		uasm_i_lui(&buf, AT, uasm_rel_hi(0xa0000000));

	off = cache_line_size ? min(8, pref_bias_copy_load / cache_line_size) *
				cache_line_size : 0;
	while (off) {
		build_copy_load_pref(&buf, -off);
		off -= cache_line_size;
	}
	off = cache_line_size ? min(8, pref_bias_copy_store / cache_line_size) *
				cache_line_size : 0;
	while (off) {
		build_copy_store_pref(&buf, -off);
		off -= cache_line_size;
	}
	uasm_l_copy_pref_both(&l, buf);
	do {
		build_copy_load_pref(&buf, off);
		build_copy_load(&buf, T0, off);
		build_copy_load_pref(&buf, off + copy_word_size);
		build_copy_load(&buf, T1, off + copy_word_size);
		build_copy_load_pref(&buf, off + 2 * copy_word_size);
		build_copy_load(&buf, T2, off + 2 * copy_word_size);
		build_copy_load_pref(&buf, off + 3 * copy_word_size);
		build_copy_load(&buf, T3, off + 3 * copy_word_size);
		build_copy_store_pref(&buf, off);
		build_copy_store(&buf, T0, off);
		build_copy_store_pref(&buf, off + copy_word_size);
		build_copy_store(&buf, T1, off + copy_word_size);
		build_copy_store_pref(&buf, off + 2 * copy_word_size);
		build_copy_store(&buf, T2, off + 2 * copy_word_size);
		build_copy_store_pref(&buf, off + 3 * copy_word_size);
		build_copy_store(&buf, T3, off + 3 * copy_word_size);
		off += 4 * copy_word_size;
	} while (off < half_copy_loop_size);
	pg_addiu(&buf, A1, A1, 2 * off);
	pg_addiu(&buf, A0, A0, 2 * off);
	off = -off;
	do {
		build_copy_load_pref(&buf, off);
		build_copy_load(&buf, T0, off);
		build_copy_load_pref(&buf, off + copy_word_size);
		build_copy_load(&buf, T1, off + copy_word_size);
		build_copy_load_pref(&buf, off + 2 * copy_word_size);
		build_copy_load(&buf, T2, off + 2 * copy_word_size);
		build_copy_load_pref(&buf, off + 3 * copy_word_size);
		build_copy_load(&buf, T3, off + 3 * copy_word_size);
		build_copy_store_pref(&buf, off);
		build_copy_store(&buf, T0, off);
		build_copy_store_pref(&buf, off + copy_word_size);
		build_copy_store(&buf, T1, off + copy_word_size);
		build_copy_store_pref(&buf, off + 2 * copy_word_size);
		build_copy_store(&buf, T2, off + 2 * copy_word_size);
		build_copy_store_pref(&buf, off + 3 * copy_word_size);
		if (off == -(4 * copy_word_size))
			uasm_il_bne(&buf, &r, A2, A0, label_copy_pref_both);
		build_copy_store(&buf, T3, off + 3 * copy_word_size);
		off += 4 * copy_word_size;
	} while (off < 0);

	if (pref_bias_copy_load - pref_bias_copy_store) {
		pg_addiu(&buf, A2, A0,
			 pref_bias_copy_load - pref_bias_copy_store);
		uasm_l_copy_pref_store(&l, buf);
		off = 0;
		do {
			build_copy_load(&buf, T0, off);
			build_copy_load(&buf, T1, off + copy_word_size);
			build_copy_load(&buf, T2, off + 2 * copy_word_size);
			build_copy_load(&buf, T3, off + 3 * copy_word_size);
			build_copy_store_pref(&buf, off);
			build_copy_store(&buf, T0, off);
			build_copy_store_pref(&buf, off + copy_word_size);
			build_copy_store(&buf, T1, off + copy_word_size);
			build_copy_store_pref(&buf, off + 2 * copy_word_size);
			build_copy_store(&buf, T2, off + 2 * copy_word_size);
			build_copy_store_pref(&buf, off + 3 * copy_word_size);
			build_copy_store(&buf, T3, off + 3 * copy_word_size);
			off += 4 * copy_word_size;
		} while (off < half_copy_loop_size);
		pg_addiu(&buf, A1, A1, 2 * off);
		pg_addiu(&buf, A0, A0, 2 * off);
		off = -off;
		do {
			build_copy_load(&buf, T0, off);
			build_copy_load(&buf, T1, off + copy_word_size);
			build_copy_load(&buf, T2, off + 2 * copy_word_size);
			build_copy_load(&buf, T3, off + 3 * copy_word_size);
			build_copy_store_pref(&buf, off);
			build_copy_store(&buf, T0, off);
			build_copy_store_pref(&buf, off + copy_word_size);
			build_copy_store(&buf, T1, off + copy_word_size);
			build_copy_store_pref(&buf, off + 2 * copy_word_size);
			build_copy_store(&buf, T2, off + 2 * copy_word_size);
			build_copy_store_pref(&buf, off + 3 * copy_word_size);
			if (off == -(4 * copy_word_size))
				uasm_il_bne(&buf, &r, A2, A0,
					    label_copy_pref_store);
			build_copy_store(&buf, T3, off + 3 * copy_word_size);
			off += 4 * copy_word_size;
		} while (off < 0);
	}

	if (pref_bias_copy_store) {
		pg_addiu(&buf, A2, A0, pref_bias_copy_store);
		uasm_l_copy_nopref(&l, buf);
		off = 0;
		do {
			build_copy_load(&buf, T0, off);
			build_copy_load(&buf, T1, off + copy_word_size);
			build_copy_load(&buf, T2, off + 2 * copy_word_size);
			build_copy_load(&buf, T3, off + 3 * copy_word_size);
			build_copy_store(&buf, T0, off);
			build_copy_store(&buf, T1, off + copy_word_size);
			build_copy_store(&buf, T2, off + 2 * copy_word_size);
			build_copy_store(&buf, T3, off + 3 * copy_word_size);
			off += 4 * copy_word_size;
		} while (off < half_copy_loop_size);
		pg_addiu(&buf, A1, A1, 2 * off);
		pg_addiu(&buf, A0, A0, 2 * off);
		off = -off;
		do {
			build_copy_load(&buf, T0, off);
			build_copy_load(&buf, T1, off + copy_word_size);
			build_copy_load(&buf, T2, off + 2 * copy_word_size);
			build_copy_load(&buf, T3, off + 3 * copy_word_size);
			build_copy_store(&buf, T0, off);
			build_copy_store(&buf, T1, off + copy_word_size);
			build_copy_store(&buf, T2, off + 2 * copy_word_size);
			if (off == -(4 * copy_word_size))
				uasm_il_bne(&buf, &r, A2, A0,
					    label_copy_nopref);
			build_copy_store(&buf, T3, off + 3 * copy_word_size);
			off += 4 * copy_word_size;
		} while (off < 0);
	}

	uasm_i_jr(&buf, RA);
	uasm_i_nop(&buf);

	BUG_ON(buf > &__copy_page_end);

	uasm_resolve_relocs(relocs, labels);

	pr_debug("Synthesized copy page handler (%u instructions).\n",
		 (u32)(buf - &__copy_page_start));

	pr_debug("\t.set push\n");
	pr_debug("\t.set noreorder\n");
	for (i = 0; i < (buf - &__copy_page_start); i++)
		pr_debug("\t.word 0x%08x\n", (&__copy_page_start)[i]);
	pr_debug("\t.set pop\n");
}

#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
extern void clear_page_cpu(void *page);
extern void copy_page_cpu(void *to, void *from);

/*
 * Pad descriptors to cacheline, since each is exclusively owned by a
 * particular CPU.
 */
struct dmadscr {
	u64 dscr_a;
	u64 dscr_b;
	u64 pad_a;
	u64 pad_b;
} ____cacheline_aligned_in_smp page_descr[DM_NUM_CHANNELS];

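/*
 * Point each data mover channel at its single-entry descriptor ring,
 * then reset and enable the channel.
 */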
void sb1_dma_init(void)
{
	int i;

	for (i = 0; i < DM_NUM_CHANNELS; i++) {
		const u64 base_val = CPHYSADDR((unsigned long)&page_descr[i]) |
				     V_DM_DSCR_BASE_RINGSZ(1);
		void *base_reg = IOADDR(A_DM_REGISTER(i, R_DM_DSCR_BASE));

		__raw_writeq(base_val, base_reg);
		__raw_writeq(base_val | M_DM_DSCR_BASE_RESET, base_reg);
		__raw_writeq(base_val | M_DM_DSCR_BASE_ENABL, base_reg);
	}
}

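/*
 * Zero a page using the data mover: build a zero-memory descriptor,
 * kick the channel and spin until the engine raises its completion
 * interrupt bit.
 */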
void clear_page(void *page)
{
	u64 to_phys = CPHYSADDR((unsigned long)page);
	unsigned int cpu = smp_processor_id();

	/* if the page is not in KSEG0, fall back to the CPU routine */
	if ((long)KSEGX((unsigned long)page) != (long)CKSEG0)
		return clear_page_cpu(page);

	page_descr[cpu].dscr_a = to_phys | M_DM_DSCRA_ZERO_MEM |
				 M_DM_DSCRA_L2C_DEST | M_DM_DSCRA_INTERRUPT;
	page_descr[cpu].dscr_b = V_DM_DSCRB_SRC_LENGTH(PAGE_SIZE);
	__raw_writeq(1, IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_COUNT)));

	/*
	 * Don't really want to do it this way, but there's no
	 * reliable way to delay completion detection.
	 */
	while (!(__raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE_DEBUG)))
		 & M_DM_DSCR_BASE_INTERRUPT))
		;
	__raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE)));
}

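/*
 * Copy a page with the data mover, with the same KSEG0-only
 * restriction as clear_page() above.
 */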
void copy_page(void *to, void *from)
{
	u64 from_phys = CPHYSADDR((unsigned long)from);
	u64 to_phys = CPHYSADDR((unsigned long)to);
	unsigned int cpu = smp_processor_id();

	/* if either page is not in KSEG0, fall back to the CPU routine */
	if ((long)KSEGX((unsigned long)to) != (long)CKSEG0
	    || (long)KSEGX((unsigned long)from) != (long)CKSEG0)
		return copy_page_cpu(to, from);

	page_descr[cpu].dscr_a = to_phys | M_DM_DSCRA_L2C_DEST |
				 M_DM_DSCRA_INTERRUPT;
	page_descr[cpu].dscr_b = from_phys | V_DM_DSCRB_SRC_LENGTH(PAGE_SIZE);
	__raw_writeq(1, IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_COUNT)));

	/*
	 * Don't really want to do it this way, but there's no
	 * reliable way to delay completion detection.
	 */
	while (!(__raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE_DEBUG)))
		 & M_DM_DSCR_BASE_INTERRUPT))
		;
	__raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE)));
}

#endif /* CONFIG_SIBYTE_DMA_PAGEOPS */