arch/mips/kernel/pm-cps.c
/*
 * Copyright (C) 2014 Imagination Technologies
 * Author: Paul Burton <paul.burton@imgtec.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 */

#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/slab.h>

#include <asm/asm-offsets.h>
#include <asm/cacheflush.h>
#include <asm/cacheops.h>
#include <asm/idle.h>
#include <asm/mips-cm.h>
#include <asm/mips-cpc.h>
#include <asm/mipsmtregs.h>
#include <asm/pm.h>
#include <asm/pm-cps.h>
#include <asm/smp-cps.h>
#include <asm/uasm.h>

/*
 * cps_nc_entry_fn - type of a generated non-coherent state entry function
 * @online: the count of online coupled VPEs
 * @nc_ready_count: pointer to a non-coherent mapping of the core ready_count
 *
 * The code entering & exiting non-coherent states is generated at runtime
 * using uasm, in order to ensure that the compiler cannot insert a stray
 * memory access at an unfortunate time and to allow the generation of optimal
 * core-specific code particularly for cache routines. If coupled_coherence
 * is non-zero and this is the entry function for the CPS_PM_NC_WAIT state,
 * returns the number of VPEs that were in the wait state at the point this
 * VPE left it. Returns garbage if coupled_coherence is zero or this is not
 * the entry function for CPS_PM_NC_WAIT.
 */
typedef unsigned (*cps_nc_entry_fn)(unsigned online, u32 *nc_ready_count);

/*
 * The entry point of the generated non-coherent idle state entry/exit
 * functions. Actually per-core rather than per-CPU.
 */
static DEFINE_PER_CPU_READ_MOSTLY(cps_nc_entry_fn[CPS_PM_STATE_COUNT],
                                  nc_asm_enter);

/* Bitmap indicating which states are supported by the system */
DECLARE_BITMAP(state_support, CPS_PM_STATE_COUNT);

/*
 * Indicates the number of coupled VPEs ready to operate in a non-coherent
 * state. Actually per-core rather than per-CPU.
 */
static DEFINE_PER_CPU_ALIGNED(u32*, ready_count);
static DEFINE_PER_CPU_ALIGNED(void*, ready_count_alloc);

/* Indicates online CPUs coupled with the current CPU */
static DEFINE_PER_CPU_ALIGNED(cpumask_t, online_coupled);

/*
 * Used to synchronize entry to deep idle states. Actually per-core rather
 * than per-CPU.
 */
static DEFINE_PER_CPU_ALIGNED(atomic_t, pm_barrier);

/* Saved CPU state across the CPS_PM_POWER_GATED state */
DEFINE_PER_CPU_ALIGNED(struct mips_static_suspend_state, cps_cpu_state);

/* A somewhat arbitrary number of labels & relocs for uasm */
static struct uasm_label labels[32] __initdata;
static struct uasm_reloc relocs[32] __initdata;

/* CPU dependent sync types */
static unsigned stype_intervention;
static unsigned stype_memory;
static unsigned stype_ordering;

enum mips_reg {
        zero, at, v0, v1, a0, a1, a2, a3,
        t0, t1, t2, t3, t4, t5, t6, t7,
        s0, s1, s2, s3, s4, s5, s6, s7,
        t8, t9, k0, k1, gp, sp, fp, ra,
};

bool cps_pm_support_state(enum cps_pm_state state)
{
        return test_bit(state, state_support);
}

static void coupled_barrier(atomic_t *a, unsigned online)
{
        /*
         * This function is effectively the same as
         * cpuidle_coupled_parallel_barrier, which can't be used here since
         * there's no cpuidle device.
         */

        if (!coupled_coherence)
                return;

        smp_mb__before_atomic();
        atomic_inc(a);

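        /* Wait until every online coupled VPE has arrived at the barrier */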
        while (atomic_read(a) < online)
                cpu_relax();

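        /*
         * Second phase: the last VPE to increment again resets the counter,
         * releasing the others from the loop below.
         */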
        if (atomic_inc_return(a) == online * 2) {
                atomic_set(a, 0);
                return;
        }

        while (atomic_read(a) > online)
                cpu_relax();
}

int cps_pm_enter_state(enum cps_pm_state state)
{
        unsigned cpu = smp_processor_id();
        unsigned core = current_cpu_data.core;
        unsigned online, left;
        cpumask_t *coupled_mask = this_cpu_ptr(&online_coupled);
        u32 *core_ready_count, *nc_core_ready_count;
        void *nc_addr;
        cps_nc_entry_fn entry;
        struct core_boot_config *core_cfg;
        struct vpe_boot_config *vpe_cfg;

        /* Check that there is an entry function for this state */
        entry = per_cpu(nc_asm_enter, core)[state];
        if (!entry)
                return -EINVAL;

        /* Calculate which coupled CPUs (VPEs) are online */
#ifdef CONFIG_MIPS_MT
        if (cpu_online(cpu)) {
                cpumask_and(coupled_mask, cpu_online_mask,
                            &cpu_sibling_map[cpu]);
                online = cpumask_weight(coupled_mask);
                cpumask_clear_cpu(cpu, coupled_mask);
        } else
#endif
        {
                cpumask_clear(coupled_mask);
                online = 1;
        }

        /* Setup the VPE to run mips_cps_pm_restore when started again */
        if (IS_ENABLED(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) {
                /* Power gating relies upon CPS SMP */
                if (!mips_cps_smp_in_use())
                        return -EINVAL;

                core_cfg = &mips_cps_core_bootcfg[core];
                vpe_cfg = &core_cfg->vpe_config[cpu_vpe_id(&current_cpu_data)];
                vpe_cfg->pc = (unsigned long)mips_cps_pm_restore;
                vpe_cfg->gp = (unsigned long)current_thread_info();
                vpe_cfg->sp = 0;
        }

        /* Indicate that this CPU might not be coherent */
        cpumask_clear_cpu(cpu, &cpu_coherent_mask);
        smp_mb__after_atomic();

        /* Create a non-coherent mapping of the core ready_count */
        core_ready_count = per_cpu(ready_count, core);
        nc_addr = kmap_noncoherent(virt_to_page(core_ready_count),
                                   (unsigned long)core_ready_count);
        nc_addr += ((unsigned long)core_ready_count & ~PAGE_MASK);
        nc_core_ready_count = nc_addr;

        /* Ensure ready_count is zero-initialised before the assembly runs */
        ACCESS_ONCE(*nc_core_ready_count) = 0;
        coupled_barrier(&per_cpu(pm_barrier, core), online);

        /* Run the generated entry code */
        left = entry(online, nc_core_ready_count);

        /* Remove the non-coherent mapping of ready_count */
        kunmap_noncoherent();

        /* Indicate that this CPU is definitely coherent */
        cpumask_set_cpu(cpu, &cpu_coherent_mask);

        /*
         * If this VPE is the first to leave the non-coherent wait state then
         * it needs to wake up any coupled VPEs still running their wait
         * instruction so that they return to cpuidle, which can then complete
         * coordination between the coupled VPEs & provide the governor with
         * a chance to reflect on the length of time the VPEs were in the
         * idle state.
         */
        if (coupled_coherence && (state == CPS_PM_NC_WAIT) && (left == online))
                arch_send_call_function_ipi_mask(coupled_mask);

        return 0;
}

static void __init cps_gen_cache_routine(u32 **pp, struct uasm_label **pl,
                                         struct uasm_reloc **pr,
                                         const struct cache_desc *cache,
                                         unsigned op, int lbl)
{
        unsigned cache_size = cache->ways << cache->waybit;
        unsigned i;
        const unsigned unroll_lines = 32;

        /* If the cache isn't present this function has it easy */
        if (cache->flags & MIPS_CACHE_NOT_PRESENT)
                return;

        /* Load base address */
        UASM_i_LA(pp, t0, (long)CKSEG0);

        /* Calculate end address */
        if (cache_size < 0x8000)
                uasm_i_addiu(pp, t1, t0, cache_size);
        else
                UASM_i_LA(pp, t1, (long)(CKSEG0 + cache_size));

        /* Start of cache op loop */
        uasm_build_label(pl, *pp, lbl);

        /* Generate the cache ops */
        for (i = 0; i < unroll_lines; i++) {
                if (cpu_has_mips_r6) {
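                        /*
                         * MIPSr6 shrank the CACHE instruction's offset field,
                         * so step the base address for each line rather than
                         * encoding larger immediate offsets.
                         */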
                        uasm_i_cache(pp, op, 0, t0);
                        uasm_i_addiu(pp, t0, t0, cache->linesz);
                } else {
                        uasm_i_cache(pp, op, i * cache->linesz, t0);
                }
        }

        if (!cpu_has_mips_r6)
                /* Update the base address */
                uasm_i_addiu(pp, t0, t0, unroll_lines * cache->linesz);

        /* Loop if we haven't reached the end address yet */
        uasm_il_bne(pp, pr, t0, t1, lbl);
        uasm_i_nop(pp);
}

static int __init cps_gen_flush_fsb(u32 **pp, struct uasm_label **pl,
                                    struct uasm_reloc **pr,
                                    const struct cpuinfo_mips *cpu_info,
                                    int lbl)
{
        unsigned i, fsb_size = 8;
        unsigned num_loads = (fsb_size * 3) / 2;
        unsigned line_stride = 2;
        unsigned line_size = cpu_info->dcache.linesz;
        unsigned perf_counter, perf_event;
        unsigned revision = cpu_info->processor_id & PRID_REV_MASK;

        /*
         * Determine whether this CPU requires an FSB flush, and if so which
         * performance counter/event reflect stalls due to a full FSB.
         */
        switch (__get_cpu_type(cpu_info->cputype)) {
        case CPU_INTERAPTIV:
                perf_counter = 1;
                perf_event = 51;
                break;

        case CPU_PROAPTIV:
                /* Newer proAptiv cores don't require this workaround */
                if (revision >= PRID_REV_ENCODE_332(1, 1, 0))
                        return 0;

                /* On older ones it's unavailable */
                return -1;

        /* CPUs which do not require the workaround */
        case CPU_P5600:
        case CPU_I6400:
                return 0;

        default:
                WARN_ONCE(1, "pm-cps: FSB flush unsupported for this CPU\n");
                return -1;
        }

        /*
         * Ensure that the fill/store buffer (FSB) is not holding the results
         * of a prefetch, since if it is then the CPC sequencer may become
         * stuck in the D3 (ClrBus) state whilst entering a low power state.
         */

        /* Preserve perf counter setup */
        uasm_i_mfc0(pp, t2, 25, (perf_counter * 2) + 0); /* PerfCtlN */
        uasm_i_mfc0(pp, t3, 25, (perf_counter * 2) + 1); /* PerfCntN */

        /* Setup perf counter to count FSB full pipeline stalls */
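        /* Event field is at bit 5; 0xf enables counting in U, S, K & EXL modes */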
        uasm_i_addiu(pp, t0, zero, (perf_event << 5) | 0xf);
        uasm_i_mtc0(pp, t0, 25, (perf_counter * 2) + 0); /* PerfCtlN */
        uasm_i_ehb(pp);
        uasm_i_mtc0(pp, zero, 25, (perf_counter * 2) + 1); /* PerfCntN */
        uasm_i_ehb(pp);

        /* Base address for loads */
        UASM_i_LA(pp, t0, (long)CKSEG0);

        /* Start of clear loop */
        uasm_build_label(pl, *pp, lbl);

        /* Perform some loads to fill the FSB */
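        /* The loads target $zero, so only the cache refills via the FSB matter */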
        for (i = 0; i < num_loads; i++)
                uasm_i_lw(pp, zero, i * line_size * line_stride, t0);

        /*
         * Invalidate the new D-cache entries so that the cache will need
         * refilling (via the FSB) if the loop is executed again.
         */
        for (i = 0; i < num_loads; i++) {
                uasm_i_cache(pp, Hit_Invalidate_D,
                             i * line_size * line_stride, t0);
                uasm_i_cache(pp, Hit_Writeback_Inv_SD,
                             i * line_size * line_stride, t0);
        }

        /* Completion barrier */
        uasm_i_sync(pp, stype_memory);
        uasm_i_ehb(pp);

        /* Check whether the pipeline stalled due to the FSB being full */
        uasm_i_mfc0(pp, t1, 25, (perf_counter * 2) + 1); /* PerfCntN */

        /* Loop if it didn't */
        uasm_il_beqz(pp, pr, t1, lbl);
        uasm_i_nop(pp);

        /* Restore perf counter 1. The count may well now be wrong... */
        uasm_i_mtc0(pp, t2, 25, (perf_counter * 2) + 0); /* PerfCtlN */
        uasm_i_ehb(pp);
        uasm_i_mtc0(pp, t3, 25, (perf_counter * 2) + 1); /* PerfCntN */
        uasm_i_ehb(pp);

        return 0;
}

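/*
 * Atomically set the top bit (bit 31) of the word at r_addr using an LL/SC
 * retry loop. Waiting VPEs poll ready_count for this bit to learn that they
 * may continue.
 */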
static void __init cps_gen_set_top_bit(u32 **pp, struct uasm_label **pl,
                                       struct uasm_reloc **pr,
                                       unsigned r_addr, int lbl)
{
        uasm_i_lui(pp, t0, uasm_rel_hi(0x80000000));
        uasm_build_label(pl, *pp, lbl);
        uasm_i_ll(pp, t1, 0, r_addr);
        uasm_i_or(pp, t1, t1, t0);
        uasm_i_sc(pp, t1, 0, r_addr);
        uasm_il_beqz(pp, pr, t1, lbl);
        uasm_i_nop(pp);
}

static void * __init cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
{
        struct uasm_label *l = labels;
        struct uasm_reloc *r = relocs;
        u32 *buf, *p;
        const unsigned r_online = a0;
        const unsigned r_nc_count = a1;
        const unsigned r_pcohctl = t7;
        const unsigned max_instrs = 256;
        unsigned cpc_cmd;
        int err;
        enum {
                lbl_incready = 1,
                lbl_poll_cont,
                lbl_secondary_hang,
                lbl_disable_coherence,
                lbl_flush_fsb,
                lbl_invicache,
                lbl_flushdcache,
                lbl_hang,
                lbl_set_cont,
                lbl_secondary_cont,
                lbl_decready,
        };

        /* Allocate a buffer to hold the generated code */
        p = buf = kcalloc(max_instrs, sizeof(u32), GFP_KERNEL);
        if (!buf)
                return NULL;

        /* Clear labels & relocs ready for (re)use */
        memset(labels, 0, sizeof(labels));
        memset(relocs, 0, sizeof(relocs));

        if (IS_ENABLED(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) {
                /* Power gating relies upon CPS SMP */
                if (!mips_cps_smp_in_use())
                        goto out_err;

                /*
                 * Save CPU state. Note the non-standard calling convention
                 * with the return address placed in v0 to avoid clobbering
                 * the ra register before it is saved.
                 */
                UASM_i_LA(&p, t0, (long)mips_cps_pm_save);
                uasm_i_jalr(&p, v0, t0);
                uasm_i_nop(&p);
        }

        /*
         * Load addresses of required CM & CPC registers. This is done early
         * because they're needed in both the enable & disable coherence steps
         * but in the coupled case the enable step will only run on one VPE.
         */
        UASM_i_LA(&p, r_pcohctl, (long)addr_gcr_cl_coherence());

        if (coupled_coherence) {
                /* Increment ready_count */
                uasm_i_sync(&p, stype_ordering);
                uasm_build_label(&l, p, lbl_incready);
                uasm_i_ll(&p, t1, 0, r_nc_count);
                uasm_i_addiu(&p, t2, t1, 1);
                uasm_i_sc(&p, t2, 0, r_nc_count);
                uasm_il_beqz(&p, &r, t2, lbl_incready);
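                /* Delay slot: t1 = this VPE's post-increment ready_count */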
                uasm_i_addiu(&p, t1, t1, 1);

                /* Ordering barrier */
                uasm_i_sync(&p, stype_ordering);

                /*
                 * If this is the last VPE to become ready for non-coherence
                 * then it should branch below.
                 */
                uasm_il_beq(&p, &r, t1, r_online, lbl_disable_coherence);
                uasm_i_nop(&p);

                if (state < CPS_PM_POWER_GATED) {
                        /*
                         * Otherwise this is not the last VPE to become ready
                         * for non-coherence. It needs to wait until coherence
                         * has been disabled before proceeding, which it will do
                         * by polling for the top bit of ready_count being set.
                         */
                        uasm_i_addiu(&p, t1, zero, -1);
                        uasm_build_label(&l, p, lbl_poll_cont);
                        uasm_i_lw(&p, t0, 0, r_nc_count);
                        uasm_il_bltz(&p, &r, t0, lbl_secondary_cont);
                        uasm_i_ehb(&p);
                        uasm_i_yield(&p, zero, t1);
                        uasm_il_b(&p, &r, lbl_poll_cont);
                        uasm_i_nop(&p);
                } else {
                        /*
                         * The core will lose power & this VPE will not continue
                         * so it can simply halt here.
                         */
                        uasm_i_addiu(&p, t0, zero, TCHALT_H);
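                        /* CP0 register 2, select 4 is the MT ASE TCHalt register */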
                        uasm_i_mtc0(&p, t0, 2, 4);
                        uasm_build_label(&l, p, lbl_secondary_hang);
                        uasm_il_b(&p, &r, lbl_secondary_hang);
                        uasm_i_nop(&p);
                }
        }

        /*
         * This is the point of no return - this VPE will now proceed to
         * disable coherence. At this point we *must* be sure that no other
         * VPE within the core will interfere with the L1 dcache.
         */
        uasm_build_label(&l, p, lbl_disable_coherence);

        /* Invalidate the L1 icache */
        cps_gen_cache_routine(&p, &l, &r, &cpu_data[cpu].icache,
                              Index_Invalidate_I, lbl_invicache);

        /* Writeback & invalidate the L1 dcache */
        cps_gen_cache_routine(&p, &l, &r, &cpu_data[cpu].dcache,
                              Index_Writeback_Inv_D, lbl_flushdcache);

        /* Completion barrier */
        uasm_i_sync(&p, stype_memory);
        uasm_i_ehb(&p);

        /*
         * Disable all but self interventions. The load from COHCTL is defined
         * by the interAptiv & proAptiv SUMs as ensuring that the operation
         * resulting from the preceding store is complete.
         */
        uasm_i_addiu(&p, t0, zero, 1 << cpu_data[cpu].core);
        uasm_i_sw(&p, t0, 0, r_pcohctl);
        uasm_i_lw(&p, t0, 0, r_pcohctl);

        /* Sync to ensure previous interventions are complete */
        uasm_i_sync(&p, stype_intervention);
        uasm_i_ehb(&p);

        /* Disable coherence */
        uasm_i_sw(&p, zero, 0, r_pcohctl);
        uasm_i_lw(&p, t0, 0, r_pcohctl);

        if (state >= CPS_PM_CLOCK_GATED) {
                err = cps_gen_flush_fsb(&p, &l, &r, &cpu_data[cpu],
                                        lbl_flush_fsb);
                if (err)
                        goto out_err;

                /* Determine the CPC command to issue */
                switch (state) {
                case CPS_PM_CLOCK_GATED:
                        cpc_cmd = CPC_Cx_CMD_CLOCKOFF;
                        break;
                case CPS_PM_POWER_GATED:
                        cpc_cmd = CPC_Cx_CMD_PWRDOWN;
                        break;
                default:
                        BUG();
                        goto out_err;
                }

                /* Issue the CPC command */
                UASM_i_LA(&p, t0, (long)addr_cpc_cl_cmd());
                uasm_i_addiu(&p, t1, zero, cpc_cmd);
                uasm_i_sw(&p, t1, 0, t0);

                if (state == CPS_PM_POWER_GATED) {
                        /* If anything goes wrong just hang */
                        uasm_build_label(&l, p, lbl_hang);
                        uasm_il_b(&p, &r, lbl_hang);
                        uasm_i_nop(&p);

                        /*
                         * There's no point generating more code, the core is
                         * powered down & if powered back up will run from the
                         * reset vector not from here.
                         */
                        goto gen_done;
                }

                /* Completion barrier */
                uasm_i_sync(&p, stype_memory);
                uasm_i_ehb(&p);
        }

        if (state == CPS_PM_NC_WAIT) {
                /*
                 * At this point it is safe for all VPEs to proceed with
                 * execution. This VPE will set the top bit of ready_count
                 * to indicate to the other VPEs that they may continue.
                 */
                if (coupled_coherence)
                        cps_gen_set_top_bit(&p, &l, &r, r_nc_count,
                                            lbl_set_cont);

                /*
                 * VPEs which did not disable coherence will continue
                 * executing, after coherence has been disabled, from this
                 * point.
                 */
                uasm_build_label(&l, p, lbl_secondary_cont);

                /* Now perform our wait */
                uasm_i_wait(&p, 0);
        }

        /*
         * Re-enable coherence. Note that for CPS_PM_NC_WAIT all coupled VPEs
         * will run this. The first will actually re-enable coherence & the
         * rest will just be performing a rather unusual nop.
         */
        uasm_i_addiu(&p, t0, zero, CM_GCR_Cx_COHERENCE_COHDOMAINEN_MSK);
        uasm_i_sw(&p, t0, 0, r_pcohctl);
        uasm_i_lw(&p, t0, 0, r_pcohctl);

        /* Completion barrier */
        uasm_i_sync(&p, stype_memory);
        uasm_i_ehb(&p);

        if (coupled_coherence && (state == CPS_PM_NC_WAIT)) {
                /* Decrement ready_count */
                uasm_build_label(&l, p, lbl_decready);
                uasm_i_sync(&p, stype_ordering);
                uasm_i_ll(&p, t1, 0, r_nc_count);
                uasm_i_addiu(&p, t2, t1, -1);
                uasm_i_sc(&p, t2, 0, r_nc_count);
                uasm_il_beqz(&p, &r, t2, lbl_decready);
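                /* Delay slot: mask the pre-decrement count into v0 as the return value */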
                uasm_i_andi(&p, v0, t1, (1 << fls(smp_num_siblings)) - 1);

                /* Ordering barrier */
                uasm_i_sync(&p, stype_ordering);
        }

        if (coupled_coherence && (state == CPS_PM_CLOCK_GATED)) {
                /*
                 * At this point it is safe for all VPEs to proceed with
                 * execution. This VPE will set the top bit of ready_count
                 * to indicate to the other VPEs that they may continue.
                 */
                cps_gen_set_top_bit(&p, &l, &r, r_nc_count, lbl_set_cont);

                /*
                 * This core will be reliant upon another core sending a
                 * power-up command to the CPC in order to resume operation.
                 * Thus an arbitrary VPE can't trigger the core leaving the
                 * idle state and the one that disables coherence might as well
                 * be the one to re-enable it. The rest will continue from here
                 * after that has been done.
                 */
                uasm_build_label(&l, p, lbl_secondary_cont);

                /* Ordering barrier */
                uasm_i_sync(&p, stype_ordering);
        }

        /* The core is coherent, time to return to C code */
        uasm_i_jr(&p, ra);
        uasm_i_nop(&p);

gen_done:
        /* Ensure the code didn't exceed the resources allocated for it */
        BUG_ON((p - buf) > max_instrs);
        BUG_ON((l - labels) > ARRAY_SIZE(labels));
        BUG_ON((r - relocs) > ARRAY_SIZE(relocs));

        /* Patch branch offsets */
        uasm_resolve_relocs(relocs, labels);

        /* Flush the icache */
        local_flush_icache_range((unsigned long)buf, (unsigned long)p);

        return buf;
out_err:
        kfree(buf);
        return NULL;
}

static int __init cps_gen_core_entries(unsigned cpu)
{
        enum cps_pm_state state;
        unsigned core = cpu_data[cpu].core;
        unsigned dlinesz = cpu_data[cpu].dcache.linesz;
        void *entry_fn, *core_rc;

        for (state = CPS_PM_NC_WAIT; state < CPS_PM_STATE_COUNT; state++) {
                if (per_cpu(nc_asm_enter, core)[state])
                        continue;
                if (!test_bit(state, state_support))
                        continue;

                entry_fn = cps_gen_entry_code(cpu, state);
                if (!entry_fn) {
                        pr_err("Failed to generate core %u state %u entry\n",
                               core, state);
                        clear_bit(state, state_support);
                }

                per_cpu(nc_asm_enter, core)[state] = entry_fn;
        }

        if (!per_cpu(ready_count, core)) {
                core_rc = kmalloc(dlinesz * 2, GFP_KERNEL);
                if (!core_rc) {
                        pr_err("Failed to allocate core %u ready_count\n", core);
                        return -ENOMEM;
                }
                per_cpu(ready_count_alloc, core) = core_rc;

                /* Ensure ready_count is aligned to a cacheline boundary */
                core_rc += dlinesz - 1;
                core_rc = (void *)((unsigned long)core_rc & ~(dlinesz - 1));
                per_cpu(ready_count, core) = core_rc;
        }

        return 0;
}

static int __init cps_pm_init(void)
{
        unsigned cpu;
        int err;

        /* Detect appropriate sync types for the system */
        switch (current_cpu_data.cputype) {
        case CPU_INTERAPTIV:
        case CPU_PROAPTIV:
        case CPU_M5150:
        case CPU_P5600:
        case CPU_I6400:
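                /*
                 * These cores implement lightweight SYNC stypes; others keep
                 * the default of 0 (heavyweight SYNC).
                 */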
                stype_intervention = 0x2;
                stype_memory = 0x3;
                stype_ordering = 0x10;
                break;

        default:
                pr_warn("Power management is using heavyweight sync 0\n");
        }

        /* A CM is required for all non-coherent states */
        if (!mips_cm_present()) {
                pr_warn("pm-cps: no CM, non-coherent states unavailable\n");
                goto out;
        }

        /*
         * If interrupts were enabled whilst running a wait instruction on a
         * non-coherent core then the VPE may end up processing interrupts
         * whilst non-coherent. That would be bad.
         */
        if (cpu_wait == r4k_wait_irqoff)
                set_bit(CPS_PM_NC_WAIT, state_support);
        else
                pr_warn("pm-cps: non-coherent wait unavailable\n");

        /* Detect whether a CPC is present */
        if (mips_cpc_present()) {
                /* Detect whether clock gating is implemented */
                if (read_cpc_cl_stat_conf() & CPC_Cx_STAT_CONF_CLKGAT_IMPL_MSK)
                        set_bit(CPS_PM_CLOCK_GATED, state_support);
                else
                        pr_warn("pm-cps: CPC does not support clock gating\n");

                /* Power gating is available with CPS SMP & any CPC */
                if (mips_cps_smp_in_use())
                        set_bit(CPS_PM_POWER_GATED, state_support);
                else
                        pr_warn("pm-cps: CPS SMP not in use, power gating unavailable\n");
        } else {
                pr_warn("pm-cps: no CPC, clock & power gating unavailable\n");
        }

        for_each_present_cpu(cpu) {
                err = cps_gen_core_entries(cpu);
                if (err)
                        return err;
        }
out:
        return 0;
}
arch_initcall(cps_pm_init);