CHROMIUM: ARM: exynos: no duplicate mask/unmask in eint0_15
[cascardo/linux.git] / arch / arm / mach-exynos / common.c
1 /*
2  * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
3  *              http://www.samsung.com
4  *
5  * Common Codes for EXYNOS
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License version 2 as
9  * published by the Free Software Foundation.
10  */
11
12 #include <linux/kernel.h>
13 #include <linux/interrupt.h>
14 #include <linux/irq.h>
15 #include <linux/io.h>
16 #include <linux/device.h>
17 #include <linux/gpio.h>
18 #include <linux/sched.h>
19 #include <linux/serial_core.h>
20 #include <linux/of.h>
21 #include <linux/of_irq.h>
22 #include <linux/export.h>
23 #include <linux/irqdomain.h>
24 #include <linux/of_address.h>
25 #include <linux/cpu_pm.h>
26
27 #include <asm/proc-fns.h>
28 #include <asm/exception.h>
29 #include <asm/hardware/cache-l2x0.h>
30 #include <asm/hardware/gic.h>
31 #include <asm/mach/map.h>
32 #include <asm/mach/irq.h>
33 #include <asm/cacheflush.h>
34
35 #include <mach/regs-irq.h>
36 #include <mach/regs-pmu.h>
37 #include <mach/regs-gpio.h>
38 #include <mach/pmu.h>
39
40 #include <plat/cpu.h>
41 #include <plat/clock.h>
42 #include <plat/devs.h>
43 #include <plat/pm.h>
44 #include <plat/sdhci.h>
45 #include <plat/gpio-cfg.h>
46 #include <plat/adc-core.h>
47 #include <plat/fb-core.h>
48 #include <plat/fimc-core.h>
49 #include <plat/iic-core.h>
50 #include <plat/tv-core.h>
51 #include <plat/regs-serial.h>
52
53 #include "common.h"
54 #define L2_AUX_VAL 0x7C470001
55 #define L2_AUX_MASK 0xC200ffff
56
57 static const char name_exynos4210[] = "EXYNOS4210";
58 static const char name_exynos4212[] = "EXYNOS4212";
59 static const char name_exynos4412[] = "EXYNOS4412";
60 static const char name_exynos5250[] = "EXYNOS5250";
61
62 static void exynos4_map_io(void);
63 static void exynos5_map_io(void);
64 static void exynos4_init_clocks(int xtal);
65 static void exynos5_init_clocks(int xtal);
66 static void exynos_init_uarts(struct s3c2410_uartcfg *cfg, int no);
67 static int exynos_init(void);
68 static int exynos_init_irq_eint(struct device_node *np,
69                                 struct device_node *parent);
70
/*
 * SoC ID match table consumed by s3c_init_cpu(): maps the ID code read
 * from the CHIPID block to the per-SoC map_io/clock/uart init hooks.
 */
static struct cpu_table cpu_ids[] __initdata = {
	{
		.idcode		= EXYNOS4210_CPU_ID,
		.idmask		= EXYNOS4_CPU_MASK,
		.map_io		= exynos4_map_io,
		.init_clocks	= exynos4_init_clocks,
		.init_uarts	= exynos_init_uarts,
		.init		= exynos_init,
		.name		= name_exynos4210,
	}, {
		.idcode		= EXYNOS4212_CPU_ID,
		.idmask		= EXYNOS4_CPU_MASK,
		.map_io		= exynos4_map_io,
		.init_clocks	= exynos4_init_clocks,
		.init_uarts	= exynos_init_uarts,
		.init		= exynos_init,
		.name		= name_exynos4212,
	}, {
		.idcode		= EXYNOS4412_CPU_ID,
		.idmask		= EXYNOS4_CPU_MASK,
		.map_io		= exynos4_map_io,
		.init_clocks	= exynos4_init_clocks,
		.init_uarts	= exynos_init_uarts,
		.init		= exynos_init,
		.name		= name_exynos4412,
	}, {
		.idcode		= EXYNOS5250_SOC_ID,
		.idmask		= EXYNOS5_SOC_MASK,
		.map_io		= exynos5_map_io,
		.init_clocks	= exynos5_init_clocks,
		.init_uarts	= exynos_init_uarts,
		.init		= exynos_init,
		.name		= name_exynos5250,
	},
};
106
107 /* Initial IO mappings */
108
/* Minimal static mapping needed before SoC detection: the CHIPID block. */
static struct map_desc exynos_iodesc[] __initdata = {
	{
		.virtual	= (unsigned long)S5P_VA_CHIPID,
		.pfn		= __phys_to_pfn(EXYNOS_PA_CHIPID),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	},
};
117
/* Static device mappings common to all EXYNOS4 variants. */
static struct map_desc exynos4_iodesc[] __initdata = {
	{
		.virtual	= (unsigned long)S3C_VA_SYS,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_SYSCON),
		.length		= SZ_64K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S3C_VA_TIMER,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_TIMER),
		.length		= SZ_16K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S3C_VA_WATCHDOG,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_WATCHDOG),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_SROMC,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_SROMC),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_SYSTIMER,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_SYSTIMER),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_PMU,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_PMU),
		.length		= SZ_64K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_COMBINER_BASE,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_COMBINER),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_GIC_CPU,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_GIC_CPU),
		.length		= SZ_64K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_GIC_DIST,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_GIC_DIST),
		.length		= SZ_64K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S3C_VA_UART,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_UART),
		.length		= SZ_512K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_CMU,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_CMU),
		.length		= SZ_128K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_COREPERI_BASE,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_COREPERI),
		.length		= SZ_8K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_L2CC,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_L2CC),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_DMC0,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_DMC0),
		.length		= SZ_64K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_DMC1,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_DMC1),
		.length		= SZ_64K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S3C_VA_USB_HSPHY,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_HSPHY),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_AUDSS,
		.pfn		= __phys_to_pfn(EXYNOS_PA_AUDSS),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	},
};
206
/* SYSRAM mapping for EXYNOS4210 rev 0 (different physical address). */
static struct map_desc exynos4_iodesc0[] __initdata = {
	{
		.virtual	= (unsigned long)S5P_VA_SYSRAM,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_SYSRAM0),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	},
};
215
/* SYSRAM mapping for all other EXYNOS4 revisions. */
static struct map_desc exynos4_iodesc1[] __initdata = {
	{
		.virtual	= (unsigned long)S5P_VA_SYSRAM,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_SYSRAM1),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	},
};
224
/* Static device mappings for EXYNOS5250. */
static struct map_desc exynos5_iodesc[] __initdata = {
	{
		.virtual	= (unsigned long)S3C_VA_SYS,
		.pfn		= __phys_to_pfn(EXYNOS5_PA_SYSCON),
		.length		= SZ_64K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S3C_VA_TIMER,
		.pfn		= __phys_to_pfn(EXYNOS5_PA_TIMER),
		.length		= SZ_16K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S3C_VA_WATCHDOG,
		.pfn		= __phys_to_pfn(EXYNOS5_PA_WATCHDOG),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_SROMC,
		.pfn		= __phys_to_pfn(EXYNOS5_PA_SROMC),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_SYSTIMER,
		.pfn		= __phys_to_pfn(EXYNOS5_PA_SYSTIMER),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_SYSRAM,
		.pfn		= __phys_to_pfn(EXYNOS5_PA_SYSRAM),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_CMU,
		.pfn		= __phys_to_pfn(EXYNOS5_PA_CMU),
		/* CMU register block is 144KiB, not a power-of-two size */
		.length		= 144 * SZ_1K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_PMU,
		.pfn		= __phys_to_pfn(EXYNOS5_PA_PMU),
		.length		= SZ_64K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_COMBINER_BASE,
		.pfn		= __phys_to_pfn(EXYNOS5_PA_COMBINER),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S3C_VA_UART,
		.pfn		= __phys_to_pfn(EXYNOS5_PA_UART),
		.length		= SZ_512K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_GIC_CPU,
		.pfn		= __phys_to_pfn(EXYNOS5_PA_GIC_CPU),
		.length		= SZ_8K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_GIC_DIST,
		.pfn		= __phys_to_pfn(EXYNOS5_PA_GIC_DIST),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S3C_VA_USB_HSPHY,
		.pfn		= __phys_to_pfn(EXYNOS5_PA_USB_PHY),
		.length		= SZ_256K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_DRD_PHY,
		.pfn		= __phys_to_pfn(EXYNOS5_PA_DRD_PHY),
		.length		= SZ_256K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_AUDSS,
		.pfn		= __phys_to_pfn(EXYNOS_PA_AUDSS),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_DREXII,
		.pfn		= __phys_to_pfn(EXYNOS5_PA_DREXII),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	},
};
308
/* Machine restart hook: trigger an EXYNOS4 software reset via the PMU. */
void exynos4_restart(char mode, const char *cmd)
{
	__raw_writel(0x1, S5P_SWRESET);
}
313
/* Machine restart hook: trigger an EXYNOS5 software reset via the PMU. */
void exynos5_restart(char mode, const char *cmd)
{
	__raw_writel(0x1, EXYNOS_SWRESET);
}
318
319 static void wdt_reset_init(void)
320 {
321         unsigned int value;
322
323         value = __raw_readl(EXYNOS5_AUTOMATIC_WDT_RESET_DISABLE);
324         value &= ~EXYNOS5_SYS_WDTRESET;
325         __raw_writel(value, EXYNOS5_AUTOMATIC_WDT_RESET_DISABLE);
326
327         value = __raw_readl(EXYNOS5_MASK_WDT_RESET_REQUEST);
328         value &= ~EXYNOS5_SYS_WDTRESET;
329         __raw_writel(value, EXYNOS5_MASK_WDT_RESET_REQUEST);
330 }
331
332 /*
333  * exynos_map_io
334  *
335  * register the standard cpu IO areas
336  */
337
/*
 * exynos_init_io - register the common EXYNOS IO mappings and detect the SoC.
 * @mach_desc: optional extra board-specific mappings (may be NULL)
 * @size: number of entries in @mach_desc
 *
 * The CHIPID mapping must be in place before s5p_init_cpu() can read the
 * SoC ID, which in turn selects the per-SoC hooks from cpu_ids.
 */
void __init exynos_init_io(struct map_desc *mach_desc, int size)
{
	/* initialize the io descriptors we need for initialization */
	iotable_init(exynos_iodesc, ARRAY_SIZE(exynos_iodesc));
	if (mach_desc)
		iotable_init(mach_desc, size);

	/* detect cpu id and rev. */
	s5p_init_cpu(S5P_VA_CHIPID);

	s3c_init_cpu(samsung_cpu_id, cpu_ids, ARRAY_SIZE(cpu_ids));

	/* TO support Watch dog reset */
	wdt_reset_init();
}
353
/*
 * exynos4_map_io - EXYNOS4 map_io hook from cpu_ids.
 *
 * Registers the EXYNOS4 static mappings (picking the revision-specific
 * SYSRAM map) and renames early platform devices to their EXYNOS4
 * driver names.
 */
static void __init exynos4_map_io(void)
{
	iotable_init(exynos4_iodesc, ARRAY_SIZE(exynos4_iodesc));

	/* 4210 rev 0 has SYSRAM at a different physical address */
	if (soc_is_exynos4210() && samsung_rev() == EXYNOS4210_REV_0)
		iotable_init(exynos4_iodesc0, ARRAY_SIZE(exynos4_iodesc0));
	else
		iotable_init(exynos4_iodesc1, ARRAY_SIZE(exynos4_iodesc1));

	/* initialize device information early */
	exynos4_default_sdhci0();
	exynos4_default_sdhci1();
	exynos4_default_sdhci2();
	exynos4_default_sdhci3();

	s3c_adc_setname("samsung-adc-v3");

	s3c_fimc_setname(0, "exynos4-fimc");
	s3c_fimc_setname(1, "exynos4-fimc");
	s3c_fimc_setname(2, "exynos4-fimc");
	s3c_fimc_setname(3, "exynos4-fimc");

	s3c_sdhci_setname(0, "exynos4-sdhci");
	s3c_sdhci_setname(1, "exynos4-sdhci");
	s3c_sdhci_setname(2, "exynos4-sdhci");
	s3c_sdhci_setname(3, "exynos4-sdhci");

	/* The I2C bus controllers are directly compatible with s3c2440 */
	s3c_i2c0_setname("s3c2440-i2c");
	s3c_i2c1_setname("s3c2440-i2c");
	s3c_i2c2_setname("s3c2440-i2c");

	s5p_fb_setname(0, "exynos4-fb");
	s5p_hdmi_setname("exynos4-hdmi");
}
389
/*
 * exynos5_map_io - EXYNOS5 map_io hook from cpu_ids.
 *
 * Registers the EXYNOS5 static mappings, points the i2c0 platform
 * device at the EXYNOS5 register/interrupt resources and renames
 * early platform devices to their driver names.
 */
static void __init exynos5_map_io(void)
{
	iotable_init(exynos5_iodesc, ARRAY_SIZE(exynos5_iodesc));

	s3c_device_i2c0.resource[0].start = EXYNOS5_PA_IIC(0);
	s3c_device_i2c0.resource[0].end   = EXYNOS5_PA_IIC(0) + SZ_4K - 1;
	s3c_device_i2c0.resource[1].start = EXYNOS5_IRQ_IIC;
	s3c_device_i2c0.resource[1].end   = EXYNOS5_IRQ_IIC;

	/* EXYNOS5 reuses the EXYNOS4 sdhci driver */
	s3c_sdhci_setname(0, "exynos4-sdhci");
	s3c_sdhci_setname(1, "exynos4-sdhci");
	s3c_sdhci_setname(2, "exynos4-sdhci");
	s3c_sdhci_setname(3, "exynos4-sdhci");

	/* The I2C bus controllers are directly compatible with s3c2440 */
	s3c_i2c0_setname("s3c2440-i2c");
	s3c_i2c1_setname("s3c2440-i2c");
	s3c_i2c2_setname("s3c2440-i2c");
}
409
/*
 * exynos4_init_clocks - EXYNOS4 clock init hook from cpu_ids.
 * @xtal: crystal frequency in Hz passed to the base clock setup.
 *
 * Registers the common base clocks, the variant-specific clock set
 * (4210 vs 4212/4412) and the audio subsystem clocks.
 */
static void __init exynos4_init_clocks(int xtal)
{
	printk(KERN_DEBUG "%s: initializing clocks\n", __func__);

	s3c24xx_register_baseclocks(xtal);
	s5p_register_clocks(xtal);

	if (soc_is_exynos4210())
		exynos4210_register_clocks();
	else if (soc_is_exynos4212() || soc_is_exynos4412())
		exynos4212_register_clocks();

	exynos4_register_clocks();
	exynos4_setup_clocks();
	exynos_register_audss_clocks();
}
426
/*
 * exynos5_init_clocks - EXYNOS5 clock init hook from cpu_ids.
 * @xtal: crystal frequency in Hz passed to the base clock setup.
 */
static void __init exynos5_init_clocks(int xtal)
{
	printk(KERN_DEBUG "%s: initializing clocks\n", __func__);

	s3c24xx_register_baseclocks(xtal);
	s5p_register_clocks(xtal);

	exynos5_register_clocks();
	exynos5_setup_clocks();
	exynos_register_audss_clocks();
}
438
439 #define COMBINER_ENABLE_SET     0x0
440 #define COMBINER_ENABLE_CLEAR   0x4
441 #define COMBINER_INT_STATUS     0xC
442
443 static DEFINE_SPINLOCK(irq_controller_lock);
444
/* Per combiner-group state. */
struct combiner_chip_data {
	unsigned int irq_offset;	/* first linux irq of this group */
	unsigned int irq_mask;		/* this group's bits in the shared regs */
	void __iomem *base;		/* register bank base for this group */
	unsigned int gic_irq;		/* parent GIC irq this group cascades from */
#ifdef CONFIG_PM
	bool saved_on;			/* group had enabled inputs at suspend */
#endif
};
454
455 static struct irq_domain *combiner_irq_domain;
456 static struct combiner_chip_data *combiner_data;
457 static unsigned int rt_max_combiner_nr;
458
459 static inline void __iomem *combiner_base(struct irq_data *data)
460 {
461         struct combiner_chip_data *irq_combiner_data =
462                 irq_data_get_irq_chip_data(data);
463
464         return irq_combiner_data->base;
465 }
466
467 static void combiner_mask_irq(struct irq_data *data)
468 {
469         u32 mask = 1 << (data->hwirq % 32);
470
471         __raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_CLEAR);
472 }
473
474 static void combiner_unmask_irq(struct irq_data *data)
475 {
476         u32 mask = 1 << (data->hwirq % 32);
477
478         __raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_SET);
479 }
480
481 static int combiner_set_affinity(struct irq_data *data, const struct
482                                  cpumask *dest, bool force)
483 {
484         struct combiner_chip_data *chip_data = data->chip_data;
485
486         if (!chip_data)
487                 return -EINVAL;
488
489         return irq_set_affinity(chip_data->gic_irq, dest);
490 }
491
/*
 * Chained handler for a combiner group's parent GIC interrupt: read the
 * group's pending bits and dispatch the lowest pending combined source
 * to its linux irq.
 */
static void combiner_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
{
	struct combiner_chip_data *chip_data = irq_get_handler_data(irq);
	struct irq_chip *chip = irq_get_chip(irq);
	unsigned int cascade_irq, combiner_irq;
	unsigned long status;

	chained_irq_enter(chip, desc);

	/* Serialize the status read against other combiner accesses. */
	spin_lock(&irq_controller_lock);
	status = __raw_readl(chip_data->base + COMBINER_INT_STATUS);
	spin_unlock(&irq_controller_lock);
	/* Only this group's bits of the shared status register matter. */
	status &= chip_data->irq_mask;

	if (status == 0)
		goto out;

	/* Handle one source per parent interrupt; __ffs picks the lowest. */
	combiner_irq = __ffs(status);

	cascade_irq = combiner_irq + (chip_data->irq_offset & ~31);
	if (unlikely(cascade_irq >= NR_IRQS))
		do_bad_IRQ(cascade_irq, desc);
	else
		generic_handle_irq(cascade_irq);

 out:
	chained_irq_exit(chip, desc);
}
520
/*
 * irq_chip for combined interrupts: mask/unmask write the group's
 * enable registers, affinity is forwarded to the parent GIC irq.
 */
static struct irq_chip combiner_chip = {
	.name		= "COMBINER",
	.irq_mask	= combiner_mask_irq,
	.irq_unmask	= combiner_unmask_irq,
	.irq_set_affinity = combiner_set_affinity,
};
527
528 static void __init combiner_cascade_irq(unsigned int combiner_nr, unsigned int irq)
529 {
530         if (irq_set_handler_data(irq, &combiner_data[combiner_nr]) != 0)
531                 BUG();
532         irq_set_chained_handler(irq, combiner_handle_cascade_irq);
533         combiner_data[combiner_nr].gic_irq = irq;
534 }
535
/*
 * Initialize one combiner group's state and mask all of its inputs
 * until they are individually unmasked.
 */
static void __init combiner_init_one(unsigned int combiner_nr,
						void __iomem *base)
{
	combiner_data[combiner_nr].base = base;
	/* First linux irq of this group, as mapped by the legacy domain. */
	combiner_data[combiner_nr].irq_offset = irq_find_mapping(
		combiner_irq_domain, combiner_nr * MAX_IRQ_IN_COMBINER);
	/* Each of the four groups in a bank owns one byte lane. */
	combiner_data[combiner_nr].irq_mask = 0xff << ((combiner_nr % 4) << 3);

	/* Disable all interrupts */

	__raw_writel(combiner_data[combiner_nr].irq_mask,
		     base + COMBINER_ENABLE_CLEAR);
}
549
#ifdef CONFIG_OF
/*
 * Translate a two-cell DT interrupt specifier (group, pin) into a
 * linear hwirq within the combiner domain.  The binding encodes no
 * trigger type, so report 0.
 */
static int combiner_irq_domain_xlate(struct irq_domain *d,
		struct device_node *controller, const u32 *intspec,
		unsigned int intsize, unsigned long *out_hwirq,
		unsigned int *out_type)
{
	if (d->of_node != controller)
		return -EINVAL;
	if (intsize < 2)
		return -EINVAL;
	*out_hwirq = intspec[0] * MAX_IRQ_IN_COMBINER + intspec[1];
	*out_type = 0;
	return 0;
}
#else
/* Without CONFIG_OF there are no DT specifiers to translate. */
static int combiner_irq_domain_xlate(struct irq_domain *d,
		struct device_node *controller, const u32 *intspec,
		unsigned int intsize, unsigned long *out_hwirq,
		unsigned int *out_type)
{
	return -EINVAL;
}
#endif
573
574 static int combiner_irq_domain_map(struct irq_domain *d, unsigned int irq,
575                                         irq_hw_number_t hw)
576 {
577         irq_set_chip_and_handler(irq, &combiner_chip, handle_level_irq);
578         irq_set_chip_data(irq, &combiner_data[hw >> 3]);
579         set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
580         return 0;
581 }
582
583 #ifdef CONFIG_PM
584 static void combiner_save(void)
585 {
586         int i;
587
588         for (i = 0; i < rt_max_combiner_nr; i++) {
589                 if (combiner_data[i].irq_mask &
590                     __raw_readl(combiner_data[i].base + COMBINER_ENABLE_SET)) {
591                         combiner_data[i].saved_on = true;
592                 } else {
593                         combiner_data[i].saved_on = false;
594                 }
595         }
596 }
597
598 static void combiner_restore(void)
599 {
600         int i;
601
602         for (i = 0; i < rt_max_combiner_nr; i++) {
603                 if (!combiner_data[i].saved_on)
604                         continue;
605
606                 __raw_writel(combiner_data[i].irq_mask,
607                              combiner_data[i].base + COMBINER_ENABLE_SET);
608         }
609 }
610
611
612 static int combiner_notifier(struct notifier_block *self, unsigned long cmd,
613                              void *v)
614 {
615         switch (cmd) {
616         case CPU_PM_ENTER:
617                 combiner_save();
618                 break;
619         case CPU_PM_EXIT:
620                 combiner_restore();
621                 break;
622         }
623
624         return NOTIFY_OK;
625 }
626
/* Registered with cpu_pm_register_notifier() in combiner_init(). */
static struct notifier_block combiner_notifier_block = {
	.notifier_call = combiner_notifier,
};
630 #endif
631
632 static struct irq_domain_ops combiner_irq_domain_ops = {
633         .xlate = combiner_irq_domain_xlate,
634         .map = combiner_irq_domain_map,
635 };
636
637 void __init combiner_init(void __iomem *combiner_base, struct device_node *np)
638 {
639         int i, irq, irq_base;
640         unsigned int nr_irq, soc_max_nr;
641
642         soc_max_nr = soc_is_exynos5250() ? EXYNOS5_MAX_COMBINER_NR :
643                 EXYNOS4_MAX_COMBINER_NR;
644
645         if (np) {
646                 if (of_property_read_u32(np, "samsung,combiner-nr",
647                                          &rt_max_combiner_nr)) {
648                         rt_max_combiner_nr = soc_max_nr;
649                         pr_warning("%s: number of combiners not specified, "
650                                    "setting default as %d.\n",
651                                    __func__, rt_max_combiner_nr);
652                 }
653         } else {
654                 rt_max_combiner_nr = soc_max_nr;
655         }
656         if (WARN_ON(rt_max_combiner_nr > soc_max_nr)) {
657                 pr_warning("%s: more combiners specified (%d) than "
658                            "architecture (%d) supports.",
659                            __func__, rt_max_combiner_nr, soc_max_nr);
660                 return;
661         }
662
663         combiner_data = kmalloc(sizeof(struct combiner_chip_data) *
664                                 rt_max_combiner_nr, GFP_KERNEL);
665         if (WARN_ON(!combiner_data)) {
666                 pr_warning("%s: combiner_data memory allocation failed for %d "
667                            "entries", __func__, rt_max_combiner_nr);
668                 return;
669         }
670
671         nr_irq = rt_max_combiner_nr * MAX_IRQ_IN_COMBINER;
672
673         irq_base = irq_alloc_descs(COMBINER_IRQ(0, 0), 1, nr_irq, 0);
674         if (IS_ERR_VALUE(irq_base)) {
675                 irq_base = COMBINER_IRQ(0, 0);
676                 pr_warning("%s: irq desc alloc failed. Continuing with %d as "
677                                 "linux irq base\n", __func__, irq_base);
678         }
679
680         combiner_irq_domain = irq_domain_add_legacy(np, nr_irq, irq_base, 0,
681                                 &combiner_irq_domain_ops, &combiner_data);
682         if (WARN_ON(!combiner_irq_domain)) {
683                 pr_warning("%s: irq domain init failed\n", __func__);
684                 kfree(combiner_data);
685                 return;
686         }
687
688         for (i = 0; i < rt_max_combiner_nr; i++) {
689                 combiner_init_one(i, combiner_base + (i >> 2) * 0x10);
690                 irq = np ? irq_of_parse_and_map(np, i) : IRQ_SPI(i);
691                 combiner_cascade_irq(i, irq);
692         }
693
694 #ifdef CONFIG_PM
695         /* Setup suspend/resume combiner saving */
696         cpu_pm_register_notifier(&combiner_notifier_block);
697 #endif
698 }
699
700 #ifdef CONFIG_OF
701 int __init combiner_of_init(struct device_node *np, struct device_node *parent)
702 {
703         void __iomem *combiner_base;
704
705         combiner_base = of_iomap(np, 0);
706         if (!combiner_base) {
707                 pr_err("%s: failed to map combiner registers\n", __func__);
708                 return -ENXIO;
709         }
710
711         combiner_init(combiner_base, np);
712         return 0;
713 }
714
/* Root interrupt controllers probed from the device tree by of_irq_init(). */
static const struct of_device_id exynos4_dt_irq_match[] = {
	{ .compatible = "arm,cortex-a9-gic", .data = gic_of_init, },
	{ .compatible = "samsung,exynos4210-combiner",
			.data = combiner_of_init, },
	{ .compatible = "samsung,exynos5210-wakeup-eint-map",
			.data = exynos_init_irq_eint, },
	{},
};
723 #endif
724
/*
 * exynos4_init_irq - EXYNOS4 interrupt controller setup.
 *
 * On DT boots the GIC, combiner and wakeup EINTs are probed from the
 * device tree; otherwise they are initialized from the static mappings.
 */
void __init exynos4_init_irq(void)
{
	unsigned int gic_bank_offset;

	/* Per-CPU GIC bank stride differs on EXYNOS4412. */
	gic_bank_offset = soc_is_exynos4412() ? 0x4000 : 0x8000;

	if (!of_have_populated_dt())
		gic_init_bases(0, IRQ_PPI(0), S5P_VA_GIC_DIST, S5P_VA_GIC_CPU, gic_bank_offset, NULL);
#ifdef CONFIG_OF
	else
		of_irq_init(exynos4_dt_irq_match);
#endif

	if (!of_have_populated_dt()) {
		combiner_init(S5P_VA_COMBINER_BASE, NULL);
		exynos_init_irq_eint(NULL, NULL);
	}

	/*
	 * The parameters of s5p_init_irq() are for VIC init.
	 * Theses parameters should be NULL and 0 because EXYNOS4
	 * uses GIC instead of VIC.
	 */
	s5p_init_irq(NULL, 0);
}
750
/*
 * exynos5_init_irq - EXYNOS5 interrupt controller setup (DT only),
 * plus GIC wake handling routed through the S3C irq-wake helper.
 */
void __init exynos5_init_irq(void)
{
#ifdef CONFIG_OF
	of_irq_init(exynos4_dt_irq_match);
#endif
	/*
	 * The parameters of s5p_init_irq() are for VIC init.
	 * Theses parameters should be NULL and 0 because EXYNOS4
	 * uses GIC instead of VIC.
	 */
	s5p_init_irq(NULL, 0);

	/* Let the GIC delegate wake configuration to the S3C helper. */
	gic_arch_extn.irq_set_wake = s3c_irq_wake;
}
765
/* Subsystem (bus) that parents EXYNOS core system devices. */
struct bus_type exynos_subsys = {
	.name		= "exynos-core",
	.dev_name	= "exynos-core",
};
770
/* Core device registered on exynos_subsys by exynos_init(). */
static struct device exynos4_dev = {
	.bus	= &exynos_subsys,
};
774
/* Register the exynos-core subsystem early so devices can attach to it. */
static int __init exynos_core_init(void)
{
	return subsys_system_register(&exynos_subsys, NULL);
}
core_initcall(exynos_core_init);
780
#ifdef CONFIG_CACHE_L2X0
/*
 * exynos4_l2x0_cache_init - bring up the EXYNOS4 L2 (PL310) cache.
 *
 * Tries DT-based l2x0 init first; on non-DT boots it programs latency,
 * prefetch and power controls by hand (only while the controller is
 * still disabled) and saves the register values so resume code can
 * restore them, then enables the cache.  No-op on EXYNOS5250.
 */
static int __init exynos4_l2x0_cache_init(void)
{
	int ret;

	if (soc_is_exynos5250())
		return 0;

	ret = l2x0_of_init(L2_AUX_VAL, L2_AUX_MASK);
	if (!ret) {
		/* Publish the saved-regs address for resume code. */
		l2x0_regs_phys = virt_to_phys(&l2x0_saved_regs);
		clean_dcache_area(&l2x0_regs_phys, sizeof(unsigned long));
		return 0;
	}

	/* Only program setup registers while the controller is disabled. */
	if (!(__raw_readl(S5P_VA_L2CC + L2X0_CTRL) & 0x1)) {
		l2x0_saved_regs.phy_base = EXYNOS4_PA_L2CC;
		/* TAG, Data Latency Control: 2 cycles */
		l2x0_saved_regs.tag_latency = 0x110;

		if (soc_is_exynos4212() || soc_is_exynos4412())
			l2x0_saved_regs.data_latency = 0x120;
		else
			l2x0_saved_regs.data_latency = 0x110;

		l2x0_saved_regs.prefetch_ctrl = 0x30000007;
		l2x0_saved_regs.pwr_ctrl =
			(L2X0_DYNAMIC_CLK_GATING_EN | L2X0_STNDBY_MODE_EN);

		l2x0_regs_phys = virt_to_phys(&l2x0_saved_regs);

		__raw_writel(l2x0_saved_regs.tag_latency,
				S5P_VA_L2CC + L2X0_TAG_LATENCY_CTRL);
		__raw_writel(l2x0_saved_regs.data_latency,
				S5P_VA_L2CC + L2X0_DATA_LATENCY_CTRL);

		/* L2X0 Prefetch Control */
		__raw_writel(l2x0_saved_regs.prefetch_ctrl,
				S5P_VA_L2CC + L2X0_PREFETCH_CTRL);

		/* L2X0 Power Control */
		__raw_writel(l2x0_saved_regs.pwr_ctrl,
				S5P_VA_L2CC + L2X0_POWER_CTRL);

		/* Flush saved state so resume code can read it uncached. */
		clean_dcache_area(&l2x0_regs_phys, sizeof(unsigned long));
		clean_dcache_area(&l2x0_saved_regs, sizeof(struct l2x0_regs));
	}

	l2x0_init(S5P_VA_L2CC, L2_AUX_VAL, L2_AUX_MASK);
	return 0;
}
early_initcall(exynos4_l2x0_cache_init);
#endif
834
/* Common per-SoC init hook from cpu_ids: register the core device. */
static int __init exynos_init(void)
{
	printk(KERN_INFO "EXYNOS: Initializing architecture\n");

	return device_register(&exynos4_dev);
}
841
842 /* uart registration process */
843
844 static void __init exynos_init_uarts(struct s3c2410_uartcfg *cfg, int no)
845 {
846         struct s3c2410_uartcfg *tcfg = cfg;
847         u32 ucnt;
848
849         for (ucnt = 0; ucnt < no; ucnt++, tcfg++)
850                 tcfg->has_fracval = 1;
851
852         if (soc_is_exynos5250())
853                 s3c24xx_init_uartdevs("exynos4210-uart", exynos5_uart_resources, cfg, no);
854         else
855                 s3c24xx_init_uartdevs("exynos4210-uart", exynos4_uart_resources, cfg, no);
856 }
857
/* Remapped base of the external-interrupt (EINT) controller registers. */
static void __iomem *exynos_eint_base;

/* Serializes read-modify-write access to the shared EINT mask/con registers. */
static DEFINE_SPINLOCK(eint_lock);

/* Per-line hwirq numbers (0..15) passed as handler data to the EINT0-15
 * chained handler, which uses them to look up the mapped linux irq. */
static unsigned int eint0_15_data[16];

/* Total number of EINT lines handled by the irq domain below. */
#define EXYNOS_EINT_NR 32
static struct irq_domain *irq_domain;
866
867 static inline int exynos4_irq_to_gpio(unsigned int irq)
868 {
869         if (irq < IRQ_EINT(0))
870                 return -EINVAL;
871
872         irq -= IRQ_EINT(0);
873         if (irq < 8)
874                 return EXYNOS4_GPX0(irq);
875
876         irq -= 8;
877         if (irq < 8)
878                 return EXYNOS4_GPX1(irq);
879
880         irq -= 8;
881         if (irq < 8)
882                 return EXYNOS4_GPX2(irq);
883
884         irq -= 8;
885         if (irq < 8)
886                 return EXYNOS4_GPX3(irq);
887
888         return -EINVAL;
889 }
890
891 static inline int exynos5_irq_to_gpio(unsigned int irq)
892 {
893         if (irq < IRQ_EINT(0))
894                 return -EINVAL;
895
896         irq -= IRQ_EINT(0);
897         if (irq < 8)
898                 return EXYNOS5_GPX0(irq);
899
900         irq -= 8;
901         if (irq < 8)
902                 return EXYNOS5_GPX1(irq);
903
904         irq -= 8;
905         if (irq < 8)
906                 return EXYNOS5_GPX2(irq);
907
908         irq -= 8;
909         if (irq < 8)
910                 return EXYNOS5_GPX3(irq);
911
912         return -EINVAL;
913 }
914
/*
 * Parent interrupt numbers for the 16 dedicated EINT0-15 lines on
 * EXYNOS4, indexed by EINT number; used as the chained-handler source
 * in the non-DT path of exynos_init_irq_eint().
 */
static unsigned int exynos4_eint0_15_src_int[16] = {
	EXYNOS4_IRQ_EINT0,
	EXYNOS4_IRQ_EINT1,
	EXYNOS4_IRQ_EINT2,
	EXYNOS4_IRQ_EINT3,
	EXYNOS4_IRQ_EINT4,
	EXYNOS4_IRQ_EINT5,
	EXYNOS4_IRQ_EINT6,
	EXYNOS4_IRQ_EINT7,
	EXYNOS4_IRQ_EINT8,
	EXYNOS4_IRQ_EINT9,
	EXYNOS4_IRQ_EINT10,
	EXYNOS4_IRQ_EINT11,
	EXYNOS4_IRQ_EINT12,
	EXYNOS4_IRQ_EINT13,
	EXYNOS4_IRQ_EINT14,
	EXYNOS4_IRQ_EINT15,
};
933
/*
 * Parent interrupt numbers for the 16 dedicated EINT0-15 lines on
 * EXYNOS5, indexed by EINT number; counterpart of the EXYNOS4 table
 * above, selected at runtime via soc_is_exynos5250().
 */
static unsigned int exynos5_eint0_15_src_int[16] = {
	EXYNOS5_IRQ_EINT0,
	EXYNOS5_IRQ_EINT1,
	EXYNOS5_IRQ_EINT2,
	EXYNOS5_IRQ_EINT3,
	EXYNOS5_IRQ_EINT4,
	EXYNOS5_IRQ_EINT5,
	EXYNOS5_IRQ_EINT6,
	EXYNOS5_IRQ_EINT7,
	EXYNOS5_IRQ_EINT8,
	EXYNOS5_IRQ_EINT9,
	EXYNOS5_IRQ_EINT10,
	EXYNOS5_IRQ_EINT11,
	EXYNOS5_IRQ_EINT12,
	EXYNOS5_IRQ_EINT13,
	EXYNOS5_IRQ_EINT14,
	EXYNOS5_IRQ_EINT15,
};
952 static inline void exynos_irq_eint_mask(struct irq_data *data)
953 {
954         u32 mask;
955
956         spin_lock(&eint_lock);
957         mask = __raw_readl(EINT_MASK(exynos_eint_base, data->hwirq));
958         mask |= EINT_OFFSET_BIT(data->hwirq);
959         __raw_writel(mask, EINT_MASK(exynos_eint_base, data->hwirq));
960         spin_unlock(&eint_lock);
961 }
962
963 static void exynos_irq_eint_unmask(struct irq_data *data)
964 {
965         u32 mask;
966
967         spin_lock(&eint_lock);
968         mask = __raw_readl(EINT_MASK(exynos_eint_base, data->hwirq));
969         mask &= ~(EINT_OFFSET_BIT(data->hwirq));
970         __raw_writel(mask, EINT_MASK(exynos_eint_base, data->hwirq));
971         spin_unlock(&eint_lock);
972 }
973
974 static inline void exynos_irq_eint_ack(struct irq_data *data)
975 {
976         __raw_writel(EINT_OFFSET_BIT(data->hwirq),
977                      EINT_PEND(exynos_eint_base, data->hwirq));
978 }
979
/*
 * irq_chip .irq_mask_ack: mask the line first, then clear its latched
 * pending bit.  The order of the two helper calls is deliberate.
 */
static void exynos_irq_eint_maskack(struct irq_data *data)
{
	exynos_irq_eint_mask(data);
	exynos_irq_eint_ack(data);
}
985
986 static int exynos_irq_eint_set_type(struct irq_data *data, unsigned int type)
987 {
988         int offs = data->hwirq;
989         int shift;
990         u32 ctrl, mask;
991         u32 newvalue = 0;
992
993         switch (type) {
994         case IRQ_TYPE_EDGE_RISING:
995                 newvalue = S5P_IRQ_TYPE_EDGE_RISING;
996                 break;
997
998         case IRQ_TYPE_EDGE_FALLING:
999                 newvalue = S5P_IRQ_TYPE_EDGE_FALLING;
1000                 break;
1001
1002         case IRQ_TYPE_EDGE_BOTH:
1003                 newvalue = S5P_IRQ_TYPE_EDGE_BOTH;
1004                 break;
1005
1006         case IRQ_TYPE_LEVEL_LOW:
1007                 newvalue = S5P_IRQ_TYPE_LEVEL_LOW;
1008                 break;
1009
1010         case IRQ_TYPE_LEVEL_HIGH:
1011                 newvalue = S5P_IRQ_TYPE_LEVEL_HIGH;
1012                 break;
1013
1014         default:
1015                 printk(KERN_ERR "No such irq type %d", type);
1016                 return -EINVAL;
1017         }
1018
1019         shift = (offs & 0x7) * 4;
1020         mask = 0x7 << shift;
1021
1022         spin_lock(&eint_lock);
1023         ctrl = __raw_readl(EINT_CON(exynos_eint_base, data->hwirq));
1024         ctrl &= ~mask;
1025         ctrl |= newvalue << shift;
1026         __raw_writel(ctrl, EINT_CON(exynos_eint_base, data->hwirq));
1027         spin_unlock(&eint_lock);
1028
1029         if (soc_is_exynos5250())
1030                 s3c_gpio_cfgpin(exynos5_irq_to_gpio(data->irq), S3C_GPIO_SFN(0xf));
1031         else
1032                 s3c_gpio_cfgpin(exynos4_irq_to_gpio(data->irq), S3C_GPIO_SFN(0xf));
1033
1034         return 0;
1035 }
1036
/* irq_chip for the 32 external (EINT) interrupt lines. */
static struct irq_chip exynos_irq_eint = {
	.name		= "exynos-eint",
	.irq_mask	= exynos_irq_eint_mask,
	.irq_unmask	= exynos_irq_eint_unmask,
	.irq_mask_ack	= exynos_irq_eint_maskack,
	.irq_ack	= exynos_irq_eint_ack,
	.irq_set_type	= exynos_irq_eint_set_type,
#ifdef CONFIG_PM
	/* platform helper handling EINT wakeup configuration */
	.irq_set_wake	= s3c_irqext_wake,
#endif
};
1048
1049 /*
1050  * exynos4_irq_demux_eint
1051  *
1052  * This function demuxes the IRQ from from EINTs 16 to 31.
1053  * It is designed to be inlined into the specific handler
1054  * s5p_irq_demux_eintX_Y.
1055  *
1056  * Each EINT pend/mask registers handle eight of them.
1057  */
1058 static inline void exynos_irq_demux_eint(unsigned int start)
1059 {
1060         unsigned int irq;
1061
1062         u32 status = __raw_readl(EINT_PEND(exynos_eint_base, start));
1063         u32 mask = __raw_readl(EINT_MASK(exynos_eint_base, start));
1064
1065         status &= ~mask;
1066         status &= 0xff;
1067
1068         while (status) {
1069                 irq = fls(status) - 1;
1070                 generic_handle_irq(irq_find_mapping(irq_domain, irq + start));
1071                 status &= ~(1 << irq);
1072         }
1073 }
1074
/* Chained handler for the single parent interrupt muxing EINT16-31. */
static void exynos_irq_demux_eint16_31(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_get_chip(irq);
	unsigned int group;

	chained_irq_enter(chip, desc);
	/* Two eight-line groups share this parent: 16-23 and 24-31. */
	for (group = 16; group <= 24; group += 8)
		exynos_irq_demux_eint(group);
	chained_irq_exit(chip, desc);
}
1083
1084 static void exynos_irq_eint0_15(unsigned int irq, struct irq_desc *desc)
1085 {
1086         u32 *irq_data = irq_get_handler_data(irq);
1087         struct irq_chip *chip = irq_get_chip(irq);
1088         int eint_irq;
1089
1090         chained_irq_enter(chip, desc);
1091         eint_irq = irq_find_mapping(irq_domain, *irq_data);
1092         generic_handle_irq(eint_irq);
1093         chained_irq_exit(chip, desc);
1094 }
1095
/*
 * irq_domain .map callback, run for each hwirq when the domain is
 * created: attach the EINT irq_chip with a level flow handler and
 * mark the descriptor valid (requestable).
 */
static int exynos_eint_irq_domain_map(struct irq_domain *d, unsigned int irq,
					irq_hw_number_t hw)
{
	irq_set_chip_and_handler(irq, &exynos_irq_eint, handle_level_irq);
	set_irq_flags(irq, IRQF_VALID);
	return 0;
}
1103
1104 #ifdef CONFIG_OF
1105 static int exynos_eint_irq_domain_xlate(struct irq_domain *d,
1106                 struct device_node *controller, const u32 *intspec,
1107                 unsigned int intsize, unsigned long *out_hwirq,
1108                 unsigned int *out_type)
1109 {
1110         if (d->of_node != controller)
1111                 return -EINVAL;
1112         if (intsize < 2)
1113                 return -EINVAL;
1114         *out_hwirq = intspec[0];
1115
1116         switch (intspec[1]) {
1117         case S5P_IRQ_TYPE_LEVEL_LOW:
1118                 *out_type = IRQ_TYPE_LEVEL_LOW;
1119                 break;
1120         case S5P_IRQ_TYPE_LEVEL_HIGH:
1121                 *out_type = IRQ_TYPE_LEVEL_HIGH;
1122                 break;
1123         case S5P_IRQ_TYPE_EDGE_FALLING:
1124                 *out_type = IRQ_TYPE_EDGE_FALLING;
1125                 break;
1126         case S5P_IRQ_TYPE_EDGE_RISING:
1127                 *out_type = IRQ_TYPE_EDGE_RISING;
1128                 break;
1129         case S5P_IRQ_TYPE_EDGE_BOTH:
1130                 *out_type = IRQ_TYPE_EDGE_BOTH;
1131                 break;
1132         };
1133
1134         return 0;
1135 }
1136 #else
1137 static int exynos_eint_irq_domain_xlate(struct irq_domain *d,
1138                 struct device_node *controller, const u32 *intspec,
1139                 unsigned int intsize, unsigned long *out_hwirq,
1140                 unsigned int *out_type)
1141 {
1142         return -EINVAL;
1143 }
1144 #endif
1145
/* EINT domain callbacks: DT specifier translation and per-irq setup. */
static struct irq_domain_ops exynos_eint_irq_domain_ops = {
	.xlate = exynos_eint_irq_domain_xlate,
	.map = exynos_eint_irq_domain_map,
};
1150
/*
 * Set up the 32 external interrupt (EINT) lines.
 *
 * Maps the EINT register block (via DT when @eint_np is given, otherwise
 * from the hard-coded SoC physical address), creates a legacy irq domain
 * covering EXYNOS_EINT_NR lines, and installs chained handlers: one
 * shared parent for the muxed EINT16-31 group and one dedicated parent
 * per EINT0-15 line.
 *
 * If a per-line parent interrupt cannot be resolved yet, the function
 * returns -EAGAIN with @retry set; a subsequent call then jumps straight
 * to the per-line loop (retry_init), skipping the already-completed
 * ioremap/domain/EINT16-31 setup.  @parent is currently unused.
 */
static int __init exynos_init_irq_eint(struct device_node *eint_np,
					struct device_node *parent)
{
	int irq, *src_int, irq_base, irq_eint;
	unsigned int paddr;
	static unsigned int retry = 0;	/* nonzero after a failed per-line map */
	static struct device_node *np;	/* EINT parent node, kept across retries */

	if (retry)
		goto retry_init;

	if (!eint_np) {
		/* Non-DT boot: map the GPIO controller block directly. */
		paddr = soc_is_exynos5250() ? EXYNOS5_PA_GPIO1 :
						EXYNOS4_PA_GPIO2;
		exynos_eint_base = ioremap(paddr, SZ_4K);
	} else {
		np = of_get_parent(eint_np);
		exynos_eint_base = of_iomap(np, 0);
	}
	if (!exynos_eint_base) {
		pr_err("unable to ioremap for EINT base address\n");
		return -ENXIO;
	}

	/* Prefer the canonical IRQ_EINT(0) descriptor range; fall back to
	 * using that number as the base even if allocation failed. */
	irq_base = irq_alloc_descs(IRQ_EINT(0), 1, EXYNOS_EINT_NR, 0);
	if (IS_ERR_VALUE(irq_base)) {
		irq_base = IRQ_EINT(0);
		pr_warning("%s: irq desc alloc failed. Continuing with %d as "
				"linux irq base\n", __func__, irq_base);
	}

	irq_domain = irq_domain_add_legacy(np, EXYNOS_EINT_NR, irq_base, 0,
					 &exynos_eint_irq_domain_ops, NULL);
	if (WARN_ON(!irq_domain)) {
		pr_warning("%s: irq domain init failed\n", __func__);
		return 0;
	}

	/* EINT16-31 arrive muxed on a single parent interrupt
	 * (DT interrupt index 16 — presumably after the 16 dedicated
	 * per-line entries; verify against the binding). */
	irq_eint = eint_np ? irq_of_parse_and_map(np, 16) : EXYNOS_IRQ_EINT16_31;
	irq_set_chained_handler(irq_eint, exynos_irq_demux_eint16_31);

retry_init:
	/* EINT0-15: give each dedicated parent its hwirq number as
	 * handler data and attach the per-line chained handler. */
	for (irq = 0; irq <= 15; irq++) {
		eint0_15_data[irq] = irq;
		src_int = soc_is_exynos5250() ? exynos5_eint0_15_src_int :
						exynos4_eint0_15_src_int;
		irq_eint = eint_np ? irq_of_parse_and_map(np, irq) : src_int[irq];
		if (!irq_eint) {
			/* Parent not resolvable yet: drop the node ref and
			 * ask the caller to retry later. */
			of_node_put(np);
			retry = 1;
			return -EAGAIN;
		}
		irq_set_handler_data(irq_eint, &eint0_15_data[irq]);
		irq_set_chained_handler(irq_eint, exynos_irq_eint0_15);
	}

	return 0;
}