/*
 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Common Codes for EXYNOS
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
12 #include <linux/kernel.h>
13 #include <linux/interrupt.h>
14 #include <linux/irq.h>
16 #include <linux/device.h>
17 #include <linux/gpio.h>
18 #include <linux/sched.h>
19 #include <linux/serial_core.h>
21 #include <linux/of_irq.h>
22 #include <linux/export.h>
23 #include <linux/irqdomain.h>
24 #include <linux/of_address.h>
25 #include <linux/cpu_pm.h>
27 #include <asm/proc-fns.h>
28 #include <asm/exception.h>
29 #include <asm/hardware/cache-l2x0.h>
30 #include <asm/hardware/gic.h>
31 #include <asm/mach/map.h>
32 #include <asm/mach/irq.h>
33 #include <asm/cacheflush.h>
35 #include <mach/regs-irq.h>
36 #include <mach/regs-pmu.h>
37 #include <mach/regs-gpio.h>
41 #include <plat/clock.h>
42 #include <plat/devs.h>
44 #include <plat/sdhci.h>
45 #include <plat/gpio-cfg.h>
46 #include <plat/adc-core.h>
47 #include <plat/fb-core.h>
48 #include <plat/fimc-core.h>
49 #include <plat/iic-core.h>
50 #include <plat/tv-core.h>
51 #include <plat/regs-serial.h>
/* PL310 L2 cache auxiliary-control value and mask used by exynos4_l2x0_cache_init() */
54 #define L2_AUX_VAL 0x7C470001
55 #define L2_AUX_MASK 0xC200ffff
/* SoC name strings reported via the cpu_table entries below */
57 static const char name_exynos4210[] = "EXYNOS4210";
58 static const char name_exynos4212[] = "EXYNOS4212";
59 static const char name_exynos4412[] = "EXYNOS4412";
60 static const char name_exynos5250[] = "EXYNOS5250";
/* Forward declarations for the cpu_table callbacks defined later in this file */
62 static void exynos4_map_io(void);
63 static void exynos5_map_io(void);
64 static void exynos4_init_clocks(int xtal);
65 static void exynos5_init_clocks(int xtal);
66 static void exynos_init_uarts(struct s3c2410_uartcfg *cfg, int no);
67 static int exynos_init(void);
68 static int exynos_init_irq_eint(struct device_node *np,
69 struct device_node *parent);
71 static struct cpu_table cpu_ids[] __initdata = {
73 .idcode = EXYNOS4210_CPU_ID,
74 .idmask = EXYNOS4_CPU_MASK,
75 .map_io = exynos4_map_io,
76 .init_clocks = exynos4_init_clocks,
77 .init_uarts = exynos_init_uarts,
79 .name = name_exynos4210,
81 .idcode = EXYNOS4212_CPU_ID,
82 .idmask = EXYNOS4_CPU_MASK,
83 .map_io = exynos4_map_io,
84 .init_clocks = exynos4_init_clocks,
85 .init_uarts = exynos_init_uarts,
87 .name = name_exynos4212,
89 .idcode = EXYNOS4412_CPU_ID,
90 .idmask = EXYNOS4_CPU_MASK,
91 .map_io = exynos4_map_io,
92 .init_clocks = exynos4_init_clocks,
93 .init_uarts = exynos_init_uarts,
95 .name = name_exynos4412,
97 .idcode = EXYNOS5250_SOC_ID,
98 .idmask = EXYNOS5_SOC_MASK,
99 .map_io = exynos5_map_io,
100 .init_clocks = exynos5_init_clocks,
101 .init_uarts = exynos_init_uarts,
103 .name = name_exynos5250,
107 /* Initial IO mappings */
109 static struct map_desc exynos_iodesc[] __initdata = {
111 .virtual = (unsigned long)S5P_VA_CHIPID,
112 .pfn = __phys_to_pfn(EXYNOS_PA_CHIPID),
/*
 * EXYNOS4 static IO mappings (SYSCON, timers, PMU, GIC, UART, CMU, ...).
 * NOTE(review): the per-entry braces and .length/.type initializers were
 * elided in this view — confirm sizes against the full source before use.
 */
118 static struct map_desc exynos4_iodesc[] __initdata = {
120 .virtual = (unsigned long)S3C_VA_SYS,
121 .pfn = __phys_to_pfn(EXYNOS4_PA_SYSCON),
125 .virtual = (unsigned long)S3C_VA_TIMER,
126 .pfn = __phys_to_pfn(EXYNOS4_PA_TIMER),
130 .virtual = (unsigned long)S3C_VA_WATCHDOG,
131 .pfn = __phys_to_pfn(EXYNOS4_PA_WATCHDOG),
135 .virtual = (unsigned long)S5P_VA_SROMC,
136 .pfn = __phys_to_pfn(EXYNOS4_PA_SROMC),
140 .virtual = (unsigned long)S5P_VA_SYSTIMER,
141 .pfn = __phys_to_pfn(EXYNOS4_PA_SYSTIMER),
145 .virtual = (unsigned long)S5P_VA_PMU,
146 .pfn = __phys_to_pfn(EXYNOS4_PA_PMU),
150 .virtual = (unsigned long)S5P_VA_COMBINER_BASE,
151 .pfn = __phys_to_pfn(EXYNOS4_PA_COMBINER),
155 .virtual = (unsigned long)S5P_VA_GIC_CPU,
156 .pfn = __phys_to_pfn(EXYNOS4_PA_GIC_CPU),
160 .virtual = (unsigned long)S5P_VA_GIC_DIST,
161 .pfn = __phys_to_pfn(EXYNOS4_PA_GIC_DIST),
165 .virtual = (unsigned long)S3C_VA_UART,
166 .pfn = __phys_to_pfn(EXYNOS4_PA_UART),
170 .virtual = (unsigned long)S5P_VA_CMU,
171 .pfn = __phys_to_pfn(EXYNOS4_PA_CMU),
175 .virtual = (unsigned long)S5P_VA_COREPERI_BASE,
176 .pfn = __phys_to_pfn(EXYNOS4_PA_COREPERI),
180 .virtual = (unsigned long)S5P_VA_L2CC,
181 .pfn = __phys_to_pfn(EXYNOS4_PA_L2CC),
185 .virtual = (unsigned long)S5P_VA_DMC0,
186 .pfn = __phys_to_pfn(EXYNOS4_PA_DMC0),
190 .virtual = (unsigned long)S5P_VA_DMC1,
191 .pfn = __phys_to_pfn(EXYNOS4_PA_DMC1),
195 .virtual = (unsigned long)S3C_VA_USB_HSPHY,
196 .pfn = __phys_to_pfn(EXYNOS4_PA_HSPHY),
200 .virtual = (unsigned long)S5P_VA_AUDSS,
201 .pfn = __phys_to_pfn(EXYNOS_PA_AUDSS),
/*
 * SYSRAM lives at a different physical address on EXYNOS4210 rev0
 * (iodesc0) than on later revisions (iodesc1); exynos4_map_io() picks one.
 * NOTE(review): .length/.type entries elided in this view.
 */
207 static struct map_desc exynos4_iodesc0[] __initdata = {
209 .virtual = (unsigned long)S5P_VA_SYSRAM,
210 .pfn = __phys_to_pfn(EXYNOS4_PA_SYSRAM0),
216 static struct map_desc exynos4_iodesc1[] __initdata = {
218 .virtual = (unsigned long)S5P_VA_SYSRAM,
219 .pfn = __phys_to_pfn(EXYNOS4_PA_SYSRAM1),
/*
 * EXYNOS5250 static IO mappings. Note the vendor-specific 144KB CMU window
 * and the extra AUDSS / DRD-PHY / DREXII regions not present upstream.
 * NOTE(review): most .length/.type initializers elided in this view.
 */
225 static struct map_desc exynos5_iodesc[] __initdata = {
227 .virtual = (unsigned long)S3C_VA_SYS,
228 .pfn = __phys_to_pfn(EXYNOS5_PA_SYSCON),
232 .virtual = (unsigned long)S3C_VA_TIMER,
233 .pfn = __phys_to_pfn(EXYNOS5_PA_TIMER),
237 .virtual = (unsigned long)S3C_VA_WATCHDOG,
238 .pfn = __phys_to_pfn(EXYNOS5_PA_WATCHDOG),
242 .virtual = (unsigned long)S5P_VA_SROMC,
243 .pfn = __phys_to_pfn(EXYNOS5_PA_SROMC),
247 .virtual = (unsigned long)S5P_VA_SYSTIMER,
248 .pfn = __phys_to_pfn(EXYNOS5_PA_SYSTIMER),
252 .virtual = (unsigned long)S5P_VA_SYSRAM,
253 .pfn = __phys_to_pfn(EXYNOS5_PA_SYSRAM),
257 .virtual = (unsigned long)S5P_VA_CMU,
258 .pfn = __phys_to_pfn(EXYNOS5_PA_CMU),
259 .length = 144 * SZ_1K,
262 .virtual = (unsigned long)S5P_VA_PMU,
263 .pfn = __phys_to_pfn(EXYNOS5_PA_PMU),
267 .virtual = (unsigned long)S5P_VA_COMBINER_BASE,
268 .pfn = __phys_to_pfn(EXYNOS5_PA_COMBINER),
272 .virtual = (unsigned long)S3C_VA_UART,
273 .pfn = __phys_to_pfn(EXYNOS5_PA_UART),
277 .virtual = (unsigned long)S5P_VA_GIC_CPU,
278 .pfn = __phys_to_pfn(EXYNOS5_PA_GIC_CPU),
282 .virtual = (unsigned long)S5P_VA_GIC_DIST,
283 .pfn = __phys_to_pfn(EXYNOS5_PA_GIC_DIST),
287 .virtual = (unsigned long)S3C_VA_USB_HSPHY,
288 .pfn = __phys_to_pfn(EXYNOS5_PA_USB_PHY),
292 .virtual = (unsigned long)S5P_VA_DRD_PHY,
293 .pfn = __phys_to_pfn(EXYNOS5_PA_DRD_PHY),
297 .virtual = (unsigned long)S5P_VA_AUDSS,
298 .pfn = __phys_to_pfn(EXYNOS_PA_AUDSS),
302 .virtual = (unsigned long)S5P_VA_DREXII,
303 .pfn = __phys_to_pfn(EXYNOS5_PA_DREXII),
309 void exynos4_restart(char mode, const char *cmd)
311 __raw_writel(0x1, S5P_SWRESET);
314 void exynos5_restart(char mode, const char *cmd)
316 __raw_writel(0x1, EXYNOS_SWRESET);
319 static void wdt_reset_init(void)
323 value = __raw_readl(EXYNOS5_AUTOMATIC_WDT_RESET_DISABLE);
324 value &= ~EXYNOS5_SYS_WDTRESET;
325 __raw_writel(value, EXYNOS5_AUTOMATIC_WDT_RESET_DISABLE);
327 value = __raw_readl(EXYNOS5_MASK_WDT_RESET_REQUEST);
328 value &= ~EXYNOS5_SYS_WDTRESET;
329 __raw_writel(value, EXYNOS5_MASK_WDT_RESET_REQUEST);
335 * register the standard cpu IO areas
338 void __init exynos_init_io(struct map_desc *mach_desc, int size)
340 /* initialize the io descriptors we need for initialization */
341 iotable_init(exynos_iodesc, ARRAY_SIZE(exynos_iodesc));
343 iotable_init(mach_desc, size);
345 /* detect cpu id and rev. */
346 s5p_init_cpu(S5P_VA_CHIPID);
348 s3c_init_cpu(samsung_cpu_id, cpu_ids, ARRAY_SIZE(cpu_ids));
350 /* TO support Watch dog reset */
354 static void __init exynos4_map_io(void)
356 iotable_init(exynos4_iodesc, ARRAY_SIZE(exynos4_iodesc));
358 if (soc_is_exynos4210() && samsung_rev() == EXYNOS4210_REV_0)
359 iotable_init(exynos4_iodesc0, ARRAY_SIZE(exynos4_iodesc0));
361 iotable_init(exynos4_iodesc1, ARRAY_SIZE(exynos4_iodesc1));
363 /* initialize device information early */
364 exynos4_default_sdhci0();
365 exynos4_default_sdhci1();
366 exynos4_default_sdhci2();
367 exynos4_default_sdhci3();
369 s3c_adc_setname("samsung-adc-v3");
371 s3c_fimc_setname(0, "exynos4-fimc");
372 s3c_fimc_setname(1, "exynos4-fimc");
373 s3c_fimc_setname(2, "exynos4-fimc");
374 s3c_fimc_setname(3, "exynos4-fimc");
376 s3c_sdhci_setname(0, "exynos4-sdhci");
377 s3c_sdhci_setname(1, "exynos4-sdhci");
378 s3c_sdhci_setname(2, "exynos4-sdhci");
379 s3c_sdhci_setname(3, "exynos4-sdhci");
381 /* The I2C bus controllers are directly compatible with s3c2440 */
382 s3c_i2c0_setname("s3c2440-i2c");
383 s3c_i2c1_setname("s3c2440-i2c");
384 s3c_i2c2_setname("s3c2440-i2c");
386 s5p_fb_setname(0, "exynos4-fb");
387 s5p_hdmi_setname("exynos4-hdmi");
390 static void __init exynos5_map_io(void)
392 iotable_init(exynos5_iodesc, ARRAY_SIZE(exynos5_iodesc));
394 s3c_device_i2c0.resource[0].start = EXYNOS5_PA_IIC(0);
395 s3c_device_i2c0.resource[0].end = EXYNOS5_PA_IIC(0) + SZ_4K - 1;
396 s3c_device_i2c0.resource[1].start = EXYNOS5_IRQ_IIC;
397 s3c_device_i2c0.resource[1].end = EXYNOS5_IRQ_IIC;
399 s3c_sdhci_setname(0, "exynos4-sdhci");
400 s3c_sdhci_setname(1, "exynos4-sdhci");
401 s3c_sdhci_setname(2, "exynos4-sdhci");
402 s3c_sdhci_setname(3, "exynos4-sdhci");
404 /* The I2C bus controllers are directly compatible with s3c2440 */
405 s3c_i2c0_setname("s3c2440-i2c");
406 s3c_i2c1_setname("s3c2440-i2c");
407 s3c_i2c2_setname("s3c2440-i2c");
410 static void __init exynos4_init_clocks(int xtal)
412 printk(KERN_DEBUG "%s: initializing clocks\n", __func__);
414 s3c24xx_register_baseclocks(xtal);
415 s5p_register_clocks(xtal);
417 if (soc_is_exynos4210())
418 exynos4210_register_clocks();
419 else if (soc_is_exynos4212() || soc_is_exynos4412())
420 exynos4212_register_clocks();
422 exynos4_register_clocks();
423 exynos4_setup_clocks();
424 exynos_register_audss_clocks();
427 static void __init exynos5_init_clocks(int xtal)
429 printk(KERN_DEBUG "%s: initializing clocks\n", __func__);
431 s3c24xx_register_baseclocks(xtal);
432 s5p_register_clocks(xtal);
434 exynos5_register_clocks();
435 exynos5_setup_clocks();
436 exynos_register_audss_clocks();
/* Register offsets within one combiner group block (4 combiners/block) */
439 #define COMBINER_ENABLE_SET 0x0
440 #define COMBINER_ENABLE_CLEAR 0x4
441 #define COMBINER_INT_STATUS 0xC
/* Serializes reads of COMBINER_INT_STATUS in the cascade handler */
443 static DEFINE_SPINLOCK(irq_controller_lock);
445 struct combiner_chip_data {
446 unsigned int irq_offset;
447 unsigned int irq_mask;
449 unsigned int gic_irq;
/* Combiner driver globals: domain, per-combiner state, runtime combiner count */
455 static struct irq_domain *combiner_irq_domain;
456 static struct combiner_chip_data *combiner_data;
457 static unsigned int rt_max_combiner_nr;
459 static inline void __iomem *combiner_base(struct irq_data *data)
461 struct combiner_chip_data *irq_combiner_data =
462 irq_data_get_irq_chip_data(data);
464 return irq_combiner_data->base;
467 static void combiner_mask_irq(struct irq_data *data)
469 u32 mask = 1 << (data->hwirq % 32);
471 __raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_CLEAR);
474 static void combiner_unmask_irq(struct irq_data *data)
476 u32 mask = 1 << (data->hwirq % 32);
478 __raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_SET);
481 static int combiner_set_affinity(struct irq_data *data, const struct
482 cpumask *dest, bool force)
484 struct combiner_chip_data *chip_data = data->chip_data;
489 return irq_set_affinity(chip_data->gic_irq, dest);
492 static void combiner_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
494 struct combiner_chip_data *chip_data = irq_get_handler_data(irq);
495 struct irq_chip *chip = irq_get_chip(irq);
496 unsigned int cascade_irq, combiner_irq;
497 unsigned long status;
499 chained_irq_enter(chip, desc);
501 spin_lock(&irq_controller_lock);
502 status = __raw_readl(chip_data->base + COMBINER_INT_STATUS);
503 spin_unlock(&irq_controller_lock);
504 status &= chip_data->irq_mask;
509 combiner_irq = __ffs(status);
511 cascade_irq = combiner_irq + (chip_data->irq_offset & ~31);
512 if (unlikely(cascade_irq >= NR_IRQS))
513 do_bad_IRQ(cascade_irq, desc);
515 generic_handle_irq(cascade_irq);
518 chained_irq_exit(chip, desc);
521 static struct irq_chip combiner_chip = {
523 .irq_mask = combiner_mask_irq,
524 .irq_unmask = combiner_unmask_irq,
525 .irq_set_affinity = combiner_set_affinity,
528 static void __init combiner_cascade_irq(unsigned int combiner_nr, unsigned int irq)
530 if (irq_set_handler_data(irq, &combiner_data[combiner_nr]) != 0)
532 irq_set_chained_handler(irq, combiner_handle_cascade_irq);
533 combiner_data[combiner_nr].gic_irq = irq;
536 static void __init combiner_init_one(unsigned int combiner_nr,
539 combiner_data[combiner_nr].base = base;
540 combiner_data[combiner_nr].irq_offset = irq_find_mapping(
541 combiner_irq_domain, combiner_nr * MAX_IRQ_IN_COMBINER);
542 combiner_data[combiner_nr].irq_mask = 0xff << ((combiner_nr % 4) << 3);
544 /* Disable all interrupts */
546 __raw_writel(combiner_data[combiner_nr].irq_mask,
547 base + COMBINER_ENABLE_CLEAR);
551 static int combiner_irq_domain_xlate(struct irq_domain *d,
552 struct device_node *controller, const u32 *intspec,
553 unsigned int intsize, unsigned long *out_hwirq,
554 unsigned int *out_type)
556 if (d->of_node != controller)
560 *out_hwirq = intspec[0] * MAX_IRQ_IN_COMBINER + intspec[1];
565 static int combiner_irq_domain_xlate(struct irq_domain *d,
566 struct device_node *controller, const u32 *intspec,
567 unsigned int intsize, unsigned long *out_hwirq,
568 unsigned int *out_type)
574 static int combiner_irq_domain_map(struct irq_domain *d, unsigned int irq,
577 irq_set_chip_and_handler(irq, &combiner_chip, handle_level_irq);
578 irq_set_chip_data(irq, &combiner_data[hw >> 3]);
579 set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
584 static void combiner_save(void)
588 for (i = 0; i < rt_max_combiner_nr; i++) {
589 if (combiner_data[i].irq_mask &
590 __raw_readl(combiner_data[i].base + COMBINER_ENABLE_SET)) {
591 combiner_data[i].saved_on = true;
593 combiner_data[i].saved_on = false;
598 static void combiner_restore(void)
602 for (i = 0; i < rt_max_combiner_nr; i++) {
603 if (!combiner_data[i].saved_on)
606 __raw_writel(combiner_data[i].irq_mask,
607 combiner_data[i].base + COMBINER_ENABLE_SET);
/*
 * CPU PM notifier callback. The body is elided in this view — presumably
 * it dispatches CPU_PM_ENTER to combiner_save() and CPU_PM_EXIT to
 * combiner_restore() and returns NOTIFY_OK; confirm against full source.
 */
612 static int combiner_notifier(struct notifier_block *self, unsigned long cmd,
627 static struct notifier_block combiner_notifier_block = {
628 .notifier_call = combiner_notifier,
632 static struct irq_domain_ops combiner_irq_domain_ops = {
633 .xlate = combiner_irq_domain_xlate,
634 .map = combiner_irq_domain_map,
637 void __init combiner_init(void __iomem *combiner_base, struct device_node *np)
639 int i, irq, irq_base;
640 unsigned int nr_irq, soc_max_nr;
642 soc_max_nr = soc_is_exynos5250() ? EXYNOS5_MAX_COMBINER_NR :
643 EXYNOS4_MAX_COMBINER_NR;
646 if (of_property_read_u32(np, "samsung,combiner-nr",
647 &rt_max_combiner_nr)) {
648 rt_max_combiner_nr = soc_max_nr;
649 pr_warning("%s: number of combiners not specified, "
650 "setting default as %d.\n",
651 __func__, rt_max_combiner_nr);
654 rt_max_combiner_nr = soc_max_nr;
656 if (WARN_ON(rt_max_combiner_nr > soc_max_nr)) {
657 pr_warning("%s: more combiners specified (%d) than "
658 "architecture (%d) supports.",
659 __func__, rt_max_combiner_nr, soc_max_nr);
663 combiner_data = kmalloc(sizeof(struct combiner_chip_data) *
664 rt_max_combiner_nr, GFP_KERNEL);
665 if (WARN_ON(!combiner_data)) {
666 pr_warning("%s: combiner_data memory allocation failed for %d "
667 "entries", __func__, rt_max_combiner_nr);
671 nr_irq = rt_max_combiner_nr * MAX_IRQ_IN_COMBINER;
673 irq_base = irq_alloc_descs(COMBINER_IRQ(0, 0), 1, nr_irq, 0);
674 if (IS_ERR_VALUE(irq_base)) {
675 irq_base = COMBINER_IRQ(0, 0);
676 pr_warning("%s: irq desc alloc failed. Continuing with %d as "
677 "linux irq base\n", __func__, irq_base);
680 combiner_irq_domain = irq_domain_add_legacy(np, nr_irq, irq_base, 0,
681 &combiner_irq_domain_ops, &combiner_data);
682 if (WARN_ON(!combiner_irq_domain)) {
683 pr_warning("%s: irq domain init failed\n", __func__);
684 kfree(combiner_data);
688 for (i = 0; i < rt_max_combiner_nr; i++) {
689 combiner_init_one(i, combiner_base + (i >> 2) * 0x10);
690 irq = np ? irq_of_parse_and_map(np, i) : IRQ_SPI(i);
691 combiner_cascade_irq(i, irq);
695 /* Setup suspend/resume combiner saving */
696 cpu_pm_register_notifier(&combiner_notifier_block);
701 int __init combiner_of_init(struct device_node *np, struct device_node *parent)
703 void __iomem *combiner_base;
705 combiner_base = of_iomap(np, 0);
706 if (!combiner_base) {
707 pr_err("%s: failed to map combiner registers\n", __func__);
711 combiner_init(combiner_base, np);
715 static const struct of_device_id exynos4_dt_irq_match[] = {
716 { .compatible = "arm,cortex-a9-gic", .data = gic_of_init, },
717 { .compatible = "samsung,exynos4210-combiner",
718 .data = combiner_of_init, },
719 { .compatible = "samsung,exynos5210-wakeup-eint-map",
720 .data = exynos_init_irq_eint, },
725 void __init exynos4_init_irq(void)
727 unsigned int gic_bank_offset;
729 gic_bank_offset = soc_is_exynos4412() ? 0x4000 : 0x8000;
731 if (!of_have_populated_dt())
732 gic_init_bases(0, IRQ_PPI(0), S5P_VA_GIC_DIST, S5P_VA_GIC_CPU, gic_bank_offset, NULL);
735 of_irq_init(exynos4_dt_irq_match);
738 if (!of_have_populated_dt()) {
739 combiner_init(S5P_VA_COMBINER_BASE, NULL);
740 exynos_init_irq_eint(NULL, NULL);
744 * The parameters of s5p_init_irq() are for VIC init.
745 * Theses parameters should be NULL and 0 because EXYNOS4
746 * uses GIC instead of VIC.
748 s5p_init_irq(NULL, 0);
751 void __init exynos5_init_irq(void)
754 of_irq_init(exynos4_dt_irq_match);
757 * The parameters of s5p_init_irq() are for VIC init.
758 * Theses parameters should be NULL and 0 because EXYNOS4
759 * uses GIC instead of VIC.
761 s5p_init_irq(NULL, 0);
763 gic_arch_extn.irq_set_wake = s3c_irq_wake;
766 struct bus_type exynos_subsys = {
767 .name = "exynos-core",
768 .dev_name = "exynos-core",
771 static struct device exynos4_dev = {
772 .bus = &exynos_subsys,
775 static int __init exynos_core_init(void)
777 return subsys_system_register(&exynos_subsys, NULL);
779 core_initcall(exynos_core_init);
781 #ifdef CONFIG_CACHE_L2X0
782 static int __init exynos4_l2x0_cache_init(void)
786 if (soc_is_exynos5250())
789 ret = l2x0_of_init(L2_AUX_VAL, L2_AUX_MASK);
791 l2x0_regs_phys = virt_to_phys(&l2x0_saved_regs);
792 clean_dcache_area(&l2x0_regs_phys, sizeof(unsigned long));
796 if (!(__raw_readl(S5P_VA_L2CC + L2X0_CTRL) & 0x1)) {
797 l2x0_saved_regs.phy_base = EXYNOS4_PA_L2CC;
798 /* TAG, Data Latency Control: 2 cycles */
799 l2x0_saved_regs.tag_latency = 0x110;
801 if (soc_is_exynos4212() || soc_is_exynos4412())
802 l2x0_saved_regs.data_latency = 0x120;
804 l2x0_saved_regs.data_latency = 0x110;
806 l2x0_saved_regs.prefetch_ctrl = 0x30000007;
807 l2x0_saved_regs.pwr_ctrl =
808 (L2X0_DYNAMIC_CLK_GATING_EN | L2X0_STNDBY_MODE_EN);
810 l2x0_regs_phys = virt_to_phys(&l2x0_saved_regs);
812 __raw_writel(l2x0_saved_regs.tag_latency,
813 S5P_VA_L2CC + L2X0_TAG_LATENCY_CTRL);
814 __raw_writel(l2x0_saved_regs.data_latency,
815 S5P_VA_L2CC + L2X0_DATA_LATENCY_CTRL);
817 /* L2X0 Prefetch Control */
818 __raw_writel(l2x0_saved_regs.prefetch_ctrl,
819 S5P_VA_L2CC + L2X0_PREFETCH_CTRL);
821 /* L2X0 Power Control */
822 __raw_writel(l2x0_saved_regs.pwr_ctrl,
823 S5P_VA_L2CC + L2X0_POWER_CTRL);
825 clean_dcache_area(&l2x0_regs_phys, sizeof(unsigned long));
826 clean_dcache_area(&l2x0_saved_regs, sizeof(struct l2x0_regs));
829 l2x0_init(S5P_VA_L2CC, L2_AUX_VAL, L2_AUX_MASK);
832 early_initcall(exynos4_l2x0_cache_init);
835 static int __init exynos_init(void)
837 printk(KERN_INFO "EXYNOS: Initializing architecture\n");
839 return device_register(&exynos4_dev);
842 /* uart registration process */
844 static void __init exynos_init_uarts(struct s3c2410_uartcfg *cfg, int no)
846 struct s3c2410_uartcfg *tcfg = cfg;
849 for (ucnt = 0; ucnt < no; ucnt++, tcfg++)
850 tcfg->has_fracval = 1;
852 if (soc_is_exynos5250())
853 s3c24xx_init_uartdevs("exynos4210-uart", exynos5_uart_resources, cfg, no);
855 s3c24xx_init_uartdevs("exynos4210-uart", exynos4_uart_resources, cfg, no);
/* External-interrupt (EINT) driver state: GPIO register base, lock,
 * per-EINT0..15 handler data, and the EINT irq domain. */
858 static void __iomem *exynos_eint_base;
860 static DEFINE_SPINLOCK(eint_lock);
862 static unsigned int eint0_15_data[16];
864 #define EXYNOS_EINT_NR 32
865 static struct irq_domain *irq_domain;
867 static inline int exynos4_irq_to_gpio(unsigned int irq)
869 if (irq < IRQ_EINT(0))
874 return EXYNOS4_GPX0(irq);
878 return EXYNOS4_GPX1(irq);
882 return EXYNOS4_GPX2(irq);
886 return EXYNOS4_GPX3(irq);
891 static inline int exynos5_irq_to_gpio(unsigned int irq)
893 if (irq < IRQ_EINT(0))
898 return EXYNOS5_GPX0(irq);
902 return EXYNOS5_GPX1(irq);
906 return EXYNOS5_GPX2(irq);
910 return EXYNOS5_GPX3(irq);
/* Parent interrupt numbers for EINT0..15 on each SoC.
 * NOTE(review): the 16 initializer entries are elided in this view —
 * presumably the per-SoC EINTx interrupt numbers; confirm against full source. */
915 static unsigned int exynos4_eint0_15_src_int[16] = {
934 static unsigned int exynos5_eint0_15_src_int[16] = {
952 static inline void exynos_irq_eint_mask(struct irq_data *data)
956 spin_lock(&eint_lock);
957 mask = __raw_readl(EINT_MASK(exynos_eint_base, data->hwirq));
958 mask |= EINT_OFFSET_BIT(data->hwirq);
959 __raw_writel(mask, EINT_MASK(exynos_eint_base, data->hwirq));
960 spin_unlock(&eint_lock);
963 static void exynos_irq_eint_unmask(struct irq_data *data)
967 spin_lock(&eint_lock);
968 mask = __raw_readl(EINT_MASK(exynos_eint_base, data->hwirq));
969 mask &= ~(EINT_OFFSET_BIT(data->hwirq));
970 __raw_writel(mask, EINT_MASK(exynos_eint_base, data->hwirq));
971 spin_unlock(&eint_lock);
974 static inline void exynos_irq_eint_ack(struct irq_data *data)
976 __raw_writel(EINT_OFFSET_BIT(data->hwirq),
977 EINT_PEND(exynos_eint_base, data->hwirq));
/* Combined mask + ack, as required for the irq_mask_ack chip callback. */
static void exynos_irq_eint_maskack(struct irq_data *data)
{
	exynos_irq_eint_mask(data);
	exynos_irq_eint_ack(data);
}
986 static int exynos_irq_eint_set_type(struct irq_data *data, unsigned int type)
988 int offs = data->hwirq;
994 case IRQ_TYPE_EDGE_RISING:
995 newvalue = S5P_IRQ_TYPE_EDGE_RISING;
998 case IRQ_TYPE_EDGE_FALLING:
999 newvalue = S5P_IRQ_TYPE_EDGE_FALLING;
1002 case IRQ_TYPE_EDGE_BOTH:
1003 newvalue = S5P_IRQ_TYPE_EDGE_BOTH;
1006 case IRQ_TYPE_LEVEL_LOW:
1007 newvalue = S5P_IRQ_TYPE_LEVEL_LOW;
1010 case IRQ_TYPE_LEVEL_HIGH:
1011 newvalue = S5P_IRQ_TYPE_LEVEL_HIGH;
1015 printk(KERN_ERR "No such irq type %d", type);
1019 shift = (offs & 0x7) * 4;
1020 mask = 0x7 << shift;
1022 spin_lock(&eint_lock);
1023 ctrl = __raw_readl(EINT_CON(exynos_eint_base, data->hwirq));
1025 ctrl |= newvalue << shift;
1026 __raw_writel(ctrl, EINT_CON(exynos_eint_base, data->hwirq));
1027 spin_unlock(&eint_lock);
1029 if (soc_is_exynos5250())
1030 s3c_gpio_cfgpin(exynos5_irq_to_gpio(data->irq), S3C_GPIO_SFN(0xf));
1032 s3c_gpio_cfgpin(exynos4_irq_to_gpio(data->irq), S3C_GPIO_SFN(0xf));
1037 static struct irq_chip exynos_irq_eint = {
1038 .name = "exynos-eint",
1039 .irq_mask = exynos_irq_eint_mask,
1040 .irq_unmask = exynos_irq_eint_unmask,
1041 .irq_mask_ack = exynos_irq_eint_maskack,
1042 .irq_ack = exynos_irq_eint_ack,
1043 .irq_set_type = exynos_irq_eint_set_type,
1045 .irq_set_wake = s3c_irqext_wake,
1050 * exynos4_irq_demux_eint
1052 * This function demuxes the IRQ from from EINTs 16 to 31.
1053 * It is designed to be inlined into the specific handler
1054 * s5p_irq_demux_eintX_Y.
1056 * Each EINT pend/mask registers handle eight of them.
1058 static inline void exynos_irq_demux_eint(unsigned int start)
1062 u32 status = __raw_readl(EINT_PEND(exynos_eint_base, start));
1063 u32 mask = __raw_readl(EINT_MASK(exynos_eint_base, start));
1069 irq = fls(status) - 1;
1070 generic_handle_irq(irq_find_mapping(irq_domain, irq + start));
1071 status &= ~(1 << irq);
/* Chained handler for the shared EINT16..31 parent interrupt. */
static void exynos_irq_demux_eint16_31(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_get_chip(irq);

	chained_irq_enter(chip, desc);
	exynos_irq_demux_eint(16);
	exynos_irq_demux_eint(24);
	chained_irq_exit(chip, desc);
}
1084 static void exynos_irq_eint0_15(unsigned int irq, struct irq_desc *desc)
1086 u32 *irq_data = irq_get_handler_data(irq);
1087 struct irq_chip *chip = irq_get_chip(irq);
1090 chained_irq_enter(chip, desc);
1091 eint_irq = irq_find_mapping(irq_domain, *irq_data);
1092 generic_handle_irq(eint_irq);
1093 chained_irq_exit(chip, desc);
1096 static int exynos_eint_irq_domain_map(struct irq_domain *d, unsigned int irq,
1099 irq_set_chip_and_handler(irq, &exynos_irq_eint, handle_level_irq);
1100 set_irq_flags(irq, IRQF_VALID);
1105 static int exynos_eint_irq_domain_xlate(struct irq_domain *d,
1106 struct device_node *controller, const u32 *intspec,
1107 unsigned int intsize, unsigned long *out_hwirq,
1108 unsigned int *out_type)
1110 if (d->of_node != controller)
1114 *out_hwirq = intspec[0];
1116 switch (intspec[1]) {
1117 case S5P_IRQ_TYPE_LEVEL_LOW:
1118 *out_type = IRQ_TYPE_LEVEL_LOW;
1120 case S5P_IRQ_TYPE_LEVEL_HIGH:
1121 *out_type = IRQ_TYPE_LEVEL_HIGH;
1123 case S5P_IRQ_TYPE_EDGE_FALLING:
1124 *out_type = IRQ_TYPE_EDGE_FALLING;
1126 case S5P_IRQ_TYPE_EDGE_RISING:
1127 *out_type = IRQ_TYPE_EDGE_RISING;
1129 case S5P_IRQ_TYPE_EDGE_BOTH:
1130 *out_type = IRQ_TYPE_EDGE_BOTH;
1137 static int exynos_eint_irq_domain_xlate(struct irq_domain *d,
1138 struct device_node *controller, const u32 *intspec,
1139 unsigned int intsize, unsigned long *out_hwirq,
1140 unsigned int *out_type)
1146 static struct irq_domain_ops exynos_eint_irq_domain_ops = {
1147 .xlate = exynos_eint_irq_domain_xlate,
1148 .map = exynos_eint_irq_domain_map,
1151 static int __init exynos_init_irq_eint(struct device_node *eint_np,
1152 struct device_node *parent)
1154 int irq, *src_int, irq_base, irq_eint;
1156 static unsigned int retry = 0;
1157 static struct device_node *np;
1163 paddr = soc_is_exynos5250() ? EXYNOS5_PA_GPIO1 :
1165 exynos_eint_base = ioremap(paddr, SZ_4K);
1167 np = of_get_parent(eint_np);
1168 exynos_eint_base = of_iomap(np, 0);
1170 if (!exynos_eint_base) {
1171 pr_err("unable to ioremap for EINT base address\n");
1175 irq_base = irq_alloc_descs(IRQ_EINT(0), 1, EXYNOS_EINT_NR, 0);
1176 if (IS_ERR_VALUE(irq_base)) {
1177 irq_base = IRQ_EINT(0);
1178 pr_warning("%s: irq desc alloc failed. Continuing with %d as "
1179 "linux irq base\n", __func__, irq_base);
1182 irq_domain = irq_domain_add_legacy(np, EXYNOS_EINT_NR, irq_base, 0,
1183 &exynos_eint_irq_domain_ops, NULL);
1184 if (WARN_ON(!irq_domain)) {
1185 pr_warning("%s: irq domain init failed\n", __func__);
1189 irq_eint = eint_np ? irq_of_parse_and_map(np, 16) : EXYNOS_IRQ_EINT16_31;
1190 irq_set_chained_handler(irq_eint, exynos_irq_demux_eint16_31);
1193 for (irq = 0; irq <= 15; irq++) {
1194 eint0_15_data[irq] = irq;
1195 src_int = soc_is_exynos5250() ? exynos5_eint0_15_src_int :
1196 exynos4_eint0_15_src_int;
1197 irq_eint = eint_np ? irq_of_parse_and_map(np, irq) : src_int[irq];
1203 irq_set_handler_data(irq_eint, &eint0_15_data[irq]);
1204 irq_set_chained_handler(irq_eint, exynos_irq_eint0_15);